/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#ifndef	lint

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */

#ifndef	lint

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBE_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc 				;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
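
/*
 * As a cross-check, the address arithmetic above corresponds to this C
 * sketch (illustrative only; the function and argument names are
 * hypothetical):
 *
 *	struct tsbe *
 *	get_tsbe_pointer(uintptr_t tsbbase, uint64_t tagacc, int vpshift,
 *	    int szc)
 *	{
 *		uint64_t nentries = TSB_ENTRIES(0) << szc;
 *		uint64_t tsbent = (tagacc >> vpshift) & (nentries - 1);
 *
 *		return ((struct tsbe *)(tsbbase +
 *		    (tsbent << TSB_ENTRY_SHIFT)));
 *	}
 */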

/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
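
/*
 * In C terms the macro above amounts to the following illustrative
 * sketch; it merely selects the small-page or large-page kpm TSB and
 * then indexes it exactly as GET_TSBE_POINTER does (get_tsbe_pointer()
 * is the hypothetical helper sketched earlier):
 *
 *	if (vpshift == MMU_PAGESHIFT) {
 *		tsbp = kpmsm_tsbbase;
 *		szc = kpmsm_tsbsz;
 *	} else {
 *		tsbp = kpm_tsbbase;
 *		szc = kpm_tsbsz;
 *	}
 *	tsbp = get_tsbe_pointer(tsbp, vaddr, vpshift, szc);
 */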

/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2 						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]%asi, tmp1					;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2 						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]%asi, tmp1					;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#endif /* UTSB_PHYS */
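
/*
 * The locking protocol above, as an illustrative C sketch (tag32 stands
 * for the 32-bit word at tsbep and casa() for the hardware
 * compare-and-swap; neither is a real symbol in this file):
 *
 *	uint32_t old = *tag32;
 *	for (;;) {
 *		while (old == TSBTAG_LOCKED)
 *			old = *tag32;			(spin while locked)
 *		if (casa(tag32, old, TSBTAG_LOCKED) == old)
 *			break;				(lock acquired)
 *		old = *tag32;				(lost the race)
 *	}
 *	membar #StoreStore
 */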

/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar #StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar #StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We need not update the TSB and then re-check for a valid	;\
	 * tte: TSB invalidate will spin till the entry is unlocked.	;\
	 * Note we always invalidate the hash table before we unload	;\
	 * the TSB.							;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We need not update the TSB and then re-check for a valid	;\
	 * tte: TSB invalidate will spin till the entry is unlocked.	;\
	 * Note we always invalidate the hash table before we unload	;\
	 * the TSB.							;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */
#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * We need not update the TSB and then re-check for a valid	;\
	 * tte: TSB invalidate will spin till the entry is unlocked.	;\
	 * Note we always invalidate the hash table before we unload	;\
	 * the TSB.  OR the 4M pfn offset into the TTE, set the		;\
	 * exec_perm bit to 0 and the exec_synth bit to 1.		;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1						;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte				;\
	or	tte, TTE_E_SYNTH_INT, tte				;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp1 = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn

/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp1 = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits. 					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 *	 earlier to ensure this is true.  Thus, when we directly
 *	 reference tsbep below, we are referencing the tte_tag field of
 *	 the TSBE.  If this offset ever changes, the code below will
 *	 need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */ ;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */
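
/*
 * For reference, TSB_INVALIDATE behaves like this illustrative C sketch
 * (tag32 and casa() are hypothetical shorthand, as in the locking
 * sketch above):
 *
 *	uint32_t old = *tag32;
 *	for (;;) {
 *		while (old == TSBTAG_LOCKED)
 *			old = *tag32;			(wait for unlock)
 *		if (*(uint64_t *)tsbep != tag)
 *			break;				(different tag: done)
 *		if (casa(tag32, old, TSBTAG_INVALID) == old)
 *			break;				(tag now invalid)
 *		old = *tag32;				(raced: start over)
 *	}
 */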

#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */

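/*
 * Once patched, RUNTIME_PATCH_SETX reconstructs a 64-bit constant from
 * the four immediate fields; as an illustrative C sketch of the result
 * (v is the hot-patched value):
 *
 *	dest = (((v >> 42) & 0x3fffff) << 42) |		(sethi %hh)
 *	       (((v >> 32) & 0x3ff) << 32) |		(or %hm)
 *	       (((v >> 10) & 0x3fffff) << 10) |		(sethi %lm)
 *	       (v & 0x3ff);				(or %lo)
 */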

#endif /* lint */


#if defined (lint)

/*
 * sfmmu related subroutines
 */
uint_t
sfmmu_disable_intrs()
{ return(0); }

/* ARGSUSED */
void
sfmmu_enable_intrs(uint_t pstate_save)
{}

/* ARGSUSED */
int
sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
{ return(0); }

/*
 * Use cas; if the tte has changed underneath us then reread and try again.
 * In the case of a retry, it will update sttep with the new original.
 */
/* ARGSUSED */
int
sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/*
 * Use cas; if the tte has changed underneath us then return 1, else return 0
 */
/* ARGSUSED */
int
sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/* ARGSUSED */
void
sfmmu_copytte(tte_t *sttep, tte_t *dttep)
{}

/*ARGSUSED*/
struct tsbe *
sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
{ return(0); }

/*ARGSUSED*/
uint64_t
sfmmu_make_tsbtag(caddr_t va)
{ return(0); }

#else	/* lint */

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"

	.global	sfmmu_panic6
sfmmu_panic6:
	.asciz	"sfmmu_asm: interrupts not disabled"

	.global	sfmmu_panic7
sfmmu_panic7:
	.asciz	"sfmmu_asm: kernel as"

	.global	sfmmu_panic8
sfmmu_panic8:
	.asciz	"sfmmu_asm: gnum is zero"

	.global	sfmmu_panic9
sfmmu_panic9:
	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"

	.global	sfmmu_panic10
sfmmu_panic10:
	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"

	ENTRY(sfmmu_disable_intrs)
	rdpr	%pstate, %o0
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
#endif /* DEBUG */
	retl
	  wrpr	%o0, PSTATE_IE, %pstate
	SET_SIZE(sfmmu_disable_intrs)

	ENTRY(sfmmu_enable_intrs)
	retl
	  wrpr	%g0, %o0, %pstate
	SET_SIZE(sfmmu_enable_intrs)

/*
 * This routine is called both by resume() and sfmmu_get_ctx() to
 * allocate a new context for the process on a MMU.
 * If allocflag == 1, then alloc ctx when HAT mmu cnum == INVALID.
 * If allocflag == 0, then do not alloc ctx if HAT mmu cnum == INVALID, which
 * is the case when sfmmu_alloc_ctx is called from resume().
 *
 * The caller must disable interrupts before entering this routine.
 * To reduce ctx switch overhead, the code contains both 'fast path' and
 * 'slow path' code. The fast path code covers the common case, where only
 * a quick check is needed and the real ctx allocation is not required.
 * It can be done without holding the per-process (PP) lock.
 * The 'slow path' code must be protected by the PP lock and performs ctx
 * allocation.
 * Hardware context register and HAT mmu cnum are updated accordingly.
 *
 * %o0 - sfmmup
 * %o1 - allocflag
 * %o2 - CPU
 * %o3 - sfmmu private/shared flag
 *
 * ret - 0: no ctx is allocated
 *       1: a ctx is allocated
 */
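/*
 * The fast path corresponds roughly to this illustrative pseudo-C
 * (set_secctx() is a hypothetical stand-in for SET_SECCTX):
 *
 *	(gnum, cnum) = sfmmup->sfmmu_ctxs[mmuid];	(atomic 64-bit read)
 *	if (cnum == INVALID_CONTEXT) {
 *		if (allocflag == 0) {
 *			set_secctx(cnum);		(resume(): no alloc)
 *			return (0);
 *		}
 *	} else if (gnum == mmu_ctxp->gnum) {
 *		set_secctx(cnum);			(ctx still valid)
 *		return (1);
 *	}
 *	grab sfmmu_ctx_lock, redo the checks, then allocate a cnum by
 *	atomically incrementing mmu_ctxp->cnum (slow path below).
 */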
	ENTRY_NP(sfmmu_alloc_ctx)

#ifdef DEBUG
	sethi	%hi(ksfmmup), %g1
	ldx	[%g1 + %lo(ksfmmup)], %g1
	cmp	%g1, %o0
	bne,pt	%xcc, 0f
	  nop

	sethi	%hi(panicstr), %g1		! if kernel as, panic
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 7f
	  nop

	sethi	%hi(sfmmu_panic7), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic7), %o0

7:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0

0:
	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
#endif /* DEBUG */

	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1

	! load global mmu_ctxp info
	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr
	lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index

	! load global mmu_ctxp gnum
	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum

#ifdef DEBUG
	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
	bne,pt	%xcc, 3f
	  nop

	sethi	%hi(panicstr), %g1	! test if panicstr is already set
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1
	bnz,pn	%icc, 1f
	  nop

	sethi	%hi(sfmmu_panic8), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic8), %o0
1:
	retl
	  mov	%g0, %o0			! %o0 = ret = 0
3:
#endif

	! load HAT sfmmu_ctxs[mmuid] gnum, cnum

	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS

	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 *
	 * Fast path code, do a quick check.
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt  %o1, 8f		! allocflag == 0, skip ctx allocation, bail
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 2f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 8f			! gnum unchanged, go to done
	  mov	%g6, %o1

2:
	/*
	 * Grab per process (PP) sfmmu_ctx_lock spinlock,
	 * followed by the 'slow path' code.
	 */
	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
3:
	brz	%g3, 5f
	  nop
4:
	brnz,a,pt	%g3, 4b				! spin if lock is 1
	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
	ba	%xcc, 3b				! retry the lock
	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = PP lock

5:
	membar	#LoadLoad
	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	mov	%g0, %g4	! %g4 = ret = 0
	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 6f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	mov	1, %g4				! %g4 = ret = 1
	be,a,pt	%icc, 2f			! gnum unchanged, go to done
	  mov	%g6, %o1

	ba,pt	%icc, 6f
	  nop
2:
	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
6:
	/*
	 * We get here if we do not have a valid context, or
	 * the HAT gnum does not match the global gnum. We hold
	 * the sfmmu_ctx_lock spinlock. Allocate a new context.
	 *
	 * %o3 = mmu_ctxp
	 */
	add	%o3, MMU_CTX_CNUM, %g3
	ld	[%o3 + MMU_CTX_NCTXS], %g4

	/*
	 * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
	 * %g3 = mmu cnum address
	 * %g4 = mmu nctxs
	 *
	 * %o0 = sfmmup
	 * %o1 = mmu current cnum value (used as new cnum)
	 * %o4 = mmu gnum
	 *
	 * %o5 = scratch
	 */
	ld	[%g3], %o1
0:
	cmp	%o1, %g4
	bl,a,pt %icc, 1f
	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1

	/*
	 * cnum reaches max; bail so wrap around can be performed later.
	 */
	set	INVALID_CONTEXT, %o1
	/*
	 * When the routine is called by shared ctx, we want to set
	 * both private and shared ctx regs to INVALID. In order to
	 * do so, we set the sfmmu priv/shared flag to 'private'
	 * regardless, so that the private ctx reg will be set to invalid.
	 * Note that values written to the private context register are
	 * automatically written to the shared context register as well.
	 */
	mov	%g0, %g1		! %g1 = sfmmu private/shared flag
	mov	%g0, %g4		! %g4 = ret = 0

	membar	#LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
1:
	! %g3 = addr of mmu_ctxp->cnum
	! %o5 = mmu_ctxp->cnum + 1
	cas	[%g3], %o1, %o5
	cmp	%o1, %o5
	bne,a,pn %xcc, 0b	! cas failed
	  ld	[%g3], %o1

#ifdef DEBUG
	set	MAX_SFMMU_CTX_VAL, %o5
	cmp	%o1, %o5
	ble,pt %icc, 2f
	  nop

	sethi	%hi(sfmmu_panic9), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic9), %o0
2:
#endif
	! update hat gnum and cnum
	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
	or	%o4, %o1, %o4
	stx	%o4, [%g2 + SFMMU_CTXS]

	membar	#LoadStore|#StoreStore
	clrb	[%o0 + SFMMU_CTX_LOCK]

	mov	1, %g4			! %g4 = ret = 1
8:
	/*
	 * program the secondary context register
	 *
	 * %o1 = cnum
	 * %g1 = sfmmu private/shared flag (0:private,  1:shared)
	 */

#ifdef	sun4u
	ldub	[%o0 + SFMMU_CEXT], %o2
	sll	%o2, CTXREG_EXT_SHIFT, %o2
	or	%o1, %o2, %o1
#endif
	SET_SECCTX(%o1, %g1, %o4, %o5)

	retl
	  mov	%g4, %o0			! %o0 = ret

	SET_SIZE(sfmmu_alloc_ctx)

	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
2:
	ldx	[%o1], %g2			/* modified */
	cmp	%g2, %g3			/* is modified = current? */
	be,a,pt	%xcc, 1f			/* yes, don't write */
	stx	%g3, [%o0]			/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f			/* cas succeeded - return */
	  nop
	ldx	[%o2], %g3			/* new current */
	stx	%g3, [%o0]			/* save as new original */
	ba,pt	%xcc, 2b
	  mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)

	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2			/* modified */
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
	cmp	%g3, %g2			/* is modified = current? */
	be,a,pn %xcc, 1f			/* yes, don't write */
	mov	0, %o1				/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1			/* cas failed. */
	move	%xcc, 1, %o1			/* cas succeeded. */
1:
	stx	%g2, [%o0]			/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)
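
	/*
	 * In C terms sfmmu_modifytte_try above implements the following
	 * illustrative sketch (casx() stands for the 64-bit
	 * compare-and-swap and returns the prior memory contents):
	 *
	 *	tte_t orig = *sttep, mod = *stmodttep, cur = *dttep;
	 *
	 *	if (cur == mod) {
	 *		*sttep = cur;
	 *		return (0);		(already up to date)
	 *	}
	 *	cur = casx(dttep, orig, mod);
	 *	*sttep = cur;			(report "current" value)
	 *	return (cur == orig ? 1 : -1);
	 */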

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

#endif /* lint */

/*
 * Other sfmmu primitives
 */


#if defined (lint)
void
sfmmu_patch_ktsb(void)
{
}

void
sfmmu_kpm_patch_tlbm(void)
{
}

void
sfmmu_kpm_patch_tsbm(void)
{
}

void
sfmmu_patch_shctx(void)
{
}

/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
{
}

/* ARGSUSED */
void
sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
{
}

/* ARGSUSED */
void
sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
{
}

/* ARGSUSED */
void
sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
{
}

#else /* lint */

#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 32:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as follows:
	 *
	 * sethi   %hh(RUNTIME_PATCH), tmp
	 * sethi   %lm(RUNTIME_PATCH), dest
	 * or      tmp, %hm(RUNTIME_PATCH), tmp
	 * or      dest, %lo(RUNTIME_PATCH), dest
	 * sllx    tmp, 32, tmp
	 * nop
	 * or      tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)
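
	/*
	 * In effect sfmmu_fixup_setx performs the following (illustrative
	 * C sketch; it relies on the patched fields starting out as
	 * RUNTIME_PATCH, i.e. 0, so OR-ing in the new bits suffices):
	 *
	 *	void
	 *	fixup_setx(uint32_t *i, uint64_t v)
	 *	{
	 *		i[0] |= (v >> 42) & 0x3fffff;	(sethi %hh: imm22)
	 *		i[1] |= (v >> 10) & 0x3fffff;	(sethi %lm: imm22)
	 *		i[2] |= (v >> 32) & 0x3ff;	(or %hm: 10 bits)
	 *		i[3] |= v & 0x3ff;		(or %lo: 10 bits)
	 *	}
	 */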

	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to set up, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	  srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	  or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	  add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)
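
	/*
	 * The instruction match above decodes as in this illustrative C
	 * sketch (SPARC ldda has op == 3 and op3 == 0x13; imm_asi arrives
	 * already shifted into bits 12..5):
	 *
	 *	uint32_t in = *instr;
	 *	if ((in >> 30) == 3 && ((in >> 19) & 0x3f) == 0x13)
	 *		*instr |= imm_asi;
	 */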

	/*
	 * Patch immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
	movrz	%o0, ASI_NQUAD_LD, %o3
	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	  or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	  or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	  or	%o0, %lo(iktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
	  or	%o0, %lo(iktsb4m), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)

	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	  nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(iktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb4m), %o0

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif

	ldx 	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx 	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(iktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(iktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)

	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)

	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	flush	%o0
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)

	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	lduw	[%o1], %o3
	subcc	%o4, %o3, %o4
	bz,pt	%icc, 1f
	  /* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)

	ENTRY_NP(sfmmu_patch_shctx)
#ifdef sun4u
	retl
	  nop
#else /* sun4u */
	set	sfmmu_shctx_cpu_mondo_patch, %o0
	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp       %g5
	st	%o1, [%o0]
	flush	%o0
	MAKE_NOP_INSTR(%o1)
	add	%o0, I_SIZE, %o0	! next instr
	st	%o1, [%o0]
	flush	%o0

	set	sfmmu_shctx_user_rtt_patch, %o0
	st	%o1, [%o0]		! nop 1st instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 2nd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 3rd instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 4th instruction
	flush	%o0
	add	%o0, I_SIZE, %o0
	st	%o1, [%o0]		! nop 5th instruction
	retl
	  flush	%o0
#endif /* sun4u */
	SET_SIZE(sfmmu_patch_shctx)

	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, 1)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_load_tsbe)

	/*
	 * Flush TSB of a given entry if the tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_unload_tsbe)

	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, 1)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	  membar #StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_load_tsb)

	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg. Locking is required since
	 * kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	  membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_unload_tsb)

#endif /* lint */


#if defined (lint)

/*ARGSUSED*/
pfn_t
sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
{ return(0); }

#else /* lint */

	ENTRY_NP(sfmmu_ttetopfn)
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0
	SET_SIZE(sfmmu_ttetopfn)

#endif /* !lint */


#if defined (lint)
/*
 * The sfmmu_hblk_hash_add is the assembly primitive for adding hmeblks to
 * the hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa)
{
}

/*
 * The sfmmu_hblk_hash_rm is the assembly primitive to remove hmeblks from the
 * hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa, struct hme_blk *prev_hblkp)
{
}
#else /* lint */

/*
 * Functions to grab/release hme bucket list lock.  I only use a byte
 * instead of the whole int because eventually we might want to
 * put some counters on the other bytes (of course, these routines would
 * have to change).  The code that grabs this lock should execute
 * with interrupts disabled and hold the lock for the least amount of time
 * possible.
 */

/*
 * Even though hmeh_listlock is updated using pa there's no need to flush
 * dcache since hmeh_listlock will be restored to the original value (0)
 * before interrupts are reenabled.
 */

/*
 * For sparcv9 hme hash buckets may not be in the nucleus.  hme hash update
 * routines still use virtual addresses to update the bucket fields. But they
 * must not cause a TLB miss after grabbing the low level bucket lock. To
 * achieve this we must make sure the bucket structure is completely within an
 * 8K page.
 */

#if (HMEBUCK_SIZE & (HMEBUCK_SIZE - 1))
#error - the size of hmehash_bucket structure is not a power of 2
#endif

#ifdef HMELOCK_BACKOFF_ENABLE

#define HMELOCK_BACKOFF(reg, val)                               \
	set     val, reg                                        ;\
	brnz    reg, .                                          ;\
	  dec   reg

#define CAS_HME(tmp1, tmp2, exitlabel, asi)                     \
	mov     0xff, tmp2                                      ;\
	casa    [tmp1]asi, %g0, tmp2                            ;\
	brz,a,pt tmp2, exitlabel                                ;\
	membar  #LoadLoad

#define HMELOCK_ENTER(hmebp, tmp1, tmp2, label, asi)            \
	mov     0xff, tmp2                                      ;\
	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
	casa    [tmp1]asi, %g0, tmp2                            ;\
	brz,a,pt tmp2, label/**/2                               ;\
	membar  #LoadLoad                                       ;\
	HMELOCK_BACKOFF(tmp2,0x80)                              ;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)                    ;\
	HMELOCK_BACKOFF(tmp2,0x100)                             ;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)                    ;\
	HMELOCK_BACKOFF(tmp2,0x200)                             ;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)                    ;\
label/**/1:                                                     ;\
	HMELOCK_BACKOFF(tmp2,0x400)                             ;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)                    ;\
	HMELOCK_BACKOFF(tmp2,0x800)                             ;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)                    ;\
	HMELOCK_BACKOFF(tmp2,0x1000)                            ;\
	CAS_HME(tmp1, tmp2, label/**/2, asi)                    ;\
	HMELOCK_BACKOFF(tmp2,0x2000)                            ;\
	mov     0xff, tmp2                                      ;\
	casa    [tmp1]asi, %g0, tmp2                            ;\
	brnz,pn tmp2, label/**/1     /* reset backoff */        ;\
	membar  #LoadLoad                                       ;\
label/**/2:

#else /* HMELOCK_BACKOFF_ENABLE */

#define HMELOCK_ENTER(hmebp, tmp1, tmp2, label1, asi)           \
	mov     0xff, tmp2                                      ;\
	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
label1:                                                         ;\
	casa    [tmp1]asi, %g0, tmp2                            ;\
	brnz,pn tmp2, label1                                    ;\
	mov     0xff, tmp2                                      ;\
	membar  #LoadLoad

#endif /* HMELOCK_BACKOFF_ENABLE */

#define HMELOCK_EXIT(hmebp, tmp1, asi)                          \
	membar  #LoadStore|#StoreStore                          ;\
	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
	sta     %g0, [tmp1]asi
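
/*
 * The backoff variant implements, in effect, this illustrative C sketch
 * (spin() and casa() stand for the inline delay loop and the
 * compare-and-swap; neither is a real symbol here):
 *
 *	if (casa(&hmebp->hmeh_listlock, 0, 0xff) != 0) {
 *		for (backoff = 0x80; ; ) {
 *			spin(backoff);			(delay loop)
 *			if (casa(&hmebp->hmeh_listlock, 0, 0xff) == 0)
 *				break;
 *			backoff = (backoff == 0x2000) ?
 *			    0x400 : backoff * 2;	(cap, then repeat)
 *		}
 *	}
 *	membar #LoadLoad
 */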

	.seg	".data"
hblk_add_panic1:
	.ascii	"sfmmu_hblk_hash_add: interrupts disabled"
	.byte	0
hblk_add_panic2:
	.ascii	"sfmmu_hblk_hash_add: va hmeblkp is NULL but pa is not"
	.byte	0
	.align	4
	.seg	".text"

	ENTRY_NP(sfmmu_hblk_hash_add)
	/*
	 * %o0 = hmebp
	 * %o1 = hmeblkp
	 * %o2 = hblkpa
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt %icc, 3f				/* disabled, panic	 */
	  nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic1), %o0
	call	panic
	 or	%o0, %lo(hblk_add_panic1), %o0
	ret
	restore

3:
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
	mov	%o2, %g1

	/*
	 * g1 = hblkpa
	 */
	ldn	[%o0 + HMEBUCK_HBLK], %o4	/* next hmeblk */
	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = next hblkpa */
#ifdef	DEBUG
	cmp	%o4, %g0
	bne,pt %xcc, 1f
	 nop
	brz,pt %g2, 1f
	 nop
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic2), %o0
	call	panic
	  or	%o0, %lo(hblk_add_panic2), %o0
	ret
	restore
1:
#endif /* DEBUG */
	/*
	 * We update hmeblks entries before grabbing lock because the stores
	 * could take a tlb miss and require the hash lock.  The buckets
	 * are part of the nucleus so we are cool with those stores.
	 *
	 * if buckets are not part of the nucleus our game is to
	 * not touch any other page via va until we drop the lock.
	 * This guarantees we won't get a tlb miss before the lock release
	 * since interrupts are disabled.
	 */
	stn	%o4, [%o1 + HMEBLK_NEXT]	/* update hmeblk's next */
	stx	%g2, [%o1 + HMEBLK_NEXTPA]	/* update hmeblk's next pa */
	HMELOCK_ENTER(%o0, %o2, %o3, hashadd1, ASI_N)
	stn	%o1, [%o0 + HMEBUCK_HBLK]	/* update bucket hblk next */
	stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* add hmeblk to list */
	HMELOCK_EXIT(%o0, %g2, ASI_N)
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_hblk_hash_add)

	ENTRY_NP(sfmmu_hblk_hash_rm)
	/*
	 * This function removes an hmeblk from the hash chain.
	 * It is written to guarantee we don't take a tlb miss
	 * by using physical addresses to update the list.
	 *
	 * %o0 = hmebp
	 * %o1 = hmeblkp
	 * %o2 = hmeblkp previous pa
	 * %o3 = hmeblkp previous
	 */

	mov	%o3, %o4			/* o4 = hmeblkp previous */

	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l4, %g1)
#endif /* DEBUG */
	/*
	 * disable interrupts, clear Address Mask to access 64 bit physaddr
	 */
	andn	%o5, PSTATE_IE, %g1
	wrpr	%g1, 0, %pstate

#ifndef sun4v
	sethi	%hi(dcache_line_mask), %g4
	ld	[%g4 + %lo(dcache_line_mask)], %g4
#endif /* sun4v */

	/*
	 * if buckets are not part of the nucleus our game is to
	 * not touch any other page via va until we drop the lock.
	 * This guarantees we won't get a tlb miss before the lock release
	 * since interrupts are disabled.
	 */
	HMELOCK_ENTER(%o0, %g1, %g3, hashrm1, ASI_N)
	ldn	[%o0 + HMEBUCK_HBLK], %g2	/* first hmeblk in list */
	cmp	%g2, %o1
	bne,pt	%ncc,1f
	 mov	ASI_MEM, %asi
	/*
	 * hmeblk is first on list
	 */
	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = hmeblk pa */
	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
	stn	%o3, [%o0 + HMEBUCK_HBLK]	/* write va */
	ba,pt	%xcc, 2f
	  stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* write pa */
1:
	/* hmeblk is not first on list */

	mov	%o2, %g3
#ifndef sun4v
	GET_CPU_IMPL(%g2)
	cmp 	%g2, CHEETAH_IMPL
	bge,a,pt %icc, hblk_hash_rm_1
	  and	%o4, %g4, %g2
	cmp	%g2, SPITFIRE_IMPL
	blt	%icc, hblk_hash_rm_2		/* no flushing needed for OPL */
	  and	%o4, %g4, %g2
	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev pa from dcache */
	add	%o4, HMEBLK_NEXT, %o4
	and	%o4, %g4, %g2
	ba	hblk_hash_rm_2
	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev va from dcache */
hblk_hash_rm_1:

	stxa	%g0, [%g3]ASI_DC_INVAL		/* flush prev pa from dcache */
	membar	#Sync
	add	%g3, HMEBLK_NEXT, %g2
	stxa	%g0, [%g2]ASI_DC_INVAL		/* flush prev va from dcache */
hblk_hash_rm_2:
	membar	#Sync
#endif /* sun4v */
	ldxa	[%g3 + HMEBLK_NEXTPA] %asi, %g2	/* g2 = hmeblk pa */
	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
	stna	%o3, [%g3 + HMEBLK_NEXT] %asi	/* write va */
	stxa	%g1, [%g3 + HMEBLK_NEXTPA] %asi	/* write pa */
2:
	HMELOCK_EXIT(%o0, %g2, ASI_N)
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_hblk_hash_rm)

#endif /* lint */
1809
1810/*
1811 * These macros are used to update global sfmmu hme hash statistics
1812 * in perf critical paths. It is only enabled in debug kernels or
1813 * if SFMMU_STAT_GATHER is defined
1814 */
1815#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
1816#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1817	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1818	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
1819	cmp	tmp1, hatid						;\
1820	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
1821	set	sfmmu_global_stat, tmp1					;\
1822	add	tmp1, tmp2, tmp1					;\
1823	ld	[tmp1], tmp2						;\
1824	inc	tmp2							;\
1825	st	tmp2, [tmp1]
1826
1827#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
1828	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
1829	mov	HATSTAT_KHASH_LINKS, tmp2				;\
1830	cmp	tmp1, hatid						;\
1831	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
1832	set	sfmmu_global_stat, tmp1					;\
1833	add	tmp1, tmp2, tmp1					;\
1834	ld	[tmp1], tmp2						;\
1835	inc	tmp2							;\
1836	st	tmp2, [tmp1]
1837
1838
1839#else /* DEBUG || SFMMU_STAT_GATHER */
1840
1841#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1842
1843#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)
1844
1845#endif  /* DEBUG || SFMMU_STAT_GATHER */
1846
1847/*
1848 * This macro is used to update global sfmmu kstats in non
1849 * perf critical areas, so it is enabled all the time.
1850 */
1851#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
1852	sethi	%hi(sfmmu_global_stat), tmp1				;\
1853	add	tmp1, statname, tmp1					;\
1854	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
1855	inc	tmp2							;\
1856	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]
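/*
 * In C, HAT_GLOBAL_STAT is simply (a sketch; "statname" is a byte
 * offset into struct sfmmu_global_stat, exactly as it is used above):
 *
 *	uint32_t *p = (uint32_t *)((uintptr_t)&sfmmu_global_stat + statname);
 *	*p += 1;	// non-atomic; a lost update only undercounts
 */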
1857
1858/*
1859 * This macro is used to update a 32-bit per cpu stat in non perf
1860 * critical areas, so it is enabled all the time.
1861 */
1862#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
1863	ld	[tsbarea + stat], tmp1					;\
1864	inc	tmp1							;\
1865	st	tmp1, [tsbarea + stat]
1866
1867/*
1868 * This macro is the 16-bit variant of the above; it is likewise
1869 * enabled all the time.
1870 */
1871#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
1872	lduh	[tsbarea + stat], tmp1					;\
1873	inc	tmp1							;\
1874	stuh	tmp1, [tsbarea + stat]
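/*
 * C equivalents of the two per cpu stat macros (a sketch; "stat" is a
 * byte offset into the per-cpu tsbmiss area):
 *
 *	(*(uint32_t *)((uintptr_t)tsbarea + stat))++;	// HAT_PERCPU_STAT32
 *	(*(uint16_t *)((uintptr_t)tsbarea + stat))++;	// HAT_PERCPU_STAT16
 */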
1875
1876#if defined(KPM_TLBMISS_STATS_GATHER)
1877	/*
1878	 * Count kpm dtlb misses separately to allow a different
1879	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
1880	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
1881	 */
1882#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
1883	brgez	tagacc, label	/* KPM VA? */				;\
1884	nop								;\
1885	CPU_INDEX(tmp1, tsbma)						;\
1886	sethi	%hi(kpmtsbm_area), tsbma				;\
1887	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
1888	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
1889	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
1890	/* VA range check */						;\
1891	ldx	[tsbma + KPMTSBM_VBASE], val				;\
1892	cmp	tagacc, val						;\
1893	blu,pn	%xcc, label						;\
1894	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
1895	cmp	tagacc, tmp1						;\
1896	bgeu,pn	%xcc, label						;\
1897	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
1898	inc	val							;\
1899	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
1900label:
1901#else
1902#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
1903#endif	/* KPM_TLBMISS_STATS_GATHER */
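/*
 * A hedged C sketch of KPM_TLBMISS_STAT_INCR (field and array names
 * approximate the KPMTSBM_* offsets and kpmtsbm_area used above):
 *
 *	if ((int64_t)tagacc < 0) {		// KPM VAs have bit 63 set
 *		kpmtsbm = &kpmtsbm_area[CPU->cpu_id];
 *		if (tagacc >= kpmtsbm->vbase && tagacc < kpmtsbm->vend)
 *			kpmtsbm->dtlb_misses++;
 *	}
 */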
1904
1905#if defined(lint)
1906/*
1907 * The following routines are jumped to from the mmu trap handlers to
1908 * set up the call to systrap.  They are separate routines instead of
1909 * being part of the handlers because the handlers would exceed 32
1910 * instructions, and since this is part of the slow path the jump
1911 * cost is irrelevant.
1912 */
1913void
1914sfmmu_pagefault(void)
1915{
1916}
1917
1918void
1919sfmmu_mmu_trap(void)
1920{
1921}
1922
1923void
1924sfmmu_window_trap(void)
1925{
1926}
1927
1928void
1929sfmmu_kpm_exception(void)
1930{
1931}
1932
1933#else /* lint */
1934
1935#ifdef	PTL1_PANIC_DEBUG
1936	.seg	".data"
1937	.global	test_ptl1_panic
1938test_ptl1_panic:
1939	.word	0
1940	.align	8
1941
1942	.seg	".text"
1943	.align	4
1944#endif	/* PTL1_PANIC_DEBUG */
1945
1946
1947	ENTRY_NP(sfmmu_pagefault)
1948	SET_GL_REG(1)
1949	USE_ALTERNATE_GLOBALS(%g5)
1950	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
1951	rdpr	%tt, %g6
1952	cmp	%g6, FAST_IMMU_MISS_TT
1953	be,a,pn	%icc, 1f
1954	  mov	T_INSTR_MMU_MISS, %g3
1955	cmp	%g6, T_INSTR_MMU_MISS
1956	be,a,pn	%icc, 1f
1957	  mov	T_INSTR_MMU_MISS, %g3
1958	mov	%g5, %g2
1959	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
1960	cmp	%g6, FAST_DMMU_MISS_TT
1961	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1962	cmp	%g6, T_DATA_MMU_MISS
1963	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1964
1965#ifdef  PTL1_PANIC_DEBUG
1966	/* check if we want to test the tl1 panic */
1967	sethi	%hi(test_ptl1_panic), %g4
1968	ld	[%g4 + %lo(test_ptl1_panic)], %g1
1969	st	%g0, [%g4 + %lo(test_ptl1_panic)]
1970	cmp	%g1, %g0
1971	bne,a,pn %icc, ptl1_panic
1972	  or	%g0, PTL1_BAD_DEBUG, %g1
1973#endif	/* PTL1_PANIC_DEBUG */
19741:
1975	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
1976	/*
1977	 * g2 = tag access reg
1978	 * g3.l = type
1979	 * g3.h = 0
1980	 */
1981	sethi	%hi(trap), %g1
1982	or	%g1, %lo(trap), %g1
19832:
1984	ba,pt	%xcc, sys_trap
1985	  mov	-1, %g4
1986	SET_SIZE(sfmmu_pagefault)
1987
1988	ENTRY_NP(sfmmu_mmu_trap)
1989	SET_GL_REG(1)
1990	USE_ALTERNATE_GLOBALS(%g5)
1991	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
1992	rdpr	%tt, %g6
1993	cmp	%g6, FAST_IMMU_MISS_TT
1994	be,a,pn	%icc, 1f
1995	  mov	T_INSTR_MMU_MISS, %g3
1996	cmp	%g6, T_INSTR_MMU_MISS
1997	be,a,pn	%icc, 1f
1998	  mov	T_INSTR_MMU_MISS, %g3
1999	mov	%g5, %g2
2000	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
2001	cmp	%g6, FAST_DMMU_MISS_TT
2002	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
2003	cmp	%g6, T_DATA_MMU_MISS
2004	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
20051:
2006	/*
2007	 * g2 = tag access reg
2008	 * g3 = type
2009	 */
2010	sethi	%hi(sfmmu_tsbmiss_exception), %g1
2011	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
2012	ba,pt	%xcc, sys_trap
2013	  mov	-1, %g4
2014	/*NOTREACHED*/
2015	SET_SIZE(sfmmu_mmu_trap)
2016
2017	ENTRY_NP(sfmmu_suspend_tl)
2018	SET_GL_REG(1)
2019	USE_ALTERNATE_GLOBALS(%g5)
2020	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
2021	rdpr	%tt, %g6
2022	cmp	%g6, FAST_IMMU_MISS_TT
2023	be,a,pn	%icc, 1f
2024	  mov	T_INSTR_MMU_MISS, %g3
2025	mov	%g5, %g2
2026	cmp	%g6, FAST_DMMU_MISS_TT
2027	move	%icc, T_DATA_MMU_MISS, %g3
2028	movne	%icc, T_DATA_PROT, %g3
20291:
2030	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
2031	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
2032	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
2033	ba,pt	%xcc, sys_trap
2034	  mov	PIL_15, %g4
2035	/*NOTREACHED*/
2036	SET_SIZE(sfmmu_suspend_tl)
2037
2038	/*
2039	 * No %g registers in use at this point.
2040	 */
2041	ENTRY_NP(sfmmu_window_trap)
2042	rdpr	%tpc, %g1
2043#ifdef sun4v
2044#ifdef DEBUG
2045	/* We assume previous %gl was 1 */
2046	rdpr	%tstate, %g4
2047	srlx	%g4, TSTATE_GL_SHIFT, %g4
2048	and	%g4, TSTATE_GL_MASK, %g4
2049	cmp	%g4, 1
2050	bne,a,pn %icc, ptl1_panic
2051	  mov	PTL1_BAD_WTRAP, %g1
2052#endif /* DEBUG */
2053	/* user miss at tl>1. better be the window handler or user_rtt */
2054	/* in user_rtt? */
2055	set	rtt_fill_start, %g4
2056	cmp	%g1, %g4
2057	blu,pn %xcc, 6f
2058	 .empty
2059	set	rtt_fill_end, %g4
2060	cmp	%g1, %g4
2061	bgeu,pn %xcc, 6f
2062	 nop
2063	set	fault_rtt_fn1, %g1
2064	wrpr	%g0, %g1, %tnpc
2065	ba,a	7f
20666:
2067	! must save this trap level before descending trap stack
2068	! no need to save %tnpc, either overwritten or discarded
2069	! already got it: rdpr	%tpc, %g1
2070	rdpr	%tstate, %g6
2071	rdpr	%tt, %g7
2072	! trap level saved, go get underlying trap type
2073	rdpr	%tl, %g5
2074	sub	%g5, 1, %g3
2075	wrpr	%g3, %tl
2076	rdpr	%tt, %g2
2077	wrpr	%g5, %tl
2078	! restore saved trap level
2079	wrpr	%g1, %tpc
2080	wrpr	%g6, %tstate
2081	wrpr	%g7, %tt
2082#else /* sun4v */
2083	/* user miss at tl>1. better be the window handler */
2084	rdpr	%tl, %g5
2085	sub	%g5, 1, %g3
2086	wrpr	%g3, %tl
2087	rdpr	%tt, %g2
2088	wrpr	%g5, %tl
2089#endif /* sun4v */
2090	and	%g2, WTRAP_TTMASK, %g4
2091	cmp	%g4, WTRAP_TYPE
2092	bne,pn	%xcc, 1f
2093	 nop
2094	/* tpc should be in the trap table */
2095	set	trap_table, %g4
2096	cmp	%g1, %g4
2097	blt,pn %xcc, 1f
2098	 .empty
2099	set	etrap_table, %g4
2100	cmp	%g1, %g4
2101	bge,pn %xcc, 1f
2102	 .empty
2103	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
2104	add	%g1, WTRAP_FAULTOFF, %g1
2105	wrpr	%g0, %g1, %tnpc
21067:
2107	/*
2108	 * Some wbuf handlers will call systrap to resolve the fault;
2109	 * we pass the trap type so they can figure out the correct parameters.
2110	 * g5 = trap type, g6 = tag access reg
2111	 */
2112
2113	/*
2114	 * Only use the g5, g6, g7 registers after we have switched to
2115	 * alternate globals.
2116	 */
2117	SET_GL_REG(1)
2118	USE_ALTERNATE_GLOBALS(%g5)
2119	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
2120	rdpr	%tt, %g7
2121	cmp	%g7, FAST_IMMU_MISS_TT
2122	be,a,pn	%icc, ptl1_panic
2123	  mov	PTL1_BAD_WTRAP, %g1
2124	cmp	%g7, T_INSTR_MMU_MISS
2125	be,a,pn	%icc, ptl1_panic
2126	  mov	PTL1_BAD_WTRAP, %g1
2127	mov	T_DATA_PROT, %g5
2128	cmp	%g7, FAST_DMMU_MISS_TT
2129	move	%icc, T_DATA_MMU_MISS, %g5
2130	cmp	%g7, T_DATA_MMU_MISS
2131	move	%icc, T_DATA_MMU_MISS, %g5
2132	! XXXQ AGS re-check out this one
2133	done
21341:
2135	CPU_PADDR(%g1, %g4)
2136	add	%g1, CPU_TL1_HDLR, %g1
2137	lda	[%g1]ASI_MEM, %g4
2138	brnz,a,pt %g4, sfmmu_mmu_trap
2139	  sta	%g0, [%g1]ASI_MEM
2140	ba,pt	%icc, ptl1_panic
2141	  mov	PTL1_BAD_TRAP, %g1
2142	SET_SIZE(sfmmu_window_trap)
2143
2144	ENTRY_NP(sfmmu_kpm_exception)
2145	/*
2146	 * We have accessed an unmapped segkpm address or a legal segkpm
2147	 * address which is involved in a VAC alias conflict prevention.
2148	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
2149	 * set. If it is, we will instead note that a fault has occurred
2150	 * by setting CPU_DTRACE_BADADDR and issuing a "done" (instead of
2151	 * a "retry"). This will step over the faulting instruction.
2152	 * Note that this means that a legal segkpm address involved in
2153	 * a VAC alias conflict prevention (a rare case to begin with)
2154	 * cannot be used in DTrace.
2155	 */
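	/*
	 * In C terms (a sketch only; it mirrors the assembly below):
	 *
	 *	flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	 *	if (flags & CPU_DTRACE_NOFAULT) {
	 *		cpu_core[CPU->cpu_id].cpuc_dtrace_flags =
	 *		    flags | CPU_DTRACE_BADADDR;
	 *		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = fault_va;
	 *		done;		// step over the faulting instruction
	 *	}
	 */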
2156	CPU_INDEX(%g1, %g2)
2157	set	cpu_core, %g2
2158	sllx	%g1, CPU_CORE_SHIFT, %g1
2159	add	%g1, %g2, %g1
2160	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
2161	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
2162	bz	0f
2163	or	%g2, CPU_DTRACE_BADADDR, %g2
2164	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
2165	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
2166	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
2167	done
21680:
2169	TSTAT_CHECK_TL1(1f, %g1, %g2)
21701:
2171	SET_GL_REG(1)
2172	USE_ALTERNATE_GLOBALS(%g5)
2173	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
2174	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
2175	/*
2176	 * g2=tagacc g3.l=type g3.h=0
2177	 */
2178	sethi	%hi(trap), %g1
2179	or	%g1, %lo(trap), %g1
2180	ba,pt	%xcc, sys_trap
2181	mov	-1, %g4
2182	SET_SIZE(sfmmu_kpm_exception)
2183
2184#endif /* lint */
2185
2186#if defined(lint)
2187
2188void
2189sfmmu_tsb_miss(void)
2190{
2191}
2192
2193void
2194sfmmu_kpm_dtsb_miss(void)
2195{
2196}
2197
2198void
2199sfmmu_kpm_dtsb_miss_small(void)
2200{
2201}
2202
2203#else /* lint */
2204
2205#if (IMAP_SEG != 0)
2206#error - ism_map->ism_seg offset is not zero
2207#endif
2208
2209/*
2210 * Copies the ism mapping for this ctx into param "ismseg" if this is an
2211 * ISM tlb miss and branches to label "ismhit". If this is not an ISM
2212 * process or not an ISM tlb miss it falls through.
2213 *
2214 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
2215 * this process.
2216 * If so, it will branch to label "ismhit".  If not, it will fall through.
2217 *
2218 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
2219 * so that any other threads of this process will not try to walk the ism
2220 * maps while they are being changed.
2221 *
2222 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
2223 *       will make sure of that. This means we can terminate our search on
2224 *       the first zero mapping we find.
2225 *
2226 * Parameters:
2227 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
2228 * tsbmiss	= address of tsb miss area (in)
2229 * ismseg	= contents of ism_seg for this ism map (out)
2230 * ismhat	= physical address of imap_ismhat for this ism map (out)
2231 * tmp1		= scratch reg (CLOBBERED)
2232 * tmp2		= scratch reg (CLOBBERED)
2233 * tmp3		= scratch reg (CLOBBERED)
2234 * label	= temporary labels
2235 * ismhit	= label to jump to on an ism dtlb miss
2237 */
2238#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
2239	label, ismhit)							\
2240	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
2241	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
2242	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
2243label/**/1:								;\
2244	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
2245	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
2246label/**/2:								;\
2247	brz,pt  ismseg, label/**/3		/* no mapping */	;\
2248	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
2249	lduba	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
2250	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
2251	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
2252	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
2253	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
2254	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
2255	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
2256	cmp	tmp2, tmp1		/* check (va - vbase) < size */	;\
2257	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
2258	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
2259									;\
2260	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
2261	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
2262	cmp	ismhat, tmp1						;\
2263	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
2264	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
2265									;\
2266	add	tmp3, IBLK_NEXTPA, tmp1					;\
2267	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
2268	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
2269	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
2270label/**/3:
2271
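/*
 * A hedged C sketch of the ISM_CHECK walk above (ldphys() and friends
 * stand in for the ASI_MEM loads; field layout follows the IBLK_ and
 * IMAP_ offsets):
 *
 *	for (blkpa = tsbmiss->ismblkpa; (int64_t)blkpa >= 0;
 *	    blkpa = ldphys(blkpa + IBLK_NEXTPA)) {
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			mappa = blkpa + IBLK_MAPS + i * ISM_MAP_SZ;
 *			ism_seg = ldphys(mappa + IMAP_SEG);
 *			if (ism_seg == 0)
 *				return;		// no holes: search is over
 *			shift = ldphys8(mappa + IMAP_VB_SHIFT);
 *			if ((tagacc >> shift) - (ism_seg >> shift) <
 *			    (ism_seg & ldphys32(mappa + IMAP_SZ_MASK)))
 *				goto ismhit;	// vaddr is in this segment
 *		}
 *	}
 */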
2272/*
2273 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
2274 * It also returns the virtual pg for vaddr (i.e. vaddr >> hmeshift).
2275 * Parameters:
2276 * tagacc = reg containing virtual address
2277 * hatid = reg containing sfmmu pointer
2278 * hmeshift = constant/register to shift vaddr to obtain vapg
2279 * hmebp = register where bucket pointer will be stored
2280 * vapg = register where virtual page will be stored
2281 * tmp1, tmp2 = tmp registers
2282 */
2283
2284
2285#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
2286	vapg, label, tmp1, tmp2)					\
2287	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
2288	brnz,a,pt tmp1, label/**/1					;\
2289	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
2290	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
2291	ba,pt	%xcc, label/**/2					;\
2292	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
2293label/**/1:								;\
2294	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
2295label/**/2:								;\
2296	srlx	tagacc, hmeshift, vapg					;\
2297	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
2298	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
2299	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
2300	add	hmebp, tmp1, hmebp
2301
2302/*
2303 * hashtag includes bspage + hashno (64 bits).
2304 */
2305
2306#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
2307	sllx	vapg, hmeshift, vapg					;\
2308	mov	hashno, hblktag						;\
2309	sllx	hblktag, HTAG_REHASH_SHIFT, hblktag			;\
2310	or	vapg, hblktag, hblktag
2311
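/*
 * Taken together, HMEHASH_FUNC_ASM and MAKE_HASHTAG amount to this
 * hedged C sketch (hash_start and hash_mask come from the tsbmiss
 * area; the kernel hash is used when the ctx bits of tagacc are 0):
 *
 *	vapg = tagacc >> hmeshift;
 *	hmebp = hash_start +
 *	    ((vapg ^ (uintptr_t)hatid) & hash_mask) * HMEBUCK_SIZE;
 *	hblktag = (vapg << hmeshift) |
 *	    ((uint64_t)hashno << HTAG_REHASH_SHIFT);
 */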
2312/*
2313 * Function to traverse hmeblk hash link list and find corresponding match.
2314 * The search is done using physical pointers. It returns the physical address
2315 * and virtual address pointers to the hmeblk that matches with the tag
2316 * provided.
2317 * Parameters:
2318 * hmebp	= register that points to hme hash bucket, also used as
2319 *		  tmp reg (clobbered)
2320 * hmeblktag	= register with hmeblk tag match
2321 * hatid	= register with hatid
2322 * hmeblkpa	= register where physical ptr will be stored
2323 * hmeblkva	= register where virtual ptr will be stored
2324 * tmp1		= tmp reg
2325 * label: temporary label
2326 */
2327
2328#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, hmeblkva,	\
2329	tsbarea, tmp1, label)					 	\
2330	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
2331	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2332	add     hmebp, HMEBUCK_HBLK, hmeblkva				;\
2333	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
2334	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2335label/**/1:								;\
2336	brz,pn	hmeblkva, label/**/2					;\
2337	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2338	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
2339	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2340	add	hmebp, CLONGSIZE, hmebp					;\
2341	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
2342	xor	tmp1, hmeblktag, tmp1					;\
2343	xor	hmebp, hatid, hmebp					;\
2344	or	hmebp, tmp1, hmebp					;\
2345	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
2346	  add	hmeblkpa, HMEBLK_NEXT, hmebp				;\
2347	ldna	[hmebp]ASI_MEM, hmeblkva	/* hmeblk ptr va */	;\
2348	add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
2349	ba,pt	%xcc, label/**/1					;\
2350	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
2351label/**/2:
2352
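/*
 * HMEHASH_SEARCH is essentially this list walk in C (a sketch;
 * ldphys() stands in for the ASI_MEM loads, and the two 64-bit tag
 * words are compared with the same xor/xor/or trick as above):
 *
 *	pa = ldphys(hmebp + HMEBUCK_NEXTPA);
 *	va = (void *)ldphys(hmebp + HMEBUCK_HBLK);
 *	while (va != NULL) {
 *		tag0 = ldphys(pa + HMEBLK_TAG);
 *		tag1 = ldphys(pa + HMEBLK_TAG + CLONGSIZE);
 *		if (((tag0 ^ hmeblktag) | (tag1 ^ (uint64_t)hatid)) == 0)
 *			break;				// hit
 *		va = (void *)ldphys(pa + HMEBLK_NEXT);
 *		pa = ldphys(pa + HMEBLK_NEXTPA);
 *	}
 */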
2353/*
2354 * Function to traverse hmeblk hash link list and find corresponding match.
2355 * The search is done using physical pointers. It returns the physical address
2356 * and virtual address pointers to the hmeblk that matches with the tag
2357 * provided.
2358 * Parameters:
2359 * hmeblktag	= register with hmeblk tag match (rid field is 0)
2360 * hatid	= register with hatid (pointer to SRD)
2361 * hmeblkpa	= register where physical ptr will be stored
2362 * hmeblkva	= register where virtual ptr will be stored
2363 * tmp1		= tmp reg
2364 * tmp2		= tmp reg
2365 * label: temporary label
2366 */
2367
2368#define	HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, hmeblkva,	\
2369	tsbarea, tmp1, tmp2, label)			 		\
2370label/**/1:								;\
2371	brz,pn	hmeblkva, label/**/4					;\
2372	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			;\
2373	add	hmeblkpa, HMEBLK_TAG, tmp2				;\
2374	ldxa	[tmp2]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2375	add	tmp2, CLONGSIZE, tmp2					;\
2376	ldxa	[tmp2]ASI_MEM, tmp2 	/* read 2nd part of tag */	;\
2377	xor	tmp1, hmeblktag, tmp1					;\
2378	xor	tmp2, hatid, tmp2					;\
2379	brz,pn	tmp2, label/**/3	/* branch on hit */		;\
2380	  add	hmeblkpa, HMEBLK_NEXT, tmp2				;\
2381label/**/2:								;\
2382	ldna	[tmp2]ASI_MEM, hmeblkva	/* hmeblk ptr va */		;\
2383	add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
2384	ba,pt	%xcc, label/**/1					;\
2385	  ldxa	[tmp2]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */		;\
2386label/**/3:								;\
2387	cmp	tmp1, SFMMU_MAX_HME_REGIONS				;\
2388	bgeu,pt	%xcc, label/**/2					;\
2389	  add	hmeblkpa, HMEBLK_NEXT, tmp2				;\
2390	and	tmp1, BT_ULMASK, tmp2					;\
2391	srlx	tmp1, BT_ULSHIFT, tmp1					;\
2392	sllx	tmp1, CLONGSHIFT, tmp1					;\
2393	add	tsbarea, tmp1, tmp1					;\
2394	ldx	[tmp1 + TSBMISS_SHMERMAP], tmp1				;\
2395	srlx	tmp1, tmp2, tmp1					;\
2396	btst	0x1, tmp1						;\
2397	bz,pn	%xcc, label/**/2					;\
2398	  add	hmeblkpa, HMEBLK_NEXT, tmp2				;\
2399label/**/4:
2400
2401#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2402#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2403#endif
2404
2405/*
2406 * HMEBLK_TO_HMENT is a macro that, given an hmeblk and a vaddr, returns
2407 * the offset for the corresponding hment.
2408 * Parameters:
2409 * In:
2410 *	vaddr = register with virtual address
2411 *	hmeblkpa = physical pointer to hme_blk
2412 * Out:
2413 *	hmentoff = register where hment offset will be stored
2414 *	hmemisc = hblk_misc
2415 * Scratch:
2416 *	tmp1
2417 */
2418#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
2419	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2420	lda	[hmentoff]ASI_MEM, hmemisc 				;\
2421	andcc	hmemisc, HBLK_SZMASK, %g0				;\
2422	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2423	  or	%g0, HMEBLK_HME1, hmentoff				;\
2424	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2425	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2426	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2427	add	tmp1, HMEBLK_HME1, hmentoff				;\
2428label1:
2429
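/*
 * HMEBLK_TO_HMENT in C terms (a sketch):
 *
 *	if (hblk_misc & HBLK_SZMASK) {		// large page hmeblk
 *		hmentoff = HMEBLK_HME1;		// only one hment
 *	} else {				// 8K hmeblk: index by page
 *		hmentoff = HMEBLK_HME1 +
 *		    ((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) * SFHME_SIZE;
 *	}
 */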
2430/*
2431 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2432 *
2433 * tagacc	= (pseudo-)tag access register (in)
2434 * hatid	= sfmmu pointer for TSB miss (in)
2435 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2436 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2437 * hmeblkva	= VA of hment if found, otherwise clobbered (out)
2438 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2439 * hmemisc	= hblk_misc if TTE is found (out), otherwise clobbered
2440 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2441 *		  for this page size.
2442 * hashno	= constant/register hash number
2443 * label	= temporary label for branching within macro.
2444 * foundlabel	= label to jump to when tte is found.
2445 * suspendlabel= label to jump to when tte is suspended.
2446 * exitlabel	= label to jump to when tte is not found.
2447 *
2448 */
2449#define GET_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea, hmemisc, \
2450		hmeshift, hashno, label, foundlabel, suspendlabel, exitlabel) \
2451									;\
2452	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2453	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2454	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2455		hmeblkpa, label/**/5, hmemisc, hmeblkva)		;\
2456									;\
2457	/*								;\
2458	 * tagacc = tagacc						;\
2459	 * hatid = hatid						;\
2460	 * tsbarea = tsbarea						;\
2461	 * tte   = hmebp (hme bucket pointer)				;\
2462	 * hmeblkpa  = vapg  (virtual page)				;\
2463	 * hmemisc, hmeblkva = scratch					;\
2464	 */								;\
2465	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2466	or	hmemisc, SFMMU_INVALID_SHMERID, hmemisc			;\
2467									;\
2468	/*								;\
2469	 * tagacc = tagacc						;\
2470	 * hatid = hatid						;\
2471	 * tte   = hmebp						;\
2472	 * hmeblkpa  = CLOBBERED					;\
2473	 * hmemisc  = htag_bspage+hashno+invalid_rid			;\
2474	 * hmeblkva  = scratch						;\
2475	 */								;\
2476	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2477	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
2478	HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, hmeblkva, 	\
2479		tsbarea, tagacc, label/**/1)				;\
2480	/*								;\
2481	 * tagacc = CLOBBERED						;\
2482	 * tte = CLOBBERED						;\
2483	 * hmeblkpa = hmeblkpa						;\
2484	 * hmeblkva = hmeblkva						;\
2485	 */								;\
2486	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2487	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2488	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2489	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2490	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2491	  nop								;\
2492label/**/4:								;\
2493	/*								;\
2494	 * We have found the hmeblk containing the hment.		;\
2495	 * Now we calculate the corresponding tte.			;\
2496	 *								;\
2497	 * tagacc = tagacc						;\
2498	 * hatid = hatid						;\
2499	 * tte   = clobbered						;\
2500	 * hmeblkpa  = hmeblkpa						;\
2501	 * hmemisc  = hblktag						;\
2502	 * hmeblkva  = hmeblkva 					;\
2503	 */								;\
2504	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2505		label/**/2)						;\
2506									;\
2507	/*								;\
2508	 * tagacc = tagacc						;\
2509	 * hatid = hmentoff						;\
2510	 * tte   = clobbered						;\
2511	 * hmeblkpa  = hmeblkpa						;\
2512	 * hmemisc  = hblk_misc						;\
2513	 * hmeblkva  = hmeblkva 					;\
2514	 */								;\
2515									;\
2516	add	hatid, SFHME_TTE, hatid					;\
2517	add	hmeblkpa, hatid, hmeblkpa				;\
2518	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2519	add	hmeblkva, hatid, hmeblkva				;\
2520	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2521	HMELOCK_EXIT(hatid, hatid, ASI_MEM)	/* drop lock */		;\
2522	set	TTE_SUSPEND, hatid					;\
2523	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2524	btst	tte, hatid						;\
2525	bz,pt	%xcc, foundlabel					;\
2526	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2527									;\
2528	/*								;\
2529	 * Mapping is suspended, so goto suspend label.			;\
2530	 */								;\
2531	ba,pt	%xcc, suspendlabel					;\
2532	  nop
2533
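/*
 * The overall GET_TTE flow, as hedged C-style pseudocode (names follow
 * the macros above; this is a sketch, not a literal translation):
 *
 *	hmebp = HMEHASH_FUNC_ASM(tagacc, hatid, ...);	// pick hash bucket
 *	tag = MAKE_HASHTAG(vapg, hatid, hmeshift, hashno);
 *	HMELOCK_ENTER(hmebp);
 *	(pa, va) = HMEHASH_SEARCH(hmebp, tag, hatid, ...);
 *	if (va == NULL) {
 *		HMELOCK_EXIT(hmebp);
 *		goto exitlabel;			// no matching hmeblk
 *	}
 *	tte = ldphys(pa + HMEBLK_TO_HMENT(vaddr, pa) + SFHME_TTE);
 *	HMELOCK_EXIT(hmebp);
 *	if (tte & TTE_SUSPEND)
 *		goto suspendlabel;		// mapping being relocated
 *	goto foundlabel;
 */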
2534/*
2535 * GET_SHME_TTE is similar to GET_TTE() except it searches
2536 * shared hmeblks via HMEHASH_SEARCH_SHME() macro.
2537 * If a valid tte is found, hmemisc holds the shctx flag, i.e., it is
2538 * either 0 (shme not part of the scd) or 1 (part of the scd).
2539 */
2540#define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea,	\
2541		hmemisc, hmeshift, hashno, label, foundlabel,		\
2542		suspendlabel, exitlabel)				\
2543									;\
2544	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2545	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2546	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2547		hmeblkpa, label/**/5, hmemisc, hmeblkva)		;\
2548									;\
2549	/*								;\
2550	 * tagacc = tagacc						;\
2551	 * hatid = hatid						;\
2552	 * tsbarea = tsbarea						;\
2553	 * tte   = hmebp (hme bucket pointer)				;\
2554	 * hmeblkpa  = vapg  (virtual page)				;\
2555	 * hmemisc, hmeblkva = scratch					;\
2556	 */								;\
2557	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
2558									;\
2559	/*								;\
2560	 * tagacc = tagacc						;\
2561	 * hatid = hatid						;\
2562	 * tsbarea = tsbarea						;\
2563	 * tte   = hmebp						;\
2564	 * hmemisc  = htag_bspage + hashno + 0 (for rid)		;\
2565	 * hmeblkpa  = CLOBBERED					;\
2566	 * hmeblkva  = scratch						;\
2567	 */								;\
2568	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2569	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
2570									;\
2571	add     tte, HMEBUCK_NEXTPA, hmeblkpa				;\
2572	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2573	add     tte, HMEBUCK_HBLK, hmeblkva				;\
2574	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
2575	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte)			;\
2576									;\
2577label/**/8:								;\
2578	HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa, hmeblkva, 	\
2579		tsbarea, tagacc, tte, label/**/1)			;\
2580	/*								;\
2581	 * tagacc = CLOBBERED						;\
2582	 * tte = CLOBBERED						;\
2583	 * hmeblkpa = hmeblkpa						;\
2584	 * hmeblkva = hmeblkva						;\
2585	 */								;\
2586	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2587	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2588	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2589	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2590	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2591	  nop								;\
2592label/**/4:								;\
2593	/*								;\
2594	 * We have found the hmeblk containing the hment.		;\
2595	 * Now we calculate the corresponding tte.			;\
2596	 *								;\
2597	 * tagacc = tagacc						;\
2598	 * hatid = hatid						;\
2599	 * tte   = clobbered						;\
2600	 * hmeblkpa  = hmeblkpa						;\
2601	 * hmemisc  = hblktag						;\
2602	 * hmeblkva  = hmeblkva 					;\
2603	 * tsbarea = tsbmiss area					;\
2604	 */								;\
2605	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
2606		label/**/2)						;\
2607									;\
2608	/*								;\
2609	 * tagacc = tagacc						;\
2610	 * hatid = hmentoff						;\
2611	 * tte = clobbered						;\
2612	 * hmeblkpa  = hmeblkpa						;\
2613	 * hmemisc  = hblk_misc						;\
2614	 * hmeblkva  = hmeblkva						;\
2615	 * tsbarea = tsbmiss area					;\
2616	 */								;\
2617									;\
2618	add	hatid, SFHME_TTE, hatid					;\
2619	add	hmeblkpa, hatid, hmeblkpa				;\
2620	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2621	brlz,pt tte, label/**/6						;\
2622	  add	hmeblkva, hatid, hmeblkva				;\
2623	btst	HBLK_SZMASK, hmemisc					;\
2624	bnz,a,pt %icc, label/**/7					;\
2625	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2626									;\
2627	/*								;\
2628	 * We found an invalid 8K tte in shme.				;\
2629	 * It may not belong to shme's region since			;\
2630	 * region size/alignment granularity is 8K but different	;\
2631	 * regions don't share hmeblks. Continue the search.		;\
2632	 */								;\
2633	sub	hmeblkpa, hatid, hmeblkpa				;\
2634	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2635	srlx	tagacc, hmeshift, tte					;\
2636	add	hmeblkpa, HMEBLK_NEXT, hmeblkva				;\
2637	ldxa	[hmeblkva]ASI_MEM, hmeblkva				;\
2638	add	hmeblkpa, HMEBLK_NEXTPA, hmeblkpa			;\
2639	ldxa	[hmeblkpa]ASI_MEM, hmeblkpa				;\
2640	MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc)		;\
2641	ba,a,pt	%xcc, label/**/8					;\
2642label/**/6:								;\
2643	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)		;\
2644	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
2645label/**/7:								;\
2646	HMELOCK_EXIT(hatid, hatid, ASI_MEM)	/* drop lock */		;\
2647	set	TTE_SUSPEND, hatid					;\
2648	TTE_SUSPEND_INT_SHIFT(hatid)					;\
2649	btst	tte, hatid						;\
2650	bz,pt	%xcc, foundlabel					;\
2651	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2652									;\
2653	/*								;\
2654	 * Mapping is suspended, so goto suspend label.			;\
2655	 */								;\
2656	ba,pt	%xcc, suspendlabel					;\
2657	  nop
2658
2659	/*
2660	 * KERNEL PROTECTION HANDLER
2661	 *
2662	 * g1 = tsb8k pointer register (clobbered)
2663	 * g2 = tag access register (ro)
2664	 * g3 - g7 = scratch registers
2665	 *
2666	 * Note: This function is patched at runtime for performance reasons.
2667	 * 	 Any changes here require sfmmu_patch_ktsb to be fixed as well.
2668	 */
2669	ENTRY_NP(sfmmu_kprot_trap)
2670	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2671sfmmu_kprot_patch_ktsb_base:
2672	RUNTIME_PATCH_SETX(%g1, %g6)
2673	/* %g1 = contents of ktsb_base or ktsb_pbase */
2674sfmmu_kprot_patch_ktsb_szcode:
2675	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2676
2677	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2678	! %g1 = First TSB entry pointer, as TSB miss handler expects
2679
2680	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2681sfmmu_kprot_patch_ktsb4m_base:
2682	RUNTIME_PATCH_SETX(%g3, %g6)
2683	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2684sfmmu_kprot_patch_ktsb4m_szcode:
2685	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2686
2687	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2688	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2689
2690	CPU_TSBMISS_AREA(%g6, %g7)
2691	HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2692	ba,pt	%xcc, sfmmu_tsb_miss_tt
2693	  nop
2694
2695	/*
2696	 * USER PROTECTION HANDLER
2697	 *
2698	 * g1 = tsb8k pointer register (ro)
2699	 * g2 = tag access register (ro)
2700	 * g3 = faulting context (clobbered, currently not used)
2701	 * g4 - g7 = scratch registers
2702	 */
2703	ALTENTRY(sfmmu_uprot_trap)
2704#ifdef sun4v
2705	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2706	/* %g1 = first TSB entry ptr now, %g2 preserved */
2707
2708	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2709	brlz,pt %g3, 9f				/* check for 2nd TSB */
2710	  nop
2711
2712	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2713	/* %g3 = second TSB entry ptr now, %g2 preserved */
2714
2715#else /* sun4v */
2716#ifdef UTSB_PHYS
2717	/* g1 = first TSB entry ptr */
2718	GET_2ND_TSBREG(%g3)
2719	brlz,pt %g3, 9f			/* check for 2nd TSB */
2720	  nop
2721
2722	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2723	/* %g3 = second TSB entry ptr now, %g2 preserved */
2724#else /* UTSB_PHYS */
2725	brgez,pt %g1, 9f		/* check for 2nd TSB */
2726	  mov	-1, %g3			/* set second tsbe ptr to -1 */
2727
2728	mov	%g2, %g7
2729	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2730	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2731	mov	%g1, %g7
2732	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2733#endif /* UTSB_PHYS */
2734#endif /* sun4v */
27359:
2736	CPU_TSBMISS_AREA(%g6, %g7)
2737	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2738	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2739	  nop
2740
2741	/*
2742	 * Kernel 8K page iTLB miss.  We also get here if we took a
2743	 * fast instruction access mmu miss trap while running in
2744	 * invalid context.
2745	 *
2746	 * %g1 = 8K TSB pointer register (not used, clobbered)
2747	 * %g2 = tag access register (used)
2748	 * %g3 = faulting context id (used)
2749	 * %g7 = TSB tag to match (used)
2750	 */
2751	.align	64
2752	ALTENTRY(sfmmu_kitlb_miss)
2753	brnz,pn %g3, tsb_tl0_noctxt
2754	  nop
2755
2756	/* kernel miss */
2757	/* get kernel tsb pointer */
2758	/* we patch the next set of instructions at run time */
2759	/* NOTE: any changes here require sfmmu_patch_ktsb to be fixed too */
2760iktsbbase:
2761	RUNTIME_PATCH_SETX(%g4, %g5)
2762	/* %g4 = contents of ktsb_base or ktsb_pbase */
2763
2764iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2765	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2766	or	%g4, %g1, %g1			! form tsb ptr
2767	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2768	cmp	%g4, %g7
2769	bne,pn	%xcc, iktsb4mbase		! check 4m ktsb
2770	  srlx    %g2, MMU_PAGESHIFT4M, %g3	! use 4m virt-page as TSB index
2771
2772	andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2773	bz,pn	%icc, exec_fault
2774	  nop
2775	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2776	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2777	retry
2778
2779iktsb4mbase:
2780	RUNTIME_PATCH_SETX(%g4, %g6)
2781	/* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
2782iktsb4m:
2783	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2784	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2785	add	%g4, %g3, %g3			! %g3 = 4m tsbe ptr
2786	ldda	[%g3]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2787	cmp	%g4, %g7
2788	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2789	  andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
2790	bz,pn	%icc, exec_fault
2791	  nop
2792	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2793	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2794	retry
2795
2796	/*
2797	 * Kernel dTLB miss.  We also get here if we took a fast data
2798	 * access mmu miss trap while running in invalid context.
2799	 *
2800	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2801	 *	We select the TSB miss handler to branch to depending on
2802	 *	the virtual address of the access.  In the future it may
2803	 *	be desirable to separate kpm TTEs into their own TSB,
2804	 *	in which case all that needs to be done is to set
2805	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2806	 *	early in the miss if we detect a kpm VA to a new handler.
2807	 *
2808	 * %g1 = 8K TSB pointer register (not used, clobbered)
2809	 * %g2 = tag access register (used)
2810	 * %g3 = faulting context id (used)
2811	 */
2812	.align	64
2813	ALTENTRY(sfmmu_kdtlb_miss)
2814	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2815	  nop
2816
2817	/* Gather some stats for kpm misses in the TLB. */
2818	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2819	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2820
2821	/*
2822	 * Get first TSB offset and look for 8K/64K/512K mapping
2823	 * using the 8K virtual page as the index.
2824	 *
2825	 * We patch the next set of instructions at run time;
2826	 * any changes here require sfmmu_patch_ktsb changes too.
2827	 */
2828dktsbbase:
2829	RUNTIME_PATCH_SETX(%g7, %g6)
2830	/* %g7 = contents of ktsb_base or ktsb_pbase */
2831
2832dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2833	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2834
2835	/*
2836	 * At this point %g1 is our index into the TSB.
2837	 * We just masked off enough bits of the VA depending
2838	 * on our TSB size code.
2839	 */
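	/*
	 * The sllx/srlx pair above is a shift-based mask and scale; in
	 * C it is roughly (a sketch, with "szc" the RUNTIME_PATCHed TSB
	 * size code):
	 *
	 *	entries = TSB_ENTRIES(0) << szc;
	 *	idx = (tagacc >> TAGACC_SHIFT) & (entries - 1);
	 *	offset = idx << TSB_ENTRY_SHIFT;	// %g1 above
	 */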
2840	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2841	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2842	cmp	%g6, %g4			! compare tag
2843	bne,pn	%xcc, dktsb4m_kpmcheck_small
2844	  add	%g7, %g1, %g1			/* form tsb ptr */
2845	TT_TRACE(trace_tsbhit)
2846	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2847	/* trapstat expects tte in %g5 */
2848	retry
2849
2850	/*
2851	 * If kpm is using large pages, the following instruction needs
2852	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2853	 * so that we will probe the 4M TSB regardless of the VA.  In
2854	 * the case kpm is using small pages, we know no large kernel
2855	 * mappings are located above 0x80000000.00000000 so we skip the
2856	 * probe as an optimization.
2857	 */
2858dktsb4m_kpmcheck_small:
2859	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2860	  /* delay slot safe, below */
2861
2862	/*
2863	 * Get second TSB offset and look for 4M mapping
2864	 * using 4M virtual page as the TSB index.
2865	 *
2866	 * Here:
2867	 * %g1 = 8K TSB pointer.  Don't squash it.
2868	 * %g2 = tag access register (we still need it)
2869	 */
2870	srlx	%g2, MMU_PAGESHIFT4M, %g3
2871
2872	/*
2873	 * We patch the next set of instructions at run time;
2874	 * any changes here require sfmmu_patch_ktsb changes too.
2875	 */
2876dktsb4mbase:
2877	RUNTIME_PATCH_SETX(%g7, %g6)
2878	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2879dktsb4m:
2880	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2881	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2882
2883	/*
2884	 * At this point %g3 is our index into the TSB.
2885	 * We just masked off enough bits of the VA depending
2886	 * on our TSB size code.
2887	 */
2888	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2889	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2890	cmp	%g6, %g4			! compare tag
2891
2892dktsb4m_tsbmiss:
2893	bne,pn	%xcc, dktsb4m_kpmcheck
2894	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2895	TT_TRACE(trace_tsbhit)
2896	/* we don't check TTE size here since we assume 4M TSB is separate */
2897	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2898	/* trapstat expects tte in %g5 */
2899	retry
2900
2901	/*
2902	 * So, we failed to find a valid TTE to match the faulting
2903	 * address in either TSB.  There are a few cases that could land
2904	 * us here:
2905	 *
2906	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2907	 *    to sfmmu_tsb_miss_tt to handle the miss.
2908	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2909	 *    4M TSB.  Let segkpm handle it.
2910	 *
2911	 * Note that we shouldn't land here in the case of a kpm VA when
2912	 * kpm_smallpages is active -- we handled that case earlier at
2913	 * dktsb4m_kpmcheck_small.
2914	 *
2915	 * At this point:
2916	 *  g1 = 8K-indexed primary TSB pointer
2917	 *  g2 = tag access register
2918	 *  g3 = 4M-indexed secondary TSB pointer
2919	 */
2920dktsb4m_kpmcheck:
2921	cmp	%g2, %g0
2922	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2923	  nop
2924	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2925	  nop
2926
2927#ifdef sun4v
2928	/*
2929	 * User instruction miss w/ single TSB.
2930	 * The first probe covers 8K, 64K, and 512K page sizes,
2931	 * because 64K and 512K mappings are replicated off the 8K
2932	 * pointer.
2933	 *
2934	 * g1 = tsb8k pointer register
2935	 * g2 = tag access register
2936	 * g3 - g6 = scratch registers
2937	 * g7 = TSB tag to match
2938	 */
2939	.align	64
2940	ALTENTRY(sfmmu_uitlb_fastpath)
2941
2942	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2943	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2944	ba,pn	%xcc, sfmmu_tsb_miss_tt
2945	  mov	-1, %g3
2946
2947	/*
2948	 * User data miss w/ single TSB.
2949	 * The first probe covers 8K, 64K, and 512K page sizes,
2950	 * because 64K and 512K mappings are replicated off the 8K
2951	 * pointer.
2952	 *
2953	 * g1 = tsb8k pointer register
2954	 * g2 = tag access register
2955	 * g3 - g6 = scratch registers
2956	 * g7 = TSB tag to match
2957	 */
2958	.align 64
2959	ALTENTRY(sfmmu_udtlb_fastpath)
2960
2961	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2962	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2963	ba,pn	%xcc, sfmmu_tsb_miss_tt
2964	  mov	-1, %g3
2965
2966	/*
2967	 * User instruction miss w/ multiple TSBs (sun4v).
2968	 * The first probe covers 8K, 64K, and 512K page sizes,
2969	 * because 64K and 512K mappings are replicated off the 8K
2970	 * pointer.  Second probe covers 4M page size only.
2971	 *
2972	 * Just like sfmmu_udtlb_slowpath, except:
2973	 *   o Uses ASI_ITLB_IN
2974	 *   o checks for execute permission
2975	 *   o No ISM prediction.
2976	 *
2977	 * g1 = tsb8k pointer register
2978	 * g2 = tag access register
2979	 * g3 - g6 = scratch registers
2980	 * g7 = TSB tag to match
2981	 */
2982	.align	64
2983	ALTENTRY(sfmmu_uitlb_slowpath)
2984
2985	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2986	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2987	/* g4 - g5 = clobbered here */
2988
2989	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2990	/* g1 = first TSB pointer, g3 = second TSB pointer */
2991	srlx	%g2, TAG_VALO_SHIFT, %g7
2992	PROBE_2ND_ITSB(%g3, %g7)
2993	/* NOT REACHED */
2994
2995#else /* sun4v */
2996
2997	/*
2998	 * User instruction miss w/ multiple TSBs (sun4u).
2999	 * because 64K and 512K mappings are replicated off the 8K
3000	 * because 64K and 512K mappings are replicated off 8K
3001	 * pointer.  Second probe covers 4M page size only.
3002	 *
3003	 * Just like sfmmu_udtlb_slowpath, except:
3004	 *   o Uses ASI_ITLB_IN
3005	 *   o checks for execute permission
3006	 *   o No ISM prediction.
3007	 *
3008	 * g1 = tsb8k pointer register
3009	 * g2 = tag access register
3010	 * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch
3011	 * g4 - g6 = scratch registers
3012	 * g7 = TSB tag to match
3013	 */
3014	.align	64
3015	ALTENTRY(sfmmu_uitlb_slowpath)
3016
3017#ifdef UTSB_PHYS
3018	/*
3019	 * g1 = 1st TSB entry pointer
3020	 * g3 = 2nd TSB base register
3021	 * Need 2nd TSB entry pointer for 2nd probe.
3022	 */
3023	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
3024
3025	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3026#else /* UTSB_PHYS */
3027	mov	%g1, %g3	/* save tsb8k reg in %g3 */
3028	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
3029	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
3030
3031	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
3032	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
3033	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
3034#endif /* UTSB_PHYS */
3035	/* g1 = first TSB pointer, g3 = second TSB pointer */
3036	srlx	%g2, TAG_VALO_SHIFT, %g7
3037	PROBE_2ND_ITSB(%g3, %g7, isynth)
3038	/* NOT REACHED */
3039#endif /* sun4v */
3040
3041	/*
3042	 * User data miss w/ multiple TSBs.
3043	 * because 64K and 512K mappings are replicated off the 8K
3044	 * because 64K and 512K mappings are replicated off 8K
3045	 * pointer.  Second probe covers 4M page size only.
3046	 *
3047	 * We consider probing for 4M pages first if the VA falls
3048	 * in a range that's likely to be ISM.
3049	 *
3050	 * g1 = tsb8k pointer register
3051	 * g2 = tag access register
3052	 * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch
3053	 * g4 - g6 = scratch registers
3054	 * g7 = TSB tag to match
3055	 */
3056	.align 64
3057	ALTENTRY(sfmmu_udtlb_slowpath)
3058
3059	/*
3060	 * Check for ISM.  If it exists, look for 4M mappings in the second TSB
3061	 * first, then probe for other mappings in the first TSB if that fails.
3062	 */
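	/*
	 * The prediction test below amounts to this C sketch; the
	 * signed shift is positive only for VAs at or above the range
	 * commonly used for ISM:
	 *
	 *	if (((int64_t)tagacc >> PREDISM_BASESHIFT) > 0)
	 *		goto udtlb_miss_probesecond;	// likely ISM: 4M first
	 */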
3063	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
3064	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
3065	  mov	%g1, %g3
3066
3067udtlb_miss_probefirst:
3068	/*
3069	 * g1 = 8K TSB pointer register
3070	 * g2 = tag access register
3071	 * g3 = (potentially) second TSB entry ptr
3072	 * g6 = ism pred.
3073	 * g7 = vpg_4m
3074	 */
3075#ifdef sun4v
3076	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
3077	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
3078
3079	/*
3080	 * Here:
3081	 *   g1 = first TSB pointer
3082	 *   g2 = tag access reg
3083	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
3084	 */
3085	brgz,pn	%g6, sfmmu_tsb_miss_tt
3086	  nop
3087#else /* sun4v */
3088#ifndef UTSB_PHYS
3089	mov	%g1, %g4
3090	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
3091#endif /* !UTSB_PHYS */
3092	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
3093
3094	/*
3095	 * Here:
3096	 *   g1 = first TSB pointer
3097	 *   g2 = tag access reg
3098	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
3099	 */
3100	brgz,pn	%g6, sfmmu_tsb_miss_tt
3101	  nop
3102#ifndef UTSB_PHYS
3103	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
3104#endif /* !UTSB_PHYS */
3105	/* fall through in 8K->4M probe order */
3106#endif /* sun4v */
3107
3108udtlb_miss_probesecond:
3109	/*
3110	 * Look in the second TSB for the TTE
3111	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
3112	 * g2 = tag access reg
3113	 * g3 = 8K TSB pointer register
3114	 * g6 = ism pred.
3115	 * g7 = vpg_4m
3116	 */
3117#ifdef sun4v
3118	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
3119	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3120	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
3121#else /* sun4v */
3122#ifdef UTSB_PHYS
3123	GET_2ND_TSBREG(%g3)
3124	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
3125	/* tagacc (%g2) is okay, no need to reload, %g3 = second tsbe ptr */
3126#else /* UTSB_PHYS */
3127	mov	%g3, %g7
3128	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
3129	/* %g2 clobbered, %g3 =second tsbe ptr */
3130	mov	MMU_TAG_ACCESS, %g2
3131	ldxa	[%g2]ASI_DMMU, %g2
3132#endif /* UTSB_PHYS */
3133#endif /* sun4v */
3134
3135	srlx	%g2, TAG_VALO_SHIFT, %g7
3136	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
3137	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
3138	brgz,pn	%g6, udtlb_miss_probefirst
3139	  nop
3140
3141	/* fall through to sfmmu_tsb_miss_tt */
3142
3143	ALTENTRY(sfmmu_tsb_miss_tt)
3144	TT_TRACE(trace_tsbmiss)
3145	/*
3146	 * We get here if there is a TSB miss OR a write protect trap.
3147	 *
3148	 * g1 = First TSB entry pointer
3149	 * g2 = tag access register
3150	 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
3151	 * g4 - g7 = scratch registers
3152	 */
3153
3154	ALTENTRY(sfmmu_tsb_miss)
3155
3156	/*
3157	 * If trapstat is running, we need to shift the %tpc and %tnpc to
3158	 * point to trapstat's TSB miss return code (note that trapstat
3159	 * itself will patch the correct offset to add).
3160	 */
3161	rdpr	%tl, %g7
3162	cmp	%g7, 1
3163	ble,pt	%xcc, 0f
3164	  sethi	%hi(KERNELBASE), %g6
3165	rdpr	%tpc, %g7
3166	or	%g6, %lo(KERNELBASE), %g6
3167	cmp	%g7, %g6
3168	bgeu,pt	%xcc, 0f
3169	/* delay slot safe */
3170
3171	ALTENTRY(tsbmiss_trapstat_patch_point)
3172	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
3173	wrpr	%g7, %tpc
3174	add	%g7, 4, %g7
3175	wrpr	%g7, %tnpc
31760:
3177	CPU_TSBMISS_AREA(%g6, %g7)
3178
3179	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save first tsb pointer */
3180	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save second tsb pointer */
3181
3182	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
3183	brz,a,pn %g3, 1f			/* skip ahead if kernel */
3184	  ldn	[%g6 + TSBMISS_KHATID], %g7
3185	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
3186	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
3187
3188	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
3189
3190	cmp	%g3, INVALID_CONTEXT
3191	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
3192	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
3193
3194#ifdef sun4v
3195	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7	/* clear ctx1 flag set from */
3196	andn	%g7, HAT_CHKCTX1_FLAG, %g7	/* the previous tsb miss    */
3197	stub	%g7, [%g6 + TSBMISS_URTTEFLAGS]
3198#endif /* sun4v */
3199
3200	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
3201	/*
3202	 * The miss wasn't in an ISM segment.
3203	 *
3204	 * %g1, %g3, %g4, %g5, %g7 all clobbered
3205	 * %g2 = (pseudo) tag access
3206	 */
3207
3208	ba,pt	%icc, 2f
3209	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
3210
32111:
3212	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
3213	/*
3214	 * 8K and 64K hash.
3215	 */
32162:
3217
3218	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3219		MMU_PAGESHIFT64K, TTE64K, tsb_l8K, tsb_checktte,
3220		sfmmu_suspend_tl, tsb_512K)
3221	/* NOT REACHED */
3222
3223tsb_512K:
3224	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3225	brz,pn	%g5, 3f
3226	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3227	and	%g4, HAT_512K_FLAG, %g5
3228
3229	/*
3230	 * Note that there is a small window here where we may have
3231	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
3232	 * flag yet, so we will skip searching the 512k hash list.
3233	 * In this case we will end up in pagefault which will find
3234	 * the mapping and return.  So, in this instance we will end up
3235	 * spending a bit more time resolving this TSB miss, but it can
3236	 * only happen once per process and even then, the chances of that
3237	 * are very small, so it's not worth the extra overhead it would
3238	 * take to close this window.
3239	 */
3240	brz,pn	%g5, tsb_4M
3241	  nop
32423:
3243	/*
3244	 * 512K hash
3245	 */
3246
3247	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3248		MMU_PAGESHIFT512K, TTE512K, tsb_l512K, tsb_checktte,
3249		sfmmu_suspend_tl, tsb_4M)
3250	/* NOT REACHED */
3251
3252tsb_4M:
3253	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3254	brz,pn	%g5, 4f
3255	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3256	and	%g4, HAT_4M_FLAG, %g5
3257	brz,pn	%g5, tsb_32M
3258	  nop
32594:
3260	/*
3261	 * 4M hash
3262	 */
3263
3264	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3265		MMU_PAGESHIFT4M, TTE4M, tsb_l4M, tsb_checktte,
3266		sfmmu_suspend_tl, tsb_32M)
3267	/* NOT REACHED */
3268
3269tsb_32M:
3270	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3271#ifdef sun4v
3272	brz,pn	%g5, 6f
3273#else
3274	brz,pn	%g5, tsb_pagefault
3275#endif
3276	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3277	and	%g4, HAT_32M_FLAG, %g5
3278	brz,pn	%g5, tsb_256M
3279	  nop
32805:
3281	/*
3282	 * 32M hash
3283	 */
3284
3285	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3286		MMU_PAGESHIFT32M, TTE32M, tsb_l32M, tsb_checktte,
3287		sfmmu_suspend_tl, tsb_256M)
3288	/* NOT REACHED */
3289
3290#ifdef sun4u
3291#define tsb_shme        tsb_pagefault
3292#endif
3293tsb_256M:
3294	ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
3295	and	%g4, HAT_256M_FLAG, %g5
3296	brz,pn	%g5, tsb_shme
3297	  nop
32986:
3299	/*
3300	 * 256M hash
3301	 */
3302
3303	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3304	    MMU_PAGESHIFT256M, TTE256M, tsb_l256M, tsb_checktte,
3305	    sfmmu_suspend_tl, tsb_shme)
3306	/* NOT REACHED */
3307
3308tsb_checktte:
3309	/*
3310	 * g1 = hblk_misc
3311	 * g2 = tagacc
3312	 * g3 = tte
3313	 * g4 = tte pa
3314	 * g5 = tte va
3315	 * g6 = tsbmiss area
3316	 * g7 = hatid
3317	 */
3318	brlz,a,pt %g3, tsb_validtte
3319	  rdpr	%tt, %g7
3320
3321#ifdef sun4u
3322#undef tsb_shme
3323	ba	tsb_pagefault
3324	  nop
3325#else
3326
3327tsb_shme:
3328	/*
3329	 * g2 = tagacc
3330	 * g6 = tsbmiss area
3331	 */
3332	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
3333	brz,pn	%g5, tsb_pagefault
3334	  nop
3335	ldx	[%g6 + TSBMISS_SHARED_UHATID], %g7	/* g7 = srdp */
3336	brz,pn	%g7, tsb_pagefault
3337	  nop
3338
3339	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3340		MMU_PAGESHIFT64K, TTE64K, tsb_shme_l8K, tsb_shme_checktte,
3341		sfmmu_suspend_tl, tsb_shme_512K)
3342	/* NOT REACHED */
3343
3344tsb_shme_512K:
3345	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3346	and	%g4, HAT_512K_FLAG, %g5
3347	brz,pn	%g5, tsb_shme_4M
3348	  nop
3349
3350	/*
3351	 * 512K hash
3352	 */
3353
3354	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3355		MMU_PAGESHIFT512K, TTE512K, tsb_shme_l512K, tsb_shme_checktte,
3356		sfmmu_suspend_tl, tsb_shme_4M)
3357	/* NOT REACHED */
3358
3359tsb_shme_4M:
3360	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3361	and	%g4, HAT_4M_FLAG, %g5
3362	brz,pn	%g5, tsb_shme_32M
3363	  nop
33644:
3365	/*
3366	 * 4M hash
3367	 */
3368	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3369		MMU_PAGESHIFT4M, TTE4M, tsb_shme_l4M, tsb_shme_checktte,
3370		sfmmu_suspend_tl, tsb_shme_32M)
3371	/* NOT REACHED */
3372
3373tsb_shme_32M:
3374	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3375	and	%g4, HAT_32M_FLAG, %g5
3376	brz,pn	%g5, tsb_shme_256M
3377	  nop
3378
3379	/*
3380	 * 32M hash
3381	 */
3382
3383	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3384		MMU_PAGESHIFT32M, TTE32M, tsb_shme_l32M, tsb_shme_checktte,
3385		sfmmu_suspend_tl, tsb_shme_256M)
3386	/* NOT REACHED */
3387
3388tsb_shme_256M:
3389	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
3390	and	%g4, HAT_256M_FLAG, %g5
3391	brz,pn	%g5, tsb_pagefault
3392	  nop
3393
3394	/*
3395	 * 256M hash
3396	 */
3397
3398	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
3399	    MMU_PAGESHIFT256M, TTE256M, tsb_shme_l256M, tsb_shme_checktte,
3400	    sfmmu_suspend_tl, tsb_pagefault)
3401	/* NOT REACHED */
3402
3403tsb_shme_checktte:
3404
3405	brgez,pn %g3, tsb_pagefault
3406	  rdpr	%tt, %g7
3407	/*
3408	 * g1 = ctx1 flag
3409	 * g3 = tte
3410	 * g4 = tte pa
3411	 * g5 = tte va
3412	 * g6 = tsbmiss area
3413	 * g7 = tt
3414	 */
3415
3416	brz,pt  %g1, tsb_validtte
3417	  nop
3418	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
3419	or	%g1, HAT_CHKCTX1_FLAG, %g1
3420	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
3421
3422	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
3423#endif /* sun4u */
3424
3425tsb_validtte:
3426	/*
3427	 * g3 = tte
3428	 * g4 = tte pa
3429	 * g5 = tte va
3430	 * g6 = tsbmiss area
3431	 * g7 = tt
3432	 */
3433
3434	/*
3435	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
3436	 */
3437	cmp	%g7, FAST_PROT_TT
3438	bne,pt	%icc, 4f
3439	  nop
3440
3441	TTE_SET_REFMOD_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_refmod,
3442	    tsb_protfault)
3443
3444	rdpr	%tt, %g5
3445	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3446#ifdef sun4v
3447	MMU_FAULT_STATUS_AREA(%g7)
3448	ldx	[%g7 + MMFSA_D_ADDR], %g5	/* save fault addr for later */
3449#endif
3450	ba,pt	%xcc, tsb_update_tl1
3451	  nop
3452
34534:
3454	/*
3455	 * If ITLB miss check exec bit.
3456	 * If not set treat as invalid TTE.
3457	 */
3458	cmp     %g7, T_INSTR_MMU_MISS
3459	be,pn	%icc, 5f
3460	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
3461	cmp     %g7, FAST_IMMU_MISS_TT
3462	bne,pt %icc, 3f
3463	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
34645:
3465	bz,pn %icc, tsb_protfault
3466	  nop
3467
34683:
3469	/*
3470	 * Set reference bit if not already set
3471	 */
3472	TTE_SET_REF_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_ref)
3473
3474	/*
3475	 * Now, load into TSB/TLB.  At this point:
3476	 * g3 = tte
3477	 * g4 = patte
3478	 * g6 = tsbmiss area
3479	 */
3480	rdpr	%tt, %g5
3481#ifdef sun4v
3482	MMU_FAULT_STATUS_AREA(%g2)
3483	cmp	%g5, T_INSTR_MMU_MISS
3484	be,a,pt	%icc, 9f
3485	  nop
3486	cmp	%g5, FAST_IMMU_MISS_TT
3487	be,a,pt	%icc, 9f
3488	  nop
3489	add	%g2, MMFSA_D_, %g2
34909:
3491	ldx	[%g2 + MMFSA_CTX_], %g7
3492	sllx	%g7, TTARGET_CTX_SHIFT, %g7
3493	ldx	[%g2 + MMFSA_ADDR_], %g2
3494	mov	%g2, %g5		! save the fault addr for later use
3495	srlx	%g2, TTARGET_VA_SHIFT, %g2
3496	or	%g2, %g7, %g2
3497#else
3498	cmp	%g5, FAST_IMMU_MISS_TT
3499	be,a,pt	%icc, tsb_update_tl1
3500	  ldxa	[%g0]ASI_IMMU, %g2
3501	ldxa	[%g0]ASI_DMMU, %g2
3502#endif
3503tsb_update_tl1:
3504	srlx	%g2, TTARGET_CTX_SHIFT, %g7
3505	brz,pn	%g7, tsb_kernel
3506#ifdef sun4v
3507	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
3508#else
3509	  srlx	%g3, TTE_SZ_SHFT, %g7
3510#endif
3511
3512tsb_user:
3513#ifdef sun4v
3514	cmp	%g7, TTE4M
3515	bge,pn	%icc, tsb_user4m
3516	  nop
3517#else /* sun4v */
3518	cmp	%g7, TTESZ_VALID | TTE4M
3519	be,pn	%icc, tsb_user4m
3520	  srlx	%g3, TTE_SZ2_SHFT, %g7
3521	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
3522#ifdef ITLB_32M_256M_SUPPORT
3523	bnz,pn	%icc, tsb_user4m
3524	  nop
3525#else /* ITLB_32M_256M_SUPPORT */
3526	bnz,a,pn %icc, tsb_user_pn_synth
3527	 cmp	%g5, FAST_IMMU_MISS_TT
3528#endif /* ITLB_32M_256M_SUPPORT */
3529#endif /* sun4v */
3530
3531tsb_user8k:
3532#ifdef sun4v
3533	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3534	and	%g7, HAT_CHKCTX1_FLAG, %g1
3535	brz,a,pn %g1, 1f
3536	  ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = first TSB ptr
3537	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
3538	brlz,a,pn %g1, ptl1_panic			! if no shared tsb
3539	  mov PTL1_NO_SCDTSB8K, %g1			! panic
3540	GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
35411:
3542#else
3543	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = first TSB ptr
3544
3545#ifndef UTSB_PHYS
3546	mov	ASI_N, %g7	! user TSBs accessed by VA
3547	mov	%g7, %asi
3548#endif /* UTSB_PHYS */
3549
3550#endif /* sun4v */
3551
3552	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 5)
3553
3554#ifdef sun4v
3555	rdpr    %tt, %g5
3556	cmp	%g5, T_INSTR_MMU_MISS
3557	be,a,pn	%xcc, 9f
3558	  mov	%g3, %g5
3559#endif /* sun4v */
3560	cmp	%g5, FAST_IMMU_MISS_TT
3561	be,pn	%xcc, 9f
3562	  mov	%g3, %g5
3563
3564	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3565	! trapstat wants TTE in %g5
3566	retry
35679:
3568	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3569	! trapstat wants TTE in %g5
3570	retry
3571
3572tsb_user4m:
3573#ifdef sun4v
3574	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
3575	and	%g7, HAT_CHKCTX1_FLAG, %g1
3576	brz,a,pn %g1, 4f
3577	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = TSB ptr
3578	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)
3579	brlz,a,pn %g1, 5f				! if no shared 2nd tsb
3580	  nop
3581	GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
3582#else
3583	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = TSB ptr
3584#endif
35854:
3586	brlz,pn %g1, 5f	/* Check to see if we have 2nd TSB programmed */
3587	  nop
3588
3589#ifndef UTSB_PHYS
3590	mov	ASI_N, %g7	! user TSBs accessed by VA
3591	mov	%g7, %asi
3592#endif /* UTSB_PHYS */
3593
3594	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 6)
3595
35965:
3597#ifdef sun4v
3598	rdpr	%tt, %g5
3599	cmp	%g5, T_INSTR_MMU_MISS
3600	be,a,pn	%xcc, 9f
3601	  mov	%g3, %g5
3602#endif /* sun4v */
3603	cmp	%g5, FAST_IMMU_MISS_TT
3604	be,pn	%xcc, 9f
3605	  mov	%g3, %g5
3606
3607	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3608	! trapstat wants TTE in %g5
3609	retry
36109:
3611	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3612	! trapstat wants TTE in %g5
3613	retry
3614
3615#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3616	/*
3617	 * Panther ITLB synthesis.
3618	 * The Panther 32M and 256M ITLB code simulates these two large page
3619	 * sizes with 4M pages, to provide support for programs, for example
3620	 * Java, that may copy instructions into a 32M or 256M data page and
3621	 * then execute them. The code below generates the 4M pfn bits and
3622	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3623	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3624	 * are ignored by the hardware.
3625	 *
3626	 * Now, load into TSB/TLB.  At this point:
3627	 * g2 = tagtarget
3628	 * g3 = tte
3629	 * g4 = patte
3630	 * g5 = tt
3631	 * g6 = tsbmiss area
3632	 */
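	/*
	 * A hedged sketch of the synthesis (the actual work is done by the
	 * GET_4M_PFN_OFF macro): the faulting 4M chunk's offset within the
	 * large page supplies the extra physical address bits, roughly
	 *
	 *	pa_off = vaddr & (lpgsz - 1) & ~(MMU_PAGESIZE4M - 1);
	 *
	 * where lpgsz is 32M or 256M; pa_off is OR'd into the TTE before
	 * it is inserted as a 4M entry.
	 */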
3633tsb_user_pn_synth:
3634	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
3635	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
3636	bz,pn %icc, 4b				/* if not, been here before */
3637	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
3638	brlz,a,pn %g1, 5f			/* no 2nd tsb */
3639	  mov	%g3, %g5
3640
3641	mov	MMU_TAG_ACCESS, %g7
3642	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
3643	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
3644
3645	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3646	mov	%g7, %asi
3647	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 4) /* update TSB */
36485:
3649	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3650	retry
3651
3652tsb_user_itlb_synth:
3653	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 = tsbp */
3654
3655	mov	MMU_TAG_ACCESS, %g7
3656	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
3657	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
3658	brlz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
3659	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
3660
3661	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3662	mov	%g7, %asi
3663	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 6) /* update TSB */
36647:
3665	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
3666	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3667	retry
3668#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
3669
3670tsb_kernel:
3671#ifdef sun4v
3672	rdpr	%tt, %g5
3673	cmp	%g7, TTE4M
3674	bge,pn	%icc, 5f
3675#else
3676	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
3677	be,pn	%icc, 5f
3678#endif
3679	  nop
3680	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8k tsbptr
3681	ba,pt	%xcc, 6f
3682	  nop
36835:
3684	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4m tsbptr
3685	brlz,pn	%g1, 3f		/* skip programming if 4m TSB ptr is -1 */
3686	  nop
36876:
3688#ifndef sun4v
3689tsb_kernel_patch_asi:
3690	or	%g0, RUNTIME_PATCH, %g6
3691	mov	%g6, %asi	! XXX avoid writing to %asi !!
3692#endif
3693	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 7)
36943:
3695#ifdef sun4v
3696	cmp	%g5, T_INSTR_MMU_MISS
3697	be,a,pn	%icc, 1f
3698	  mov	%g3, %g5			! trapstat wants TTE in %g5
3699#endif /* sun4v */
3700	cmp	%g5, FAST_IMMU_MISS_TT
3701	be,pn	%icc, 1f
3702	  mov	%g3, %g5			! trapstat wants TTE in %g5
3703	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3704	! trapstat wants TTE in %g5
3705	retry
37061:
3707	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3708	! trapstat wants TTE in %g5
3709	retry
3710
3711tsb_ism:
3712	/*
3713	 * This is an ISM [i|d]tlb miss.  We optimize for largest
3714	 * page size down to smallest.
3715	 *
3716	 * g2 = vaddr + ctx(or ctxtype (sun4v)) aka (pseudo-)tag access
3717	 *	register
3718	 * g3 = ismmap->ism_seg
3719	 * g4 = physical address of ismmap->ism_sfmmu
3720	 * g6 = tsbmiss area
3721	 */
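	/*
	 * Probe order below: 4M first if HAT_4M_FLAG is set, otherwise
	 * 32M, then 256M.  A 32M/256M hash miss falls back to the 4M
	 * hash, and the 4M hash falls back to the 8K/64K hash before
	 * giving up via tsb_pagefault.
	 */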
3722	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
3723	brz,a,pn %g7, ptl1_panic		/* if zero jmp ahead */
3724	  mov	PTL1_BAD_ISM, %g1
3725						/* g5 = pa of imap_vb_shift */
3726	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3727	lduba	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
3728	srlx	%g3, %g4, %g3			/* clr size field */
3729	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
3730	sllx    %g3, %g4, %g3                   /* g3 = ism vbase */
3731	and     %g2, %g1, %g4                   /* g4 = ctx number */
3732	andn    %g2, %g1, %g1                   /* g1 = tlb miss vaddr */
3733	sub     %g1, %g3, %g2                   /* g2 = offset in ISM seg */
3734	or      %g2, %g4, %g2                   /* g2 = (pseudo-)tagacc */
3735	sub     %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5
3736	lduha	[%g5]ASI_MEM, %g4		/* g4 = imap_hatflags */
3737#ifdef sun4v
3738	and	%g4, HAT_CTX1_FLAG, %g5		/* g5 = HAT_CTX1_FLAG bit */
3739	brz,pt %g5, tsb_chk4M_ism
3740	  nop
3741	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g5
3742	or      %g5, HAT_CHKCTX1_FLAG, %g5
3743	stub    %g5, [%g6 + TSBMISS_URTTEFLAGS]
3744	rdpr    %tt, %g5
3745	SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
3746#endif
3747	/*
3748	 * ISM pages are always locked down.
3749	 * If we can't find the tte then pagefault
3750	 * and let the spt segment driver resolve it.
3751	 *
3752	 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
3753	 * g4 = imap_hatflags
3754	 * g6 = tsb miss area
3755	 * g7 = ISM hatid
3756	 */
3757
3758tsb_chk4M_ism:
3759	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
3760	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3761	  nop
3762
3763tsb_ism_32M:
3764	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3765	brz,pn	%g5, tsb_ism_256M
3766	  nop
3767
3768	/*
3769	 * 32M hash.
3770	 */
3771
3772	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT32M,
3773	    TTE32M, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3774	    tsb_ism_4M)
3775	/* NOT REACHED */
3776
3777tsb_ism_32M_found:
3778	brlz,a,pt %g3, tsb_validtte
3779	  rdpr	%tt, %g7
3780	ba,pt	%xcc, tsb_ism_4M
3781	  nop
3782
3783tsb_ism_256M:
3784	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3785	brz,a,pn %g5, ptl1_panic
3786	  mov	PTL1_BAD_ISM, %g1
3787
3788	/*
3789	 * 256M hash.
3790	 */
3791	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT256M,
3792	    TTE256M, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3793	    tsb_ism_4M)
3794
3795tsb_ism_256M_found:
3796	brlz,a,pt %g3, tsb_validtte
3797	  rdpr	%tt, %g7
3798
3799tsb_ism_4M:
3800	/*
3801	 * 4M hash.
3802	 */
3803	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT4M,
3804	    TTE4M, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3805	    tsb_ism_8K)
3806	/* NOT REACHED */
3807
3808tsb_ism_4M_found:
3809	brlz,a,pt %g3, tsb_validtte
3810	  rdpr	%tt, %g7
3811
3812tsb_ism_8K:
3813	/*
3814	 * 8K and 64K hash.
3815	 */
3816
3817	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT64K,
3818	    TTE64K, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3819	    tsb_pagefault)
3820	/* NOT REACHED */
3821
3822tsb_ism_8K_found:
3823	brlz,a,pt %g3, tsb_validtte
3824	  rdpr	%tt, %g7
3825
3826tsb_pagefault:
3827	rdpr	%tt, %g7
3828	cmp	%g7, FAST_PROT_TT
3829	be,a,pn	%icc, tsb_protfault
3830	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3831
3832tsb_protfault:
3833	/*
3834	 * We get here if we couldn't find a valid tte in the hash.
3835	 *
3836	 * If user and we are at tl>1 we go to window handling code.
3837	 *
3838	 * If kernel and the fault is on the same page as our stack
3839	 * pointer, then we know the stack is bad and the trap handler
3840	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3841	 *
3842	 * If this is a kernel trap and tl>1, panic.
3843	 *
3844	 * Otherwise we call pagefault.
3845	 */
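	/*
	 * In rough C terms (an illustrative sketch, not generated code):
	 *
	 *	if (ctx == 0) {				kernel miss
	 *		if (btop(faultaddr) == btop(sp))
	 *			ptl1_panic(PTL1_BAD_STACK);
	 *		if (tl > 1)
	 *			ptl1_panic(PTL1_BAD_KPROT_FAULT or
	 *			    PTL1_BAD_KMISS);
	 *		sfmmu_pagefault();	or DTrace NOFAULT fixup
	 *	} else if (tl > 1) {
	 *		sfmmu_window_trap();
	 *	} else {
	 *		sfmmu_mmu_trap();	or DTrace NOFAULT fixup
	 *	}
	 */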
3846	cmp	%g7, FAST_IMMU_MISS_TT
3847#ifdef sun4v
3848	MMU_FAULT_STATUS_AREA(%g4)
3849	ldx	[%g4 + MMFSA_I_CTX], %g5
3850	ldx	[%g4 + MMFSA_D_CTX], %g4
3851	move	%icc, %g5, %g4
3852	cmp	%g7, T_INSTR_MMU_MISS
3853	move	%icc, %g5, %g4
3854#else
3855	mov	MMU_TAG_ACCESS, %g4
3856	ldxa	[%g4]ASI_DMMU, %g2
3857	ldxa	[%g4]ASI_IMMU, %g5
3858	move	%icc, %g5, %g2
3859	cmp	%g7, T_INSTR_MMU_MISS
3860	move	%icc, %g5, %g2
3861	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
3862#endif
3863	brnz,pn	%g4, 3f				/* skip if not kernel */
3864	  rdpr	%tl, %g5
3865
3866	add	%sp, STACK_BIAS, %g3
3867	srlx	%g3, MMU_PAGESHIFT, %g3
3868	srlx	%g2, MMU_PAGESHIFT, %g4
3869	cmp	%g3, %g4
3870	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
3871	  mov	PTL1_BAD_STACK, %g1
3872
3873	cmp	%g5, 1
3874	ble,pt	%icc, 2f
3875	  nop
3876	TSTAT_CHECK_TL1(2f, %g1, %g2)
3877	rdpr	%tt, %g2
3878	cmp	%g2, FAST_PROT_TT
3879	mov	PTL1_BAD_KPROT_FAULT, %g1
3880	movne	%icc, PTL1_BAD_KMISS, %g1
3881	ba,pt	%icc, ptl1_panic
3882	  nop
3883
38842:
3885	/*
3886	 * We are taking a pagefault in the kernel on a kernel address.  If
3887	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
3888	 * want to call sfmmu_pagefault -- we will instead note that a fault
3889	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
3890	 * (instead of a "retry").  This will step over the faulting
3891	 * instruction.
3892	 */
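	/*
	 * A minimal C sketch of the fixup below (illustrative):
	 *
	 *	if (!(cpu_core[cpuid].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT))
	 *		sfmmu_pagefault();
	 *	cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
	 *	cpu_core[cpuid].cpuc_dtrace_illval = fault_addr;
	 *	done;				skips the faulting instruction
	 */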
3893	CPU_INDEX(%g1, %g2)
3894	set	cpu_core, %g2
3895	sllx	%g1, CPU_CORE_SHIFT, %g1
3896	add	%g1, %g2, %g1
3897	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3898	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3899	bz	sfmmu_pagefault
3900	or	%g2, CPU_DTRACE_BADADDR, %g2
3901	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3902	GET_MMU_D_ADDR(%g3, %g4)
3903	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3904	done
3905
39063:
3907	cmp	%g5, 1
3908	ble,pt	%icc, 4f
3909	  nop
3910	TSTAT_CHECK_TL1(4f, %g1, %g2)
3911	ba,pt	%icc, sfmmu_window_trap
3912	  nop
3913
39144:
3915	/*
3916	 * We are taking a pagefault on a non-kernel address.  If we are in
3917	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3918	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3919	 */
3920	CPU_INDEX(%g1, %g2)
3921	set	cpu_core, %g2
3922	sllx	%g1, CPU_CORE_SHIFT, %g1
3923	add	%g1, %g2, %g1
3924	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3925	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3926	bz	sfmmu_mmu_trap
3927	or	%g2, CPU_DTRACE_BADADDR, %g2
3928	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3929	GET_MMU_D_ADDR(%g3, %g4)
3930	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3931
3932	/*
3933	 * Be sure that we're actually taking this miss from the kernel --
3934	 * otherwise we have managed to return to user-level with
3935	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3936	 */
3937	rdpr	%tstate, %g2
3938	btst	TSTATE_PRIV, %g2
3939	bz,a	ptl1_panic
3940	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3941	done
3942
3943	ALTENTRY(tsb_tl0_noctxt)
3944	/*
3945	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
3946	 * if it is, indicate that we have faulted and issue a done.
3947	 */
3948	CPU_INDEX(%g5, %g6)
3949	set	cpu_core, %g6
3950	sllx	%g5, CPU_CORE_SHIFT, %g5
3951	add	%g5, %g6, %g5
3952	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
3953	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
3954	bz	1f
3955	or	%g6, CPU_DTRACE_BADADDR, %g6
3956	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
3957	GET_MMU_D_ADDR(%g3, %g4)
3958	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
3959
3960	/*
3961	 * Be sure that we're actually taking this miss from the kernel --
3962	 * otherwise we have managed to return to user-level with
3963	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3964	 */
3965	rdpr	%tstate, %g5
3966	btst	TSTATE_PRIV, %g5
3967	bz,a	ptl1_panic
3968	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3969	TSTAT_CHECK_TL1(2f, %g1, %g2);
39702:
3971	done
3972
39731:
3974	rdpr	%tt, %g5
3975	cmp	%g5, FAST_IMMU_MISS_TT
3976#ifdef sun4v
3977	MMU_FAULT_STATUS_AREA(%g2)
3978	be,a,pt	%icc, 2f
3979	  ldx	[%g2 + MMFSA_I_CTX], %g3
3980	cmp	%g5, T_INSTR_MMU_MISS
3981	be,a,pt	%icc, 2f
3982	  ldx	[%g2 + MMFSA_I_CTX], %g3
3983	ldx	[%g2 + MMFSA_D_CTX], %g3
39842:
3985#else
3986	mov	MMU_TAG_ACCESS, %g2
3987	be,a,pt	%icc, 2f
3988	  ldxa	[%g2]ASI_IMMU, %g3
3989	ldxa	[%g2]ASI_DMMU, %g3
39902:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
3991#endif
3992	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
3993	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
3994	rdpr	%tl, %g5
3995	cmp	%g5, 1
3996	ble,pt	%icc, sfmmu_mmu_trap
3997	  nop
3998	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3999	ba,pt	%icc, sfmmu_window_trap
4000	  nop
4001	SET_SIZE(sfmmu_tsb_miss)
4002#endif /* lint */
4003
4004#if defined (lint)
4005/*
4006 * This routine will look for a user or kernel vaddr in the hash
4007 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
4008 * grab any locks.  It should only be used by other sfmmu routines.
4009 */
4010/* ARGSUSED */
4011pfn_t
4012sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
4013{
4014	return(0);
4015}
4016
4017/* ARGSUSED */
4018pfn_t
4019sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
4020{
4021	return(0);
4022}
4023
4024#else /* lint */
4025
4026	ENTRY_NP(sfmmu_vatopfn)
4027 	/*
4028 	 * disable interrupts
4029 	 */
4030 	rdpr	%pstate, %o3
4031#ifdef DEBUG
4032	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
4033#endif
4034	/*
4035	 * disable interrupts to protect the TSBMISS area
4036	 */
4037	andn    %o3, PSTATE_IE, %o5
4038	wrpr    %o5, 0, %pstate
4039
4040	/*
4041	 * o0 = vaddr
4042	 * o1 = sfmmup
4043	 * o2 = ttep
4044	 */
4045	CPU_TSBMISS_AREA(%g1, %o5)
4046	ldn	[%g1 + TSBMISS_KHATID], %o4
4047	cmp	%o4, %o1
4048	bne,pn	%ncc, vatopfn_nokernel
4049	  mov	TTE64K, %g5			/* g5 = rehash # */
4050	mov %g1,%o5				/* o5 = tsbmiss_area */
4051	/*
4052	 * o0 = vaddr
4053	 * o1 & o4 = hatid
4054	 * o2 = ttep
4055	 * o5 = tsbmiss area
4056	 */
4057	mov	HBLK_RANGE_SHIFT, %g6
40581:
4059
4060	/*
4061	 * o0 = vaddr
4062	 * o1 = sfmmup
4063	 * o2 = ttep
4064	 * o3 = old %pstate
4065	 * o4 = hatid
4066	 * o5 = tsbmiss
4067	 * g5 = rehash #
4068	 * g6 = hmeshift
4069	 *
4070	 * The first arg to GET_TTE is actually tagaccess register
4071	 * not just vaddr. Since this call is for kernel we need to clear
4072	 * any lower vaddr bits that would be interpreted as ctx bits.
4073	 */
4074	set     TAGACC_CTX_MASK, %g1
4075	andn    %o0, %g1, %o0
4076	GET_TTE(%o0, %o4, %g1, %g2, %g3, %o5, %g4, %g6, %g5,
4077		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
4078
4079kvtop_hblk_found:
4080	/*
4081	 * o0 = vaddr
4082	 * o1 = sfmmup
4083	 * o2 = ttep
4084	 * g1 = tte
4085	 * g2 = tte pa
4086	 * g3 = tte va
4087	 * o5 = tsbmiss area
4088	 * o1 = hat id
4089	 */
4090	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
4091	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4092	stx %g1,[%o2]				/* put tte into *ttep */
4093	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
4094	/*
4095	 * o0 = vaddr
4096	 * o1 = sfmmup
4097	 * o2 = ttep
4098	 * g1 = pfn
4099	 */
4100	ba,pt	%xcc, 6f
4101	  mov	%g1, %o0
4102
4103kvtop_nohblk:
4104	/*
4105	 * We get here if we couldn't find a valid hblk in the hash.
4106	 * We rehash if necessary.
4107	 */
4108	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
4109#ifdef sun4v
4110	cmp	%g5, MAX_HASHCNT
4111#else
4112	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
4113#endif
4114	be,a,pn	%icc, 6f
4115	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4116	mov	%o1, %o4			/* restore hatid */
4117#ifdef sun4v
4118        add	%g5, 2, %g5
4119	cmp	%g5, 3
4120	move	%icc, MMU_PAGESHIFT4M, %g6
4121	ba,pt	%icc, 1b
4122	movne	%icc, MMU_PAGESHIFT256M, %g6
4123#else
4124        inc	%g5
4125	cmp	%g5, 2
4126	move	%icc, MMU_PAGESHIFT512K, %g6
4127	ba,pt	%icc, 1b
4128	movne	%icc, MMU_PAGESHIFT4M, %g6
4129#endif
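	/*
	 * Rehash progression for the loop above: sun4v rehashes
	 * 1 (8K/64K range) -> 3 (4M) -> 5 (256M); other platforms rehash
	 * 1 -> 2 (512K) -> 3 (4M), since 32M/256M kernel pages are not
	 * supported there.
	 */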
41306:
4131	retl
4132 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4133
4134tsb_suspend:
4135	/*
4136	 * o0 = vaddr
4137	 * o1 = sfmmup
4138	 * o2 = ttep
4139	 * g1 = tte
4140	 * g2 = tte pa
4141	 * g3 = tte va
4142	 * o5 = tsbmiss area (o2 holds ttep, as above)
4143	 */
4144	stx %g1,[%o2]				/* put tte into *ttep */
4145	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
4146	  sub	%g0, 1, %o0			/* output = PFN_INVALID */
4147	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
41488:
4149	retl
4150	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
4151
4152vatopfn_nokernel:
4153	/*
4154	 * This routine does NOT support user addresses.
4155	 * There is a routine in C that supports this.
4156	 * The only reason the C routine does not
4157	 * support kernel addresses as well is that
4158	 * we do va_to_pa while holding the hashlock.
4159	 */
4160 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4161	save	%sp, -SA(MINFRAME), %sp
4162	sethi	%hi(sfmmu_panic3), %o0
4163	call	panic
4164	 or	%o0, %lo(sfmmu_panic3), %o0
4165
4166	SET_SIZE(sfmmu_vatopfn)
4167
4168	/*
4169	 * %o0 = vaddr
4170	 * %o1 = hashno (aka szc)
4171	 *
4173	 * This routine is similar to sfmmu_vatopfn() but will only look for
4174	 * a kernel vaddr in the hash structure for the specified rehash value.
4175	 * It's just an optimization for the case when pagesize for a given
4176	 * va range is already known (e.g. large page heap) and we don't want
4177	 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
4178	 *
4179	 * Returns valid pfn or PFN_INVALID if
4180	 * tte for specified rehash # is not found, invalid or suspended.
4181	 */
4182	ENTRY_NP(sfmmu_kvaszc2pfn)
4183 	/*
4184 	 * disable interrupts
4185 	 */
4186 	rdpr	%pstate, %o3
4187#ifdef DEBUG
4188	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
4189#endif
4190	/*
4191	 * disable interrupts to protect the TSBMISS area
4192	 */
4193	andn    %o3, PSTATE_IE, %o5
4194	wrpr    %o5, 0, %pstate
4195
4196	CPU_TSBMISS_AREA(%g1, %o5)
4197	ldn	[%g1 + TSBMISS_KHATID], %o4
4198	sll	%o1, 1, %g6
4199	add	%g6, %o1, %g6
4200	add	%g6, MMU_PAGESHIFT, %g6
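	/*
	 * The three instructions above compute
	 *	hmeshift = MMU_PAGESHIFT + 3 * hashno
	 * i.e. the page shift for the requested rehash level
	 * (64K, 512K, 4M, ...).
	 */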
4201	/*
4202	 * %o0 = vaddr
4203	 * %o1 = hashno
4204	 * %o3 = old %pstate
4205	 * %o4 = ksfmmup
4206	 * %g1 = tsbmiss area
4207	 * %g6 = hmeshift
4208	 */
4209
4210	/*
4211	 * The first arg to GET_TTE is actually tagaccess register
4212	 * not just vaddr. Since this call is for kernel we need to clear
4213	 * any lower vaddr bits that would be interpreted as ctx bits.
4214	 */
4215	srlx	%o0, MMU_PAGESHIFT, %o0
4216	sllx	%o0, MMU_PAGESHIFT, %o0
4217	GET_TTE(%o0, %o4, %g3, %g4, %g5, %g1, %o5, %g6, %o1,
4218		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
4219		kvaszc2pfn_nohblk)
4220
4221kvaszc2pfn_hblk_found:
4222	/*
4223	 * %g3 = tte
4224	 * %o0 = vaddr
4225	 */
4226	brgez,a,pn %g3, 1f			/* check if tte is invalid */
4227	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4228	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
4229	/*
4230	 * g3 = pfn
4231	 */
4232	ba,pt	%xcc, 1f
4233	  mov	%g3, %o0
4234
4235kvaszc2pfn_nohblk:
4236	mov	-1, %o0
4237
42381:
4239	retl
4240 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4241
4242	SET_SIZE(sfmmu_kvaszc2pfn)
4243
4244#endif /* lint */
4245
4246
4247
4248#if !defined(lint)
4249
4250/*
4251 * kpm lock used between trap level tsbmiss handler and kpm C level.
4252 */
4253#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
4254	mov     0xff, tmp1						;\
4255label1:									;\
4256	casa    [kpmlckp]asi, %g0, tmp1					;\
4257	brnz,pn tmp1, label1						;\
4258	mov     0xff, tmp1						;\
4259	membar  #LoadLoad
4260
4261#define KPMLOCK_EXIT(kpmlckp, asi)					\
4262	membar  #LoadStore|#StoreStore					;\
4263	sta     %g0, [kpmlckp]asi
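/*
 * A hedged C sketch of the lock protocol above (illustrative only; the
 * real code runs at trap level and may address the lock word physically,
 * hence the casa/sta through an ASI):
 *
 *	while (atomic_cas_32(kpmlckp, 0, 0xff) != 0)
 *		continue;			spin until word was 0
 *	membar_consumer();			acquire: #LoadLoad
 *	... critical section ...
 *	membar_exit();				release: #LoadStore|#StoreStore
 *	*kpmlckp = 0;
 */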
4264
4265/*
4266 * Lookup a memseg for a given pfn and if found, return the physical
4267 * address of the corresponding struct memseg in mseg, otherwise
4268 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
4269 * tsbmp, %asi is assumed to be ASI_MEM.
4270 * This lookup is done by strictly traversing only the physical memseg
4271 * linkage. The more generic approach, to check the virtual linkage
4272 * before using the physical (used e.g. with hmehash buckets), cannot
4273	 * be used here. Memory DR operations can run in parallel with this
4274	 * lookup without any locks, and updates of the physical and virtual
4275	 * linkage cannot be done atomically with respect to each other.
4276	 * Because physical address zero can be a valid physical address,
4277	 * MSEG_NULLPTR_PA acts as the "physical NULL" pointer.
4278 */
4279#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
4280	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
4281	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
4282	udivx	pfn, mseg, mseg						;\
4283	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
4284	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
4285	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
4286	add	tmp1, mseg, tmp1					;\
4287	ldxa	[tmp1]%asi, mseg					;\
4288	cmp	mseg, MSEG_NULLPTR_PA					;\
4289	be,pn	%xcc, label/**/1		/* if not found */	;\
4290	  nop								;\
4291	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4292	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4293	blu,pn	%xcc, label/**/1					;\
4294	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4295	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4296	bgeu,pn	%xcc, label/**/1					;\
4297	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
4298	mulx	tmp1, PAGE_SIZE, tmp1					;\
4299	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
4300	add	tmp2, tmp1, tmp1			/* pp */	;\
4301	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
4302	cmp	tmp2, pfn						;\
4303	be,pt	%xcc, label/**/_ok			/* found */	;\
4304label/**/1:								;\
4305	/* brute force lookup */					;\
4306	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
4307	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
4308label/**/2:								;\
4309	cmp	mseg, MSEG_NULLPTR_PA					;\
4310	be,pn	%xcc, label/**/_ok		/* if not found */	;\
4311	  nop								;\
4312	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
4313	cmp	pfn, tmp1			/* pfn - pages_base */	;\
4314	blu,a,pt %xcc, label/**/2					;\
4315	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4316	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
4317	cmp	pfn, tmp2			/* pfn - pages_end */	;\
4318	bgeu,a,pt %xcc, label/**/2					;\
4319	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
4320label/**/_ok:
4321
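/*
 * Illustrative C outline of the macro above (field names abridged; all
 * dereferences are really ldxa's through ASI_MEM on physical addresses):
 *
 *	mseg = msegphash[(pfn / mhash_per_slot) & (SFMMU_N_MEM_SLOTS - 1)];
 *	if (mseg != MSEG_NULLPTR_PA &&
 *	    pfn >= mseg->pages_base && pfn < mseg->pages_end &&
 *	    mseg->pages[pfn - mseg->pages_base].p_pagenum == pfn)
 *		return (mseg);			hash hit, cross-checked
 *	for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA; mseg = mseg->nextpa)
 *		if (pfn >= mseg->pages_base && pfn < mseg->pages_end)
 *			break;			brute force fallback
 *	return (mseg);				may be MSEG_NULLPTR_PA
 *
 * The p_pagenum cross-check is what protects against a stale hash entry
 * while memory DR rewrites the linkage.
 */
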
4322	/*
4323	 * kpm tsb miss handler large pages
4324	 * g1 = 8K kpm TSB entry pointer
4325	 * g2 = tag access register
4326	 * g3 = 4M kpm TSB entry pointer
4327	 */
4328	ALTENTRY(sfmmu_kpm_dtsb_miss)
4329	TT_TRACE(trace_tsbmiss)
4330
4331	CPU_INDEX(%g7, %g6)
4332	sethi	%hi(kpmtsbm_area), %g6
4333	sllx	%g7, KPMTSBM_SHIFT, %g7
4334	or	%g6, %lo(kpmtsbm_area), %g6
4335	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4336
4337	/* check enable flag */
4338	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4339	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4340	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4341	  nop
4342
4343	/* VA range check */
4344	ldx	[%g6 + KPMTSBM_VBASE], %g7
4345	cmp	%g2, %g7
4346	blu,pn	%xcc, sfmmu_tsb_miss
4347	  ldx	[%g6 + KPMTSBM_VEND], %g5
4348	cmp	%g2, %g5
4349	bgeu,pn	%xcc, sfmmu_tsb_miss
4350	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
4351
4352	/*
4353	 * check TL tsbmiss handling flag
4354	 * bump tsbmiss counter
4355	 */
4356	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4357#ifdef	DEBUG
4358	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
4359	inc	%g5
4360	brz,pn	%g3, sfmmu_kpm_exception
4361	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4362#else
4363	inc	%g5
4364	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4365#endif
4366	/*
4367	 * At this point:
4368	 *  g1 = 8K kpm TSB pointer (not used)
4369	 *  g2 = tag access register
4370	 *  g3 = clobbered
4371	 *  g6 = per-CPU kpm tsbmiss area
4372	 *  g7 = kpm_vbase
4373	 */
4374
4375	/* vaddr2pfn */
4376	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4377	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4378	srax    %g4, %g3, %g2			/* which alias range (r) */
4379	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4380	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4381
4382	/*
4383	 * Setup %asi
4384	 * mseg_pa = page_numtomemseg_nolock(pfn)
4385	 * if (mseg_pa == NULL) sfmmu_kpm_exception
4386	 * g2=pfn
4387	 */
4388	mov	ASI_MEM, %asi
4389	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
4390	cmp	%g3, MSEG_NULLPTR_PA
4391	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4392	  nop
4393
4394	/*
4395	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
4396	 * g2=pfn g3=mseg_pa
4397	 */
4398	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
4399	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4400	srlx	%g2, %g5, %g4
4401	sllx	%g4, %g5, %g4
4402	sub	%g4, %g7, %g4
4403	srlx	%g4, %g5, %g4
4404
4405	/*
4406	 * Validate inx value
4407	 * g2=pfn g3=mseg_pa g4=inx
4408	 */
4409#ifdef	DEBUG
4410	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4411	cmp	%g4, %g5			/* inx - nkpmpgs */
4412	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4413	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4414#else
4415	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4416#endif
4417	/*
4418	 * kp = &mseg_pa->kpm_pages[inx]
4419	 */
4420	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
4421	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
4422	add	%g5, %g4, %g5			/* kp */
4423
4424	/*
4425	 * KPMP_HASH(kp)
4426	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
4427	 */
4428	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4429	sub	%g7, 1, %g7			/* mask */
4430	srlx	%g5, %g1, %g1			/* x = kp >> kpmp_shift */
4431	add	%g5, %g1, %g5			/* y = kp + x */
4432	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4433
4434	/*
4435	 * Calculate physical kpm_page pointer
4436	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4437	 */
4438	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
4439	add	%g1, %g4, %g1			/* kp_pa */
4440
4441	/*
4442	 * Calculate physical hash lock address
4443	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
4444	 */
4445	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
4446	sllx	%g5, KPMHLK_SHIFT, %g5
4447	add	%g4, %g5, %g3
4448	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
4449
4450	/*
4451	 * Assemble tte
4452	 * g1=kp_pa g2=pfn g3=hlck_pa
4453	 */
4454#ifdef sun4v
4455	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4456	sllx	%g5, 32, %g5
4457	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4458	or	%g4, TTE4M, %g4
4459	or	%g5, %g4, %g5
4460#else
4461	sethi	%hi(TTE_VALID_INT), %g4
4462	mov	TTE4M, %g5
4463	sllx	%g5, TTE_SZ_SHFT_INT, %g5
4464	or	%g5, %g4, %g5			/* upper part */
4465	sllx	%g5, 32, %g5
4466	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4467	or	%g5, %g4, %g5
4468#endif
4469	sllx	%g2, MMU_PAGESHIFT, %g4
4470	or	%g5, %g4, %g5			/* tte */
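	/*
	 * i.e. tte = valid | sz(4M) | cp | cv | priv | hwwr |
	 *	(pfn << MMU_PAGESHIFT), assembled in the sun4u or sun4v
	 * layout selected above.
	 */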
4471	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4472	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4473
4474	/*
4475	 * tsb dropin
4476	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
4477	 */
4478
4479	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4480	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
4481
4482	/* use C-handler if there's no go for dropin */
4483	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
4484	cmp	%g7, -1
4485	bne,pn	%xcc, 5f	/* use C-handler if there's no go for dropin */
4486	  nop
4487
4488#ifdef	DEBUG
4489	/* double check refcnt */
4490	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
4491	brz,pn	%g7, 5f			/* let C-handler deal with this */
4492	  nop
4493#endif
4494
4495#ifndef sun4v
4496	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4497	mov	ASI_N, %g1
4498	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4499	movnz	%icc, ASI_MEM, %g1
4500	mov	%g1, %asi
4501#endif
4502
4503	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
4504	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
4505
4506	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4507	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4508
4509	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
4510
4511	/* KPMLOCK_EXIT(kpmlckp, asi) */
4512	KPMLOCK_EXIT(%g3, ASI_MEM)
4513
4514	/*
4515	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4516	 * point to trapstat's TSB miss return code (note that trapstat
4517	 * itself will patch the correct offset to add).
4518	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4519	 */
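	/*
	 * Roughly: if (tl > 1 && tpc < KERNELBASE) { tpc += patched
	 * offset; tnpc = tpc + 4; } so that control returns through
	 * trapstat's TSB-miss return code.
	 */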
4520	rdpr	%tl, %g7
4521	cmp	%g7, 1
4522	ble	%icc, 0f
4523	sethi	%hi(KERNELBASE), %g6
4524	rdpr	%tpc, %g7
4525	or	%g6, %lo(KERNELBASE), %g6
4526	cmp	%g7, %g6
4527	bgeu	%xcc, 0f
4528	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
4529	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4530	wrpr	%g7, %tpc
4531	add	%g7, 4, %g7
4532	wrpr	%g7, %tnpc
45330:
4534	retry
45355:
4536	/* g3=hlck_pa */
4537	KPMLOCK_EXIT(%g3, ASI_MEM)
4538	ba,pt	%icc, sfmmu_kpm_exception
4539	  nop
4540	SET_SIZE(sfmmu_kpm_dtsb_miss)
4541
4542	/*
4543	 * kpm tsbmiss handler for smallpages
4544	 * g1 = 8K kpm TSB pointer
4545	 * g2 = tag access register
4546	 * g3 = 4M kpm TSB pointer
4547	 */
4548	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
4549	TT_TRACE(trace_tsbmiss)
4550	CPU_INDEX(%g7, %g6)
4551	sethi	%hi(kpmtsbm_area), %g6
4552	sllx	%g7, KPMTSBM_SHIFT, %g7
4553	or	%g6, %lo(kpmtsbm_area), %g6
4554	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4555
4556	/* check enable flag */
4557	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4558	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4559	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4560	  nop
4561
4562	/*
4563	 * VA range check
4564	 * On fail: goto sfmmu_tsb_miss
4565	 */
4566	ldx	[%g6 + KPMTSBM_VBASE], %g7
4567	cmp	%g2, %g7
4568	blu,pn	%xcc, sfmmu_tsb_miss
4569	  ldx	[%g6 + KPMTSBM_VEND], %g5
4570	cmp	%g2, %g5
4571	bgeu,pn	%xcc, sfmmu_tsb_miss
4572	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
4573
4574	/*
4575	 * check TL tsbmiss handling flag
4576	 * bump tsbmiss counter
4577	 */
4578	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4579#ifdef	DEBUG
4580	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
4581	inc	%g5
4582	brz,pn	%g1, sfmmu_kpm_exception
4583	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4584#else
4585	inc	%g5
4586	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4587#endif
4588	/*
4589	 * At this point:
4590	 *  g1 = clobbered
4591	 *  g2 = tag access register
4592	 *  g3 = 4M kpm TSB pointer (not used)
4593	 *  g6 = per-CPU kpm tsbmiss area
4594	 *  g7 = kpm_vbase
4595	 */
4596
4597	/* vaddr2pfn */
4598	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4599	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4600	srax    %g4, %g3, %g2			/* which alias range (r) */
4601	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4602	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4603
4604	/*
4605	 * Setup %asi
4606	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
4607	 * if (mseg not found) sfmmu_kpm_exception
4608	 * g2=pfn
4609	 */
4610	mov	ASI_MEM, %asi
4611	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
4612	cmp	%g3, MSEG_NULLPTR_PA
4613	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4614	  nop
4615
4616	/*
4617	 * inx = pfn - mseg_pa->kpm_pbase
4618	 * g2=pfn g3=mseg_pa
4619	 */
4620	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4621	sub	%g2, %g7, %g4
4622
4623#ifdef	DEBUG
4624	/*
4625	 * Validate inx value
4626	 * g2=pfn g3=mseg_pa g4=inx
4627	 */
4628	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4629	cmp	%g4, %g5			/* inx - nkpmpgs */
4630	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4631	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4632#else
4633	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4634#endif
4635	/* ksp = &mseg_pa->kpm_spages[inx] */
4636	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
4637	add	%g5, %g4, %g5			/* ksp */
4638
4639	/*
4640	 * KPMP_SHASH(kp)
4641	 * g2=pfn g3=mseg_pa g4=inx g5=ksp g7=kpmp_stable_sz
4642	 */
4643	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4644	sub	%g7, 1, %g7			/* mask */
4645	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
4646	add	%g5, %g1, %g5			/* y = ksp + x */
4647	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4648
4649	/*
4650	 * Calculate physical kpm_spage pointer
4651	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4652	 */
4653	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
4654	add	%g1, %g4, %g1			/* ksp_pa */
4655
4656	/*
4657	 * Calculate physical hash lock address.
4658	 * Note: Changes in kpm_shlk_t must be reflected here.
4659	 * g1=ksp_pa g2=pfn g5=hashinx
4660	 */
4661	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
4662	sllx	%g5, KPMSHLK_SHIFT, %g5
4663	add	%g4, %g5, %g3			/* hlck_pa */
4664
4665	/*
4666	 * Assemble tte
4667	 * g1=ksp_pa g2=pfn g3=hlck_pa
4668	 */
4669	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4670	sllx	%g5, 32, %g5
4671	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4672	or	%g5, %g4, %g5
4673	sllx	%g2, MMU_PAGESHIFT, %g4
4674	or	%g5, %g4, %g5			/* tte */
4675	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4676	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
4677
4678	/*
4679	 * tsb dropin
4680	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte
4681	 */
4682
4683	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4684	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
4685
4686	/* use C-handler if there's no go for dropin */
4687	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
4688	cmp	%g7, -1
4689	bne,pn	%xcc, 5f
4690	  nop
4691
4692#ifndef sun4v
4693	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4694	mov	ASI_N, %g1
4695	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4696	movnz	%icc, ASI_MEM, %g1
4697	mov	%g1, %asi
4698#endif
4699
4700	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
4701	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
4702
4703	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4704	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4705
4706	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
4707
4708	/* KPMLOCK_EXIT(kpmlckp, asi) */
4709	KPMLOCK_EXIT(%g3, ASI_MEM)
4710
4711	/*
4712	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4713	 * point to trapstat's TSB miss return code (note that trapstat
4714	 * itself will patch the correct offset to add).
4715	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4716	 */
4717	rdpr	%tl, %g7
4718	cmp	%g7, 1
4719	ble	%icc, 0f
4720	sethi	%hi(KERNELBASE), %g6
4721	rdpr	%tpc, %g7
4722	or	%g6, %lo(KERNELBASE), %g6
4723	cmp	%g7, %g6
4724	bgeu	%xcc, 0f
4725	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4726	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4727	wrpr	%g7, %tpc
4728	add	%g7, 4, %g7
4729	wrpr	%g7, %tnpc
47300:
4731	retry
47325:
4733	/* g3=hlck_pa */
4734	KPMLOCK_EXIT(%g3, ASI_MEM)
4735	ba,pt	%icc, sfmmu_kpm_exception
4736	  nop
4737	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4738
4739#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4740#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4741#endif
4742
4743#endif /* lint */
4744
4745#ifdef	lint
4746/*
4747 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4748 * Called from C-level, sets/clears "go" indication for trap level handler.
4749 * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
4750 * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
4751 * Assumes khl_mutex is held when called from C-level.
4752 */
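/*
 * In C terms the assembly body below is roughly (illustrative):
 *
 *	disable interrupts;
 *	KPMLOCK_ENTER(khl_lock);
 *	*kp_refcntc = cmd ? -1 : 0;	-1 is the "go" indication
 *	KPMLOCK_EXIT(khl_lock);
 *	restore interrupts;
 */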
4753/* ARGSUSED */
4754void
4755sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4756{
4757}
4758
4759/*
4760 * kpm_smallpages: stores val to the byte at address 'mapped',
4761 * within low-level lock brackets; the old value is returned.
4762 * Called from C-level.
4763 */
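/*
 * Roughly (illustrative):
 *
 *	disable interrupts;
 *	KPMLOCK_ENTER(kshl_lock);
 *	old = *mapped;
 *	*mapped = val;
 *	KPMLOCK_EXIT(kshl_lock);
 *	restore interrupts;
 *	return (old);
 */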
4764/* ARGSUSED */
4765int
4766sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
4767{
4768	return (0);
4769}
4770
4771#else /* lint */
4772
4773	.seg	".data"
4774sfmmu_kpm_tsbmtl_panic:
4775	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
4776	.byte	0
4777sfmmu_kpm_stsbmtl_panic:
4778	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
4779	.byte	0
4780	.align	4
4781	.seg	".text"
4782
4783	ENTRY_NP(sfmmu_kpm_tsbmtl)
4784	rdpr	%pstate, %o3
4785	/*
4786	 * %o0 = &kp_refcntc
4787	 * %o1 = &khl_lock
4788	 * %o2 = 0/1 (off/on)
4789	 * %o3 = pstate save
4790	 */
4791#ifdef DEBUG
4792	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4793	bnz,pt %icc, 1f				/* disabled, panic	 */
4794	  nop
4795	save	%sp, -SA(MINFRAME), %sp
4796	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
4797	call	panic
4798	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4799	ret
4800	restore
48011:
4802#endif /* DEBUG */
4803	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4804
4805	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4806	mov	-1, %o5
4807	brz,a	%o2, 2f
4808	  mov	0, %o5
48092:
4810	sth	%o5, [%o0]
4811	KPMLOCK_EXIT(%o1, ASI_N)
4812
4813	retl
4814	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4815	SET_SIZE(sfmmu_kpm_tsbmtl)
4816
4817	ENTRY_NP(sfmmu_kpm_stsbmtl)
4818	rdpr	%pstate, %o3
4819	/*
4820	 * %o0 = &mapped
4821	 * %o1 = &kshl_lock
4822	 * %o2 = val
4823	 * %o3 = pstate save
4824	 */
4825#ifdef DEBUG
4826	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4827	bnz,pt %icc, 1f				/* disabled, panic	 */
4828	  nop
4829	save	%sp, -SA(MINFRAME), %sp
4830	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
4831	call	panic
4832	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4833	ret
4834	restore
48351:
4836#endif /* DEBUG */
4837	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4838
4839	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4840	ldsb	[%o0], %o5
4841	stb	%o2, [%o0]
4842	KPMLOCK_EXIT(%o1, ASI_N)
4843
4844	mov	%o5, %o0			/* return old val */
4845	retl
4846	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4847	SET_SIZE(sfmmu_kpm_stsbmtl)
4848
4849#endif /* lint */
4850
4851#ifndef lint
4852#ifdef sun4v
4853	/*
4854	 * User/kernel data miss w/ multiple TSBs
4855	 * The first probe covers 8K, 64K, and 512K page sizes,
4856	 * because 64K and 512K mappings are replicated off 8K
4857	 * pointer.  Second probe covers 4M page size only.
4858	 *
4859	 * MMU fault area contains miss address and context.
4860	 */
4861	ALTENTRY(sfmmu_slow_dmmu_miss)
4862	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
4863
4864slow_miss_common:
4865	/*
4866	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4867	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
4868	 */
4869	brnz,pt	%g3, 8f			! check for user context
4870	  nop
4871
4872	/*
4873	 * Kernel miss
4874	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
4875	 * branch to sfmmu_tsb_miss_tt to handle it.
4876	 */
4877	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4878sfmmu_dslow_patch_ktsb_base:
4879	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
4880sfmmu_dslow_patch_ktsb_szcode:
4881	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
4882
4883	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
4884	! %g1 = First TSB entry pointer, as TSB miss handler expects
4885
4886	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4887sfmmu_dslow_patch_ktsb4m_base:
4888	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
4889sfmmu_dslow_patch_ktsb4m_szcode:
4890	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
4891
4892	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
4893	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
4894	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4895	.empty
4896
48978:
4898	/*
4899	 * User miss
4900	 * Get first TSB pointer in %g1
4901	 * Get second TSB pointer (or NULL if no second TSB) in %g3
4902	 * Branch to sfmmu_tsb_miss_tt to handle it
4903	 */
4904	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
4905	/* %g1 = first TSB entry ptr now, %g2 preserved */
4906
4907	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
4908	brlz,pt %g3, sfmmu_tsb_miss_tt		/* done if no 2nd TSB */
4909	  nop
4910
4911	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
4912	/* %g3 = second TSB entry ptr now, %g2 preserved */
49139:
4914	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4915	.empty
4916	SET_SIZE(sfmmu_slow_dmmu_miss)
4917
4918
4919	/*
4920	 * User/kernel instruction miss w/ multiple TSBs
4921	 * The first probe covers 8K, 64K, and 512K page sizes,
4922	 * because 64K and 512K mappings are replicated off 8K
4923	 * pointer.  Second probe covers 4M page size only.
4924	 *
4925	 * MMU fault area contains miss address and context.
4926	 */
4927	ALTENTRY(sfmmu_slow_immu_miss)
4928	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4929	ba,a,pt	%xcc, slow_miss_common
4930	SET_SIZE(sfmmu_slow_immu_miss)
4931
4932#endif /* sun4v */
4933#endif	/* lint */
4934
4935#ifndef lint
4936
4937/*
4938 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4939 */
4940	.seg	".data"
4941	.align	64
4942	.global tsbmiss_area
4943tsbmiss_area:
4944	.skip	(TSBMISS_SIZE * NCPU)
4945
4946	.align	64
4947	.global kpmtsbm_area
4948kpmtsbm_area:
4949	.skip	(KPMTSBM_SIZE * NCPU)
4950#endif	/* lint */
4951