/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

#define	TT_TRACE(label)

#endif /* TRAPTRACE */

#ifndef	lint

#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */

#ifndef	lint
/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBTAG_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc					;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */

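/*
 * Illustrative C sketch (not part of the build) of the index computation
 * GET_TSBE_POINTER performs; the function name is hypothetical:
 *
 *	struct tsbe *
 *	get_tsbe_pointer(struct tsbe *tsbbase, uint64_t tagacc,
 *	    int vpshift, int szc)
 *	{
 *		uint64_t nentries = TSB_ENTRIES(0) << szc;
 *		uint64_t idx = (tagacc >> vpshift) & (nentries - 1);
 *
 *		return ((struct tsbe *)((uintptr_t)tsbbase +
 *		    (idx << TSB_ENTRY_SHIFT)));
 *	}
 */
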
/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)

/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 */

#if defined(UTSB_PHYS)

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]ASI_MEM, tmp1					;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#else /* UTSB_PHYS */

#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
label:									;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label/**/b	/* if locked spin */		;\
	  lda	[tsbep]%asi, tmp1					;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label/**/b	/* didn't lock so try again */	;\
	  lda	[tsbep]%asi, tmp1					;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#endif /* UTSB_PHYS */

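/*
 * Illustrative C sketch (not part of the build) of the spin/compare-and-swap
 * protocol TSB_LOCK_ENTRY implements on the 32-bit tag word, assuming the
 * low bits of TSBTAG_LOCKED are zero as the sethi-built constant implies;
 * the function name is hypothetical:
 *
 *	void
 *	tsb_lock_entry(volatile uint32_t *tagp)
 *	{
 *		uint32_t old;
 *
 *		do {
 *			while ((old = *tagp) == TSBTAG_LOCKED)
 *				continue;
 *		} while (atomic_cas_32(tagp, old, TSBTAG_LOCKED) != old);
 *		membar_producer();
 *	}
 */
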
/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar #StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar #StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */

/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * There is no need to update the TSB and then check for a	;\
	 * valid tte: TSB invalidate will spin until the entry is	;\
	 * unlocked.  Note we always invalidate the hash table before	;\
	 * we unload the TSB.						;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * There is no need to update the TSB and then check for a	;\
	 * valid tte: TSB invalidate will spin until the entry is	;\
	 * unlocked.  Note we always invalidate the hash table before	;\
	 * we unload the TSB.						;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * There is no need to update the TSB and then check for a	;\
	 * valid tte: TSB invalidate will spin until the entry is	;\
	 * unlocked.  Note we always invalidate the hash table before	;\
	 * we unload the TSB.  Then OR the 4M pfn offset into the TTE	;\
	 * and set the exec_perm bit to 0 and the exec_synth bit to 1.	;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1						;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte				;\
	or	tte, TTE_E_SYNTH_INT, tte				;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp1 = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn

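/*
 * Illustrative C sketch (not part of the build) of what GET_4M_PFN_OFF
 * computes: the 4M-page bits of the faulting VA, masked to 3 bits for a
 * 32M page or 6 bits for a 256M page, returned shifted back into place;
 * the function name is hypothetical:
 *
 *	uint64_t
 *	get_4m_pfn_off(uint64_t tte, uint64_t tagaccess)
 *	{
 *		uint64_t bits = tagaccess >> MMU_PAGESHIFT4M;
 *		int szbits = (tte >> TTE_SZ_SHFT) & TTE_SZ_BITS;
 *
 *		bits &= (szbits == 0) ? 0x7 : 0x3f;
 *		return (bits << MMU_PAGESHIFT4M);
 *	}
 */
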
/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp1 = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits.					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte

/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label/**/f					;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */

/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 *	 about this earlier to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */ ;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */

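/*
 * Illustrative C sketch (not part of the build) of the TSB_INVALIDATE
 * algorithm: wait for a locked entry to be unlocked, then atomically mark
 * the tag invalid, but only if the full 64-bit tag still matches the one
 * passed in; the function name is hypothetical:
 *
 *	void
 *	tsb_invalidate(struct tsbe *tsbep, uint64_t tag)
 *	{
 *		volatile uint32_t *tagp = (volatile uint32_t *)tsbep;
 *		uint32_t old;
 *
 *		for (;;) {
 *			while ((old = *tagp) == TSBTAG_LOCKED)
 *				continue;
 *			if (*(volatile uint64_t *)tsbep != tag)
 *				return;
 *			if (atomic_cas_32(tagp, old, TSBTAG_INVALID) == old)
 *				return;
 *		}
 *	}
 */
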
#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif


/*
 * An implementation of setx which will be hot patched at run time.
 * Since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */

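/*
 * Illustrative C sketch (not part of the build) of how the four patched
 * immediates recombine into the 64-bit value: %hh supplies bits 63:42,
 * %hm bits 41:32, %lm bits 31:10 and %lo bits 9:0; the function name is
 * hypothetical:
 *
 *	uint64_t
 *	runtime_patch_setx(uint64_t hh, uint64_t hm, uint64_t lm,
 *	    uint64_t lo)
 *	{
 *		uint64_t tmp = (hh << 10) | hm;
 *		uint64_t dest = (lm << 10) | lo;
 *
 *		return ((tmp << 32) | dest);
 *	}
 */
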

#endif /* lint */


#if defined (lint)

/*
 * sfmmu related subroutines
 */
uint_t
sfmmu_disable_intrs()
{ return(0); }

/* ARGSUSED */
void
sfmmu_enable_intrs(uint_t pstate_save)
{}

/* ARGSUSED */
void
sfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp)
{}

/*
 * Use cas, if tte has changed underneath us then reread and try again.
 * In the case of a retry, it will update sttep with the new original.
 */
/* ARGSUSED */
int
sfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/*
 * Use cas, if tte has changed underneath us then return 1, else return 0
 */
/* ARGSUSED */
int
sfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
{ return(0); }

/* ARGSUSED */
void
sfmmu_copytte(tte_t *sttep, tte_t *dttep)
{}

/*ARGSUSED*/
struct tsbe *
sfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
{ return(0); }

/*ARGSUSED*/
uint64_t
sfmmu_make_tsbtag(caddr_t va)
{ return(0); }

#else	/* lint */

	.seg	".data"
	.global	sfmmu_panic1
sfmmu_panic1:
	.asciz	"sfmmu_asm: interrupts already disabled"

	.global	sfmmu_panic3
sfmmu_panic3:
	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"

	.global	sfmmu_panic4
sfmmu_panic4:
	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"

	.global	sfmmu_panic5
sfmmu_panic5:
	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"

	.global	sfmmu_panic6
sfmmu_panic6:
	.asciz	"sfmmu_asm: interrupts not disabled"

	.global	sfmmu_panic7
sfmmu_panic7:
	.asciz	"sfmmu_asm: kernel as"

	.global	sfmmu_panic8
sfmmu_panic8:
	.asciz	"sfmmu_asm: gnum is zero"

	.global	sfmmu_panic9
sfmmu_panic9:
	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"

        ENTRY(sfmmu_disable_intrs)
        rdpr    %pstate, %o0
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
#endif /* DEBUG */
        retl
          wrpr   %o0, PSTATE_IE, %pstate
        SET_SIZE(sfmmu_disable_intrs)

	ENTRY(sfmmu_enable_intrs)
        retl
          wrpr    %g0, %o0, %pstate
        SET_SIZE(sfmmu_enable_intrs)

/*
 * This routine is called both by resume() and sfmmu_get_ctx() to
 * allocate a new context for the process on an MMU.
 * If allocflag == 1, then alloc ctx when HAT mmu cnum == INVALID.
 * If allocflag == 0, then do not alloc ctx if HAT mmu cnum == INVALID, which
 * is the case when sfmmu_alloc_ctx is called from resume().
 *
 * The caller must disable interrupts before entering this routine.
 * To reduce ctx switch overhead, the code contains both 'fast path' and
 * 'slow path' code. The fast path code covers the common case where only
 * a quick check is needed and the real ctx allocation is not required.
 * It can be done without holding the per-process (PP) lock.
 * The 'slow path' code must be protected by the PP Lock and performs ctx
 * allocation.
 * Hardware context register and HAT mmu cnum are updated accordingly.
 *
 * %o0 - sfmmup
 * %o1 - allocflag
 * %o2 - CPU
 */
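/*
 * Illustrative C sketch (not part of the build) of the fast-path test the
 * code below performs; field and helper names follow the comments in this
 * file and are otherwise hypothetical:
 *
 *	gnum = sfmmup->sfmmu_ctxs[mmuid].gnum;
 *	cnum = sfmmup->sfmmu_ctxs[mmuid].cnum;
 *	if ((cnum == INVALID_CONTEXT && allocflag == 0) ||
 *	    (cnum != INVALID_CONTEXT && gnum == mmu_ctxp->gnum)) {
 *		program_secondary_ctx(cnum);
 *	} else {
 *		take sfmmu_ctx_lock, re-check, and if still stale use
 *		cas to bump mmu_ctxp->cnum for a fresh cnum, falling
 *		back to INVALID_CONTEXT once nctxs is exhausted;
 *	}
 */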
        ENTRY_NP(sfmmu_alloc_ctx)

#ifdef DEBUG
	sethi   %hi(ksfmmup), %o3
	ldx     [%o3 + %lo(ksfmmup)], %o3
	cmp     %o3, %o0
	bne,pt   %xcc, 0f
	  nop

	sethi   %hi(panicstr), %g1		! if kernel as, panic
        ldx     [%g1 + %lo(panicstr)], %g1
        tst     %g1
        bnz,pn  %icc, 7f
          nop

	sethi	%hi(sfmmu_panic7), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic7), %o0

7:
	retl
	  nop

0:
	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
#endif /* DEBUG */

	! load global mmu_ctxp info
	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr
        lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index

	! load global mmu_ctxp gnum
	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum

#ifdef DEBUG
	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
	bne,pt	%xcc, 3f
	  nop

	sethi   %hi(panicstr), %g1	! test if panicstr is already set
        ldx     [%g1 + %lo(panicstr)], %g1
        tst     %g1
        bnz,pn  %icc, 3f
          nop

	sethi	%hi(sfmmu_panic8), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic8), %o0
3:
#endif

	! load HAT sfmmu_ctxs[mmuid] gnum, cnum

	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS

	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 *
	 * Fast path code, do a quick check.
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	brz,pt  %o1, 8f		! allocflag == 0, skip ctx allocation, bail
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 2f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	be,a,pt	%icc, 8f			! gnum unchanged, go to done
	  mov	%g6, %o1

2:
	/*
	 * Grab per process (PP) sfmmu_ctx_lock spinlock,
	 * followed by the 'slow path' code.
	 */
	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
3:
	brz	%g3, 5f
	  nop
4:
	brnz,a,pt       %g3, 4b				! spin if lock is 1
	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
	ba	%xcc, 3b				! retry the lock
	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3    ! %g3 = PP lock

5:
	membar  #LoadLoad
	/*
	 * %g5 = sfmmu gnum returned
	 * %g6 = sfmmu cnum returned
	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
	 * %g4 = scratch
	 */
	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)

	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
	bne,pt	%icc, 1f			! valid hat cnum, check gnum
	  nop

	! cnum == INVALID, check allocflag
	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
	  mov	%g6, %o1

	! (invalid HAT cnum) && (allocflag == 1)
	ba,pt	%icc, 6f
	  nop
1:
	! valid HAT cnum, check gnum
	cmp	%g5, %o4
	be,a,pt	%icc, 2f			! gnum unchanged, go to done
	  mov	%g6, %o1

	ba,pt	%icc, 6f
	  nop
2:
	membar  #LoadStore|#StoreStore
	ba,pt %icc, 8f
	  clrb  [%o0 + SFMMU_CTX_LOCK]
6:
	/*
	 * We get here if we do not have a valid context, or
	 * the HAT gnum does not match global gnum. We hold
	 * sfmmu_ctx_lock spinlock. Allocate that context.
	 *
	 * %o3 = mmu_ctxp
	 */
	add	%o3, MMU_CTX_CNUM, %g3
	ld	[%o3 + MMU_CTX_NCTXS], %g4

	/*
	 * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
	 * %g3 = mmu cnum address
	 * %g4 = mmu nctxs
	 *
	 * %o0 = sfmmup
	 * %o1 = mmu current cnum value (used as new cnum)
	 * %o4 = mmu gnum
	 *
	 * %o5 = scratch
	 */
	ld	[%g3], %o1
0:
	cmp	%o1, %g4
	bl,a,pt %icc, 1f
	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1

	/*
	 * cnum reaches max, update HAT with INVALID
	 */
	set	INVALID_CONTEXT, %o1

	/*
	 * Update hat cnum to INVALID; sun4v sfmmu_load_mmustate checks
	 * hat cnum to determine whether to set the number of TSBs to 0.
	 */
	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
	or	%o4, %o1, %o4
	stx	%o4, [%g2 + SFMMU_CTXS]

	membar  #LoadStore|#StoreStore
	ba,pt	%icc, 8f
	  clrb	[%o0 + SFMMU_CTX_LOCK]
1:
	! %g3 = addr of mmu_ctxp->cnum
	! %o5 = mmu_ctxp->cnum + 1
	cas	[%g3], %o1, %o5
	cmp	%o1, %o5
	bne,a,pn %xcc, 0b	! cas failed
	  ld	[%g3], %o1

#ifdef DEBUG
        set	MAX_SFMMU_CTX_VAL, %o5
	cmp	%o1, %o5
	ble,pt %icc, 2f
	  nop

	sethi	%hi(sfmmu_panic9), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic9), %o0
2:
#endif
	! update hat gnum and cnum
	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
	or	%o4, %o1, %o4
	stx	%o4, [%g2 + SFMMU_CTXS]

	membar  #LoadStore|#StoreStore
	clrb	[%o0 + SFMMU_CTX_LOCK]

8:
	/*
	 * program the secondary context register
	 *
	 * %o1 = cnum
	 */
#ifdef	sun4u
	ldub	[%o0 + SFMMU_CEXT], %o2
	sll	%o2, CTXREG_EXT_SHIFT, %o2
	or	%o1, %o2, %o1
#endif

	mov	MMU_SCONTEXT, %o4
	sethi	%hi(FLUSH_ADDR), %o5
	stxa	%o1, [%o4]ASI_MMU_CTX	         ! set 2nd context reg.
	flush	%o5

	retl
	nop

	SET_SIZE(sfmmu_alloc_ctx)


	ENTRY_NP(sfmmu_modifytte)
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
2:
	ldx	[%o1], %g2			/* modified */
	cmp	%g2, %g3			/* is modified = current? */
	be,a,pt	%xcc,1f				/* yes, don't write */
	stx	%g3, [%o0]			/* update new original */
	casx	[%o2], %g1, %g2
	cmp	%g1, %g2
	be,pt	%xcc, 1f			/* cas succeeded - return */
	  nop
	ldx	[%o2], %g3			/* new current */
	stx	%g3, [%o0]			/* save as new original */
	ba,pt	%xcc, 2b
	  mov	%g3, %g1
1:	retl
	membar	#StoreLoad
	SET_SIZE(sfmmu_modifytte)

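/*
 * Illustrative C sketch (not part of the build) of the casx retry loop in
 * sfmmu_modifytte above; tte_t's 64-bit "ll" view follows common illumos
 * usage, the function name is hypothetical:
 *
 *	void
 *	modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
 *	{
 *		uint64_t orig = sttep->ll;
 *		uint64_t cur = dttep->ll;
 *
 *		for (;;) {
 *			uint64_t mod = stmodttep->ll;
 *
 *			if (mod == cur) {
 *				sttep->ll = cur;
 *				break;
 *			}
 *			if (atomic_cas_64(&dttep->ll, orig, mod) == orig)
 *				break;
 *			cur = dttep->ll;
 *			sttep->ll = cur;
 *			orig = cur;
 *		}
 *	}
 */
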
	ENTRY_NP(sfmmu_modifytte_try)
	ldx	[%o1], %g2			/* modified */
	ldx	[%o2], %g3			/* current */
	ldx	[%o0], %g1			/* original */
	cmp	%g3, %g2			/* is modified = current? */
	be,a,pn %xcc,1f				/* yes, don't write */
	mov	0, %o1				/* as if cas failed. */

	casx	[%o2], %g1, %g2
	membar	#StoreLoad
	cmp	%g1, %g2
	movne	%xcc, -1, %o1			/* cas failed. */
	move	%xcc, 1, %o1			/* cas succeeded. */
1:
	stx	%g2, [%o0]			/* report "current" value */
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_modifytte_try)

	ENTRY_NP(sfmmu_copytte)
	ldx	[%o0], %g1
	retl
	stx	%g1, [%o1]
	SET_SIZE(sfmmu_copytte)


	/*
	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
	 * %o0 = TSB base address (in), pointer to TSB entry (out)
	 * %o1 = vaddr (in)
	 * %o2 = vpshift (in)
	 * %o3 = tsb size code (in)
	 * %o4 = scratch register
	 */
	ENTRY_NP(sfmmu_get_tsbe)
	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
	retl
	nop
	SET_SIZE(sfmmu_get_tsbe)

	/*
	 * Return a TSB tag for the given va.
	 * %o0 = va (in/clobbered)
	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
	 */
	ENTRY_NP(sfmmu_make_tsbtag)
	retl
	srln	%o0, TTARGET_VA_SHIFT, %o0
	SET_SIZE(sfmmu_make_tsbtag)

#endif /* lint */

/*
 * Other sfmmu primitives
 */


#if defined (lint)
void
sfmmu_patch_ktsb(void)
{
}

void
sfmmu_kpm_patch_tlbm(void)
{
}

void
sfmmu_kpm_patch_tsbm(void)
{
}

/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
{
}

/* ARGSUSED */
void
sfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
{
}

/* ARGSUSED */
void
sfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
{
}

/* ARGSUSED */
void
sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
{
}

#else /* lint */

#define	I_SIZE		4

	ENTRY_NP(sfmmu_fix_ktlb_traptable)
	/*
	 * %o0 = start of patch area
	 * %o1 = size code of TSB to patch
	 * %o3 = scratch
	 */
	/* fix sll */
	ld	[%o0], %o3			/* get sll */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write sll */
	flush	%o0
	/* fix srl */
	add	%o0, I_SIZE, %o0		/* goto next instr. */
	ld	[%o0], %o3			/* get srl */
	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
	st	%o3, [%o0]			/* write srl */
	retl
	flush	%o0
	SET_SIZE(sfmmu_fix_ktlb_traptable)

	ENTRY_NP(sfmmu_fixup_ktsbbase)
	/*
	 * %o0 = start of patch area
	 * %o5 = kernel virtual or physical tsb base address
	 * %o2, %o3 are used as scratch registers.
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srl	%o5, 10, %o2			! offset is bits 32:10
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup offset of lduw/ldx */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
	or	%o3, %o2, %o3
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_ktsbbase)

	ENTRY_NP(sfmmu_fixup_setx)
	/*
	 * %o0 = start of patch area
	 * %o4 = 64 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 *
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 *
	 * Note the implementation of setx which is being patched is as follows:
	 *
	 * sethi   %hh(RUNTIME_PATCH), tmp
	 * sethi   %lm(RUNTIME_PATCH), dest
	 * or      tmp, %hm(RUNTIME_PATCH), tmp
	 * or      dest, %lo(RUNTIME_PATCH), dest
	 * sllx    tmp, 32, tmp
	 * nop
	 * or      tmp, dest, dest
	 *
	 * which differs from the implementation in the
	 * "SPARC Architecture Manual"
	 */
	/* fixup sethi instruction */
	ld	[%o0], %o3
	srlx	%o4, 42, %o2			! bits [63:42]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup sethi instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	sllx	%o4, 32, %o2			! clear upper bits
	srlx	%o2, 42, %o2			! bits [31:10]
	or	%o3, %o2, %o3			! set imm22
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	srlx	%o4, 32, %o2			! bits [63:32]
	and	%o2, 0x3ff, %o2			! bits [41:32]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	/* fixup or instruction */
	add	%o0, I_SIZE, %o0		! next instr
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_setx)

	ENTRY_NP(sfmmu_fixup_or)
	/*
	 * %o0 = start of patch area
	 * %o4 = 32 bit value to patch
	 * %o2, %o3 are used as scratch registers.
	 * Note: Assuming that all parts of the instructions which need to be
	 *	 patched correspond to RUNTIME_PATCH (aka 0)
	 */
	ld	[%o0], %o3
	and	%o4, 0x3ff, %o2			! bits [9:0]
	or	%o3, %o2, %o3			! set imm
	st	%o3, [%o0]
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_or)

	ENTRY_NP(sfmmu_fixup_shiftx)
	/*
	 * %o0 = start of patch area
	 * %o4 = signed int immediate value to add to sllx/srlx imm field
	 * %o2, %o3 are used as scratch registers.
	 *
	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
	 * so we do a simple add.  The caller must be careful to prevent
	 * overflow, which could easily occur if the initial value is nonzero!
	 */
	ld	[%o0], %o3			! %o3 = instruction to patch
	and	%o3, 0x3f, %o2			! %o2 = existing imm value
	add	%o2, %o4, %o2			! %o2 = new imm value
	andn	%o3, 0x3f, %o3			! clear old imm value
	and	%o2, 0x3f, %o2			! truncate new imm value
	or	%o3, %o2, %o3			! set new imm value
	st	%o3, [%o0]			! store updated instruction
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_shiftx)

	ENTRY_NP(sfmmu_fixup_mmu_asi)
	/*
	 * Patch imm_asi of all ldda instructions in the MMU
	 * trap handlers.  We search MMU_PATCH_INSTR instructions
	 * starting from the itlb miss handler (trap 0x64).
	 * %o0 = address of tt[0,1]_itlbmiss
	 * %o1 = imm_asi to setup, shifted by appropriate offset.
	 * %o3 = number of instructions to search
	 * %o4 = reserved by caller: called from leaf routine
	 */
1:	ldsw	[%o0], %o2			! load instruction to %o2
	brgez,pt %o2, 2f
	  srl	%o2, 30, %o5
	btst	1, %o5				! test bit 30; skip if not set
	bz,pt	%icc, 2f
	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
	srlx	%o5, 58, %o5			! isolate op3 part of opcode
	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
	brnz,pt	%o5, 2f				! skip if not a match
	  or	%o2, %o1, %o2			! or in imm_asi
	st	%o2, [%o0]			! write patched instruction
2:	dec	%o3
	brnz,a,pt %o3, 1b			! loop until we're done
	  add	%o0, I_SIZE, %o0
	retl
	flush	%o0
	SET_SIZE(sfmmu_fixup_mmu_asi)

	/*
	 * Patch immediate ASI used to access the TSB in the
	 * trap table.
	 * inputs: %o0 = value of ktsb_phys
	 */
	ENTRY_NP(sfmmu_patch_mmu_asi)
	mov	%o7, %o4			! save return pc in %o4
	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
	movrz	%o0, ASI_NQUAD_LD, %o3
	sll	%o3, 5, %o1			! imm_asi offset
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
	  or	%o0, %lo(dktsb), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(dktsb4m), %o0		! to search
	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
	  or	%o0, %lo(dktsb4m), %o0
	mov	6, %o3				! number of instructions
	sethi	%hi(iktsb), %o0			! to search
	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
	  or	%o0, %lo(iktsb), %o0
	mov	%o4, %o7			! restore return pc -- leaf
	retl
	nop
	SET_SIZE(sfmmu_patch_mmu_asi)

	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et al.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4
	set	ktsb_base, %o5
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f
	  nop
	set	ktsb_pbase, %o5
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif

	ldx	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)

	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)

	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0
	MAKE_NOP_INSTR(%o1)
	st	%o1, [%o0]
	flush	%o0
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)

	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	ldsw	[%o1], %o3
	subcc	%o4, %o3, %o4
	bz,pt	%icc, 1f
	  /* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	lduw	[%o1], %o4
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)


	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, 1)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_load_tsbe)

	/*
	 * Flush TSB of a given entry if the tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_unload_tsbe)

	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, 1)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	  membar #StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_load_tsb)

	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg. Locking is required since
	 * kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4
	movrnz	%o4, ASI_MEM, %o3
	mov	%o3, %asi
#endif
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	  membar	#StoreStore|#StoreLoad
	SET_SIZE(sfmmu_kpm_unload_tsb)

#endif /* lint */


#if defined (lint)

/*ARGSUSED*/
pfn_t
sfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
{ return(0); }

#else /* lint */

	ENTRY_NP(sfmmu_ttetopfn)
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0
	SET_SIZE(sfmmu_ttetopfn)

#endif /* !lint */


#if defined (lint)
/*
 * The sfmmu_hblk_hash_add is the assembly primitive for adding hmeblks to
 * the hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa)
{
}

/*
 * The sfmmu_hblk_hash_rm is the assembly primitive to remove hmeblks from the
 * hash list.
 */
/* ARGSUSED */
void
sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
	uint64_t hblkpa, struct hme_blk *prev_hblkp)
{
}
#else /* lint */

/*
 * Functions to grab/release hme bucket list lock.  I only use a byte
 * instead of the whole int because eventually we might want to
 * put some counters on the other bytes (of course, these routines would
 * have to change).  The code that grabs this lock should execute
 * with interrupts disabled and hold the lock for the least amount of time
 * possible.
 */

/*
 * Even though hmeh_listlock is updated using pa, there's no need to flush
 * dcache since hmeh_listlock will be restored to the original value (0)
 * before interrupts are reenabled.
 */

/*
 * For sparcv9, hme hash buckets may not be in the nucleus.  hme hash update
 * routines still use virtual addresses to update the bucket fields. But they
 * must not cause a TLB miss after grabbing the low level bucket lock. To
 * achieve this we must make sure the bucket structure is completely within an
 * 8K page.
 */

#if (HMEBUCK_SIZE & (HMEBUCK_SIZE - 1))
#error - the size of hmehash_bucket structure is not a power of 2
#endif

#define HMELOCK_ENTER(hmebp, tmp1, tmp2, label1, asi)           \
	mov     0xff, tmp2                                      ;\
	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
label1:                                                         ;\
	casa    [tmp1]asi, %g0, tmp2                            ;\
	brnz,pn tmp2, label1                                    ;\
	mov     0xff, tmp2                                      ;\
	membar  #LoadLoad

#define HMELOCK_EXIT(hmebp, tmp1, asi)                          \
	membar  #LoadStore|#StoreStore                          ;\
	add     hmebp, HMEBUCK_LOCK, tmp1                       ;\
	sta     %g0, [tmp1]asi

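/*
 * Illustrative C sketch (not part of the build) of the byte spinlock the
 * HMELOCK macros implement on the bucket's lock byte: swap in a nonzero
 * byte until the previous value is seen to be zero, then store zero to
 * release; the function names are hypothetical:
 *
 *	void
 *	hmelock_enter(volatile uint8_t *lockp)
 *	{
 *		while (atomic_cas_8(lockp, 0, 0xff) != 0)
 *			continue;
 *		membar_consumer();
 *	}
 *
 *	void
 *	hmelock_exit(volatile uint8_t *lockp)
 *	{
 *		membar_exit();
 *		*lockp = 0;
 *	}
 */
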
	.seg	".data"
hblk_add_panic1:
	.ascii	"sfmmu_hblk_hash_add: interrupts disabled"
	.byte	0
hblk_add_panic2:
	.ascii	"sfmmu_hblk_hash_add: va hmeblkp is NULL but pa is not"
	.byte	0
	.align	4
	.seg	".text"

	ENTRY_NP(sfmmu_hblk_hash_add)
	/*
	 * %o0 = hmebp
	 * %o1 = hmeblkp
	 * %o2 = hblkpa
	 */
	rdpr	%pstate, %o5
#ifdef DEBUG
	andcc	%o5, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt %icc, 3f				/* disabled, panic	 */
	  nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic1), %o0
	call	panic
	 or	%o0, %lo(hblk_add_panic1), %o0
	ret
	restore

3:
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */
	mov	%o2, %g1

	/*
	 * g1 = hblkpa
	 */
	ldn	[%o0 + HMEBUCK_HBLK], %o4	/* next hmeblk */
	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = next hblkpa */
#ifdef	DEBUG
	cmp	%o4, %g0
	bne,pt %xcc, 1f
	 nop
	brz,pt %g2, 1f
	 nop
	wrpr	%g0, %o5, %pstate		/* enable interrupts */
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(hblk_add_panic2), %o0
	call	panic
	  or	%o0, %lo(hblk_add_panic2), %o0
	ret
	restore
1:
#endif /* DEBUG */
	/*
	 * We update hmeblks entries before grabbing lock because the stores
	 * could take a tlb miss and require the hash lock.  The buckets
	 * are part of the nucleus so we are cool with those stores.
	 *
	 * if buckets are not part of the nucleus our game is to
	 * not touch any other page via va until we drop the lock.
	 * This guarantees we won't get a tlb miss before the lock release
	 * since interrupts are disabled.
	 */
	stn	%o4, [%o1 + HMEBLK_NEXT]	/* update hmeblk's next */
	stx	%g2, [%o1 + HMEBLK_NEXTPA]	/* update hmeblk's next pa */
	HMELOCK_ENTER(%o0, %o2, %o3, hashadd1, ASI_N)
	stn	%o1, [%o0 + HMEBUCK_HBLK]	/* update bucket hblk next */
	stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* add hmeblk to list */
	HMELOCK_EXIT(%o0, %g2, ASI_N)
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_hblk_hash_add)

	ENTRY_NP(sfmmu_hblk_hash_rm)
	/*
	 * This function removes an hmeblk from the hash chain.
	 * It is written to guarantee we don't take a tlb miss
	 * by using physical addresses to update the list.
	 *
	 * %o0 = hmebp
	 * %o1 = hmeblkp
	 * %o2 = hmeblkp previous pa
	 * %o3 = hmeblkp previous
	 */

	mov	%o3, %o4			/* o4 = hmeblkp previous */

	rdpr	%pstate, %o5
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l4, %g1)
#endif /* DEBUG */
	/*
	 * disable interrupts, clear Address Mask to access 64 bit physaddr
	 */
	andn    %o5, PSTATE_IE, %g1
	wrpr    %g1, 0, %pstate

#ifndef sun4v
	sethi   %hi(dcache_line_mask), %g4
	ld      [%g4 + %lo(dcache_line_mask)], %g4
#endif /* sun4v */

	/*
	 * if buckets are not part of the nucleus our game is to
	 * not touch any other page via va until we drop the lock.
	 * This guarantees we won't get a tlb miss before the lock release
	 * since interrupts are disabled.
	 */
	HMELOCK_ENTER(%o0, %g1, %g3, hashrm1, ASI_N)
	ldn	[%o0 + HMEBUCK_HBLK], %g2	/* first hmeblk in list */
	cmp	%g2, %o1
	bne,pt	%ncc,1f
	 mov	ASI_MEM, %asi
	/*
	 * hmeblk is first on list
	 */
	ldx	[%o0 + HMEBUCK_NEXTPA], %g2	/* g2 = hmeblk pa */
	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
	stn	%o3, [%o0 + HMEBUCK_HBLK]	/* write va */
	ba,pt	%xcc, 2f
	  stx	%g1, [%o0 + HMEBUCK_NEXTPA]	/* write pa */
1:
	/* hmeblk is not first on list */

	mov	%o2, %g3
#ifndef sun4v
	GET_CPU_IMPL(%g2)
	cmp	%g2, CHEETAH_IMPL
	bge,a,pt %icc, hblk_hash_rm_1
	  and	%o4, %g4, %g2
	cmp	%g2, SPITFIRE_IMPL
	blt	%icc, hblk_hash_rm_2		/* no flushing needed for OPL */
	  and	%o4, %g4, %g2
	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev pa from dcache */
	add	%o4, HMEBLK_NEXT, %o4
	and	%o4, %g4, %g2
	ba	hblk_hash_rm_2
	stxa	%g0, [%g2]ASI_DC_TAG		/* flush prev va from dcache */
hblk_hash_rm_1:

	stxa	%g0, [%g3]ASI_DC_INVAL		/* flush prev pa from dcache */
	membar	#Sync
	add     %g3, HMEBLK_NEXT, %g2
	stxa	%g0, [%g2]ASI_DC_INVAL		/* flush prev va from dcache */
hblk_hash_rm_2:
	membar	#Sync
#endif /* sun4v */
	ldxa	[%g3 + HMEBLK_NEXTPA] %asi, %g2	/* g2 = hmeblk pa */
	ldna	[%g2 + HMEBLK_NEXT] %asi, %o3	/* read next hmeblk va */
	ldxa	[%g2 + HMEBLK_NEXTPA] %asi, %g1	/* read next hmeblk pa */
	stna	%o3, [%g3 + HMEBLK_NEXT] %asi	/* write va */
	stxa	%g1, [%g3 + HMEBLK_NEXTPA] %asi	/* write pa */
2:
	HMELOCK_EXIT(%o0, %g2, ASI_N)
	retl
	  wrpr	%g0, %o5, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_hblk_hash_rm)

#endif /* lint */

/*
 * These macros are used to update global sfmmu hme hash statistics
 * in perf critical paths.  They are only enabled in debug kernels or
 * if SFMMU_STAT_GATHER is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_LINKS, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]


#else /* DEBUG || SFMMU_STAT_GATHER */

#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#endif  /* DEBUG || SFMMU_STAT_GATHER */

/*
 * This macro is used to update global sfmmu kstats in non
 * perf critical areas so it is enabled all the time.
 */
#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
	sethi	%hi(sfmmu_global_stat), tmp1				;\
	add	tmp1, statname, tmp1					;\
	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
	inc	tmp2							;\
	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]

/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time
 */
#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
	ld	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	st	tmp1, [tsbarea + stat]

/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time
 */
#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
	lduh	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	stuh	tmp1, [tsbarea + stat]

#if defined(KPM_TLBMISS_STATS_GATHER)
	/*
	 * Count kpm dtlb misses separately to allow a different
	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
	 */
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
	brgez	tagacc, label	/* KPM VA? */				;\
	nop								;\
	CPU_INDEX(tmp1, tsbma)						;\
	sethi	%hi(kpmtsbm_area), tsbma				;\
	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
	/* VA range check */						;\
	ldx	[tsbma + KPMTSBM_VBASE], val				;\
	cmp	tagacc, val						;\
	blu,pn	%xcc, label						;\
	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
	cmp	tagacc, tmp1						;\
	bgeu,pn	%xcc, label						;\
	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
	inc	val							;\
	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
label:
#else
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
#endif	/* KPM_TLBMISS_STATS_GATHER */

#if defined (lint)
/*
 * The following routines are jumped to from the mmu trap handlers to set
 * up the call to systrap.  They are separate routines instead of being
 * part of the handlers because the handlers would otherwise exceed 32
 * instructions, and since this is part of the slow path the jump cost is
 * irrelevant.
 */
void
sfmmu_pagefault(void)
{
}

void
sfmmu_mmu_trap(void)
{
}

void
sfmmu_window_trap(void)
{
}

void
sfmmu_kpm_exception(void)
{
}

#else /* lint */

#ifdef	PTL1_PANIC_DEBUG
	.seg	".data"
	.global	test_ptl1_panic
test_ptl1_panic:
	.word	0
	.align	8

	.seg	".text"
	.align	4
#endif	/* PTL1_PANIC_DEBUG */


	ENTRY_NP(sfmmu_pagefault)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */

#ifdef  PTL1_PANIC_DEBUG
	/* check if we want to test the tl1 panic */
	sethi	%hi(test_ptl1_panic), %g4
	ld	[%g4 + %lo(test_ptl1_panic)], %g1
	st	%g0, [%g4 + %lo(test_ptl1_panic)]
	cmp	%g1, %g0
	bne,a,pn %icc, ptl1_panic
	  or	%g0, PTL1_BAD_DEBUG, %g1
#endif	/* PTL1_PANIC_DEBUG */
1:
	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
	/*
	 * g2 = tag access reg
	 * g3.l = type
	 * g3.h = 0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1
2:
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	SET_SIZE(sfmmu_pagefault)

	ENTRY_NP(sfmmu_mmu_trap)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1:
	/*
	 * g2 = tag access reg
	 * g3 = type
	 */
	sethi	%hi(sfmmu_tsbmiss_exception), %g1
	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_mmu_trap)

	ENTRY_NP(sfmmu_suspend_tl)
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3
	mov	%g5, %g2
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3
	movne	%icc, T_DATA_PROT, %g3
1:
	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
	ba,pt	%xcc, sys_trap
	  mov	PIL_15, %g4
	/*NOTREACHED*/
	SET_SIZE(sfmmu_suspend_tl)

	/*
	 * No %g registers in use at this point.
	 */
	ENTRY_NP(sfmmu_window_trap)
	rdpr	%tpc, %g1
#ifdef sun4v
#ifdef DEBUG
	/* We assume previous %gl was 1 */
	rdpr	%tstate, %g4
	srlx	%g4, TSTATE_GL_SHIFT, %g4
	and	%g4, TSTATE_GL_MASK, %g4
	cmp	%g4, 1
	bne,a,pn %icc, ptl1_panic
	  mov	PTL1_BAD_WTRAP, %g1
#endif /* DEBUG */
	/* user miss at tl>1. better be the window handler or user_rtt */
	/* in user_rtt? */
	set	rtt_fill_start, %g4
	cmp	%g1, %g4
	blu,pn %xcc, 6f
	 .empty
	set	rtt_fill_end, %g4
	cmp	%g1, %g4
	bgeu,pn %xcc, 6f
	 nop
	set	fault_rtt_fn1, %g1
	wrpr	%g0, %g1, %tnpc
	ba,a	7f
6:
	! must save this trap level before descending trap stack
	! no need to save %tnpc, either overwritten or discarded
	! already got it: rdpr	%tpc, %g1
	rdpr	%tstate, %g6
	rdpr	%tt, %g7
	! trap level saved, go get underlying trap type
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
	! restore saved trap level
	wrpr	%g1, %tpc
	wrpr	%g6, %tstate
	wrpr	%g7, %tt
#else /* sun4v */
	/* user miss at tl>1. better be the window handler */
	rdpr	%tl, %g5
	sub	%g5, 1, %g3
	wrpr	%g3, %tl
	rdpr	%tt, %g2
	wrpr	%g5, %tl
#endif /* sun4v */
	and	%g2, WTRAP_TTMASK, %g4
	cmp	%g4, WTRAP_TYPE
	bne,pn	%xcc, 1f
	 nop
	/* tpc should be in the trap table */
	set	trap_table, %g4
	cmp	%g1, %g4
	blt,pn %xcc, 1f
	 .empty
	set	etrap_table, %g4
	cmp	%g1, %g4
	bge,pn %xcc, 1f
	 .empty
	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
	add	%g1, WTRAP_FAULTOFF, %g1
1996	wrpr	%g0, %g1, %tnpc
19977:
1998	/*
	 * Some wbuf handlers will call systrap to resolve the fault.
	 * We pass the trap type so they can figure out the correct parameters.
2001	 * g5 = trap type, g6 = tag access reg
2002	 */
2003
2004	/*
2005	 * only use g5, g6, g7 registers after we have switched to alternate
2006	 * globals.
2007	 */
2008	SET_GL_REG(1)
2009	USE_ALTERNATE_GLOBALS(%g5)
2010	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
2011	rdpr	%tt, %g7
2012	cmp	%g7, FAST_IMMU_MISS_TT
2013	be,a,pn	%icc, ptl1_panic
2014	  mov	PTL1_BAD_WTRAP, %g1
2015	cmp	%g7, T_INSTR_MMU_MISS
2016	be,a,pn	%icc, ptl1_panic
2017	  mov	PTL1_BAD_WTRAP, %g1
2018	mov	T_DATA_PROT, %g5
2019	cmp	%g7, FAST_DMMU_MISS_TT
2020	move	%icc, T_DATA_MMU_MISS, %g5
2021	cmp	%g7, T_DATA_MMU_MISS
2022	move	%icc, T_DATA_MMU_MISS, %g5
2023	! XXXQ AGS re-check out this one
2024	done
20251:
2026	CPU_ADDR(%g1, %g4)
2027	ld	[%g1 + CPU_TL1_HDLR], %g4
2028	brnz,a,pt %g4, sfmmu_mmu_trap
2029	  st	%g0, [%g1 + CPU_TL1_HDLR]
2030	ba,pt	%icc, ptl1_panic
2031	  mov	PTL1_BAD_TRAP, %g1
2032	SET_SIZE(sfmmu_window_trap)
2033
2034	ENTRY_NP(sfmmu_kpm_exception)
2035	/*
2036	 * We have accessed an unmapped segkpm address or a legal segkpm
	 * address that is involved in VAC alias conflict prevention.
2038	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
2039	 * set. If it is, we will instead note that a fault has occurred
2040	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
2041	 * a "retry"). This will step over the faulting instruction.
2042	 * Note that this means that a legal segkpm address involved in
	 * VAC alias conflict prevention (a rare case to begin with)
2044	 * cannot be used in DTrace.
2045	 */
2046	CPU_INDEX(%g1, %g2)
2047	set	cpu_core, %g2
2048	sllx	%g1, CPU_CORE_SHIFT, %g1
2049	add	%g1, %g2, %g1
2050	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
2051	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
2052	bz	0f
2053	or	%g2, CPU_DTRACE_BADADDR, %g2
2054	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
2055	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
2056	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
2057	done
20580:
2059	TSTAT_CHECK_TL1(1f, %g1, %g2)
20601:
2061	SET_GL_REG(1)
2062	USE_ALTERNATE_GLOBALS(%g5)
2063	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
2064	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
2065	/*
2066	 * g2=tagacc g3.l=type g3.h=0
2067	 */
2068	sethi	%hi(trap), %g1
2069	or	%g1, %lo(trap), %g1
2070	ba,pt	%xcc, sys_trap
2071	mov	-1, %g4
2072	SET_SIZE(sfmmu_kpm_exception)
2073
2074#endif /* lint */
2075
2076#if defined (lint)
2077
2078void
2079sfmmu_tsb_miss(void)
2080{
2081}
2082
2083void
2084sfmmu_kpm_dtsb_miss(void)
2085{
2086}
2087
2088void
2089sfmmu_kpm_dtsb_miss_small(void)
2090{
2091}
2092
2093#else /* lint */
2094
2095#if (IMAP_SEG != 0)
2096#error - ism_map->ism_seg offset is not zero
2097#endif
2098
/*
 * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
 * this process.  If so, it copies the ism_seg for that mapping into "ismseg"
 * and branches to label "ismhit".  If this is not an ISM process or not an
 * ISM tlb miss, it falls through.
 *
 * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
 * so that any other threads of this process will not try to walk the ism
 * maps while they are being changed.
 *
 * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
 *       will make sure of that. This means we can terminate our search on
 *       the first zero mapping we find.
 *
 * Parameters:
 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
 * tsbmiss	= address of tsb miss area (in)
 * ismseg	= contents of ism_seg for this ism map (out)
 * ismhat	= physical address of imap_ismhat for this ism map (out)
 * tmp1		= scratch reg (CLOBBERED)
 * tmp2		= scratch reg (CLOBBERED)
 * tmp3		= scratch reg (CLOBBERED)
 * label	= temporary label prefix
 * ismhit	= label to jump to on an ISM tlb miss
 */
#define	ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3,	\
	label, ismhit)							\
2130	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
2131	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
2132	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
2133label/**/1:								;\
2134	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
2135	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
2136label/**/2:								;\
2137	brz,pt  ismseg, label/**/3		/* no mapping */	;\
2138	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
2139	lduha	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
2140	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
2141	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
2142	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
2143	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
2144	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
2145	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
	cmp	tmp2, tmp1		 	/* check offset < size */	;\
2147	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
2148	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
2149									;\
2150	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
2151	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
2152	cmp	ismhat, tmp1						;\
2153	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
2154	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
2155									;\
2156	add	tmp3, IBLK_NEXTPA, tmp1					;\
2157	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
2158	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
2159	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
2160label/**/3:
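
/*
 * Rough C sketch of the walk above (illustrative only; names follow the
 * assym offsets used in the macro, and the real walk reads the block at
 * blkpa through ASI_MEM).  Each ism block holds ISM_MAP_SLOTS maps and
 * a physical pointer to the next block:
 *
 *	for (blkpa = tsbmiss->ismblkpa; blkpa != -1;
 *	    blkpa = blk->iblk_nextpa) {
 *		for (i = 0; i < ISM_MAP_SLOTS; i++) {
 *			seg = blk->iblk_maps[i].ism_seg;
 *			if (seg == 0)
 *				goto done;	// no holes: end of maps
 *			if ((va >> vb_shift) - (seg >> vb_shift) <
 *			    (seg & sz_mask))
 *				goto ismhit;	// va is in this segment
 *		}
 *	}
 * done:
 */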
2161
/*
 * Returns the hme hash bucket (hmebp) given the vaddr and the hatid.
 * It also returns the virtual pg for vaddr (ie. vaddr >> hmeshift)
 * Parameters:
 * tagacc = reg containing virtual address
 * hatid = reg containing sfmmu pointer
 * tsbarea = pointer to the tsbmiss area for this cpu
 * hmeshift = constant/register to shift vaddr to obtain vapg
 * hmebp = register where bucket pointer will be stored
 * vapg = register where virtual page will be stored
 * label = temporary label prefix
 * tmp1, tmp2 = tmp registers
 */
2173
2174
2175#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
2176	vapg, label, tmp1, tmp2)					\
2177	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
2178	brnz,a,pt tmp1, label/**/1					;\
2179	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
2180	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
2181	ba,pt	%xcc, label/**/2					;\
2182	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
2183label/**/1:								;\
2184	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
2185label/**/2:								;\
2186	srlx	tagacc, hmeshift, vapg					;\
2187	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
2188	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
2189	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
2190	add	hmebp, tmp1, hmebp
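
/*
 * Illustrative C equivalent of the bucket computation above (the kernel
 * vs. user hash is chosen by whether the ctx bits of tagacc are zero;
 * the *HASHSZ value loaded is used directly as an AND mask, i.e.
 * nbuckets - 1):
 *
 *	hmebp = hash_base +
 *	    ((((uintptr_t)hatid ^ (vaddr >> hmeshift)) & hash_mask) *
 *	    HMEBUCK_SIZE);
 */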
2191
2192/*
2193 * hashtag includes bspage + hashno (64 bits).
2194 */
2195
2196#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
2197	sllx	vapg, hmeshift, vapg					;\
2198	or	vapg, hashno, hblktag
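
/*
 * i.e. hblktag = ((vaddr >> hmeshift) << hmeshift) | hashno: the
 * base-shifted page address in the upper bits, the rehash level in the
 * low bits.
 */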
2199
/*
 * Function to traverse the hmeblk hash link list and find the matching
 * hmeblk.  The search is done using physical pointers.  It returns the
 * physical and virtual address pointers to the hmeblk that matches the
 * tag provided.
 * Parameters:
 * hmebp	= register that points to hme hash bucket, also used as
 *		  tmp reg (clobbered)
 * hmeblktag	= register with hmeblk tag match
 * hatid	= register with hatid
 * hmeblkpa	= register where physical ptr will be stored
 * hmeblkva	= register where virtual ptr will be stored
 * tsbarea	= pointer to the tsbmiss area for this cpu
 * tmp1		= tmp reg
 * label	= temporary label
 */
2215
2216#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, hmeblkva,	\
2217	tsbarea, tmp1, label)					 	\
2218	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
2219	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
2220	add     hmebp, HMEBUCK_HBLK, hmeblkva				;\
2221	ldxa    [hmeblkva]ASI_MEM, hmeblkva				;\
2222	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2223label/**/1:								;\
2224	brz,pn	hmeblkva, label/**/2					;\
2225	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
2226	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
2227	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
2228	add	hmebp, CLONGSIZE, hmebp					;\
2229	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
2230	xor	tmp1, hmeblktag, tmp1					;\
2231	xor	hmebp, hatid, hmebp					;\
2232	or	hmebp, tmp1, hmebp					;\
2233	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
2234	  add	hmeblkpa, HMEBLK_NEXT, hmebp				;\
2235	ldna	[hmebp]ASI_MEM, hmeblkva	/* hmeblk ptr va */	;\
2236	add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
2237	ba,pt	%xcc, label/**/1					;\
2238	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
2239label/**/2:
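
/*
 * The tag comparison above uses an xor/xor/or idiom: both 64-bit tag
 * words are xor'd with their expected values and the results or'd
 * together, so a single branch register is zero iff the whole tag
 * matches.  Roughly (illustrative names; the real walk reads through
 * ASI_MEM via physical pointers):
 *
 *	while (hmeblkva != NULL) {
 *		if (((tagword0 ^ hmeblktag) | (tagword1 ^ hatid)) == 0)
 *			break;			// hit
 *		hmeblkva = hblk->hblk_next;
 *		hmeblkpa = hblk->hblk_nextpa;
 *	}
 */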
2240
2241
2242#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
2243#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
2244#endif
2245
/*
 * HMEBLK_TO_HMENT is a macro that given an hmeblk and a vaddr returns
 * the offset for the corresponding hment.
 * Parameters:
 * vaddr = register with virtual address
 * hmeblkpa = physical pointer to hme_blk
 * hmentoff = register where hment offset will be stored
 * tmp1 = tmp reg
 * label1 = temporary label
 */
2256#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, tmp1, label1)	\
2257	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
2258	lda	[hmentoff]ASI_MEM, tmp1 				;\
2259	andcc	tmp1, HBLK_SZMASK, %g0	 /* tmp1 = get_hblk_sz(%g5) */	;\
2260	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
2261	  or	%g0, HMEBLK_HME1, hmentoff				;\
2262	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
2263	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
2264	sllx	tmp1, SFHME_SHIFT, tmp1					;\
2265	add	tmp1, HMEBLK_HME1, hmentoff				;\
2266label1:
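
/*
 * Illustrative C equivalent: an 8K hmeblk holds NHMENTS hments indexed
 * by the low page-number bits of the vaddr; larger page sizes use the
 * single hment at offset HMEBLK_HME1:
 *
 *	if (get_hblk_sz(hblk) != TTE8K)
 *		hmentoff = HMEBLK_HME1;
 *	else
 *		hmentoff = HMEBLK_HME1 +
 *		    (((vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1)) <<
 *		    SFHME_SHIFT);
 */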
2267
2268/*
2269 * GET_TTE is a macro that returns a TTE given a tag and hatid.
2270 *
2271 * tagacc	= (pseudo-)tag access register (in)
2272 * hatid	= sfmmu pointer for TSB miss (in)
2273 * tte		= tte for TLB miss if found, otherwise clobbered (out)
2274 * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
2275 * hmeblkva	= VA of hment if found, otherwise clobbered (out)
2276 * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
2277 * hmentoff	= temporarily stores hment offset (clobbered)
2278 * hmeshift	= constant/register to shift VA to obtain the virtual pfn
2279 *		  for this page size.
2280 * hashno	= constant/register hash number
2281 * label	= temporary label for branching within macro.
2282 * foundlabel	= label to jump to when tte is found.
2283 * suspendlabel= label to jump to when tte is suspended.
2284 * exitlabel	= label to jump to when tte is not found.  The hmebp lock
2285 *		  is still held at this time.
2286 *
2287 * The caller should set up the tsbmiss->scratch[2] field correctly before
 * calling this function (aka TSBMISS_SCRATCH + TSBMISS_HATID).
2289 */
2290#define GET_TTE(tagacc, hatid, tte, hmeblkpa, hmeblkva, tsbarea, hmentoff, \
2291		hmeshift, hashno, label, foundlabel, suspendlabel, exitlabel) \
2292									;\
2293	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
2294	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
2295	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
2296		hmeblkpa, label/**/5, hmentoff, hmeblkva)		;\
2297									;\
2298	/*								;\
2299	 * tagacc = tagacc						;\
2300	 * hatid = hatid						;\
2301	 * tsbarea = tsbarea						;\
2302	 * tte   = hmebp (hme bucket pointer)				;\
2303	 * hmeblkpa  = vapg  (virtual page)				;\
2304	 * hmentoff, hmeblkva = scratch					;\
2305	 */								;\
2306	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmentoff)	;\
2307									;\
2308	/*								;\
2309	 * tagacc = tagacc						;\
2310	 * hatid = hatid						;\
2311	 * tte   = hmebp						;\
2312	 * hmeblkpa  = CLOBBERED					;\
2313	 * hmentoff  = htag_bspage & hashno				;\
2314	 * hmeblkva  = scratch						;\
2315	 */								;\
2316	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
2317	HMELOCK_ENTER(tte, hmeblkpa, hmeblkva, label/**/3, ASI_MEM)	;\
2318	HMEHASH_SEARCH(tte, hmentoff, hatid, hmeblkpa, hmeblkva, 	\
2319		tsbarea, tagacc, label/**/1)				;\
2320	/*								;\
2321	 * tagacc = CLOBBERED						;\
2322	 * tte = CLOBBERED						;\
2323	 * hmeblkpa = hmeblkpa						;\
2324	 * hmeblkva = hmeblkva						;\
2325	 */								;\
2326	brnz,pt	hmeblkva, label/**/4	/* branch if hmeblk found */	;\
2327	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
2328	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmeblkva	;\
2329	HMELOCK_EXIT(hmeblkva, hmeblkva, ASI_MEM)  /* drop lock */	;\
2330	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
2331	  nop								;\
2332label/**/4:								;\
2333	/*								;\
2334	 * We have found the hmeblk containing the hment.		;\
2335	 * Now we calculate the corresponding tte.			;\
2336	 *								;\
2337	 * tagacc = tagacc						;\
2338	 * hatid = clobbered						;\
2339	 * tte   = hmebp						;\
2340	 * hmeblkpa  = hmeblkpa						;\
2341	 * hmentoff  = hblktag						;\
2342	 * hmeblkva  = hmeblkva 					;\
2343	 */								;\
2344	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hmentoff, hatid, label/**/2)	;\
2345									;\
2346	add	hmentoff, SFHME_TTE, hmentoff				;\
2347	add	hmeblkpa, hmentoff, hmeblkpa				;\
2348	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
2349	add	hmeblkva, hmentoff, hmeblkva				;\
2350	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
2351	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hmentoff ;\
2352	HMELOCK_EXIT(hmentoff, hmentoff, ASI_MEM)	/* drop lock */	;\
2353	set	TTE_SUSPEND, hmentoff					;\
2354	TTE_SUSPEND_INT_SHIFT(hmentoff)					;\
2355	btst	tte, hmentoff						;\
2356	bz,pt	%xcc, foundlabel					;\
2357	  nop								;\
2358									;\
2359	/*								;\
2360	 * Mapping is suspended, so goto suspend label.			;\
2361	 */								;\
2362	ba,pt	%xcc, suspendlabel					;\
2363	  nop
2364
2365	/*
2366	 * KERNEL PROTECTION HANDLER
2367	 *
2368	 * g1 = tsb8k pointer register (clobbered)
2369	 * g2 = tag access register (ro)
2370	 * g3 - g7 = scratch registers
2371	 *
2372	 * Note: This function is patched at runtime for performance reasons.
	 *	 Any changes here require a matching fix in sfmmu_patch_ktsb.
2374	 */
2375	ENTRY_NP(sfmmu_kprot_trap)
2376	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2377sfmmu_kprot_patch_ktsb_base:
2378	RUNTIME_PATCH_SETX(%g1, %g6)
2379	/* %g1 = contents of ktsb_base or ktsb_pbase */
2380sfmmu_kprot_patch_ktsb_szcode:
2381	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
2382
2383	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
2384	! %g1 = First TSB entry pointer, as TSB miss handler expects
2385
2386	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
2387sfmmu_kprot_patch_ktsb4m_base:
2388	RUNTIME_PATCH_SETX(%g3, %g6)
2389	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
2390sfmmu_kprot_patch_ktsb4m_szcode:
2391	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
2392
2393	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
2394	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
2395
2396	CPU_TSBMISS_AREA(%g6, %g7)
2397	HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
2398	ba,pt	%xcc, sfmmu_tsb_miss_tt
2399	  nop
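
	/*
	 * The RUNTIME_PATCH_SETX sequences and RUNTIME_PATCH constants
	 * above are placeholders; sfmmu_patch_ktsb() rewrites them at
	 * startup with the actual TSB base addresses and size codes, so
	 * this hot path gets the TSB parameters as immediates instead
	 * of loading them from memory.
	 */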
2400
2401	/*
2402	 * USER PROTECTION HANDLER
2403	 *
2404	 * g1 = tsb8k pointer register (ro)
2405	 * g2 = tag access register (ro)
2406	 * g3 = faulting context (clobbered, currently not used)
2407	 * g4 - g7 = scratch registers
2408	 */
2409	ALTENTRY(sfmmu_uprot_trap)
2410#ifdef sun4v
2411	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2412	/* %g1 = first TSB entry ptr now, %g2 preserved */
2413
2414	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
2415	brlz,pt %g3, 9f			/* check for 2nd TSB */
2416	  mov	%g0, %g3		/* clear second tsbe ptr */
2417
2418	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2419	/* %g3 = second TSB entry ptr now, %g2 preserved */
2420
2421#else /* sun4v */
2422#ifdef UTSB_PHYS
2423	/* g1 = first TSB entry ptr */
2424	GET_2ND_TSBREG(%g3)
2425	brlz,a,pt %g3, 9f		/* check for 2nd TSB */
2426	  mov	%g0, %g3		/* clear second tsbe ptr */
2427
2428	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2429	/* %g3 = second TSB entry ptr now, %g2 preserved */
2430#else /* UTSB_PHYS */
2431	brgez,pt %g1, 9f		/* check for 2nd TSB */
2432	  mov	%g0, %g3		/* clear second tsbe ptr */
2433
2434	mov	%g2, %g7
2435	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
2436	/* %g3 = second TSB entry ptr now, %g7 clobbered */
2437	mov	%g1, %g7
2438	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
2439#endif /* UTSB_PHYS */
2440#endif /* sun4v */
24419:
2442	CPU_TSBMISS_AREA(%g6, %g7)
2443	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
2444	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
2445	  nop
2446
2447	/*
2448	 * Kernel 8K page iTLB miss.  We also get here if we took a
2449	 * fast instruction access mmu miss trap while running in
2450	 * invalid context.
2451	 *
2452	 * %g1 = 8K TSB pointer register (not used, clobbered)
2453	 * %g2 = tag access register (used)
2454	 * %g3 = faulting context id (used)
2455	 * %g7 = 4M virtual page number for tag matching  (used)
2456	 */
2457	.align	64
2458	ALTENTRY(sfmmu_kitlb_miss)
2459	brnz,pn %g3, tsb_tl0_noctxt
2460	  nop
2461
2462	/* kernel miss */
2463	/* get kernel tsb pointer */
2464	/* we patch the next set of instructions at run time */
	/* NOTE: any changes here require sfmmu_patch_ktsb changes too */
2466iktsbbase:
2467	RUNTIME_PATCH_SETX(%g4, %g5)
2468	/* %g4 = contents of ktsb_base or ktsb_pbase */
2469
2470iktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2471	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2472	or	%g4, %g1, %g1			! form tsb ptr
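	/*
	 * The sllx/srlx pair above both masks and scales in two
	 * instructions: the left shift discards the bits above the TSB
	 * index, the right shift discards the page-offset bits and
	 * leaves the index multiplied by the TSB entry size.  Roughly:
	 *
	 *	off = ((va >> TAGACC_SHIFT) & (nentries - 1)) <<
	 *	    TSB_ENTRY_SHIFT;
	 */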
2473	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
2474	cmp	%g4, %g7
2475	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
2476	  andcc %g5, TTE_EXECPRM_INT, %g0	! check exec bit
2477	bz,pn	%icc, exec_fault
2478	  nop
2479	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
2480	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2481	retry
2482
2483	/*
2484	 * Kernel dTLB miss.  We also get here if we took a fast data
2485	 * access mmu miss trap while running in invalid context.
2486	 *
2487	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
2488	 *	We select the TSB miss handler to branch to depending on
2489	 *	the virtual address of the access.  In the future it may
2490	 *	be desirable to separate kpm TTEs into their own TSB,
2491	 *	in which case all that needs to be done is to set
2492	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
2493	 *	early in the miss if we detect a kpm VA to a new handler.
2494	 *
2495	 * %g1 = 8K TSB pointer register (not used, clobbered)
2496	 * %g2 = tag access register (used)
2497	 * %g3 = faulting context id (used)
2498	 */
2499	.align	64
2500	ALTENTRY(sfmmu_kdtlb_miss)
2501	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
2502	  nop
2503
2504	/* Gather some stats for kpm misses in the TLB. */
2505	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
2506	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
2507
2508	/*
2509	 * Get first TSB offset and look for 8K/64K/512K mapping
2510	 * using the 8K virtual page as the index.
2511	 *
2512	 * We patch the next set of instructions at run time;
2513	 * any changes here require sfmmu_patch_ktsb changes too.
2514	 */
2515dktsbbase:
2516	RUNTIME_PATCH_SETX(%g7, %g6)
2517	/* %g7 = contents of ktsb_base or ktsb_pbase */
2518
2519dktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
2520	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
2521
2522	/*
2523	 * At this point %g1 is our index into the TSB.
2524	 * We just masked off enough bits of the VA depending
2525	 * on our TSB size code.
2526	 */
2527	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2528	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2529	cmp	%g6, %g4			! compare tag
2530	bne,pn	%xcc, dktsb4m_kpmcheck_small
2531	  add	%g7, %g1, %g1			/* form tsb ptr */
2532	TT_TRACE(trace_tsbhit)
2533	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2534	/* trapstat expects tte in %g5 */
2535	retry
2536
2537	/*
2538	 * If kpm is using large pages, the following instruction needs
2539	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
2540	 * so that we will probe the 4M TSB regardless of the VA.  In
2541	 * the case kpm is using small pages, we know no large kernel
2542	 * mappings are located above 0x80000000.00000000 so we skip the
2543	 * probe as an optimization.
2544	 */
2545dktsb4m_kpmcheck_small:
2546	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
2547	  /* delay slot safe, below */
2548
2549	/*
2550	 * Get second TSB offset and look for 4M mapping
2551	 * using 4M virtual page as the TSB index.
2552	 *
2553	 * Here:
2554	 * %g1 = 8K TSB pointer.  Don't squash it.
2555	 * %g2 = tag access register (we still need it)
2556	 */
2557	srlx	%g2, MMU_PAGESHIFT4M, %g3
2558
2559	/*
2560	 * We patch the next set of instructions at run time;
2561	 * any changes here require sfmmu_patch_ktsb changes too.
2562	 */
2563dktsb4mbase:
2564	RUNTIME_PATCH_SETX(%g7, %g6)
2565	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
2566dktsb4m:
2567	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
2568	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
2569
2570	/*
2571	 * At this point %g3 is our index into the TSB.
2572	 * We just masked off enough bits of the VA depending
2573	 * on our TSB size code.
2574	 */
2575	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
2576	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
2577	cmp	%g6, %g4			! compare tag
2578
2579dktsb4m_tsbmiss:
2580	bne,pn	%xcc, dktsb4m_kpmcheck
2581	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
2582	TT_TRACE(trace_tsbhit)
2583	/* we don't check TTE size here since we assume 4M TSB is separate */
2584	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
2585	/* trapstat expects tte in %g5 */
2586	retry
2587
2588	/*
2589	 * So, we failed to find a valid TTE to match the faulting
2590	 * address in either TSB.  There are a few cases that could land
2591	 * us here:
2592	 *
2593	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
2594	 *    to sfmmu_tsb_miss_tt to handle the miss.
2595	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
2596	 *    4M TSB.  Let segkpm handle it.
2597	 *
2598	 * Note that we shouldn't land here in the case of a kpm VA when
2599	 * kpm_smallpages is active -- we handled that case earlier at
2600	 * dktsb4m_kpmcheck_small.
2601	 *
2602	 * At this point:
2603	 *  g1 = 8K-indexed primary TSB pointer
2604	 *  g2 = tag access register
2605	 *  g3 = 4M-indexed secondary TSB pointer
2606	 */
2607dktsb4m_kpmcheck:
2608	cmp	%g2, %g0
2609	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
2610	  nop
2611	ba,a,pt	%icc, sfmmu_tsb_miss_tt
2612	  nop
2613
2614#ifdef sun4v
2615	/*
2616	 * User instruction miss w/ single TSB.
2617	 * The first probe covers 8K, 64K, and 512K page sizes,
2618	 * because 64K and 512K mappings are replicated off 8K
2619	 * pointer.
2620	 *
2621	 * g1 = tsb8k pointer register
2622	 * g2 = tag access register
2623	 * g3 - g6 = scratch registers
2624	 * g7 = TSB tag to match
2625	 */
2626	.align	64
2627	ALTENTRY(sfmmu_uitlb_fastpath)
2628
2629	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
2630	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
2631	ba,pn	%xcc, sfmmu_tsb_miss_tt
2632	  mov	%g0, %g3
2633
2634	/*
2635	 * User data miss w/ single TSB.
2636	 * The first probe covers 8K, 64K, and 512K page sizes,
2637	 * because 64K and 512K mappings are replicated off 8K
2638	 * pointer.
2639	 *
2640	 * g1 = tsb8k pointer register
2641	 * g2 = tag access register
2642	 * g3 - g6 = scratch registers
2643	 * g7 = TSB tag to match
2644	 */
2645	.align 64
2646	ALTENTRY(sfmmu_udtlb_fastpath)
2647
2648	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
2649	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
2650	ba,pn	%xcc, sfmmu_tsb_miss_tt
2651	  mov	%g0, %g3
2652
2653	/*
2654	 * User instruction miss w/ multiple TSBs (sun4v).
2655	 * The first probe covers 8K, 64K, and 512K page sizes,
2656	 * because 64K and 512K mappings are replicated off 8K
2657	 * pointer.  Second probe covers 4M page size only.
2658	 *
2659	 * Just like sfmmu_udtlb_slowpath, except:
2660	 *   o Uses ASI_ITLB_IN
2661	 *   o checks for execute permission
2662	 *   o No ISM prediction.
2663	 *
2664	 * g1 = tsb8k pointer register
2665	 * g2 = tag access register
2666	 * g3 - g6 = scratch registers
2667	 * g7 = TSB tag to match
2668	 */
2669	.align	64
2670	ALTENTRY(sfmmu_uitlb_slowpath)
2671
2672	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2673	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2674	/* g4 - g5 = clobbered here */
2675
2676	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2677	/* g1 = first TSB pointer, g3 = second TSB pointer */
2678	srlx	%g2, TAG_VALO_SHIFT, %g7
2679	PROBE_2ND_ITSB(%g3, %g7)
2680	/* NOT REACHED */
2681
2682#else /* sun4v */
2683
2684	/*
2685	 * User instruction miss w/ multiple TSBs (sun4u).
2686	 * The first probe covers 8K, 64K, and 512K page sizes,
2687	 * because 64K and 512K mappings are replicated off 8K
2688	 * pointer.  Second probe covers 4M page size only.
2689	 *
2690	 * Just like sfmmu_udtlb_slowpath, except:
2691	 *   o Uses ASI_ITLB_IN
2692	 *   o checks for execute permission
2693	 *   o No ISM prediction.
2694	 *
2695	 * g1 = tsb8k pointer register
2696	 * g2 = tag access register
2697	 * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch
2698	 * g4 - g6 = scratch registers
2699	 * g7 = TSB tag to match
2700	 */
2701	.align	64
2702	ALTENTRY(sfmmu_uitlb_slowpath)
2703
2704#ifdef UTSB_PHYS
2705	/*
2706	 * g1 = 1st TSB entry pointer
2707	 * g3 = 2nd TSB base register
2708	 * Need 2nd TSB entry pointer for 2nd probe.
2709	 */
2710	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2711
2712	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2713#else /* UTSB_PHYS */
2714	mov	%g1, %g3	/* save tsb8k reg in %g3 */
2715	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
2716	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
2717
2718	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
2719	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
2720	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
2721#endif /* UTSB_PHYS */
2722	/* g1 = first TSB pointer, g3 = second TSB pointer */
2723	srlx	%g2, TAG_VALO_SHIFT, %g7
2724	PROBE_2ND_ITSB(%g3, %g7, isynth)
2725	/* NOT REACHED */
2726#endif /* sun4v */
2727
2728	/*
2729	 * User data miss w/ multiple TSBs.
2730	 * The first probe covers 8K, 64K, and 512K page sizes,
2731	 * because 64K and 512K mappings are replicated off 8K
2732	 * pointer.  Second probe covers 4M page size only.
2733	 *
2734	 * We consider probing for 4M pages first if the VA falls
2735	 * in a range that's likely to be ISM.
2736	 *
2737	 * g1 = tsb8k pointer register
2738	 * g2 = tag access register
2739	 * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch
2740	 * g4 - g6 = scratch registers
2741	 * g7 = TSB tag to match
2742	 */
2743	.align 64
2744	ALTENTRY(sfmmu_udtlb_slowpath)
2745
2746	/*
2747	 * Check for ISM.  If it exists, look for 4M mappings in the second TSB
2748	 * first, then probe for other mappings in the first TSB if that fails.
2749	 */
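	/*
	 * The srax below is the ISM-prediction heuristic: an arithmetic
	 * right shift by PREDISM_BASESHIFT leaves a positive value only
	 * for non-negative VAs at or above 1 << PREDISM_BASESHIFT, the
	 * range considered likely to be ISM.
	 */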
2750	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
2751	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
2752	  mov	%g1, %g3
2753
2754udtlb_miss_probefirst:
2755	/*
2756	 * g1 = 8K TSB pointer register
2757	 * g2 = tag access register
2758	 * g3 = (potentially) second TSB entry ptr
2759	 * g6 = ism pred.
2760	 * g7 = vpg_4m
2761	 */
2762#ifdef sun4v
2763	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
2764	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2765
2766	/*
2767	 * Here:
2768	 *   g1 = first TSB pointer
2769	 *   g2 = tag access reg
2770	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2771	 */
2772	brgz,pn	%g6, sfmmu_tsb_miss_tt
2773	  nop
2774#else /* sun4v */
2775#ifndef UTSB_PHYS
2776	mov	%g1, %g4
2777	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
#endif /* UTSB_PHYS */
2779	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
2780
2781	/*
2782	 * Here:
2783	 *   g1 = first TSB pointer
2784	 *   g2 = tag access reg
2785	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
2786	 */
2787	brgz,pn	%g6, sfmmu_tsb_miss_tt
2788	  nop
2789#ifndef UTSB_PHYS
2790	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
#endif /* UTSB_PHYS */
2792	/* fall through in 8K->4M probe order */
2793#endif /* sun4v */
2794
2795udtlb_miss_probesecond:
2796	/*
2797	 * Look in the second TSB for the TTE
2798	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
2799	 * g2 = tag access reg
2800	 * g3 = 8K TSB pointer register
2801	 * g6 = ism pred.
2802	 * g7 = vpg_4m
2803	 */
2804#ifdef sun4v
2805	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
2806	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2807	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
2808#else /* sun4v */
2809#ifdef UTSB_PHYS
2810	GET_2ND_TSBREG(%g3)
2811	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
2812	/* tagacc (%g2) is okay, no need to reload, %g3 = second tsbe ptr */
2813#else /* UTSB_PHYS */
2814	mov	%g3, %g7
2815	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
2816	/* %g2 clobbered, %g3 =second tsbe ptr */
2817	mov	MMU_TAG_ACCESS, %g2
2818	ldxa	[%g2]ASI_DMMU, %g2
2819#endif /* UTSB_PHYS */
2820#endif /* sun4v */
2821
2822	srlx	%g2, TAG_VALO_SHIFT, %g7
2823	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
2824	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
2825	brgz,pn	%g6, udtlb_miss_probefirst
2826	  nop
2827
2828	/* fall through to sfmmu_tsb_miss_tt */
2829
2830	ALTENTRY(sfmmu_tsb_miss_tt)
2831	TT_TRACE(trace_tsbmiss)
2832	/*
2833	 * We get here if there is a TSB miss OR a write protect trap.
2834	 *
2835	 * g1 = First TSB entry pointer
2836	 * g2 = tag access register
2837	 * g3 = 4M TSB entry pointer; NULL if no 2nd TSB
2838	 * g4 - g7 = scratch registers
2839	 */
2840
2841	ALTENTRY(sfmmu_tsb_miss)
2842
2843	/*
2844	 * If trapstat is running, we need to shift the %tpc and %tnpc to
2845	 * point to trapstat's TSB miss return code (note that trapstat
2846	 * itself will patch the correct offset to add).
2847	 */
2848	rdpr	%tl, %g7
2849	cmp	%g7, 1
2850	ble,pt	%xcc, 0f
2851	  sethi	%hi(KERNELBASE), %g6
2852	rdpr	%tpc, %g7
2853	or	%g6, %lo(KERNELBASE), %g6
2854	cmp	%g7, %g6
2855	bgeu,pt	%xcc, 0f
2856	/* delay slot safe */
2857
2858	ALTENTRY(tsbmiss_trapstat_patch_point)
2859	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
2860	wrpr	%g7, %tpc
2861	add	%g7, 4, %g7
2862	wrpr	%g7, %tnpc
28630:
2864	CPU_TSBMISS_AREA(%g6, %g7)
2865
2866	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save first tsb pointer */
2867	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save second tsb pointer */
2868
2869	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
2870	brz,a,pn %g3, 1f			/* skip ahead if kernel */
2871	  ldn	[%g6 + TSBMISS_KHATID], %g7
2872	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
2873	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
2874
2875	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
2876
2877	cmp	%g3, INVALID_CONTEXT
2878	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
2879	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
2880
2881	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
2882	/*
2883	 * The miss wasn't in an ISM segment.
2884	 *
	 * %g1, %g3, %g4, %g5, %g7 all clobbered
2886	 * %g2 = (pseudo) tag access
2887	 */
2888
2889	ba,pt	%icc, 2f
2890	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
2891
28921:
2893	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
2894	/*
2895	 * 8K and 64K hash.
2896	 */
28972:
2898
2899	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2900		MMU_PAGESHIFT64K, TTE64K, tsb_l8K, tsb_checktte,
2901		sfmmu_suspend_tl, tsb_512K)
2902	/* NOT REACHED */
2903
2904tsb_512K:
2905	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2906	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2907	brz,pn	%g5, 3f
2908	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2909	and	%g4, HAT_512K_FLAG, %g5
2910
2911	/*
2912	 * Note that there is a small window here where we may have
2913	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
2914	 * flag yet, so we will skip searching the 512k hash list.
2915	 * In this case we will end up in pagefault which will find
2916	 * the mapping and return.  So, in this instance we will end up
2917	 * spending a bit more time resolving this TSB miss, but it can
2918	 * only happen once per process and even then, the chances of that
2919	 * are very small, so it's not worth the extra overhead it would
2920	 * take to close this window.
2921	 */
2922	brz,pn	%g5, tsb_4M
2923	  nop
29243:
2925	/*
2926	 * 512K hash
2927	 */
2928
2929	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2930		MMU_PAGESHIFT512K, TTE512K, tsb_l512K, tsb_checktte,
2931		sfmmu_suspend_tl, tsb_4M)
2932	/* NOT REACHED */
2933
2934tsb_4M:
2935	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2936	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2937	brz,pn	%g5, 4f
2938	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2939	and	%g4, HAT_4M_FLAG, %g5
2940	brz,pn	%g5, tsb_32M
2941	  nop
29424:
2943	/*
2944	 * 4M hash
2945	 */
2946
2947	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2948		MMU_PAGESHIFT4M, TTE4M, tsb_l4M, tsb_checktte,
2949		sfmmu_suspend_tl, tsb_32M)
2950	/* NOT REACHED */
2951
2952tsb_32M:
2953#ifndef sun4v
2954	GET_CPU_IMPL(%g5)
2955	cmp	%g5, OLYMPUS_C_IMPL
2956	be,pn	%xcc, 0f
2957	  nop
2958	cmp	%g5, PANTHER_IMPL
2959	bne,pt	%xcc, tsb_pagefault
2960	  nop
2961#endif
2962
29630:
2964	ldn	[%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
2965	sllx	%g3, TAGACC_CTX_LSHIFT, %g5
2966#ifdef sun4v
2967        brz,pn	%g5, 6f
2968#else
2969	brz,pn	%g5, tsb_pagefault
2970#endif
2971	  lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2972	and	%g4, HAT_32M_FLAG, %g5
2973	brz,pn	%g5, tsb_256M
2974	  nop
29755:
2976	/*
2977	 * 32M hash
2978	 */
2979
2980	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2981		MMU_PAGESHIFT32M, TTE32M, tsb_l32M, tsb_checktte,
2982		sfmmu_suspend_tl, tsb_256M)
2983	/* NOT REACHED */
2984
2985tsb_256M:
2986	lduh	[%g6 + TSBMISS_HATFLAGS], %g4
2987	and	%g4, HAT_256M_FLAG, %g5
2988	brz,pn	%g5, tsb_pagefault
2989	  nop
29906:
2991	/*
2992	 * 256M hash
2993	 */
2994
2995	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1,
2996	    MMU_PAGESHIFT256M, TTE256M, tsb_l256M, tsb_checktte,
2997	    sfmmu_suspend_tl, tsb_pagefault)
2998	/* NOT REACHED */
2999
3000tsb_checktte:
3001	/*
3002	 * g3 = tte
3003	 * g4 = tte pa
3004	 * g5 = tte va
3005	 * g6 = tsbmiss area
3006	 */
3007	brgez,pn %g3, tsb_pagefault	/* if tte invalid branch */
3008	  nop
3009
3010tsb_validtte:
3011	/*
3012	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
3013	 */
3014	rdpr	%tt, %g7
3015	cmp	%g7, FAST_PROT_TT
3016	bne,pt	%icc, 4f
3017	  nop
3018
3019	TTE_SET_REFMOD_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_refmod,
3020	    tsb_protfault)
3021
3022	rdpr	%tt, %g5
3023	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
3024	ba,pt	%xcc, tsb_update_tl1
3025	  nop
3026
30274:
3028	/*
	 * If this is an ITLB miss, check the exec bit.
	 * If it is not set, treat the TTE as invalid.
3031	 */
3032	cmp     %g7, T_INSTR_MMU_MISS
3033	be,pn	%icc, 5f
3034	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
3035	cmp     %g7, FAST_IMMU_MISS_TT
3036	bne,pt %icc, 3f
3037	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
30385:
3039	bz,pn %icc, tsb_protfault
3040	  nop
3041
30423:
3043	/*
3044	 * Set reference bit if not already set
3045	 */
3046	TTE_SET_REF_ML(%g3, %g4, %g5, %g6, %g7, tsb_lset_ref)
3047
3048	/*
3049	 * Now, load into TSB/TLB.  At this point:
3050	 * g3 = tte
3051	 * g4 = patte
3052	 * g6 = tsbmiss area
3053	 */
3054	rdpr	%tt, %g5
3055#ifdef sun4v
3056	MMU_FAULT_STATUS_AREA(%g2)
3057	cmp	%g5, T_INSTR_MMU_MISS
3058	be,a,pt	%icc, 9f
3059	  nop
3060	cmp	%g5, FAST_IMMU_MISS_TT
3061	be,a,pt	%icc, 9f
3062	  nop
3063	add	%g2, MMFSA_D_, %g2
30649:
3065	ldx	[%g2 + MMFSA_CTX_], %g7
3066	sllx	%g7, TTARGET_CTX_SHIFT, %g7
3067	ldx	[%g2 + MMFSA_ADDR_], %g2
3068	srlx	%g2, TTARGET_VA_SHIFT, %g2
3069	or	%g2, %g7, %g2
3070#else
3071	cmp	%g5, FAST_IMMU_MISS_TT
3072	be,a,pt	%icc, tsb_update_tl1
3073	  ldxa	[%g0]ASI_IMMU, %g2
3074	ldxa	[%g0]ASI_DMMU, %g2
3075#endif
3076tsb_update_tl1:
3077	srlx	%g2, TTARGET_CTX_SHIFT, %g7
3078	brz,pn	%g7, tsb_kernel
3079#ifdef sun4v
3080	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
3081#else
3082	  srlx	%g3, TTE_SZ_SHFT, %g7
3083#endif
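
	/*
	 * The upper bits of the tag target hold the context number;
	 * context 0 is the kernel context, so if the srlx above leaves
	 * zero we branch to tsb_kernel.  The delay slot loads the TTE
	 * size field into %g7 for the code below.
	 */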
3084
3085tsb_user:
3086#ifdef sun4v
3087	cmp	%g7, TTE4M
3088	bge,pn	%icc, tsb_user4m
3089	  nop
3090#else /* sun4v */
3091	cmp	%g7, TTESZ_VALID | TTE4M
3092	be,pn	%icc, tsb_user4m
3093	  srlx	%g3, TTE_SZ2_SHFT, %g7
3094	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
3095#ifdef ITLB_32M_256M_SUPPORT
3096	bnz,pn	%icc, tsb_user4m
3097	  nop
3098#else /* ITLB_32M_256M_SUPPORT */
3099	bnz,a,pn %icc, tsb_user_pn_synth
3100	 cmp	%g5, FAST_IMMU_MISS_TT
3101#endif /* ITLB_32M_256M_SUPPORT */
3102#endif /* sun4v */
3103
3104tsb_user8k:
3105	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = first TSB ptr
3106
3107#ifndef UTSB_PHYS
3108	mov	ASI_N, %g7	! user TSBs accessed by VA
3109	mov	%g7, %asi
3110#endif /* UTSB_PHYS */
3111
3112	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 5)
3113
3114#ifdef sun4v
3115	cmp	%g5, T_INSTR_MMU_MISS
3116	be,a,pn	%xcc, 9f
3117	  mov	%g3, %g5
3118#endif /* sun4v */
3119	cmp	%g5, FAST_IMMU_MISS_TT
3120	be,pn	%xcc, 9f
3121	  mov	%g3, %g5
3122
3123	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3124	! trapstat wants TTE in %g5
3125	retry
31269:
3127	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3128	! trapstat wants TTE in %g5
3129	retry
3130
3131tsb_user4m:
3132	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 = tsbp */
31334:
3134	brz,pn	%g1, 5f	/* Check to see if we have 2nd TSB programmed */
3135	  nop
3136
3137#ifndef UTSB_PHYS
3138	mov	ASI_N, %g7	! user TSBs accessed by VA
3139	mov	%g7, %asi
3140#endif /* UTSB_PHYS */
3141
3142        TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 6)
3143
31445:
3145#ifdef sun4v
3146        cmp     %g5, T_INSTR_MMU_MISS
3147        be,a,pn %xcc, 9f
3148          mov   %g3, %g5
3149#endif /* sun4v */
3150        cmp     %g5, FAST_IMMU_MISS_TT
3151        be,pn   %xcc, 9f
3152        mov     %g3, %g5
3153
3154        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3155        ! trapstat wants TTE in %g5
3156        retry
31579:
3158        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3159        ! trapstat wants TTE in %g5
3160        retry
3161
3162#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
3163	/*
3164	 * Panther ITLB synthesis.
3165	 * The Panther 32M and 256M ITLB code simulates these two large page
3166	 * sizes with 4M pages, to provide support for programs, for example
3167	 * Java, that may copy instructions into a 32M or 256M data page and
3168	 * then execute them. The code below generates the 4M pfn bits and
3169	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
3170	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
3171	 * are ignored by the hardware.
3172	 *
3173	 * Now, load into TSB/TLB.  At this point:
3174	 * g2 = tagtarget
3175	 * g3 = tte
3176	 * g4 = patte
3177	 * g5 = tt
3178	 * g6 = tsbmiss area
3179	 */
3180tsb_user_pn_synth:
3181	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
3182	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
3183	bz,pn %icc, 4b				/* if not, been here before */
3184	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
3185	brz,a,pn %g1, 5f			/* no 2nd tsb */
3186	  mov	%g3, %g5
3187
3188	mov	MMU_TAG_ACCESS, %g7
3189	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
3190	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
3191
3192	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3193	mov	%g7, %asi
3194	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 4) /* update TSB */
31955:
3196        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3197        retry
3198
3199tsb_user_itlb_synth:
3200	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 = tsbp */
3201
3202	mov	MMU_TAG_ACCESS, %g7
3203	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
3204	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
3205	brz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
3206	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
3207
3208	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
3209	mov	%g7, %asi
3210	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, 6) /* update TSB */
32117:
3212	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
3213        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3214        retry
#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
3216
3217tsb_kernel:
3218#ifdef sun4v
3219	cmp	%g7, TTE4M
3220	bge,pn	%icc, 5f
3221#else
3222	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
3223	be,pn	%icc, 5f
3224#endif
3225	  nop
3226	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8k tsbptr
3227	ba,pt	%xcc, 6f
3228	  nop
32295:
3230	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4m tsbptr
3231	brz,pn	%g1, 3f		/* skip programming if 4m TSB ptr is NULL */
3232	  nop
32336:
3234#ifndef sun4v
3235tsb_kernel_patch_asi:
3236	or	%g0, RUNTIME_PATCH, %g6
3237	mov	%g6, %asi	! XXX avoid writing to %asi !!
3238#endif
3239	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 7)
32403:
3241#ifdef sun4v
3242	cmp	%g5, T_INSTR_MMU_MISS
3243	be,a,pn	%icc, 1f
3244	  mov	%g3, %g5			! trapstat wants TTE in %g5
3245#endif /* sun4v */
3246	cmp	%g5, FAST_IMMU_MISS_TT
3247	be,pn	%icc, 1f
3248	  mov	%g3, %g5			! trapstat wants TTE in %g5
3249	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3250	! trapstat wants TTE in %g5
3251	retry
32521:
3253	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
3254	! trapstat wants TTE in %g5
3255	retry
3256
3257tsb_ism:
3258	/*
3259	 * This is an ISM [i|d]tlb miss.  We optimize for largest
3260	 * page size down to smallest.
3261	 *
3262	 * g2 = vaddr + ctx(or ctxtype (sun4v)) aka (pseudo-)tag access
3263	 *	register
3264	 * g3 = ismmap->ism_seg
3265	 * g4 = physical address of ismmap->ism_sfmmu
3266	 * g6 = tsbmiss area
3267	 */
3268	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
3269	brz,a,pn %g7, ptl1_panic		/* if zero jmp ahead */
3270	  mov	PTL1_BAD_ISM, %g1
3271						/* g5 = pa of imap_vb_shift */
3272	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
3273	lduha	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
3274	srlx	%g3, %g4, %g3			/* clr size field */
3275	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number/type */
3276	sllx	%g3, %g4, %g3			/* g3 = ism vbase */
3277	and	%g2, %g1, %g4			/* g4 = ctx number */
3278	andn	%g2, %g1, %g1			/* g1 = tlb miss vaddr */
3279	sub	%g1, %g3, %g2			/* g2 = offset in ISM seg */
3280	or	%g2, %g4, %g2			/* g2 = (pseudo-)tagacc */
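
	/*
	 * In effect (illustrative C; vbase is ism_seg with its size
	 * field cleared):
	 *
	 *	va = tagacc & ~TAGACC_CTX_MASK;
	 *	ctx = tagacc & TAGACC_CTX_MASK;
	 *	tagacc = (va - vbase) | ctx;
	 *
	 * i.e. the miss address is rebased to its offset within the ISM
	 * segment so the lookup can proceed against the ISM hat.
	 */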
3281
3282	/*
3283	 * ISM pages are always locked down.
3284	 * If we can't find the tte then pagefault
	 * and let the spt segment driver resolve it.
3286	 *
3287	 * g2 = ISM vaddr (offset in ISM seg)
3288	 * g6 = tsb miss area
3289	 * g7 = ISM hatid
3290	 */
	sub	%g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5
						/* g5 = pa of imap_hatflags */
	lduha	[%g5]ASI_MEM, %g4		/* g4 = imap_hatflags */
	and	%g4, HAT_4M_FLAG, %g5
3294	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
3295	  nop
3296
3297tsb_ism_32M:
3298	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
3299	brz,pn	%g5, tsb_ism_256M
3300	  nop
3301
3302	/*
3303	 * 32M hash.
3304	 */
3305
3306	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT32M,
3307	    TTE32M, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
3308	    tsb_ism_4M)
3309	/* NOT REACHED */
3310
3311tsb_ism_32M_found:
3312	brlz,pt %g3, tsb_validtte
3313	  nop
3314	ba,pt	%xcc, tsb_ism_4M
3315	  nop
3316
3317tsb_ism_256M:
3318	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
3319	brz,a,pn %g5, ptl1_panic
3320	  mov	PTL1_BAD_ISM, %g1
3321
3322	/*
3323	 * 256M hash.
3324	 */
3325	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT256M,
3326	    TTE256M, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
3327	    tsb_ism_4M)
3328
3329tsb_ism_256M_found:
3330	brlz,pt %g3, tsb_validtte
3331	  nop
3332
3333tsb_ism_4M:
3334	/*
3335	 * 4M hash.
3336	 */
3337	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT4M,
3338	    TTE4M, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
3339	    tsb_ism_8K)
3340	/* NOT REACHED */
3341
3342tsb_ism_4M_found:
3343	brlz,pt %g3, tsb_validtte
3344	  nop
3345
3346tsb_ism_8K:
3347	/*
3348	 * 8K and 64K hash.
3349	 */
3350
3351	GET_TTE(%g2, %g7, %g3, %g4, %g5, %g6, %g1, MMU_PAGESHIFT64K,
3352	    TTE64K, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
3353	    tsb_pagefault)
3354	/* NOT REACHED */
3355
3356tsb_ism_8K_found:
3357	brlz,pt	%g3, tsb_validtte
3358	  nop
3359
3360tsb_pagefault:
3361	rdpr	%tt, %g7
3362	cmp	%g7, FAST_PROT_TT
3363	be,a,pn	%icc, tsb_protfault
3364	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
3365
3366tsb_protfault:
3367	/*
3368	 * we get here if we couldn't find a valid tte in the hash.
3369	 *
	 * If this is a user fault at tl>1, we go to the window handling code.
3371	 *
3372	 * If kernel and the fault is on the same page as our stack
3373	 * pointer, then we know the stack is bad and the trap handler
3374	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
3375	 *
3376	 * If this is a kernel trap and tl>1, panic.
3377	 *
3378	 * Otherwise we call pagefault.
3379	 */
3380	cmp	%g7, FAST_IMMU_MISS_TT
3381#ifdef sun4v
3382	MMU_FAULT_STATUS_AREA(%g4)
3383	ldx	[%g4 + MMFSA_I_CTX], %g5
3384	ldx	[%g4 + MMFSA_D_CTX], %g4
3385	move	%icc, %g5, %g4
3386	cmp	%g7, T_INSTR_MMU_MISS
3387	move	%icc, %g5, %g4
3388#else
3389	mov	MMU_TAG_ACCESS, %g4
3390	ldxa	[%g4]ASI_DMMU, %g2
3391	ldxa	[%g4]ASI_IMMU, %g5
3392	move	%icc, %g5, %g2
3393	cmp	%g7, T_INSTR_MMU_MISS
3394	move	%icc, %g5, %g2
3395	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
3396#endif
3397	brnz,pn	%g4, 3f				/* skip if not kernel */
3398	  rdpr	%tl, %g5
3399
3400	add	%sp, STACK_BIAS, %g3
3401	srlx	%g3, MMU_PAGESHIFT, %g3
3402	srlx	%g2, MMU_PAGESHIFT, %g4
3403	cmp	%g3, %g4
3404	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
3405	  mov	PTL1_BAD_STACK, %g1
3406
3407	cmp	%g5, 1
3408	ble,pt	%icc, 2f
3409	  nop
3410	TSTAT_CHECK_TL1(2f, %g1, %g2)
3411	rdpr	%tt, %g2
3412	cmp	%g2, FAST_PROT_TT
3413	mov	PTL1_BAD_KPROT_FAULT, %g1
3414	movne	%icc, PTL1_BAD_KMISS, %g1
3415	ba,pt	%icc, ptl1_panic
3416	  nop
3417
34182:
3419	/*
3420	 * We are taking a pagefault in the kernel on a kernel address.  If
3421	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
3422	 * want to call sfmmu_pagefault -- we will instead note that a fault
3423	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
3424	 * (instead of a "retry").  This will step over the faulting
3425	 * instruction.
3426	 */
3427	CPU_INDEX(%g1, %g2)
3428	set	cpu_core, %g2
3429	sllx	%g1, CPU_CORE_SHIFT, %g1
3430	add	%g1, %g2, %g1
3431	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3432	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3433	bz	sfmmu_pagefault
3434	or	%g2, CPU_DTRACE_BADADDR, %g2
3435	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3436	GET_MMU_D_ADDR(%g3, %g4)
3437	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3438	done
3439
34403:
3441	cmp	%g5, 1
3442	ble,pt	%icc, 4f
3443	  nop
3444	TSTAT_CHECK_TL1(4f, %g1, %g2)
3445	ba,pt	%icc, sfmmu_window_trap
3446	  nop
3447
34484:
3449	/*
3450	 * We are taking a pagefault on a non-kernel address.  If we are in
3451	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
3452	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
3453	 */
3454	CPU_INDEX(%g1, %g2)
3455	set	cpu_core, %g2
3456	sllx	%g1, CPU_CORE_SHIFT, %g1
3457	add	%g1, %g2, %g1
3458	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
3459	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
3460	bz	sfmmu_pagefault
3461	or	%g2, CPU_DTRACE_BADADDR, %g2
3462	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
3463	GET_MMU_D_ADDR(%g3, %g4)
3464	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
3465
3466	/*
3467	 * Be sure that we're actually taking this miss from the kernel --
3468	 * otherwise we have managed to return to user-level with
3469	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3470	 */
3471	rdpr	%tstate, %g2
3472	btst	TSTATE_PRIV, %g2
3473	bz,a	ptl1_panic
3474	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3475	done
3476
3477	ALTENTRY(tsb_tl0_noctxt)
3478	/*
3479	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
	 * if it is, indicate that we have faulted and issue a done.
3481	 */
3482	CPU_INDEX(%g5, %g6)
3483	set	cpu_core, %g6
3484	sllx	%g5, CPU_CORE_SHIFT, %g5
3485	add	%g5, %g6, %g5
3486	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
3487	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
3488	bz	1f
3489	or	%g6, CPU_DTRACE_BADADDR, %g6
3490	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
3491	GET_MMU_D_ADDR(%g3, %g4)
3492	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
3493
3494	/*
3495	 * Be sure that we're actually taking this miss from the kernel --
3496	 * otherwise we have managed to return to user-level with
3497	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
3498	 */
3499	rdpr	%tstate, %g5
3500	btst	TSTATE_PRIV, %g5
3501	bz,a	ptl1_panic
3502	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3503	done
3504
35051:
3506	rdpr	%tt, %g5
3507	cmp	%g5, FAST_IMMU_MISS_TT
3508#ifdef sun4v
3509	MMU_FAULT_STATUS_AREA(%g2)
3510	be,a,pt	%icc, 2f
3511	  ldx	[%g2 + MMFSA_I_CTX], %g3
3512	cmp	%g5, T_INSTR_MMU_MISS
3513	be,a,pt	%icc, 2f
3514	  ldx	[%g2 + MMFSA_I_CTX], %g3
3515	ldx	[%g2 + MMFSA_D_CTX], %g3
35162:
3517#else
3518	mov	MMU_TAG_ACCESS, %g2
3519	be,a,pt	%icc, 2f
3520	  ldxa	[%g2]ASI_IMMU, %g3
3521	ldxa	[%g2]ASI_DMMU, %g3
35222:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
3523#endif
3524	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
3525	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
3526	rdpr	%tl, %g5
3527	cmp	%g5, 1
3528	ble,pt	%icc, sfmmu_mmu_trap
3529	  nop
3530	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
3531	ba,pt	%icc, sfmmu_window_trap
3532	  nop
3533	SET_SIZE(sfmmu_tsb_miss)
3534
#if (1 << TSBMISS_SHIFT) != TSBMISS_SIZE
3536#error - TSBMISS_SHIFT does not correspond to size of tsbmiss struct
3537#endif
3538
3539#endif /* lint */
3540
3541#if defined (lint)
3542/*
3543 * This routine will look for a user or kernel vaddr in the hash
3544 * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
3545 * grab any locks.  It should only be used by other sfmmu routines.
3546 */
3547/* ARGSUSED */
3548pfn_t
3549sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
3550{
3551	return(0);
3552}
3553
3554/* ARGSUSED */
3555pfn_t
3556sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
3557{
3558	return(0);
3559}
3560
3561#else /* lint */
3562
3563	ENTRY_NP(sfmmu_vatopfn)
3564 	/*
3565 	 * disable interrupts
3566 	 */
3567 	rdpr	%pstate, %o3
3568#ifdef DEBUG
3569	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
3570#endif
3571	/*
3572	 * disable interrupts to protect the TSBMISS area
3573	 */
3574	andn    %o3, PSTATE_IE, %o5
3575	wrpr    %o5, 0, %pstate
3576
3577	/*
3578	 * o0 = vaddr
3579	 * o1 = sfmmup
3580	 * o2 = ttep
3581	 */
3582	CPU_TSBMISS_AREA(%g1, %o5)
3583	ldn	[%g1 + TSBMISS_KHATID], %o4
3584	cmp	%o4, %o1
3585	bne,pn	%ncc, vatopfn_nokernel
3586	  mov	TTE64K, %g5			/* g5 = rehash # */
3587	mov %g1,%o5				/* o5 = tsbmiss_area */
3588	/*
3589	 * o0 = vaddr
3590	 * o1 & o4 = hatid
3591	 * o2 = ttep
3592	 * o5 = tsbmiss area
3593	 */
3594	mov	HBLK_RANGE_SHIFT, %g6
35951:
3596
3597	/*
3598	 * o0 = vaddr
3599	 * o1 = sfmmup
3600	 * o2 = ttep
3601	 * o3 = old %pstate
3602	 * o4 = hatid
3603	 * o5 = tsbmiss
3604	 * g5 = rehash #
3605	 * g6 = hmeshift
3606	 *
	 * The first arg to GET_TTE is actually the tag access register,
	 * not just the vaddr.  Since this call is for the kernel we need
	 * to clear any lower vaddr bits that would be interpreted as ctx bits.
3610	 */
3611	set     TAGACC_CTX_MASK, %g1
3612	andn    %o0, %g1, %o0
3613	GET_TTE(%o0, %o4, %g1, %g2, %g3, %o5, %g4, %g6, %g5,
3614		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
3615
3616kvtop_hblk_found:
3617	/*
3618	 * o0 = vaddr
3619	 * o1 = sfmmup
3620	 * o2 = ttep
3621	 * g1 = tte
3622	 * g2 = tte pa
3623	 * g3 = tte va
	 * o5 = tsbmiss area
3626	 */
3627	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
3628	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3629	stx %g1,[%o2]				/* put tte into *ttep */
3630	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
3631	/*
3632	 * o0 = vaddr
3633	 * o1 = sfmmup
3634	 * o2 = ttep
3635	 * g1 = pfn
3636	 */
3637	ba,pt	%xcc, 6f
3638	  mov	%g1, %o0
3639
3640kvtop_nohblk:
3641	/*
	 * we get here if we couldn't find a valid hblk in the hash.
	 * We rehash if necessary.
3644	 */
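	/*
	 * Rehash order, driven by the %g5/%g6 updates below: %g5 is the
	 * hashno and starts at TTE64K.  On sun4v it advances by two each
	 * pass (64K hash, then 4M, then 256M, stopping at MAX_HASHCNT);
	 * on sun4u it advances by one (64K, 512K, 4M), since there are
	 * no 32M/256M kernel pages there.
	 */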
3645	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
3646#ifdef sun4v
3647	cmp	%g5, MAX_HASHCNT
3648#else
3649	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
3650#endif
3651	be,a,pn	%icc, 6f
3652	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3653	mov	%o1, %o4			/* restore hatid */
3654#ifdef sun4v
3655        add	%g5, 2, %g5
3656	cmp	%g5, 3
3657	move	%icc, MMU_PAGESHIFT4M, %g6
3658	ba,pt	%icc, 1b
3659	movne	%icc, MMU_PAGESHIFT256M, %g6
3660#else
3661        inc	%g5
3662	cmp	%g5, 2
3663	move	%icc, MMU_PAGESHIFT512K, %g6
3664	ba,pt	%icc, 1b
3665	movne	%icc, MMU_PAGESHIFT4M, %g6
3666#endif
36676:
3668	retl
3669 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3670
3671tsb_suspend:
3672	/*
3673	 * o0 = vaddr
3674	 * o1 = sfmmup
3675	 * o2 = ttep
3676	 * g1 = tte
3677	 * g2 = tte pa
3678	 * g3 = tte va
3679	 * o5 = tsbmiss area (o2 still holds the ttep)
3680	 */
3681	stx	%g1, [%o2]			/* put tte into *ttep */
3682	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
3683	  sub	%g0, 1, %o0			/* output = -1 (PFN_INVALID) */
3684	TTETOPFN(%g1, %o0, vatopfn_l3, %g2, %g3, %g4)
3685	/*
3686	 * o0 = PFN return value PFN_INVALID, PFN_SUSPENDED, or pfn#
3687	 * o1 = sfmmup
3688	 * o2 = ttep
3689	 * g1 = pfn
3690	 */
3691	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
36928:
3693	retl
3694	 wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3695
3696vatopfn_nokernel:
3697	/*
3698	 * This routine does NOT support user addresses.
3699	 * There is a routine in C that supports this.
3700	 * The only reason the C routine does not also support
3701	 * kernel addresses is that we do va_to_pa while
3702	 * holding the hashlock.
3703	 */
3704 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3705	save	%sp, -SA(MINFRAME), %sp
3706	sethi	%hi(sfmmu_panic3), %o0
3707	call	panic
3708	 or	%o0, %lo(sfmmu_panic3), %o0
3709
3710	SET_SIZE(sfmmu_vatopfn)
3711
3712	/*
3713	 * %o0 = vaddr
3714	 * %o1 = hashno (aka szc)
3715	 *
3716	 * This routine is similar to sfmmu_vatopfn(), but it only looks for
3717	 * a kernel vaddr in the hash structure for the specified rehash value.
3718	 * It is an optimization for the case when the pagesize for a given
3719	 * va range is already known (e.g. the large page heap) and we don't
3720	 * want to start the search with rehash value 1, as sfmmu_vatopfn()
3721	 * does.
3722	 *
3723	 * Returns a valid pfn, or PFN_INVALID if the tte for the specified
3724	 * rehash # is not found, is invalid, or is suspended.
3725	 */
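
	/*
	 * Hypothetical C-level usage sketch (the caller, page size and
	 * fallback are illustrative, not taken from this file):
	 *
	 *	tte_t tte;
	 *	pfn_t pfn = sfmmu_kvaszc2pfn(vaddr, TTE4M); // 4M rehash only
	 *
	 *	if (pfn == PFN_INVALID)		// wrong size, invalid, ...
	 *		pfn = sfmmu_vatopfn(vaddr, ksfmmup, &tte);
	 */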
3726	ENTRY_NP(sfmmu_kvaszc2pfn)
3727 	/*
3728 	 * save %pstate; interrupts are disabled just below
3729 	 */
3730 	rdpr	%pstate, %o3
3731#ifdef DEBUG
3732	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
3733#endif
3734	/*
3735	 * disable interrupts to protect the TSBMISS area
3736	 */
3737	andn    %o3, PSTATE_IE, %o5
3738	wrpr    %o5, 0, %pstate
3739
3740	CPU_TSBMISS_AREA(%g1, %o5)
3741	ldn	[%g1 + TSBMISS_KHATID], %o4
3742	sll	%o1, 1, %g6			/* %g6 = 2 * hashno */
3743	add	%g6, %o1, %g6			/* %g6 = 3 * hashno */
3744	add	%g6, MMU_PAGESHIFT, %g6		/* hmeshift = MMU_PAGESHIFT + 3 * hashno */
3745	/*
3746	 * %o0 = vaddr
3747	 * %o1 = hashno
3748	 * %o3 = old %pstate
3749	 * %o4 = ksfmmup
3750	 * %g1 = tsbmiss area
3751	 * %g6 = hmeshift
3752	 */
3753
3754	/*
3755	 * The first arg to GET_TTE is actually the tag access value, not
3756	 * just the vaddr. Since this call is for the kernel, we must clear
3757	 * any low-order vaddr bits that would be interpreted as ctx bits.
3758	 */
3759	srlx	%o0, MMU_PAGESHIFT, %o0
3760	sllx	%o0, MMU_PAGESHIFT, %o0
3761	GET_TTE(%o0, %o4, %g3, %g4, %g5, %g1, %o5, %g6, %o1,
3762		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
3763		kvaszc2pfn_nohblk)
3764
3765kvaszc2pfn_hblk_found:
3766	/*
3767	 * %g3 = tte
3768	 * %o0 = vaddr
3769	 */
3770	brgez,a,pn %g3, 1f			/* check if tte is invalid */
3771	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
3772	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
3773	/*
3774	 * g3 = pfn
3775	 */
3776	ba,pt	%xcc, 1f
3777	  mov	%g3, %o0
3778
3779kvaszc2pfn_nohblk:
3780	mov	-1, %o0
3781
37821:
3783	retl
3784 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
3785
3786	SET_SIZE(sfmmu_kvaszc2pfn)
3787
3788#endif /* lint */
3789
3790
3791
3792#if !defined(lint)
3793
3794/*
3795 * kpm lock used between trap level tsbmiss handler and kpm C level.
3796 */
3797#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
3798	mov     0xff, tmp1						;\
3799label1:									;\
3800	casa    [kpmlckp]asi, %g0, tmp1					;\
3801	brnz,pn tmp1, label1						;\
3802	mov     0xff, tmp1						;\
3803	membar  #LoadLoad
3804
3805#define KPMLOCK_EXIT(kpmlckp, asi)					\
3806	membar  #LoadStore|#StoreStore					;\
3807	sta     %g0, [kpmlckp]asi
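
/*
 * Roughly equivalent C, as a sketch only (atomic_cas_32() and the
 * membar routines from <sys/atomic.h> stand in for casa/membar; the
 * real macros also take an ASI so the lock word can be addressed
 * physically from the TL>0 handlers):
 *
 *	static void
 *	kpmlock_enter(volatile uint32_t *kpmlckp)
 *	{
 *		while (atomic_cas_32(kpmlckp, 0, 0xff) != 0)
 *			continue;	// spin until our 0 -> 0xff wins
 *		membar_enter();		// cf. membar #LoadLoad above
 *	}
 *
 *	static void
 *	kpmlock_exit(volatile uint32_t *kpmlckp)
 *	{
 *		membar_exit();		// membar #LoadStore|#StoreStore
 *		*kpmlckp = 0;
 *	}
 */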
3808
3809/*
3810 * Lookup a memseg for a given pfn and if found, return the physical
3811 * address of the corresponding struct memseg in mseg, otherwise
3812 * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
3813 * tsbmp, %asi is assumed to be ASI_MEM.
3814 * This lookup is done by strictly traversing only the physical memseg
3815 * linkage. The more generic approach, to check the virtual linkage
3816 * before using the physical (used e.g. with hmehash buckets), cannot
3817 * be used here. Memory DR operations can run in parallel with this
3818 * lookup without any locks, and updates of the physical and virtual
3819 * linkage cannot be done atomically with respect to each other. Because
3820 * physical address zero can be a valid physical address, MSEG_NULLPTR_PA
3821 * acts as the "physical NULL" pointer.
3822 */
3823#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
3824	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
3825	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
3826	udivx	pfn, mseg, mseg						;\
3827	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
3828	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
3829	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
3830	add	tmp1, mseg, tmp1					;\
3831	ldxa	[tmp1]%asi, mseg					;\
3832	cmp	mseg, MSEG_NULLPTR_PA					;\
3833	be,pn	%xcc, label/**/1		/* if not found */	;\
3834	  nop								;\
3835	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3836	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3837	blu,pn	%xcc, label/**/1					;\
3838	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3839	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3840	bgeu,pn	%xcc, label/**/1					;\
3841	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
3842	mulx	tmp1, PAGE_SIZE, tmp1					;\
3843	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
3844	add	tmp2, tmp1, tmp1			/* pp */	;\
3845	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
3846	cmp	tmp2, pfn						;\
3847	be,pt	%xcc, label/**/_ok			/* found */	;\
3848label/**/1:								;\
3849	/* brute force lookup */					;\
3850	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
3851	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
3852label/**/2:								;\
3853	cmp	mseg, MSEG_NULLPTR_PA					;\
3854	be,pn	%xcc, label/**/_ok		/* if not found */	;\
3855	  nop								;\
3856	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
3857	cmp	pfn, tmp1			/* pfn - pages_base */	;\
3858	blu,a,pt %xcc, label/**/2					;\
3859	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3860	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
3861	cmp	pfn, tmp2			/* pfn - pages_end */	;\
3862	bgeu,a,pt %xcc, label/**/2					;\
3863	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
3864label/**/_ok:
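
/*
 * Rough C outline of the macro above (a sketch: every dereference in
 * the assembly is really a physical-address load through %asi, and the
 * field names mirror the assym offsets, not exact struct members):
 *
 *	mseg = msegphashpa[(pfn / mhash_per_slot) & (SFMMU_N_MEM_SLOTS - 1)];
 *	if (mseg != MSEG_NULLPTR_PA &&
 *	    pfn >= mseg->pages_base && pfn < mseg->pages_end &&
 *	    mseg->pagespa[pfn - mseg->pages_base].p_pagenum == pfn)
 *		return (mseg);			// hash hit
 *	// hash miss (e.g. DR changed the linkage): brute force list walk
 *	for (mseg = memsegspa; mseg != MSEG_NULLPTR_PA; mseg = mseg->nextpa)
 *		if (pfn >= mseg->pages_base && pfn < mseg->pages_end)
 *			break;
 *	return (mseg);		// MSEG_NULLPTR_PA if pfn is nowhere
 */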
3865
3866	/*
3867	 * kpm tsb miss handler for large pages
3868	 * g1 = 8K kpm TSB entry pointer
3869	 * g2 = tag access register
3870	 * g3 = 4M kpm TSB entry pointer
3871	 */
3872	ALTENTRY(sfmmu_kpm_dtsb_miss)
3873	TT_TRACE(trace_tsbmiss)
3874
3875	CPU_INDEX(%g7, %g6)
3876	sethi	%hi(kpmtsbm_area), %g6
3877	sllx	%g7, KPMTSBM_SHIFT, %g7
3878	or	%g6, %lo(kpmtsbm_area), %g6
3879	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
3880
3881	/* check enable flag */
3882	ldub	[%g6 + KPMTSBM_FLAGS], %g4
3883	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
3884	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
3885	  nop
3886
3887	/* VA range check */
3888	ldx	[%g6 + KPMTSBM_VBASE], %g7
3889	cmp	%g2, %g7
3890	blu,pn	%xcc, sfmmu_tsb_miss
3891	  ldx	[%g6 + KPMTSBM_VEND], %g5
3892	cmp	%g2, %g5
3893	bgeu,pn	%xcc, sfmmu_tsb_miss
3894	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]
3895
3896	/*
3897	 * check TL tsbmiss handling flag
3898	 * bump tsbmiss counter
3899	 */
3900	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
3901#ifdef	DEBUG
3902	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
3903	inc	%g5
3904	brz,pn	%g3, sfmmu_kpm_exception
3905	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
3906#else
3907	inc	%g5
3908	st	%g5, [%g6 + KPMTSBM_TSBMISS]
3909#endif
3910	/*
3911	 * At this point:
3912	 *  g1 = 8K kpm TSB pointer (not used)
3913	 *  g2 = tag access register
3914	 *  g3 = clobbered
3915	 *  g6 = per-CPU kpm tsbmiss area
3916	 *  g7 = kpm_vbase
3917	 */
3918
3919	/* vaddr2pfn */
3920	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
3921	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
3922	srax    %g4, %g3, %g2			/* which alias range (r) */
3923	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
3924	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
3925
3926	/*
3927	 * Setup %asi
3928	 * mseg_pa = page_numtomemseg_nolock(pfn)
3929	 * if (mseg_pa == NULL) sfmmu_kpm_exception
3930	 * g2=pfn
3931	 */
3932	mov	ASI_MEM, %asi
3933	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
3934	cmp	%g3, MSEG_NULLPTR_PA
3935	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
3936	  nop
3937
3938	/*
3939	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
3940	 * g2=pfn g3=mseg_pa
3941	 */
3942	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
3943	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
3944	srlx	%g2, %g5, %g4			/* ptopkpmp(pfn) */
3945	sllx	%g4, %g5, %g4			/* kpmptop(): 1st pfn of kpm pg */
3946	sub	%g4, %g7, %g4			/* - kpm_pbase */
3947	srlx	%g4, %g5, %g4			/* inx = ptokpmp(...) */
3948
3949	/*
3950	 * Validate inx value
3951	 * g2=pfn g3=mseg_pa g4=inx
3952	 */
3953#ifdef	DEBUG
3954	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
3955	cmp	%g4, %g5			/* inx - nkpmpgs */
3956	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
3957	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3958#else
3959	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
3960#endif
3961	/*
3962	 * kp = &mseg_pa->kpm_pages[inx]
3963	 */
3964	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
3965	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
3966	add	%g5, %g4, %g5			/* kp */
3967
3968	/*
3969	 * KPMP_HASH(kp)
3970	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
3971	 */
3972	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
3973	sub	%g7, 1, %g7			/* mask */
3974	srlx	%g5, %g1, %g1			/* x = kp >> kpmp_shift */
3975	add	%g5, %g1, %g5			/* y = kp + x */
3976	and 	%g5, %g7, %g5			/* hashinx = y & mask */
3977
3978	/*
3979	 * Calculate physical kpm_page pointer
3980	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
3981	 */
3982	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
3983	add	%g1, %g4, %g1			/* kp_pa */
3984
3985	/*
3986	 * Calculate physical hash lock address
3987	 * g1=kp_pa g2=pfn g5=hashinx
3988	 */
3989	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
3990	sllx	%g5, KPMHLK_SHIFT, %g5
3991	add	%g4, %g5, %g3
3992	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */
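
	/*
	 * The hash and lock address computation above, in C terms (a
	 * sketch; kpmp_shift comes from KPMTSBM_KPMPSHIFT and
	 * KPMHLK_SHIFT is the log2 size of a kpm_hlk_t entry):
	 *
	 *	hashinx = (kp + (kp >> kpmp_shift)) & (kpmp_table_sz - 1);
	 *	hlck_pa = kpmp_tablepa + (hashinx << KPMHLK_SHIFT) +
	 *	    KPMHLK_LOCK;
	 */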
3993
3994	/*
3995	 * Assemble tte
3996	 * g1=kp_pa g2=pfn g3=hlck_pa
3997	 */
3998#ifdef sun4v
3999	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4000	sllx	%g5, 32, %g5
4001	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4002	or	%g4, TTE4M, %g4
4003	or	%g5, %g4, %g5
4004#else
4005	sethi	%hi(TTE_VALID_INT), %g4
4006	mov	TTE4M, %g5
4007	sllx	%g5, TTE_SZ_SHFT_INT, %g5
4008	or	%g5, %g4, %g5			/* upper part */
4009	sllx	%g5, 32, %g5
4010	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4011	or	%g5, %g4, %g5
4012#endif
4013	sllx	%g2, MMU_PAGESHIFT, %g4
4014	or	%g5, %g4, %g5			/* tte */
4015	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4016	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
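
	/*
	 * The tte assembled above, in C terms (a sketch; on sun4u the
	 * TTE4M size bits go into the upper word via TTE_SZ_SHFT_INT,
	 * on sun4v TTE4M sits in the low word, as the two paths show):
	 *
	 *	tte = ((uint64_t)TTE_VALID_INT << 32) | size_bits |
	 *	    TTE_CP_INT | TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
	 *	    ((uint64_t)pfn << MMU_PAGESHIFT);
	 */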
4017
4018	/*
4019	 * tsb dropin
4020	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
4021	 */
4022
4023	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4024	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)
4025
4026	/* use C-handler if there's no go for dropin */
4027	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
4028	cmp	%g7, -1
4029	bne,pn	%xcc, 5f	/* kp_refcntc != -1: let C-handler deal with it */
4030	  nop
4031
4032#ifdef	DEBUG
4033	/* double check refcnt */
4034	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
4035	brz,pn	%g7, 5f			/* let C-handler deal with this */
4036	  nop
4037#endif
4038
4039#ifndef sun4v
4040	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4041	mov	ASI_N, %g1
4042	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4043	movnz	%icc, ASI_MEM, %g1
4044	mov	%g1, %asi
4045#endif
4046
4047	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
4048	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
4049
4050	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4051	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4052
4053	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)
4054
4055	/* KPMLOCK_EXIT(kpmlckp, asi) */
4056	KPMLOCK_EXIT(%g3, ASI_MEM)
4057
4058	/*
4059	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4060	 * point to trapstat's TSB miss return code (note that trapstat
4061	 * itself will patch the correct offset to add).
4062	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4063	 */
4064	rdpr	%tl, %g7
4065	cmp	%g7, 1
4066	ble	%icc, 0f
4067	sethi	%hi(KERNELBASE), %g6
4068	rdpr	%tpc, %g7
4069	or	%g6, %lo(KERNELBASE), %g6
4070	cmp	%g7, %g6
4071	bgeu	%xcc, 0f
4072	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
4073	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4074	wrpr	%g7, %tpc
4075	add	%g7, 4, %g7
4076	wrpr	%g7, %tnpc
40770:
4078	retry
40795:
4080	/* g3=hlck_pa */
4081	KPMLOCK_EXIT(%g3, ASI_MEM)
4082	ba,pt	%icc, sfmmu_kpm_exception
4083	  nop
4084	SET_SIZE(sfmmu_kpm_dtsb_miss)
4085
4086	/*
4087	 * kpm tsbmiss handler for smallpages
4088	 * g1 = 8K kpm TSB pointer
4089	 * g2 = tag access register
4090	 * g3 = 4M kpm TSB pointer
4091	 */
4092	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
4093	TT_TRACE(trace_tsbmiss)
4094	CPU_INDEX(%g7, %g6)
4095	sethi	%hi(kpmtsbm_area), %g6
4096	sllx	%g7, KPMTSBM_SHIFT, %g7
4097	or	%g6, %lo(kpmtsbm_area), %g6
4098	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */
4099
4100	/* check enable flag */
4101	ldub	[%g6 + KPMTSBM_FLAGS], %g4
4102	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
4103	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
4104	  nop
4105
4106	/*
4107	 * VA range check
4108	 * On fail: goto sfmmu_tsb_miss
4109	 */
4110	ldx	[%g6 + KPMTSBM_VBASE], %g7
4111	cmp	%g2, %g7
4112	blu,pn	%xcc, sfmmu_tsb_miss
4113	  ldx	[%g6 + KPMTSBM_VEND], %g5
4114	cmp	%g2, %g5
4115	bgeu,pn	%xcc, sfmmu_tsb_miss
4116	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */
4117
4118	/*
4119	 * check TL tsbmiss handling flag
4120	 * bump tsbmiss counter
4121	 */
4122	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
4123#ifdef	DEBUG
4124	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
4125	inc	%g5
4126	brz,pn	%g1, sfmmu_kpm_exception
4127	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
4128#else
4129	inc	%g5
4130	st	%g5, [%g6 + KPMTSBM_TSBMISS]
4131#endif
4132	/*
4133	 * At this point:
4134	 *  g1 = clobbered
4135	 *  g2 = tag access register
4136	 *  g3 = 4M kpm TSB pointer (not used)
4137	 *  g6 = per-CPU kpm tsbmiss area
4138	 *  g7 = kpm_vbase
4139	 */
4140
4141	/* vaddr2pfn */
4142	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
4143	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
4144	srax    %g4, %g3, %g2			/* which alias range (r) */
4145	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
4146	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */
4147
4148	/*
4149	 * Setup %asi
4150	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
4151	 * if (mseg not found) sfmmu_kpm_exception
4152	 * g2=pfn
4153	 */
4154	mov	ASI_MEM, %asi
4155	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
4156	cmp	%g3, MSEG_NULLPTR_PA
4157	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
4158	  nop
4159
4160	/*
4161	 * inx = pfn - mseg_pa->kpm_pbase
4162	 * g2=pfn g3=mseg_pa
4163	 */
4164	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
4165	sub	%g2, %g7, %g4
4166
4167#ifdef	DEBUG
4168	/*
4169	 * Validate inx value
4170	 * g2=pfn g3=mseg_pa g4=inx
4171	 */
4172	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
4173	cmp	%g4, %g5			/* inx - nkpmpgs */
4174	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
4175	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4176#else
4177	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
4178#endif
4179	/* ksp = &mseg_pa->kpm_spages[inx] */
4180	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
4181	add	%g5, %g4, %g5			/* ksp */
4182
4183	/*
4184	 * KPMP_SHASH(kp)
4185	 * g2=pfn g3=mseg_pa g4=inx g5=ksp g7=kpmp_stable_sz
4186	 */
4187	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
4188	sub	%g7, 1, %g7			/* mask */
4189	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
4190	add	%g5, %g1, %g5			/* y = ksp + x */
4191	and 	%g5, %g7, %g5			/* hashinx = y & mask */
4192
4193	/*
4194	 * Calculate physical kpm_spage pointer
4195	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
4196	 */
4197	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
4198	add	%g1, %g4, %g1			/* ksp_pa */
4199
4200	/*
4201	 * Calculate physical hash lock address.
4202	 * Note: Changes in kpm_shlk_t must be reflected here.
4203	 * g1=ksp_pa g2=pfn g5=hashinx
4204	 */
4205	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
4206	sllx	%g5, KPMSHLK_SHIFT, %g5
4207	add	%g4, %g5, %g3			/* hlck_pa */
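
	/*
	 * As above, in C terms (a sketch; KPMP_SHASH shifts left where
	 * KPMP_HASH shifts right, and no lock offset is added because
	 * the lock sits at offset 0 of a kpm_shlk_t entry):
	 *
	 *	hashinx = (ksp + (ksp << kpmp_shift)) & (kpmp_stable_sz - 1);
	 *	hlck_pa = kpmp_stablepa + (hashinx << KPMSHLK_SHIFT);
	 */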
4208
4209	/*
4210	 * Assemble tte
4211	 * g1=ksp_pa g2=pfn g3=hlck_pa
4212	 */
4213	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
4214	sllx	%g5, 32, %g5
4215	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
4216	or	%g5, %g4, %g5
4217	sllx	%g2, MMU_PAGESHIFT, %g4
4218	or	%g5, %g4, %g5			/* tte */
4219	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
4220	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
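
	/*
	 * Sketch: same composition as the 4M case above, but for an 8K
	 * tte (size code 0, so no size bits are set):
	 *
	 *	tte = ((uint64_t)TTE_VALID_INT << 32) | TTE_CP_INT |
	 *	    TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
	 *	    ((uint64_t)pfn << MMU_PAGESHIFT);
	 */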
4221
4222	/*
4223	 * tsb dropin
4224	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte
4225	 */
4226
4227	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
4228	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
4229
4230	/* use C-handler if there's no go for dropin */
4231	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
4232	cmp	%g7, -1
4233	bne,pn	%xcc, 5f
4234	  nop
4235
4236#ifndef sun4v
4237	ldub	[%g6 + KPMTSBM_FLAGS], %g7
4238	mov	ASI_N, %g1
4239	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
4240	movnz	%icc, ASI_MEM, %g1
4241	mov	%g1, %asi
4242#endif
4243
4244	/* TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set) */
4245	TSB_LOCK_ENTRY(%g4, %g1, %g7, 6)
4246
4247	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
4248	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
4249
4250	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)
4251
4252	/* KPMLOCK_EXIT(kpmlckp, asi) */
4253	KPMLOCK_EXIT(%g3, ASI_MEM)
4254
4255	/*
4256	 * If trapstat is running, we need to shift the %tpc and %tnpc to
4257	 * point to trapstat's TSB miss return code (note that trapstat
4258	 * itself will patch the correct offset to add).
4259	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
4260	 */
4261	rdpr	%tl, %g7
4262	cmp	%g7, 1
4263	ble	%icc, 0f
4264	sethi	%hi(KERNELBASE), %g6
4265	rdpr	%tpc, %g7
4266	or	%g6, %lo(KERNELBASE), %g6
4267	cmp	%g7, %g6
4268	bgeu	%xcc, 0f
4269	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
4270	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
4271	wrpr	%g7, %tpc
4272	add	%g7, 4, %g7
4273	wrpr	%g7, %tnpc
42740:
4275	retry
42765:
4277	/* g3=hlck_pa */
4278	KPMLOCK_EXIT(%g3, ASI_MEM)
4279	ba,pt	%icc, sfmmu_kpm_exception
4280	  nop
4281	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
4282
4283#if (1 << KPMTSBM_SHIFT) != KPMTSBM_SIZE
4284#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
4285#endif
4286
4287#endif /* lint */
4288
4289#ifdef	lint
4290/*
4291 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
4292 * Called from C-level, sets/clears "go" indication for trap level handler.
4293 * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
4294 * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
4295 * Assumes khl_mutex is held when called from C-level.
4296 */
4297/* ARGSUSED */
4298void
4299sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
4300{
4301}
4302
4303/*
4304 * kpm_smallpages: stores val to the byte at address "mapped", within
4305 * low-level lock brackets; the old value is returned.
4306 * Called from C-level.
4307 */
4308/* ARGSUSED */
4309int
4310sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
4311{
4312	return (0);
4313}
4314
4315#else /* lint */
4316
4317	.seg	".data"
4318sfmmu_kpm_tsbmtl_panic:
4319	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
4320	.byte	0
4321sfmmu_kpm_stsbmtl_panic:
4322	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
4323	.byte	0
4324	.align	4
4325	.seg	".text"
4326
4327	ENTRY_NP(sfmmu_kpm_tsbmtl)
4328	rdpr	%pstate, %o3
4329	/*
4330	 * %o0 = &kp_refcntc
4331	 * %o1 = &khl_lock
4332	 * %o2 = 0/1 (off/on)
4333	 * %o3 = pstate save
4334	 */
4335#ifdef DEBUG
4336	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4337	bnz,pt %icc, 1f				/* disabled, panic	 */
4338	  nop
4339	save	%sp, -SA(MINFRAME), %sp
4340	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
4341	call	panic
4342	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
4343	ret
4344	restore
43451:
4346#endif /* DEBUG */
4347	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4348
4349	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
4350	mov	-1, %o5
4351	brz,a	%o2, 2f
4352	  mov	0, %o5
43532:
4354	sth	%o5, [%o0]
4355	KPMLOCK_EXIT(%o1, ASI_N)
4356
4357	retl
4358	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4359	SET_SIZE(sfmmu_kpm_tsbmtl)
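
	/*
	 * C-level sketch of the routine above (kpmlock_enter/exit and
	 * the interrupt helpers are stand-ins, cf. the KPMLOCK sketch
	 * earlier; C code calls this assembly version so the store is
	 * done under the low level lock with interrupts disabled):
	 *
	 *	void
	 *	sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
	 *	{
	 *		disable_interrupts();
	 *		kpmlock_enter(khl_lock);
	 *		*kp_refcntc = (cmd != 0) ? -1 : 0;	// -1 == "go"
	 *		kpmlock_exit(khl_lock);
	 *		restore_interrupts();
	 *	}
	 */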
4360
4361	ENTRY_NP(sfmmu_kpm_stsbmtl)
4362	rdpr	%pstate, %o3
4363	/*
4364	 * %o0 = &mapped
4365	 * %o1 = &kshl_lock
4366	 * %o2 = val
4367	 * %o3 = pstate save
4368	 */
4369#ifdef DEBUG
4370	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
4371	bnz,pt %icc, 1f				/* disabled, panic	 */
4372	  nop
4373	save	%sp, -SA(MINFRAME), %sp
4374	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
4375	call	panic
4376	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
4377	ret
4378	restore
43791:
4380#endif /* DEBUG */
4381	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */
4382
4383	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
4384	ldsb	[%o0], %o5
4385	stb	%o2, [%o0]
4386	KPMLOCK_EXIT(%o1, ASI_N)
4387
4388	mov	%o5, %o0			/* return old val */
4389	retl
4390	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
4391	SET_SIZE(sfmmu_kpm_stsbmtl)
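
	/*
	 * And the matching sketch for sfmmu_kpm_stsbmtl() (same stand-in
	 * helpers as above):
	 *
	 *	int
	 *	sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
	 *	{
	 *		int prev;
	 *
	 *		disable_interrupts();
	 *		kpmlock_enter(kshl_lock);
	 *		prev = *mapped;		// old value (sign-extended)
	 *		*mapped = (char)val;
	 *		kpmlock_exit(kshl_lock);
	 *		restore_interrupts();
	 *		return (prev);
	 *	}
	 */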
4392
4393#endif /* lint */
4394
4395#ifndef lint
4396#ifdef sun4v
4397	/*
4398	 * User/kernel data miss w/ multiple TSBs
4399	 * The first probe covers 8K, 64K, and 512K page sizes,
4400	 * because 64K and 512K mappings are replicated off the 8K
4401	 * pointer.  The second probe covers the 4M page size only.
4402	 *
4403	 * MMU fault area contains miss address and context.
4404	 */
4405	ALTENTRY(sfmmu_slow_dmmu_miss)
4406	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
4407
4408slow_miss_common:
4409	/*
4410	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
4411	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
4412	 */
4413	brnz,pt	%g3, 8f			! check for user context
4414	  nop
4415
4416	/*
4417	 * Kernel miss
4418	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
4419	 * branch to sfmmu_tsb_miss_tt to handle it.
4420	 */
4421	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4422sfmmu_dslow_patch_ktsb_base:
4423	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
4424sfmmu_dslow_patch_ktsb_szcode:
4425	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
4426
4427	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
4428	! %g1 = First TSB entry pointer, as TSB miss handler expects
4429
4430	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
4431sfmmu_dslow_patch_ktsb4m_base:
4432	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
4433sfmmu_dslow_patch_ktsb4m_szcode:
4434	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
4435
4436	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
4437	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
4438	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4439	.empty
4440
44418:
4442	/*
4443	 * User miss
4444	 * Get first TSB pointer in %g1
4445	 * Get second TSB pointer (or NULL if no second TSB) in %g3
4446	 * Branch to sfmmu_tsb_miss_tt to handle it
4447	 */
4448	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
4449	/* %g1 = first TSB entry ptr now, %g2 preserved */
4450
4451	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
4452	brlz,a,pt %g3, sfmmu_tsb_miss_tt	/* done if no 2nd TSB */
4453	  mov	%g0, %g3
4454
4455	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
4456	/* %g3 = second TSB entry ptr now, %g2 preserved */
44579:
4458	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
4459	.empty
4460	SET_SIZE(sfmmu_slow_dmmu_miss)
4461
4462
4463	/*
4464	 * User/kernel instruction miss w/ multiple TSBs
4465	 * The first probe covers 8K, 64K, and 512K page sizes,
4466	 * because 64K and 512K mappings are replicated off the 8K
4467	 * pointer.  The second probe covers the 4M page size only.
4468	 *
4469	 * MMU fault area contains miss address and context.
4470	 */
4471	ALTENTRY(sfmmu_slow_immu_miss)
4472	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
4473	ba,a,pt	%xcc, slow_miss_common
4474	SET_SIZE(sfmmu_slow_immu_miss)
4475
4476#endif /* sun4v */
4477#endif	/* lint */
4478
4479#ifndef lint
4480
4481/*
4482 * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
4483 */
4484	.seg	".data"
4485	.align	64
4486	.global tsbmiss_area
4487tsbmiss_area:
4488	.skip	(TSBMISS_SIZE * NCPU)
4489
4490	.align	64
4491	.global kpmtsbm_area
4492kpmtsbm_area:
4493	.skip	(KPMTSBM_SIZE * NCPU)
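
	/*
	 * A rough C view of how a per-CPU slot is addressed (the TL
	 * handlers use CPU_INDEX plus a shift, which is why the entry
	 * sizes must be powers of two; pointer types are illustrative):
	 *
	 *	tsbmissp = (struct tsbmiss *)
	 *	    ((caddr_t)tsbmiss_area + (cpuid << TSBMISS_SHIFT));
	 *	kpmtsbmp = (struct kpmtsbm *)
	 *	    ((caddr_t)kpmtsbm_area + (cpuid << KPMTSBM_SHIFT));
	 */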
4494#endif	/* lint */
4495