/* xref: /titanic_44/usr/src/uts/sfmmu/ml/sfmmu_asm.s (revision d2365b013d4199b49b3a1438d57aea23423e02ad) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
257c478bd9Sstevel@tonic-gate
/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */
307c478bd9Sstevel@tonic-gate
#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/intr.h>
#include <sys/clock.h>
#include <sys/trapstat.h>
537c478bd9Sstevel@tonic-gate
#ifdef TRAPTRACE
#include <sys/traptrace.h>

/*
 * Tracing macro. Adds two instructions if TRAPTRACE is defined.
 * Branches to the trace label with the current %pc captured in %g7
 * (so the trace code can record/return to the call site).
 */
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#else

/* TRAPTRACE disabled: the macro expands to nothing. */
#define	TT_TRACE(label)

#endif /* TRAPTRACE */
687c478bd9Sstevel@tonic-gate
#ifndef	lint

/*
 * Shift a register's suspend bit into TTE position.  When
 * TTE_SUSPEND_SHIFT is 0 the bit is already in place and the macro
 * expands to nothing.
 */
#if (TTE_SUSPEND_SHIFT > 0)
#define	TTE_SUSPEND_INT_SHIFT(reg)				\
	sllx	reg, TTE_SUSPEND_SHIFT, reg
#else
#define	TTE_SUSPEND_INT_SHIFT(reg)
#endif

#endif /* lint */
797c478bd9Sstevel@tonic-gate
#ifndef	lint

/*
 * Assumes TSBE_TAG is 0
 * Assumes TSBE_INTHI is 0
 * Assumes TSBREG.split is 0
 */

#if TSBE_TAG != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBE_TAG = 0"
#endif

#if TSBTAG_INTHI != 0
#error "TSB_UPDATE and TSB_INVALIDATE assume TSBTAG_INTHI = 0"
#endif

/*
 * The following code assumes the tsb is not split.
 *
 * With TSBs no longer shared between processes, it's no longer
 * necessary to hash the context bits into the tsb index to get
 * tsb coloring; the new implementation treats the TSB as a
 * direct-mapped, virtually-addressed cache.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    tsbbase = base address of TSB (clobbered)
 *    tagacc = tag access register (clobbered)
 *    szc = size code of TSB (ro)
 *    tmp = scratch reg
 * Out:
 *    tsbbase = pointer to entry in TSB
 */
#define	GET_TSBE_POINTER(vpshift, tsbbase, tagacc, szc, tmp)		\
	mov	TSB_ENTRIES(0), tmp	/* nentries in TSB size 0 */	;\
	srlx	tagacc, vpshift, tagacc					;\
	sllx	tmp, szc, tmp		/* tmp = nentries in TSB */	;\
	sub	tmp, 1, tmp		/* mask = nentries - 1 */	;\
	and	tagacc, tmp, tmp	/* tsbent = virtpage & mask */	;\
	sllx	tmp, TSB_ENTRY_SHIFT, tmp	/* entry num --> ptr */	;\
	add	tsbbase, tmp, tsbbase	/* add entry offset to TSB base */
1217c478bd9Sstevel@tonic-gate
/*
 * When the kpm TSB is used it is assumed that it is direct mapped
 * using (vaddr>>vpshift)%tsbsz as the index.
 *
 * Note that, for now, the kpm TSB and kernel TSB are the same for
 * each mapping size.  However that need not always be the case.  If
 * the trap handlers are updated to search a different TSB for kpm
 * addresses than for kernel addresses then kpm_tsbbase and kpm_tsbsz
 * (and/or kpmsm_tsbbase/kpmsm_tsbsz) may be entirely independent.
 *
 * In:
 *    vpshift = virtual page shift; e.g. 13 for 8K TTEs (constant or ro)
 *    vaddr = virtual address (clobbered)
 *    tsbp, szc, tmp = scratch
 * Out:
 *    tsbp = pointer to entry in TSB
 */
#define	GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)		\
	cmp	vpshift, MMU_PAGESHIFT					;\
	bne,pn	%icc, 1f		/* branch if large case */	;\
	  sethi	%hi(kpmsm_tsbsz), szc					;\
	sethi	%hi(kpmsm_tsbbase), tsbp				;\
	ld	[szc + %lo(kpmsm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpmsm_tsbbase)], tsbp			;\
	ba,pt	%icc, 2f						;\
	  nop								;\
1:	sethi	%hi(kpm_tsbsz), szc					;\
	sethi	%hi(kpm_tsbbase), tsbp					;\
	ld	[szc + %lo(kpm_tsbsz)], szc				;\
	ldx	[tsbp + %lo(kpm_tsbbase)], tsbp				;\
2:	GET_TSBE_POINTER(vpshift, tsbp, vaddr, szc, tmp)
1537c478bd9Sstevel@tonic-gate
/*
 * Lock the TSBE at virtual address tsbep.
 *
 * tsbep = TSBE va (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 *
 * NOTE that we flush the TSB using fast VIS instructions that
 * set all 1's in the TSB tag, so TSBTAG_LOCKED|TSBTAG_INVALID must
 * not be treated as a locked entry or we'll get stuck spinning on
 * an entry that isn't locked but really invalid.
 *
 * The lock is taken with casa: if the tag still equals the value we
 * loaded, swap in TSBTAG_LOCKED; otherwise someone beat us to it and
 * we bail to "label".
 */

#if defined(UTSB_PHYS)

/* Physically-addressed user TSB: access via ASI_MEM. */
#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]ASI_MEM, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]ASI_MEM, tmp1, tmp2				;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#else /* UTSB_PHYS */

/* Virtually-addressed TSB: access via caller-selected %asi. */
#define	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			\
	lda	[tsbep]%asi, tmp1					;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
	cmp	tmp1, tmp2						;\
	be,a,pn	%icc, label		/* if locked ignore */		;\
	  nop								;\
	casa	[tsbep]%asi, tmp1, tmp2					;\
	cmp	tmp1, tmp2						;\
	bne,a,pn %icc, label		/* didn't lock so ignore */	;\
	  nop								;\
	/* tsbe lock acquired */					;\
	membar #StoreStore

#endif /* UTSB_PHYS */
1997c478bd9Sstevel@tonic-gate
/*
 * Atomically write TSBE at virtual address tsbep.
 *
 * The tte data is stored first; the intervening membar #StoreStore
 * orders it before the tag store, and writing the tag last both
 * publishes the entry and releases the lock taken by TSB_LOCK_ENTRY.
 *
 * tsbep = TSBE va (ro)
 * tte = TSBE TTE (ro)
 * tagtarget = TSBE tag (ro)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	add	tsbep, TSBE_TTE, tmp1					;\
	stxa	tte, [tmp1]ASI_MEM		/* write tte data */	;\
	membar #StoreStore						;\
	add	tsbep, TSBE_TAG, tmp1					;\
	stxa	tagtarget, [tmp1]ASI_MEM	/* write tte tag & unlock */

#else /* UTSB_PHYS */

#define	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		\
	stxa	tte, [tsbep + TSBE_TTE]%asi	/* write tte data */	;\
	membar #StoreStore						;\
	stxa	tagtarget, [tsbep + TSBE_TAG]%asi /* write tte tag & unlock */

#endif /* UTSB_PHYS */
2267c478bd9Sstevel@tonic-gate
/*
 * Load an entry into the TSB at TL > 0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = value of the TTE retrieved and loaded (wo)
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to jump to if we fail to lock the tsb entry
 * %asi = ASI to use for TSB access
 *
 * The sllx/srlx pair by TTARGET_VA_SHIFT clears the context bits from
 * tagtarget.  If the tte read from ttepa is not valid (brgez: sign bit,
 * i.e. the valid bit, is clear) the locked entry is re-marked invalid
 * in the annulled delay slot and we branch out.
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * I don't need to update the TSB then check for the valid tte.	;\
	 * TSB invalidate will spin till the entry is unlocked.	Note,	;\
	 * we always invalidate the hash table before we unload the TSB.;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE_TL(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * I don't need to update the TSB then check for the valid tte.	;\
	 * TSB invalidate will spin till the entry is unlocked.	Note,	;\
	 * we always invalidate the hash table before we unload the TSB.;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label						;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */
27825cf1a30Sjl139090
/*
 * Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
 *   for ITLB synthesis.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tte = 4M pfn offset (in), value of the TTE retrieved and loaded (out)
 *   with exec_perm turned off and exec_synth turned on
 * tagtarget = tag target register.  To get TSBE tag to load,
 *   we need to mask off the context and leave only the va (clobbered)
 * ttepa = pointer to the TTE to retrieve/load as pa (ro)
 * tmp1, tmp2 = scratch registers
 * label = label to use for branch (text)
 * %asi = ASI to use for TSB access
 */

#define	TSB_UPDATE_TL_PN(tsbep, tte, tagtarget, ttepa, tmp1, tmp2, label) \
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	/*								;\
	 * I don't need to update the TSB then check for the valid tte.	;\
	 * TSB invalidate will spin till the entry is unlocked.	Note,	;\
	 * we always invalidate the hash table before we unload the TSB.;\
	 * Or in 4M pfn offset to TTE and set the exec_perm bit to 0	;\
	 * and exec_synth bit to 1.					;\
	 */								;\
	sllx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	mov	tte, tmp1		/* save 4M pfn offset */	;\
	ldxa	[ttepa]ASI_MEM, tte					;\
	srlx	tagtarget, TTARGET_VA_SHIFT, tagtarget			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tte, label		/* skip if tte invalid */	;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	or	tte, tmp1, tte						;\
	andn	tte, TTE_EXECPRM_INT, tte	/* clear exec_perm */	;\
	or	tte, TTE_E_SYNTH_INT, tte	/* set exec_synth */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1)		;\
label:
3151bd453f3Ssusans
/*
 * Build a 4M pfn offset for a Panther 32M/256M page, for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (ro)
 * tagaccess = tag access register, used to get 4M pfn bits (ro)
 * pfn = 4M pfn bits shifted to offset for tte (out)
 * tmp1 = scratch register
 * label = label to use for branch (text)
 */

#define	GET_4M_PFN_OFF(tte, tagaccess, pfn, tmp, label)			\
	/*								;\
	 * Get 4M bits from tagaccess for 32M, 256M pagesizes.		;\
	 * Return them, shifted, in pfn.				;\
	 */								;\
	srlx	tagaccess, MMU_PAGESHIFT4M, tagaccess			;\
	srlx	tte, TTE_SZ_SHFT, tmp		/* isolate the */	;\
	andcc	tmp, TTE_SZ_BITS, %g0		/* tte_size bits */	;\
	bz,a,pt %icc, label/**/f		/* if 0, is */		;\
	  and	tagaccess, 0x7, tagaccess	/* 32M page size */	;\
	and	tagaccess, 0x3f, tagaccess /* else 256M page size */	;\
label:									;\
	sllx	tagaccess, MMU_PAGESHIFT4M, pfn
3391bd453f3Ssusans
/*
 * Add 4M TTE size code to a tte for a Panther 32M/256M page,
 * for ITLB synthesis.
 *
 * tte = value of the TTE, used to get tte_size bits (rw)
 * tmp1 = scratch register
 */

#define	SET_TTE4M_PN(tte, tmp)						\
	/*								;\
	 * Set 4M pagesize tte bits.					;\
	 */								;\
	set	TTE4M, tmp						;\
	sllx	tmp, TTE_SZ_SHFT, tmp					;\
	or	tte, tmp, tte
3551bd453f3Ssusans
/*
 * Load an entry into the TSB at TL=0.
 *
 * tsbep = pointer to the TSBE to load as va (ro)
 * tteva = pointer to the TTE to load as va (ro)
 * tagtarget = TSBE tag to load (which contains no context), synthesized
 * to match va of MMU tag target register only (ro)
 * tmp1, tmp2 = scratch registers (clobbered)
 * label = label to use for branches (text)
 * %asi = ASI to use for TSB access
 */

#if defined(UTSB_PHYS)

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	add	tsbep, TSBE_TAG, tmp1					;\
	brgez,a,pn tteva, label		/* skip if tte invalid */	;\
	 sta	tmp2, [tmp1]ASI_MEM			/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#else /* UTSB_PHYS */

#define	TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label)		\
	/* can't rd tteva after locking tsb because it can tlb miss */	;\
	ldx	[tteva], tteva			/* load tte */		;\
	TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label)			;\
	sethi	%hi(TSBTAG_INVALID), tmp2				;\
	brgez,a,pn tteva, label		/* skip if tte invalid */	;\
	 sta	tmp2, [tsbep + TSBE_TAG]%asi		/* unlock */	;\
	TSB_INSERT_UNLOCK_ENTRY(tsbep, tteva, tagtarget, tmp1)		;\
label:

#endif /* UTSB_PHYS */
3947c478bd9Sstevel@tonic-gate
/*
 * Invalidate a TSB entry in the TSB.
 *
 * NOTE: TSBE_TAG is assumed to be zero.  There is a compile time check
 *	 about this earlier to ensure this is true.  Thus when we are
 *	 directly referencing tsbep below, we are referencing the tte_tag
 *	 field of the TSBE.  If this  offset ever changes, the code below
 *	 will need to be modified.
 *
 * tsbep = pointer to TSBE as va (ro)
 * tag = invalidation is done if this matches the TSBE tag (ro)
 * tmp1 - tmp3 = scratch registers (clobbered)
 * label = label name to use for branches (text)
 * %asi = ASI to use for TSB access
 *
 * Spins while the entry is locked, then invalidates with casa only if
 * the tag still matches; a lost casa race restarts the whole sequence.
 */

#if defined(UTSB_PHYS)

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]ASI_MEM, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]ASI_MEM, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]ASI_MEM, tmp1, tmp3 /* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]ASI_MEM, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#else /* UTSB_PHYS */

#define	TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label)		\
	lda	[tsbep]%asi, tmp1	/* tmp1 = tsbe tag */		;\
	sethi	%hi(TSBTAG_LOCKED), tmp2				;\
label/**/1:								;\
	cmp	tmp1, tmp2		/* see if tsbe is locked, if */	;\
	be,a,pn	%icc, label/**/1	/* so, loop until unlocked */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading value each time */	;\
	ldxa	[tsbep]%asi, tmp3	/* tmp3 = tsbe tag */		;\
	cmp	tag, tmp3		/* compare tags */		;\
	bne,pt	%xcc, label/**/2	/* if different, do nothing */	;\
	  sethi	%hi(TSBTAG_INVALID), tmp3				;\
	casa	[tsbep]%asi, tmp1, tmp3	/* try to set tag invalid */	;\
	cmp	tmp1, tmp3		/* if not successful */		;\
	bne,a,pn %icc, label/**/1	/* start over from the top */	;\
	  lda	[tsbep]%asi, tmp1	/* reloading tsbe tag */	;\
label/**/2:

#endif /* UTSB_PHYS */

#if TSB_SOFTSZ_MASK < TSB_SZ_MASK
#error	- TSB_SOFTSZ_MASK too small
#endif
4547c478bd9Sstevel@tonic-gate
4557c478bd9Sstevel@tonic-gate
/*
 * An implementation of setx which will be hot patched at run time.
 * since it is being hot patched, there is no value passed in.
 * Thus, essentially we are implementing
 *	setx value, tmp, dest
 * where value is RUNTIME_PATCH (aka 0) in this case.
 *
 * The hh/hm halves are built in tmp and the lm/lo halves in dest,
 * then combined; the immediates are rewritten in place at boot.
 */
#define	RUNTIME_PATCH_SETX(dest, tmp)					\
	sethi	%hh(RUNTIME_PATCH), tmp					;\
	sethi	%lm(RUNTIME_PATCH), dest				;\
	or	tmp, %hm(RUNTIME_PATCH), tmp				;\
	or	dest, %lo(RUNTIME_PATCH), dest				;\
	sllx	tmp, 32, tmp						;\
	nop				/* for perf reasons */		;\
	or	tmp, dest, dest		/* contents of patched value */

#endif /* lint */
4737c478bd9Sstevel@tonic-gate
4747c478bd9Sstevel@tonic-gate
4757c478bd9Sstevel@tonic-gate#if defined (lint)
4767c478bd9Sstevel@tonic-gate
4777c478bd9Sstevel@tonic-gate/*
4787c478bd9Sstevel@tonic-gate * sfmmu related subroutines
4797c478bd9Sstevel@tonic-gate */
4801e2e7a75Shuahuint_t
4811e2e7a75Shuahsfmmu_disable_intrs()
4821e2e7a75Shuah{ return(0); }
4831e2e7a75Shuah
4841e2e7a75Shuah/* ARGSUSED */
4851e2e7a75Shuahvoid
4861e2e7a75Shuahsfmmu_enable_intrs(uint_t pstate_save)
4871e2e7a75Shuah{}
4881e2e7a75Shuah
4891e2e7a75Shuah/* ARGSUSED */
49005d3dc4bSpaulsanint
49105d3dc4bSpaulsansfmmu_alloc_ctx(sfmmu_t *sfmmup, int allocflag, struct cpu *cp, int shflag)
49205d3dc4bSpaulsan{ return(0); }
4937c478bd9Sstevel@tonic-gate
4947c478bd9Sstevel@tonic-gate/*
4957c478bd9Sstevel@tonic-gate * Use cas, if tte has changed underneath us then reread and try again.
4967c478bd9Sstevel@tonic-gate * In the case of a retry, it will update sttep with the new original.
4977c478bd9Sstevel@tonic-gate */
4987c478bd9Sstevel@tonic-gate/* ARGSUSED */
4997c478bd9Sstevel@tonic-gateint
5007c478bd9Sstevel@tonic-gatesfmmu_modifytte(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
5017c478bd9Sstevel@tonic-gate{ return(0); }
5027c478bd9Sstevel@tonic-gate
5037c478bd9Sstevel@tonic-gate/*
5047c478bd9Sstevel@tonic-gate * Use cas, if tte has changed underneath us then return 1, else return 0
5057c478bd9Sstevel@tonic-gate */
5067c478bd9Sstevel@tonic-gate/* ARGSUSED */
5077c478bd9Sstevel@tonic-gateint
5087c478bd9Sstevel@tonic-gatesfmmu_modifytte_try(tte_t *sttep, tte_t *stmodttep, tte_t *dttep)
5097c478bd9Sstevel@tonic-gate{ return(0); }
5107c478bd9Sstevel@tonic-gate
5117c478bd9Sstevel@tonic-gate/* ARGSUSED */
5127c478bd9Sstevel@tonic-gatevoid
5137c478bd9Sstevel@tonic-gatesfmmu_copytte(tte_t *sttep, tte_t *dttep)
5147c478bd9Sstevel@tonic-gate{}
5157c478bd9Sstevel@tonic-gate
5167c478bd9Sstevel@tonic-gate/*ARGSUSED*/
5177c478bd9Sstevel@tonic-gatestruct tsbe *
5187c478bd9Sstevel@tonic-gatesfmmu_get_tsbe(uint64_t tsbeptr, caddr_t vaddr, int vpshift, int tsb_szc)
5197c478bd9Sstevel@tonic-gate{ return(0); }
5207c478bd9Sstevel@tonic-gate
5217c478bd9Sstevel@tonic-gate/*ARGSUSED*/
5227c478bd9Sstevel@tonic-gateuint64_t
5237c478bd9Sstevel@tonic-gatesfmmu_make_tsbtag(caddr_t va)
5247c478bd9Sstevel@tonic-gate{ return(0); }
5257c478bd9Sstevel@tonic-gate
5267c478bd9Sstevel@tonic-gate#else	/* lint */
5277c478bd9Sstevel@tonic-gate
5287c478bd9Sstevel@tonic-gate	.seg	".data"
5297c478bd9Sstevel@tonic-gate	.global	sfmmu_panic1
5307c478bd9Sstevel@tonic-gatesfmmu_panic1:
5317c478bd9Sstevel@tonic-gate	.asciz	"sfmmu_asm: interrupts already disabled"
5327c478bd9Sstevel@tonic-gate
5337c478bd9Sstevel@tonic-gate	.global	sfmmu_panic3
5347c478bd9Sstevel@tonic-gatesfmmu_panic3:
5357c478bd9Sstevel@tonic-gate	.asciz	"sfmmu_asm: sfmmu_vatopfn called for user"
5367c478bd9Sstevel@tonic-gate
5377c478bd9Sstevel@tonic-gate	.global	sfmmu_panic4
5387c478bd9Sstevel@tonic-gatesfmmu_panic4:
5397c478bd9Sstevel@tonic-gate	.asciz	"sfmmu_asm: 4M tsb pointer mis-match"
5407c478bd9Sstevel@tonic-gate
5417c478bd9Sstevel@tonic-gate	.global	sfmmu_panic5
5427c478bd9Sstevel@tonic-gatesfmmu_panic5:
5437c478bd9Sstevel@tonic-gate	.asciz	"sfmmu_asm: no unlocked TTEs in TLB 0"
5447c478bd9Sstevel@tonic-gate
5451e2e7a75Shuah	.global	sfmmu_panic6
5461e2e7a75Shuahsfmmu_panic6:
5471e2e7a75Shuah	.asciz	"sfmmu_asm: interrupts not disabled"
5481e2e7a75Shuah
5491e2e7a75Shuah	.global	sfmmu_panic7
5501e2e7a75Shuahsfmmu_panic7:
5511e2e7a75Shuah	.asciz	"sfmmu_asm: kernel as"
5521e2e7a75Shuah
5531e2e7a75Shuah	.global	sfmmu_panic8
5541e2e7a75Shuahsfmmu_panic8:
5551e2e7a75Shuah	.asciz	"sfmmu_asm: gnum is zero"
5561e2e7a75Shuah
5571e2e7a75Shuah	.global	sfmmu_panic9
5581e2e7a75Shuahsfmmu_panic9:
5591e2e7a75Shuah	.asciz	"sfmmu_asm: cnum is greater than MAX_SFMMU_CTX_VAL"
5601e2e7a75Shuah
56105d3dc4bSpaulsan	.global	sfmmu_panic10
56205d3dc4bSpaulsansfmmu_panic10:
56305d3dc4bSpaulsan	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"
56405d3dc4bSpaulsan
565125be069SJason Beloro	.global	sfmmu_panic11
566125be069SJason Belorosfmmu_panic11:
567125be069SJason Beloro	.asciz	"sfmmu_asm: ktsb_phys must not be 0 on a sun4v platform"
568125be069SJason Beloro
5691e2e7a75Shuah        ENTRY(sfmmu_disable_intrs)
5701e2e7a75Shuah        rdpr    %pstate, %o0
5711e2e7a75Shuah#ifdef DEBUG
5721e2e7a75Shuah	PANIC_IF_INTR_DISABLED_PSTR(%o0, sfmmu_di_l0, %g1)
5731e2e7a75Shuah#endif /* DEBUG */
5741e2e7a75Shuah        retl
5751e2e7a75Shuah          wrpr   %o0, PSTATE_IE, %pstate
5761e2e7a75Shuah        SET_SIZE(sfmmu_disable_intrs)
5771e2e7a75Shuah
5781e2e7a75Shuah	ENTRY(sfmmu_enable_intrs)
5791e2e7a75Shuah        retl
5801e2e7a75Shuah          wrpr    %g0, %o0, %pstate
5811e2e7a75Shuah        SET_SIZE(sfmmu_enable_intrs)
5821e2e7a75Shuah
5831e2e7a75Shuah/*
5841e2e7a75Shuah * This routine is called both by resume() and sfmmu_get_ctx() to
5851e2e7a75Shuah * allocate a new context for the process on a MMU.
5861e2e7a75Shuah * if allocflag == 1, then alloc ctx when HAT mmu cnum == INVALID .
5871e2e7a75Shuah * if allocflag == 0, then do not alloc ctx if HAT mmu cnum == INVALID, which
5881e2e7a75Shuah * is the case when sfmmu_alloc_ctx is called from resume().
5891e2e7a75Shuah *
5901e2e7a75Shuah * The caller must disable interrupts before entering this routine.
5911e2e7a75Shuah * To reduce ctx switch overhead, the code contains both 'fast path' and
5921e2e7a75Shuah * 'slow path' code. The fast path code covers the common case where only
5931e2e7a75Shuah * a quick check is needed and the real ctx allocation is not required.
5941e2e7a75Shuah * It can be done without holding the per-process (PP) lock.
5951e2e7a75Shuah * The 'slow path' code must be protected by the PP Lock and performs ctx
5961e2e7a75Shuah * allocation.
5971e2e7a75Shuah * Hardware context register and HAT mmu cnum are updated accordingly.
5981e2e7a75Shuah *
5991e2e7a75Shuah * %o0 - sfmmup
6001e2e7a75Shuah * %o1 - allocflag
6011e2e7a75Shuah * %o2 - CPU
60205d3dc4bSpaulsan * %o3 - sfmmu private/shared flag
60305d3dc4bSpaulsan *
60405d3dc4bSpaulsan * ret - 0: no ctx is allocated
60505d3dc4bSpaulsan *       1: a ctx is allocated
6061e2e7a75Shuah */
6071e2e7a75Shuah        ENTRY_NP(sfmmu_alloc_ctx)
6081e2e7a75Shuah
6091e2e7a75Shuah#ifdef DEBUG
61005d3dc4bSpaulsan	sethi   %hi(ksfmmup), %g1
61105d3dc4bSpaulsan	ldx     [%g1 + %lo(ksfmmup)], %g1
61205d3dc4bSpaulsan	cmp     %g1, %o0
6131e2e7a75Shuah	bne,pt   %xcc, 0f
6141e2e7a75Shuah	  nop
6151e2e7a75Shuah
6161e2e7a75Shuah	sethi   %hi(panicstr), %g1		! if kernel as, panic
6171e2e7a75Shuah        ldx     [%g1 + %lo(panicstr)], %g1
6181e2e7a75Shuah        tst     %g1
6191e2e7a75Shuah        bnz,pn  %icc, 7f
6201e2e7a75Shuah          nop
6211e2e7a75Shuah
6221e2e7a75Shuah	sethi	%hi(sfmmu_panic7), %o0
6231e2e7a75Shuah	call	panic
6241e2e7a75Shuah	  or	%o0, %lo(sfmmu_panic7), %o0
6251e2e7a75Shuah
6261e2e7a75Shuah7:
6271e2e7a75Shuah	retl
62805d3dc4bSpaulsan	  mov	%g0, %o0			! %o0 = ret = 0
6291e2e7a75Shuah
6301e2e7a75Shuah0:
6311e2e7a75Shuah	PANIC_IF_INTR_ENABLED_PSTR(sfmmu_ei_l1, %g1)
6321e2e7a75Shuah#endif /* DEBUG */
6331e2e7a75Shuah
63405d3dc4bSpaulsan	mov	%o3, %g1			! save sfmmu pri/sh flag in %g1
63505d3dc4bSpaulsan
6361e2e7a75Shuah	! load global mmu_ctxp info
6371e2e7a75Shuah	ldx	[%o2 + CPU_MMU_CTXP], %o3		! %o3 = mmu_ctx_t ptr
638*d2365b01SPavel Tatashin
639*d2365b01SPavel Tatashin#ifdef sun4v
640*d2365b01SPavel Tatashin	/* During suspend on sun4v, context domains can be temporary removed */
641*d2365b01SPavel Tatashin	brz,a,pn       %o3, 0f
642*d2365b01SPavel Tatashin	  nop
643*d2365b01SPavel Tatashin#endif
644*d2365b01SPavel Tatashin
6451e2e7a75Shuah        lduw	[%o2 + CPU_MMU_IDX], %g2		! %g2 = mmu index
6461e2e7a75Shuah
6471e2e7a75Shuah	! load global mmu_ctxp gnum
6481e2e7a75Shuah	ldx	[%o3 + MMU_CTX_GNUM], %o4		! %o4 = mmu_ctxp->gnum
6491e2e7a75Shuah
6501e2e7a75Shuah#ifdef DEBUG
6511e2e7a75Shuah	cmp	%o4, %g0		! mmu_ctxp->gnum should never be 0
6521e2e7a75Shuah	bne,pt	%xcc, 3f
6531e2e7a75Shuah	  nop
6541e2e7a75Shuah
6551e2e7a75Shuah	sethi   %hi(panicstr), %g1	! test if panicstr is already set
6561e2e7a75Shuah        ldx     [%g1 + %lo(panicstr)], %g1
6571e2e7a75Shuah        tst     %g1
65805d3dc4bSpaulsan        bnz,pn  %icc, 1f
6591e2e7a75Shuah          nop
6601e2e7a75Shuah
6611e2e7a75Shuah	sethi	%hi(sfmmu_panic8), %o0
6621e2e7a75Shuah	call	panic
6631e2e7a75Shuah	  or	%o0, %lo(sfmmu_panic8), %o0
66405d3dc4bSpaulsan1:
66505d3dc4bSpaulsan	retl
66605d3dc4bSpaulsan	  mov	%g0, %o0			! %o0 = ret = 0
6671e2e7a75Shuah3:
6681e2e7a75Shuah#endif
6691e2e7a75Shuah
6701e2e7a75Shuah	! load HAT sfmmu_ctxs[mmuid] gnum, cnum
6711e2e7a75Shuah
6721e2e7a75Shuah	sllx	%g2, SFMMU_MMU_CTX_SHIFT, %g2
6731e2e7a75Shuah	add	%o0, %g2, %g2		! %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
6741e2e7a75Shuah
6751e2e7a75Shuah	/*
6761e2e7a75Shuah	 * %g5 = sfmmu gnum returned
6771e2e7a75Shuah	 * %g6 = sfmmu cnum returned
6781e2e7a75Shuah	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
6791e2e7a75Shuah	 * %g4 = scratch
6801e2e7a75Shuah	 *
6811e2e7a75Shuah	 * Fast path code, do a quick check.
6821e2e7a75Shuah	 */
6831e2e7a75Shuah	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
6841e2e7a75Shuah
6851e2e7a75Shuah	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
6861e2e7a75Shuah	bne,pt	%icc, 1f			! valid hat cnum, check gnum
6871e2e7a75Shuah	  nop
6881e2e7a75Shuah
6891e2e7a75Shuah	! cnum == INVALID, check allocflag
69005d3dc4bSpaulsan	mov	%g0, %g4	! %g4 = ret = 0
6911e2e7a75Shuah	brz,pt  %o1, 8f		! allocflag == 0, skip ctx allocation, bail
6921e2e7a75Shuah	  mov	%g6, %o1
6931e2e7a75Shuah
6941e2e7a75Shuah	! (invalid HAT cnum) && (allocflag == 1)
6951e2e7a75Shuah	ba,pt	%icc, 2f
6961e2e7a75Shuah	  nop
697*d2365b01SPavel Tatashin#ifdef sun4v
698*d2365b01SPavel Tatashin0:
699*d2365b01SPavel Tatashin	set	INVALID_CONTEXT, %o1
700*d2365b01SPavel Tatashin	membar	#LoadStore|#StoreStore
701*d2365b01SPavel Tatashin	ba,pt	%icc, 8f
702*d2365b01SPavel Tatashin	  mov   %g0, %g4                ! %g4 = ret = 0
703*d2365b01SPavel Tatashin#endif
7041e2e7a75Shuah1:
7051e2e7a75Shuah	! valid HAT cnum, check gnum
7061e2e7a75Shuah	cmp	%g5, %o4
70705d3dc4bSpaulsan	mov	1, %g4				!%g4 = ret = 1
7081e2e7a75Shuah	be,a,pt	%icc, 8f			! gnum unchanged, go to done
7091e2e7a75Shuah	  mov	%g6, %o1
7101e2e7a75Shuah
7111e2e7a75Shuah2:
7121e2e7a75Shuah	/*
7131e2e7a75Shuah	 * Grab per process (PP) sfmmu_ctx_lock spinlock,
7141e2e7a75Shuah	 * followed by the 'slow path' code.
7151e2e7a75Shuah	 */
7161e2e7a75Shuah	ldstub	[%o0 + SFMMU_CTX_LOCK], %g3	! %g3 = per process (PP) lock
7171e2e7a75Shuah3:
7181e2e7a75Shuah	brz	%g3, 5f
7191e2e7a75Shuah	  nop
7201e2e7a75Shuah4:
7211e2e7a75Shuah	brnz,a,pt       %g3, 4b				! spin if lock is 1
7221e2e7a75Shuah	  ldub	[%o0 + SFMMU_CTX_LOCK], %g3
7231e2e7a75Shuah	ba	%xcc, 3b				! retry the lock
7241e2e7a75Shuah	  ldstub	[%o0 + SFMMU_CTX_LOCK], %g3    ! %g3 = PP lock
7251e2e7a75Shuah
7261e2e7a75Shuah5:
7271e2e7a75Shuah	membar  #LoadLoad
7281e2e7a75Shuah	/*
7291e2e7a75Shuah	 * %g5 = sfmmu gnum returned
7301e2e7a75Shuah	 * %g6 = sfmmu cnum returned
7311e2e7a75Shuah	 * %g2 = &sfmmu_ctxs[mmuid] - SFMMU_CTXS
7321e2e7a75Shuah	 * %g4 = scratch
7331e2e7a75Shuah	 */
7341e2e7a75Shuah	SFMMU_MMUID_GNUM_CNUM(%g2, %g5, %g6, %g4)
7351e2e7a75Shuah
7361e2e7a75Shuah	cmp	%g6, INVALID_CONTEXT		! hat cnum == INVALID ??
7371e2e7a75Shuah	bne,pt	%icc, 1f			! valid hat cnum, check gnum
7381e2e7a75Shuah	  nop
7391e2e7a75Shuah
7401e2e7a75Shuah	! cnum == INVALID, check allocflag
74105d3dc4bSpaulsan	mov	%g0, %g4	! %g4 = ret = 0
7421e2e7a75Shuah	brz,pt	%o1, 2f		! allocflag == 0, called from resume, set hw
7431e2e7a75Shuah	  mov	%g6, %o1
7441e2e7a75Shuah
7451e2e7a75Shuah	! (invalid HAT cnum) && (allocflag == 1)
7461e2e7a75Shuah	ba,pt	%icc, 6f
7471e2e7a75Shuah	  nop
7481e2e7a75Shuah1:
7491e2e7a75Shuah	! valid HAT cnum, check gnum
7501e2e7a75Shuah	cmp	%g5, %o4
75105d3dc4bSpaulsan	mov	1, %g4				! %g4 = ret  = 1
7521e2e7a75Shuah	be,a,pt	%icc, 2f			! gnum unchanged, go to done
7531e2e7a75Shuah	  mov	%g6, %o1
7541e2e7a75Shuah
7551e2e7a75Shuah	ba,pt	%icc, 6f
7561e2e7a75Shuah	  nop
7571e2e7a75Shuah2:
7581e2e7a75Shuah	membar  #LoadStore|#StoreStore
7591e2e7a75Shuah	ba,pt %icc, 8f
7601e2e7a75Shuah	  clrb  [%o0 + SFMMU_CTX_LOCK]
7611e2e7a75Shuah6:
7621e2e7a75Shuah	/*
7631e2e7a75Shuah	 * We get here if we do not have a valid context, or
7641e2e7a75Shuah	 * the HAT gnum does not match global gnum. We hold
7651e2e7a75Shuah	 * sfmmu_ctx_lock spinlock. Allocate that context.
7661e2e7a75Shuah	 *
7671e2e7a75Shuah	 * %o3 = mmu_ctxp
7681e2e7a75Shuah	 */
7691e2e7a75Shuah	add	%o3, MMU_CTX_CNUM, %g3
7701e2e7a75Shuah	ld	[%o3 + MMU_CTX_NCTXS], %g4
7711e2e7a75Shuah
7721e2e7a75Shuah	/*
7731e2e7a75Shuah         * %g2 = &sfmmu_ctx_t[mmuid] - SFMMU_CTXS;
7741e2e7a75Shuah         * %g3 = mmu cnum address
7751e2e7a75Shuah	 * %g4 = mmu nctxs
7761e2e7a75Shuah	 *
7771e2e7a75Shuah	 * %o0 = sfmmup
7781e2e7a75Shuah	 * %o1 = mmu current cnum value (used as new cnum)
7791e2e7a75Shuah	 * %o4 = mmu gnum
7801e2e7a75Shuah	 *
7811e2e7a75Shuah	 * %o5 = scratch
7821e2e7a75Shuah	 */
7831e2e7a75Shuah	ld	[%g3], %o1
7841e2e7a75Shuah0:
7851e2e7a75Shuah	cmp	%o1, %g4
7861e2e7a75Shuah	bl,a,pt %icc, 1f
7871e2e7a75Shuah	  add	%o1, 1, %o5		! %o5 = mmu_ctxp->cnum + 1
7881e2e7a75Shuah
7891e2e7a75Shuah	/*
79005d3dc4bSpaulsan	 * cnum reachs max, bail, so wrap around can be performed later.
7911e2e7a75Shuah	 */
7921e2e7a75Shuah	set	INVALID_CONTEXT, %o1
79305d3dc4bSpaulsan	mov	%g0, %g4		! %g4 = ret = 0
7941e2e7a75Shuah
7951e2e7a75Shuah	membar  #LoadStore|#StoreStore
7961e2e7a75Shuah	ba,pt	%icc, 8f
7971e2e7a75Shuah	  clrb	[%o0 + SFMMU_CTX_LOCK]
7981e2e7a75Shuah1:
7991e2e7a75Shuah	! %g3 = addr of mmu_ctxp->cnum
8001e2e7a75Shuah	! %o5 = mmu_ctxp->cnum + 1
8011e2e7a75Shuah	cas	[%g3], %o1, %o5
8021e2e7a75Shuah	cmp	%o1, %o5
8031e2e7a75Shuah	bne,a,pn %xcc, 0b	! cas failed
8041e2e7a75Shuah	  ld	[%g3], %o1
8051e2e7a75Shuah
8061e2e7a75Shuah#ifdef DEBUG
8071e2e7a75Shuah        set	MAX_SFMMU_CTX_VAL, %o5
8081e2e7a75Shuah	cmp	%o1, %o5
8091e2e7a75Shuah	ble,pt %icc, 2f
8101e2e7a75Shuah	  nop
8111e2e7a75Shuah
8121e2e7a75Shuah	sethi	%hi(sfmmu_panic9), %o0
8131e2e7a75Shuah	call	panic
8141e2e7a75Shuah	  or	%o0, %lo(sfmmu_panic9), %o0
8151e2e7a75Shuah2:
8161e2e7a75Shuah#endif
8171e2e7a75Shuah	! update hat gnum and cnum
8181e2e7a75Shuah	sllx	%o4, SFMMU_MMU_GNUM_RSHIFT, %o4
8191e2e7a75Shuah	or	%o4, %o1, %o4
8201e2e7a75Shuah	stx	%o4, [%g2 + SFMMU_CTXS]
8211e2e7a75Shuah
8221e2e7a75Shuah	membar  #LoadStore|#StoreStore
8231e2e7a75Shuah	clrb	[%o0 + SFMMU_CTX_LOCK]
8241e2e7a75Shuah
82505d3dc4bSpaulsan	mov	1, %g4			! %g4 = ret = 1
8261e2e7a75Shuah8:
8271e2e7a75Shuah	/*
8281e2e7a75Shuah	 * program the secondary context register
8291e2e7a75Shuah	 *
8301e2e7a75Shuah	 * %o1 = cnum
83105d3dc4bSpaulsan	 * %g1 = sfmmu private/shared flag (0:private,  1:shared)
8321e2e7a75Shuah	 */
83305d3dc4bSpaulsan
834f0856d05Ssm142603	/*
835f0856d05Ssm142603	 * When we come here and context is invalid, we want to set both
836f0856d05Ssm142603	 * private and shared ctx regs to INVALID. In order to
837f0856d05Ssm142603	 * do so, we set the sfmmu priv/shared flag to 'private' regardless
838f0856d05Ssm142603	 * so that private ctx reg will be set to invalid.
839f0856d05Ssm142603	 * Note that on sun4v values written to private context register are
840f0856d05Ssm142603	 * automatically written to corresponding shared context register as
841f0856d05Ssm142603	 * well. On sun4u SET_SECCTX() will invalidate shared context register
842f0856d05Ssm142603	 * when it sets a private secondary context register.
843f0856d05Ssm142603	 */
844f0856d05Ssm142603
845f0856d05Ssm142603	cmp	%o1, INVALID_CONTEXT
846f0856d05Ssm142603	be,a,pn	%icc, 9f
847f0856d05Ssm142603	  clr	%g1
848f0856d05Ssm1426039:
849f0856d05Ssm142603
8501e2e7a75Shuah#ifdef	sun4u
8511e2e7a75Shuah	ldub	[%o0 + SFMMU_CEXT], %o2
8521e2e7a75Shuah	sll	%o2, CTXREG_EXT_SHIFT, %o2
8531e2e7a75Shuah	or	%o1, %o2, %o1
8541426d65aSsm142603#endif /* sun4u */
8551426d65aSsm142603
8561426d65aSsm142603	SET_SECCTX(%o1, %g1, %o4, %o5, alloc_ctx_lbl1)
8571e2e7a75Shuah
8581e2e7a75Shuah        retl
85905d3dc4bSpaulsan          mov   %g4, %o0                        ! %o0 = ret
8601e2e7a75Shuah
8611e2e7a75Shuah	SET_SIZE(sfmmu_alloc_ctx)
8621e2e7a75Shuah
8637c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_modifytte)
8647c478bd9Sstevel@tonic-gate	ldx	[%o2], %g3			/* current */
8657c478bd9Sstevel@tonic-gate	ldx	[%o0], %g1			/* original */
8667c478bd9Sstevel@tonic-gate2:
8677c478bd9Sstevel@tonic-gate	ldx	[%o1], %g2			/* modified */
8687c478bd9Sstevel@tonic-gate	cmp	%g2, %g3			/* is modified = current? */
8697c478bd9Sstevel@tonic-gate	be,a,pt	%xcc,1f				/* yes, don't write */
8707c478bd9Sstevel@tonic-gate	stx	%g3, [%o0]			/* update new original */
8717c478bd9Sstevel@tonic-gate	casx	[%o2], %g1, %g2
8727c478bd9Sstevel@tonic-gate	cmp	%g1, %g2
8737c478bd9Sstevel@tonic-gate	be,pt	%xcc, 1f			/* cas succeeded - return */
8747c478bd9Sstevel@tonic-gate	  nop
8757c478bd9Sstevel@tonic-gate	ldx	[%o2], %g3			/* new current */
8767c478bd9Sstevel@tonic-gate	stx	%g3, [%o0]			/* save as new original */
8777c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 2b
8787c478bd9Sstevel@tonic-gate	  mov	%g3, %g1
8797c478bd9Sstevel@tonic-gate1:	retl
8807c478bd9Sstevel@tonic-gate	membar	#StoreLoad
8817c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_modifytte)
8827c478bd9Sstevel@tonic-gate
8837c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_modifytte_try)
8847c478bd9Sstevel@tonic-gate	ldx	[%o1], %g2			/* modified */
8857c478bd9Sstevel@tonic-gate	ldx	[%o2], %g3			/* current */
8867c478bd9Sstevel@tonic-gate	ldx	[%o0], %g1			/* original */
8877c478bd9Sstevel@tonic-gate	cmp	%g3, %g2			/* is modified = current? */
8887c478bd9Sstevel@tonic-gate	be,a,pn %xcc,1f				/* yes, don't write */
8897c478bd9Sstevel@tonic-gate	mov	0, %o1				/* as if cas failed. */
8907c478bd9Sstevel@tonic-gate
8917c478bd9Sstevel@tonic-gate	casx	[%o2], %g1, %g2
8927c478bd9Sstevel@tonic-gate	membar	#StoreLoad
8937c478bd9Sstevel@tonic-gate	cmp	%g1, %g2
8947c478bd9Sstevel@tonic-gate	movne	%xcc, -1, %o1			/* cas failed. */
8957c478bd9Sstevel@tonic-gate	move	%xcc, 1, %o1			/* cas succeeded. */
8967c478bd9Sstevel@tonic-gate1:
8977c478bd9Sstevel@tonic-gate	stx	%g2, [%o0]			/* report "current" value */
8987c478bd9Sstevel@tonic-gate	retl
8997c478bd9Sstevel@tonic-gate	mov	%o1, %o0
9007c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_modifytte_try)
9017c478bd9Sstevel@tonic-gate
9027c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_copytte)
9037c478bd9Sstevel@tonic-gate	ldx	[%o0], %g1
9047c478bd9Sstevel@tonic-gate	retl
9057c478bd9Sstevel@tonic-gate	stx	%g1, [%o1]
9067c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_copytte)
9077c478bd9Sstevel@tonic-gate
9087c478bd9Sstevel@tonic-gate
9097c478bd9Sstevel@tonic-gate	/*
9107c478bd9Sstevel@tonic-gate	 * Calculate a TSB entry pointer for the given TSB, va, pagesize.
9117c478bd9Sstevel@tonic-gate	 * %o0 = TSB base address (in), pointer to TSB entry (out)
9127c478bd9Sstevel@tonic-gate	 * %o1 = vaddr (in)
9137c478bd9Sstevel@tonic-gate	 * %o2 = vpshift (in)
9147c478bd9Sstevel@tonic-gate	 * %o3 = tsb size code (in)
9157c478bd9Sstevel@tonic-gate	 * %o4 = scratch register
9167c478bd9Sstevel@tonic-gate	 */
9177c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_get_tsbe)
9187c478bd9Sstevel@tonic-gate	GET_TSBE_POINTER(%o2, %o0, %o1, %o3, %o4)
9197c478bd9Sstevel@tonic-gate	retl
9207c478bd9Sstevel@tonic-gate	nop
9217c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_get_tsbe)
9227c478bd9Sstevel@tonic-gate
9237c478bd9Sstevel@tonic-gate	/*
9247c478bd9Sstevel@tonic-gate	 * Return a TSB tag for the given va.
9257c478bd9Sstevel@tonic-gate	 * %o0 = va (in/clobbered)
9267c478bd9Sstevel@tonic-gate	 * %o0 = va shifted to be in tsb tag format (with no context) (out)
9277c478bd9Sstevel@tonic-gate	 */
9287c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_make_tsbtag)
9297c478bd9Sstevel@tonic-gate	retl
9307c478bd9Sstevel@tonic-gate	srln	%o0, TTARGET_VA_SHIFT, %o0
9317c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_make_tsbtag)
9327c478bd9Sstevel@tonic-gate
9337c478bd9Sstevel@tonic-gate#endif /* lint */
9347c478bd9Sstevel@tonic-gate
9357c478bd9Sstevel@tonic-gate/*
9367c478bd9Sstevel@tonic-gate * Other sfmmu primitives
9377c478bd9Sstevel@tonic-gate */
9387c478bd9Sstevel@tonic-gate
9397c478bd9Sstevel@tonic-gate
9407c478bd9Sstevel@tonic-gate#if defined (lint)
9417c478bd9Sstevel@tonic-gatevoid
9427c478bd9Sstevel@tonic-gatesfmmu_patch_ktsb(void)
9437c478bd9Sstevel@tonic-gate{
9447c478bd9Sstevel@tonic-gate}
9457c478bd9Sstevel@tonic-gate
9467c478bd9Sstevel@tonic-gatevoid
9477c478bd9Sstevel@tonic-gatesfmmu_kpm_patch_tlbm(void)
9487c478bd9Sstevel@tonic-gate{
9497c478bd9Sstevel@tonic-gate}
9507c478bd9Sstevel@tonic-gate
9517c478bd9Sstevel@tonic-gatevoid
9527c478bd9Sstevel@tonic-gatesfmmu_kpm_patch_tsbm(void)
9537c478bd9Sstevel@tonic-gate{
9547c478bd9Sstevel@tonic-gate}
9557c478bd9Sstevel@tonic-gate
95605d3dc4bSpaulsanvoid
95705d3dc4bSpaulsansfmmu_patch_shctx(void)
95805d3dc4bSpaulsan{
95905d3dc4bSpaulsan}
96005d3dc4bSpaulsan
9617c478bd9Sstevel@tonic-gate/* ARGSUSED */
9627c478bd9Sstevel@tonic-gatevoid
9637c478bd9Sstevel@tonic-gatesfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
9647c478bd9Sstevel@tonic-gate{
9657c478bd9Sstevel@tonic-gate}
9667c478bd9Sstevel@tonic-gate
9677c478bd9Sstevel@tonic-gate/* ARGSUSED */
9687c478bd9Sstevel@tonic-gatevoid
9697c478bd9Sstevel@tonic-gatesfmmu_unload_tsbe(struct tsbe *tsbep, uint64_t vaddr, int phys)
9707c478bd9Sstevel@tonic-gate{
9717c478bd9Sstevel@tonic-gate}
9727c478bd9Sstevel@tonic-gate
9737c478bd9Sstevel@tonic-gate/* ARGSUSED */
9747c478bd9Sstevel@tonic-gatevoid
9757c478bd9Sstevel@tonic-gatesfmmu_kpm_load_tsb(caddr_t addr, tte_t *ttep, int vpshift)
9767c478bd9Sstevel@tonic-gate{
9777c478bd9Sstevel@tonic-gate}
9787c478bd9Sstevel@tonic-gate
9797c478bd9Sstevel@tonic-gate/* ARGSUSED */
9807c478bd9Sstevel@tonic-gatevoid
9817c478bd9Sstevel@tonic-gatesfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
9827c478bd9Sstevel@tonic-gate{
9837c478bd9Sstevel@tonic-gate}
9847c478bd9Sstevel@tonic-gate
9857c478bd9Sstevel@tonic-gate#else /* lint */
9867c478bd9Sstevel@tonic-gate
9877c478bd9Sstevel@tonic-gate#define	I_SIZE		4
9887c478bd9Sstevel@tonic-gate
9897c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_fix_ktlb_traptable)
9907c478bd9Sstevel@tonic-gate	/*
9917c478bd9Sstevel@tonic-gate	 * %o0 = start of patch area
9927c478bd9Sstevel@tonic-gate	 * %o1 = size code of TSB to patch
9937c478bd9Sstevel@tonic-gate	 * %o3 = scratch
9947c478bd9Sstevel@tonic-gate	 */
9957c478bd9Sstevel@tonic-gate	/* fix sll */
9967c478bd9Sstevel@tonic-gate	ld	[%o0], %o3			/* get sll */
9977c478bd9Sstevel@tonic-gate	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
9987c478bd9Sstevel@tonic-gate	st	%o3, [%o0]			/* write sll */
9997c478bd9Sstevel@tonic-gate	flush	%o0
10007c478bd9Sstevel@tonic-gate	/* fix srl */
10017c478bd9Sstevel@tonic-gate	add	%o0, I_SIZE, %o0		/* goto next instr. */
10027c478bd9Sstevel@tonic-gate	ld	[%o0], %o3			/* get srl */
10037c478bd9Sstevel@tonic-gate	sub	%o3, %o1, %o3			/* decrease shift by tsb szc */
10047c478bd9Sstevel@tonic-gate	st	%o3, [%o0]			/* write srl */
10057c478bd9Sstevel@tonic-gate	retl
10067c478bd9Sstevel@tonic-gate	flush	%o0
10077c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_fix_ktlb_traptable)
10087c478bd9Sstevel@tonic-gate
10097c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_ktsbbase)
10107c478bd9Sstevel@tonic-gate	/*
10117c478bd9Sstevel@tonic-gate	 * %o0 = start of patch area
10127c478bd9Sstevel@tonic-gate	 * %o5 = kernel virtual or physical tsb base address
10137c478bd9Sstevel@tonic-gate	 * %o2, %o3 are used as scratch registers.
10147c478bd9Sstevel@tonic-gate	 */
10157c478bd9Sstevel@tonic-gate	/* fixup sethi instruction */
10167c478bd9Sstevel@tonic-gate	ld	[%o0], %o3
10177c478bd9Sstevel@tonic-gate	srl	%o5, 10, %o2			! offset is bits 32:10
10187c478bd9Sstevel@tonic-gate	or	%o3, %o2, %o3			! set imm22
10197c478bd9Sstevel@tonic-gate	st	%o3, [%o0]
10207c478bd9Sstevel@tonic-gate	/* fixup offset of lduw/ldx */
10217c478bd9Sstevel@tonic-gate	add	%o0, I_SIZE, %o0		! next instr
10227c478bd9Sstevel@tonic-gate	ld	[%o0], %o3
10237c478bd9Sstevel@tonic-gate	and	%o5, 0x3ff, %o2			! set imm13 to bits 9:0
10247c478bd9Sstevel@tonic-gate	or	%o3, %o2, %o3
10257c478bd9Sstevel@tonic-gate	st	%o3, [%o0]
10267c478bd9Sstevel@tonic-gate	retl
10277c478bd9Sstevel@tonic-gate	flush	%o0
10287c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_fixup_ktsbbase)
10297c478bd9Sstevel@tonic-gate
10307c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_setx)
10317c478bd9Sstevel@tonic-gate	/*
10327c478bd9Sstevel@tonic-gate	 * %o0 = start of patch area
10337c478bd9Sstevel@tonic-gate	 * %o4 = 64 bit value to patch
10347c478bd9Sstevel@tonic-gate	 * %o2, %o3 are used as scratch registers.
10357c478bd9Sstevel@tonic-gate	 *
10367c478bd9Sstevel@tonic-gate	 * Note: Assuming that all parts of the instructions which need to be
10377c478bd9Sstevel@tonic-gate	 *	 patched correspond to RUNTIME_PATCH (aka 0)
10387c478bd9Sstevel@tonic-gate	 *
10397c478bd9Sstevel@tonic-gate	 * Note the implementation of setx which is being patched is as follows:
10407c478bd9Sstevel@tonic-gate	 *
10417c478bd9Sstevel@tonic-gate	 * sethi   %hh(RUNTIME_PATCH), tmp
10427c478bd9Sstevel@tonic-gate	 * sethi   %lm(RUNTIME_PATCH), dest
10437c478bd9Sstevel@tonic-gate	 * or      tmp, %hm(RUNTIME_PATCH), tmp
10447c478bd9Sstevel@tonic-gate	 * or      dest, %lo(RUNTIME_PATCH), dest
10457c478bd9Sstevel@tonic-gate	 * sllx    tmp, 32, tmp
10467c478bd9Sstevel@tonic-gate	 * nop
10477c478bd9Sstevel@tonic-gate	 * or      tmp, dest, dest
10487c478bd9Sstevel@tonic-gate	 *
10497c478bd9Sstevel@tonic-gate	 * which differs from the implementation in the
10507c478bd9Sstevel@tonic-gate	 * "SPARC Architecture Manual"
10517c478bd9Sstevel@tonic-gate	 */
10527c478bd9Sstevel@tonic-gate	/* fixup sethi instruction */
10537c478bd9Sstevel@tonic-gate	ld	[%o0], %o3
10547c478bd9Sstevel@tonic-gate	srlx	%o4, 42, %o2			! bits [63:42]
10557c478bd9Sstevel@tonic-gate	or	%o3, %o2, %o3			! set imm22
10567c478bd9Sstevel@tonic-gate	st	%o3, [%o0]
10577c478bd9Sstevel@tonic-gate	/* fixup sethi instruction */
10587c478bd9Sstevel@tonic-gate	add	%o0, I_SIZE, %o0		! next instr
10597c478bd9Sstevel@tonic-gate	ld	[%o0], %o3
10607c478bd9Sstevel@tonic-gate	sllx	%o4, 32, %o2			! clear upper bits
10617c478bd9Sstevel@tonic-gate	srlx	%o2, 42, %o2			! bits [31:10]
10627c478bd9Sstevel@tonic-gate	or	%o3, %o2, %o3			! set imm22
10637c478bd9Sstevel@tonic-gate	st	%o3, [%o0]
10647c478bd9Sstevel@tonic-gate	/* fixup or instruction */
10657c478bd9Sstevel@tonic-gate	add	%o0, I_SIZE, %o0		! next instr
10667c478bd9Sstevel@tonic-gate	ld	[%o0], %o3
10677c478bd9Sstevel@tonic-gate	srlx	%o4, 32, %o2			! bits [63:32]
10687c478bd9Sstevel@tonic-gate	and	%o2, 0x3ff, %o2			! bits [41:32]
10697c478bd9Sstevel@tonic-gate	or	%o3, %o2, %o3			! set imm
10707c478bd9Sstevel@tonic-gate	st	%o3, [%o0]
10717c478bd9Sstevel@tonic-gate	/* fixup or instruction */
10727c478bd9Sstevel@tonic-gate	add	%o0, I_SIZE, %o0		! next instr
10737c478bd9Sstevel@tonic-gate	ld	[%o0], %o3
10747c478bd9Sstevel@tonic-gate	and	%o4, 0x3ff, %o2			! bits [9:0]
10757c478bd9Sstevel@tonic-gate	or	%o3, %o2, %o3			! set imm
10767c478bd9Sstevel@tonic-gate	st	%o3, [%o0]
10777c478bd9Sstevel@tonic-gate	retl
10787c478bd9Sstevel@tonic-gate	flush	%o0
10797c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_fixup_setx)
10807c478bd9Sstevel@tonic-gate
10817c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_or)
10827c478bd9Sstevel@tonic-gate	/*
10837c478bd9Sstevel@tonic-gate	 * %o0 = start of patch area
10847c478bd9Sstevel@tonic-gate	 * %o4 = 32 bit value to patch
10857c478bd9Sstevel@tonic-gate	 * %o2, %o3 are used as scratch registers.
10867c478bd9Sstevel@tonic-gate	 * Note: Assuming that all parts of the instructions which need to be
10877c478bd9Sstevel@tonic-gate	 *	 patched correspond to RUNTIME_PATCH (aka 0)
10887c478bd9Sstevel@tonic-gate	 */
10897c478bd9Sstevel@tonic-gate	ld	[%o0], %o3
10907c478bd9Sstevel@tonic-gate	and	%o4, 0x3ff, %o2			! bits [9:0]
10917c478bd9Sstevel@tonic-gate	or	%o3, %o2, %o3			! set imm
10927c478bd9Sstevel@tonic-gate	st	%o3, [%o0]
10937c478bd9Sstevel@tonic-gate	retl
10947c478bd9Sstevel@tonic-gate	flush	%o0
10957c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_fixup_or)
10967c478bd9Sstevel@tonic-gate
10977c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_shiftx)
10987c478bd9Sstevel@tonic-gate	/*
10997c478bd9Sstevel@tonic-gate	 * %o0 = start of patch area
11007c478bd9Sstevel@tonic-gate	 * %o4 = signed int immediate value to add to sllx/srlx imm field
11017c478bd9Sstevel@tonic-gate	 * %o2, %o3 are used as scratch registers.
11027c478bd9Sstevel@tonic-gate	 *
11037c478bd9Sstevel@tonic-gate	 * sllx/srlx store the 6 bit immediate value in the lowest order bits
11047c478bd9Sstevel@tonic-gate	 * so we do a simple add.  The caller must be careful to prevent
11057c478bd9Sstevel@tonic-gate	 * overflow, which could easily occur if the initial value is nonzero!
11067c478bd9Sstevel@tonic-gate	 */
11077c478bd9Sstevel@tonic-gate	ld	[%o0], %o3			! %o3 = instruction to patch
11087c478bd9Sstevel@tonic-gate	and	%o3, 0x3f, %o2			! %o2 = existing imm value
11097c478bd9Sstevel@tonic-gate	add	%o2, %o4, %o2			! %o2 = new imm value
11107c478bd9Sstevel@tonic-gate	andn	%o3, 0x3f, %o3			! clear old imm value
11117c478bd9Sstevel@tonic-gate	and	%o2, 0x3f, %o2			! truncate new imm value
11127c478bd9Sstevel@tonic-gate	or	%o3, %o2, %o3			! set new imm value
11137c478bd9Sstevel@tonic-gate	st	%o3, [%o0]			! store updated instruction
11147c478bd9Sstevel@tonic-gate	retl
11157c478bd9Sstevel@tonic-gate	flush	%o0
11167c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_fixup_shiftx)
11177c478bd9Sstevel@tonic-gate
11187c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_fixup_mmu_asi)
11197c478bd9Sstevel@tonic-gate	/*
11207c478bd9Sstevel@tonic-gate	 * Patch imm_asi of all ldda instructions in the MMU
11217c478bd9Sstevel@tonic-gate	 * trap handlers.  We search MMU_PATCH_INSTR instructions
11227c478bd9Sstevel@tonic-gate	 * starting from the itlb miss handler (trap 0x64).
11237c478bd9Sstevel@tonic-gate	 * %o0 = address of tt[0,1]_itlbmiss
11247c478bd9Sstevel@tonic-gate	 * %o1 = imm_asi to setup, shifted by appropriate offset.
11257c478bd9Sstevel@tonic-gate	 * %o3 = number of instructions to search
11267c478bd9Sstevel@tonic-gate	 * %o4 = reserved by caller: called from leaf routine
11277c478bd9Sstevel@tonic-gate	 */
11287c478bd9Sstevel@tonic-gate1:	ldsw	[%o0], %o2			! load instruction to %o2
11297c478bd9Sstevel@tonic-gate	brgez,pt %o2, 2f
11307c478bd9Sstevel@tonic-gate	  srl	%o2, 30, %o5
11317c478bd9Sstevel@tonic-gate	btst	1, %o5				! test bit 30; skip if not set
11327c478bd9Sstevel@tonic-gate	bz,pt	%icc, 2f
11337c478bd9Sstevel@tonic-gate	  sllx	%o2, 39, %o5			! bit 24 -> bit 63
11347c478bd9Sstevel@tonic-gate	srlx	%o5, 58, %o5			! isolate op3 part of opcode
11357c478bd9Sstevel@tonic-gate	xor	%o5, 0x13, %o5			! 01 0011 binary == ldda
11367c478bd9Sstevel@tonic-gate	brnz,pt	%o5, 2f				! skip if not a match
11377c478bd9Sstevel@tonic-gate	  or	%o2, %o1, %o2			! or in imm_asi
11387c478bd9Sstevel@tonic-gate	st	%o2, [%o0]			! write patched instruction
11397c478bd9Sstevel@tonic-gate2:	dec	%o3
11407c478bd9Sstevel@tonic-gate	brnz,a,pt %o3, 1b			! loop until we're done
11417c478bd9Sstevel@tonic-gate	  add	%o0, I_SIZE, %o0
11427c478bd9Sstevel@tonic-gate	retl
11437c478bd9Sstevel@tonic-gate	flush	%o0
11447c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_fixup_mmu_asi)
11457c478bd9Sstevel@tonic-gate
11467c478bd9Sstevel@tonic-gate	/*
11477c478bd9Sstevel@tonic-gate	 * Patch immediate ASI used to access the TSB in the
11487c478bd9Sstevel@tonic-gate	 * trap table.
11497c478bd9Sstevel@tonic-gate	 * inputs: %o0 = value of ktsb_phys
11507c478bd9Sstevel@tonic-gate	 */
11517c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_patch_mmu_asi)
11527c478bd9Sstevel@tonic-gate	mov	%o7, %o4			! save return pc in %o4
1153125be069SJason Beloro	mov	ASI_QUAD_LDD_PHYS, %o3		! set QUAD_LDD_PHYS by default
1154125be069SJason Beloro
1155125be069SJason Beloro#ifdef sun4v
1156125be069SJason Beloro
1157125be069SJason Beloro	/*
1158125be069SJason Beloro	 * Check ktsb_phys. It must be non-zero for sun4v, panic if not.
1159125be069SJason Beloro	 */
1160125be069SJason Beloro
1161125be069SJason Beloro	brnz,pt %o0, do_patch
1162125be069SJason Beloro	nop
1163125be069SJason Beloro
1164125be069SJason Beloro	sethi	%hi(sfmmu_panic11), %o0
1165125be069SJason Beloro	call	panic
1166125be069SJason Beloro	  or	%o0, %lo(sfmmu_panic11), %o0
1167125be069SJason Belorodo_patch:
1168125be069SJason Beloro
1169125be069SJason Beloro#else /* sun4v */
1170125be069SJason Beloro	/*
1171125be069SJason Beloro	 * Some non-sun4v platforms deploy virtual ktsb (ktsb_phys==0).
1172125be069SJason Beloro	 * Note that ASI_NQUAD_LD is not defined/used for sun4v
1173125be069SJason Beloro	 */
11747c478bd9Sstevel@tonic-gate	movrz	%o0, ASI_NQUAD_LD, %o3
1175125be069SJason Beloro
1176125be069SJason Beloro#endif /* sun4v */
1177125be069SJason Beloro
11787c478bd9Sstevel@tonic-gate	sll	%o3, 5, %o1			! imm_asi offset
11797c478bd9Sstevel@tonic-gate	mov	6, %o3				! number of instructions
11807c478bd9Sstevel@tonic-gate	sethi	%hi(dktsb), %o0			! to search
11817c478bd9Sstevel@tonic-gate	call	sfmmu_fixup_mmu_asi		! patch kdtlb miss
11827c478bd9Sstevel@tonic-gate	  or	%o0, %lo(dktsb), %o0
11837c478bd9Sstevel@tonic-gate	mov	6, %o3				! number of instructions
11847c478bd9Sstevel@tonic-gate	sethi	%hi(dktsb4m), %o0		! to search
11857c478bd9Sstevel@tonic-gate	call	sfmmu_fixup_mmu_asi		! patch kdtlb4m miss
11867c478bd9Sstevel@tonic-gate	  or	%o0, %lo(dktsb4m), %o0
11877c478bd9Sstevel@tonic-gate	mov	6, %o3				! number of instructions
11887c478bd9Sstevel@tonic-gate	sethi	%hi(iktsb), %o0			! to search
11897c478bd9Sstevel@tonic-gate	call	sfmmu_fixup_mmu_asi		! patch kitlb miss
11907c478bd9Sstevel@tonic-gate	  or	%o0, %lo(iktsb), %o0
119105d3dc4bSpaulsan	mov	6, %o3				! number of instructions
119205d3dc4bSpaulsan	sethi	%hi(iktsb4m), %o0		! to search
119305d3dc4bSpaulsan	call	sfmmu_fixup_mmu_asi		! patch kitlb4m miss
119405d3dc4bSpaulsan	  or	%o0, %lo(iktsb4m), %o0
11957c478bd9Sstevel@tonic-gate	mov	%o4, %o7			! retore return pc -- leaf
11967c478bd9Sstevel@tonic-gate	retl
11977c478bd9Sstevel@tonic-gate	nop
11987c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_patch_mmu_asi)
11997c478bd9Sstevel@tonic-gate
1200125be069SJason Beloro
	ENTRY_NP(sfmmu_patch_ktsb)
	/*
	 * We need to fix iktsb, dktsb, et. al.
	 *
	 * Hot-patches the kernel TSB miss handlers with the kernel TSB
	 * base addresses (virtual or physical, depending on ktsb_phys)
	 * and TSB size codes, which are only known at boot time.
	 */
	save	%sp, -SA(MINFRAME), %sp
	set	ktsb_phys, %o1
	ld	[%o1], %o4			! %o4 = ktsb_phys flag
	set	ktsb_base, %o5			! assume VA patching
	set	ktsb4m_base, %l1
	brz,pt	%o4, 1f				! using virtual addresses?
	  nop
	set	ktsb_pbase, %o5			! no: patch physical addresses
	set	ktsb4m_pbase, %l1
1:
	sethi	%hi(ktsb_szcode), %o1
	ld	[%o1 + %lo(ktsb_szcode)], %o1	/* %o1 = ktsb size code */

	sethi	%hi(iktsb), %o0
	call	sfmmu_fix_ktlb_traptable	! patch kernel ITSB handler
	  or	%o0, %lo(iktsb), %o0

	sethi	%hi(dktsb), %o0
	call	sfmmu_fix_ktlb_traptable	! patch kernel DTSB handler
	  or	%o0, %lo(dktsb), %o0

	sethi	%hi(ktsb4m_szcode), %o1
	ld	[%o1 + %lo(ktsb4m_szcode)], %o1	/* %o1 = ktsb4m size code */

	sethi	%hi(iktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable	! patch kernel 4M ITSB handler
	  or	%o0, %lo(iktsb4m), %o0

	sethi	%hi(dktsb4m), %o0
	call	sfmmu_fix_ktlb_traptable	! patch kernel 4M DTSB handler
	  or	%o0, %lo(dktsb4m), %o0

#ifndef sun4v
	mov	ASI_N, %o2
	movrnz	%o4, ASI_MEM, %o2	! setup kernel 32bit ASI to patch
	mov	%o2, %o4		! sfmmu_fixup_or needs this in %o4
	sethi	%hi(tsb_kernel_patch_asi), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(tsb_kernel_patch_asi), %o0
#endif /* !sun4v */

	ldx 	[%o5], %o4		! load ktsb base addr (VA or PA)

	sethi	%hi(dktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(dktsbbase), %o0

	sethi	%hi(iktsbbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(iktsbbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_base), %o0
#endif /* sun4v */

	ldx 	[%l1], %o4		! load ktsb4m base addr (VA or PA)

	sethi	%hi(dktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(dktsb4mbase), %o0

	sethi	%hi(iktsb4mbase), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(iktsb4mbase), %o0

	sethi	%hi(sfmmu_kprot_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_base), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_base), %o0
	call	sfmmu_fixup_setx	! patch value of ktsb4m base addr
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_base), %o0
#endif /* sun4v */

	set	ktsb_szcode, %o4
	ld	[%o4], %o4		! %o4 = ktsb size code for fixup_or
	sethi	%hi(sfmmu_kprot_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb_szcode), %o0
#endif /* sun4v */

	set	ktsb4m_szcode, %o4
	ld	[%o4], %o4		! %o4 = ktsb4m size code for fixup_or
	sethi	%hi(sfmmu_kprot_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_kprot_patch_ktsb4m_szcode), %o0

#ifdef sun4v
	sethi	%hi(sfmmu_dslow_patch_ktsb4m_szcode), %o0
	call	sfmmu_fixup_or		! patch value of ktsb4m_szcode
	  or	%o0, %lo(sfmmu_dslow_patch_ktsb4m_szcode), %o0
#endif /* sun4v */

	ret
	restore
	SET_SIZE(sfmmu_patch_ktsb)
13137c478bd9Sstevel@tonic-gate
	ENTRY_NP(sfmmu_kpm_patch_tlbm)
	/*
	 * Fixup trap handlers in common segkpm case.  This is reserved
	 * for future use should kpm TSB be changed to be other than the
	 * kernel TSB.  Currently a no-op.
	 */
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tlbm)
13237c478bd9Sstevel@tonic-gate
	ENTRY_NP(sfmmu_kpm_patch_tsbm)
	/*
	 * nop the branch to sfmmu_kpm_dtsb_miss_small
	 * in the case where we are using large pages for
	 * seg_kpm (and hence must probe the second TSB for
	 * seg_kpm VAs)
	 */
	set	dktsb4m_kpmcheck_small, %o0	! %o0 = patch point address
	MAKE_NOP_INSTR(%o1)			! %o1 = nop instruction encoding
	st	%o1, [%o0]			! overwrite the branch with a nop
	flush	%o0				! keep the I$ consistent
	retl
	nop
	SET_SIZE(sfmmu_kpm_patch_tsbm)
13387c478bd9Sstevel@tonic-gate
	ENTRY_NP(sfmmu_patch_utsb)
#ifdef UTSB_PHYS
	/* User TSBs are accessed physically (UTSB_PHYS); nothing to patch. */
	retl
	nop
#else /* UTSB_PHYS */
	/*
	 * We need to hot patch utsb_vabase and utsb4m_vabase
	 * into the user TSB miss/protection handlers below.
	 */
	save	%sp, -SA(MINFRAME), %sp

	/* patch value of utsb_vabase */
	set	utsb_vabase, %o1
	ldx	[%o1], %o4			! %o4 = value for fixup_setx
	sethi	%hi(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_1st_tsbe_ptr), %o0
	sethi	%hi(sfmmu_udtlb_get_1st_tsbe_ptr), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_1st_tsbe_ptr), %o0

	/* patch value of utsb4m_vabase */
	set	utsb4m_vabase, %o1
	ldx	[%o1], %o4			! %o4 = value for fixup_setx
	sethi	%hi(sfmmu_uprot_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uprot_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_uitlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_uitlb_get_2nd_tsb_base), %o0
	sethi	%hi(sfmmu_udtlb_get_2nd_tsb_base), %o0
	call	sfmmu_fixup_setx
	  or	%o0, %lo(sfmmu_udtlb_get_2nd_tsb_base), %o0

	/*
	 * Patch TSB base register masks and shifts if needed.
	 * By default the TSB base register contents are set up for 4M slab.
	 * If we're using a smaller slab size and reserved VA range we need
	 * to patch up those values here.
	 */
	set	tsb_slab_shift, %o1
	set	MMU_PAGESHIFT4M, %o4
	lduw	[%o1], %o3			! %o3 = actual slab shift
	subcc	%o4, %o3, %o4			! %o4 = shift delta vs. 4M
	bz,pt	%icc, 1f			! 4M slab: no patching needed
	  /* delay slot safe */

	/* patch reserved VA range size if needed. */
	sethi	%hi(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_1st_resv_offset), %o0
	call	sfmmu_fixup_shiftx		! also fix the following instr
	  add	%o0, I_SIZE, %o0
	sethi	%hi(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx
	  or	%o0, %lo(sfmmu_tsb_2nd_resv_offset), %o0
	call	sfmmu_fixup_shiftx		! also fix the following instr
	  add	%o0, I_SIZE, %o0
1:
	/* patch TSBREG_VAMASK used to set up TSB base register */
	set	tsb_slab_mask, %o1
	ldx	[%o1], %o4			! %o4 = slab VA mask for fixup_or
	sethi	%hi(sfmmu_tsb_1st_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_1st_tsbreg_vamask), %o0
	sethi	%hi(sfmmu_tsb_2nd_tsbreg_vamask), %o0
	call	sfmmu_fixup_or
	  or	%o0, %lo(sfmmu_tsb_2nd_tsbreg_vamask), %o0

	ret
	restore
#endif /* UTSB_PHYS */
	SET_SIZE(sfmmu_patch_utsb)
14147c478bd9Sstevel@tonic-gate
	ENTRY_NP(sfmmu_patch_shctx)
#ifdef sun4u
	/* No shared-context patch points on sun4u; nothing to do. */
	retl
	  nop
#else /* sun4u */
	/*
	 * Hot-patch the shared-context patch points: write a
	 * "jmp %g5" followed by a nop at sfmmu_shctx_cpu_mondo_patch,
	 * and nop out six instructions at sfmmu_shctx_user_rtt_patch.
	 * Every store is followed by a flush to keep the I$ consistent.
	 */
	set	sfmmu_shctx_cpu_mondo_patch, %o0
	MAKE_JMP_INSTR(5, %o1, %o2)	! jmp       %g5
	st	%o1, [%o0]		! store the jmp instruction
	flush	%o0
	MAKE_NOP_INSTR(%o1)		! %o1 = nop encoding (reused below)
	add	%o0, I_SIZE, %o0	! next instr
	st	%o1, [%o0]
	flush	%o0

	set	sfmmu_shctx_user_rtt_patch, %o0
	st      %o1, [%o0]		! nop 1st instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 2nd instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 3rd instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 4th instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 5th instruction
	flush	%o0
	add     %o0, I_SIZE, %o0
	st      %o1, [%o0]		! nop 6th instruction
	retl
	flush	%o0			! flush last store in the delay slot
#endif /* sun4u */
	SET_SIZE(sfmmu_patch_shctx)
14507c478bd9Sstevel@tonic-gate
	/*
	 * Routine that loads an entry into a tsb using virtual addresses.
	 * Locking is required since all cpus can use the same TSB.
	 * Note that it is no longer required to have a valid context
	 * when calling this function.
	 */
	ENTRY_NP(sfmmu_load_tsbe)
	/*
	 * %o0 = pointer to tsbe to load
	 * %o1 = tsb tag
	 * %o2 = virtual pointer to TTE
	 * %o3 = 1 if physical address in %o0 else 0
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l2, %g1)
#endif /* DEBUG */

	wrpr	%o5, PSTATE_IE, %pstate		/* disable interrupts */

	SETUP_TSB_ASI(%o3, %g3)			! choose ASI per %o3 (PA vs VA)
	TSB_UPDATE(%o0, %o2, %o1, %g1, %g2, locked_tsb_l8)

	wrpr	%g0, %o5, %pstate		/* enable interrupts */

	retl
	membar	#StoreStore|#StoreLoad		! order the TSB update (delay slot)
	SET_SIZE(sfmmu_load_tsbe)
14797c478bd9Sstevel@tonic-gate
	/*
	 * Flush TSB of a given entry if the tag matches.
	 */
	ENTRY(sfmmu_unload_tsbe)
	/*
	 * %o0 = pointer to tsbe to be flushed
	 * %o1 = tag to match
	 * %o2 = 1 if physical address in %o0 else 0
	 */
	SETUP_TSB_ASI(%o2, %g1)			! choose ASI per %o2 (PA vs VA)
	TSB_INVALIDATE(%o0, %o1, %g1, %o2, %o3, unload_tsbe)
	retl
	membar	#StoreStore|#StoreLoad		! order the invalidate (delay slot)
	SET_SIZE(sfmmu_unload_tsbe)
14947c478bd9Sstevel@tonic-gate
	/*
	 * Routine that loads a TTE into the kpm TSB from C code.
	 * Locking is required since kpm TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_load_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = ttep
	 * %o2 = virtpg to TSB index shift (e.g. TTE pagesize shift)
	 */
	rdpr	%pstate, %o5			! %o5 = saved pstate
#ifdef DEBUG
	PANIC_IF_INTR_DISABLED_PSTR(%o5, sfmmu_di_l3, %g1)
#endif /* DEBUG */
	wrpr	%o5, PSTATE_IE, %pstate		! disable interrupts

#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4	! %o4 = ktsb_phys flag
	movrnz	%o4, ASI_MEM, %o3		! physical TSB: use ASI_MEM
	mov	%o3, %asi			! default %asi for TSB access
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o2, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
	/* TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) */
	TSB_UPDATE(%g2, %o1, %g1, %o3, %o4, locked_tsb_l9)

	wrpr	%g0, %o5, %pstate		! enable interrupts
	retl
	  membar #StoreStore|#StoreLoad		! order the TSB update (delay slot)
	SET_SIZE(sfmmu_kpm_load_tsb)
15327c478bd9Sstevel@tonic-gate
	/*
	 * Routine that shoots down a TTE in the kpm TSB or in the
	 * kernel TSB depending on virtpg. Locking is required since
	 * kpm/kernel TSB is shared among all CPUs.
	 */
	ENTRY_NP(sfmmu_kpm_unload_tsb)
	/*
	 * %o0 = vaddr
	 * %o1 = virtpg to TSB index shift (e.g. TTE page shift)
	 */
#ifndef sun4v
	sethi	%hi(ktsb_phys), %o4
	mov	ASI_N, %o3
	ld	[%o4 + %lo(ktsb_phys)], %o4	! %o4 = ktsb_phys flag
	movrnz	%o4, ASI_MEM, %o3		! physical TSB: use ASI_MEM
	mov	%o3, %asi			! default %asi for TSB access
#endif /* !sun4v */
	mov	%o0, %g1			! %g1 = vaddr

	/* GET_KPM_TSBE_POINTER(vpshift, tsbp, vaddr (clobbers), tmp1, tmp2) */
	GET_KPM_TSBE_POINTER(%o1, %g2, %g1, %o3, %o4)
	/* %g2 = tsbep, %g1 clobbered */

	srlx	%o0, TTARGET_VA_SHIFT, %g1;	! %g1 = tag target
	/* TSB_INVALIDATE(tsbep, tag, tmp1, tmp2, tmp3, label) */
	TSB_INVALIDATE(%g2, %g1, %o3, %o4, %o1, kpm_tsbinval)

	retl
	  membar	#StoreStore|#StoreLoad	! order the invalidate (delay slot)
	SET_SIZE(sfmmu_kpm_unload_tsb)
15637c478bd9Sstevel@tonic-gate
15647c478bd9Sstevel@tonic-gate#endif /* lint */
15657c478bd9Sstevel@tonic-gate
15667c478bd9Sstevel@tonic-gate
15677c478bd9Sstevel@tonic-gate#if defined (lint)
15687c478bd9Sstevel@tonic-gate
15697c478bd9Sstevel@tonic-gate/*ARGSUSED*/
15707c478bd9Sstevel@tonic-gatepfn_t
15717c478bd9Sstevel@tonic-gatesfmmu_ttetopfn(tte_t *tte, caddr_t vaddr)
15727c478bd9Sstevel@tonic-gate{ return(0); }
15737c478bd9Sstevel@tonic-gate
15747c478bd9Sstevel@tonic-gate#else /* lint */
15757c478bd9Sstevel@tonic-gate
	ENTRY_NP(sfmmu_ttetopfn)
	/*
	 * Return the pfn for a tte.
	 * %o0 = tte_t *tte, %o1 = vaddr; pfn returned in %o0.
	 */
	ldx	[%o0], %g1			/* read tte */
	TTETOPFN(%g1, %o1, sfmmu_ttetopfn_l1, %g2, %g3, %g4)
	/*
	 * g1 = pfn
	 */
	retl
	mov	%g1, %o0			! return pfn (delay slot)
	SET_SIZE(sfmmu_ttetopfn)
15857c478bd9Sstevel@tonic-gate
15867c478bd9Sstevel@tonic-gate#endif /* !lint */
15877c478bd9Sstevel@tonic-gate
/*
 * These macros are used to update global sfmmu hme hash statistics
 * in perf critical paths.  They are only enabled in debug kernels or
 * if SFMMU_STAT_GATHER is defined.
 */
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
/*
 * Bump the kernel or user hash-search counter in sfmmu_global_stat,
 * choosing the counter by comparing hatid with the kernel hatid
 * cached in the tsbmiss area.  Clobbers tmp1, tmp2 and %ncc.
 * NOTE(review): the ld/inc/st sequence is not atomic, so concurrent
 * increments may occasionally be lost; tolerable for debug stats.
 */
#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_SEARCH, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_SEARCH, tmp2			;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]

/*
 * As above, but counts hash chain links traversed (kernel vs. user).
 */
#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			\
	ldn	[tsbarea + TSBMISS_KHATID], tmp1			;\
	mov	HATSTAT_KHASH_LINKS, tmp2				;\
	cmp	tmp1, hatid						;\
	movne	%ncc, HATSTAT_UHASH_LINKS, tmp2				;\
	set	sfmmu_global_stat, tmp1					;\
	add	tmp1, tmp2, tmp1					;\
	ld	[tmp1], tmp2						;\
	inc	tmp2							;\
	st	tmp2, [tmp1]


#else /* DEBUG || SFMMU_STAT_GATHER */

/* Stats gathering disabled: the macros expand to nothing. */
#define	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#define	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)

#endif  /* DEBUG || SFMMU_STAT_GATHER */
16247c478bd9Sstevel@tonic-gate
/*
 * This macro is used to update global sfmmu kstats in non
 * perf critical areas so they are enabled all the time.
 * Increments the 32-bit counter at sfmmu_global_stat + statname;
 * clobbers tmp1 and tmp2.  Not atomic (see stat macros above).
 */
#define	HAT_GLOBAL_STAT(statname, tmp1, tmp2)				\
	sethi	%hi(sfmmu_global_stat), tmp1				;\
	add	tmp1, statname, tmp1					;\
	ld	[tmp1 + %lo(sfmmu_global_stat)], tmp2			;\
	inc	tmp2							;\
	st	tmp2, [tmp1 + %lo(sfmmu_global_stat)]
16357c478bd9Sstevel@tonic-gate
/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time.
 * 32-bit counter at [tsbarea + stat]; clobbers tmp1.
 */
#define	HAT_PERCPU_STAT32(tsbarea, stat, tmp1)				\
	ld	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	st	tmp1, [tsbarea + stat]
16447c478bd9Sstevel@tonic-gate
/*
 * These macros are used to update per cpu stats in non perf
 * critical areas so they are enabled all the time.
 * 16-bit counter at [tsbarea + stat]; clobbers tmp1.
 */
#define	HAT_PERCPU_STAT16(tsbarea, stat, tmp1)				\
	lduh	[tsbarea + stat], tmp1					;\
	inc	tmp1							;\
	stuh	tmp1, [tsbarea + stat]
16537c478bd9Sstevel@tonic-gate
#if defined(KPM_TLBMISS_STATS_GATHER)
	/*
	 * Count kpm dtlb misses separately to allow a different
	 * evaluation of hme and kpm tlbmisses. kpm tsb hits can
	 * be calculated by (kpm_dtlb_misses - kpm_tsb_misses).
	 *
	 * Branches to label without counting when tagacc is not
	 * negative (brgez: not a kpm VA) or falls outside this CPU's
	 * [KPMTSBM_VBASE, KPMTSBM_VEND) range.  Clobbers val, tsbma,
	 * tmp1 and the condition codes; "label" is emitted here.
	 */
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)		\
	brgez	tagacc, label	/* KPM VA? */				;\
	nop								;\
	CPU_INDEX(tmp1, tsbma)						;\
	sethi	%hi(kpmtsbm_area), tsbma				;\
	sllx	tmp1, KPMTSBM_SHIFT, tmp1				;\
	or	tsbma, %lo(kpmtsbm_area), tsbma				;\
	add	tsbma, tmp1, tsbma		/* kpmtsbm area */	;\
	/* VA range check */						;\
	ldx	[tsbma + KPMTSBM_VBASE], val				;\
	cmp	tagacc, val						;\
	blu,pn	%xcc, label						;\
	  ldx	[tsbma + KPMTSBM_VEND], tmp1				;\
	cmp	tagacc, tmp1						;\
	bgeu,pn	%xcc, label						;\
	  lduw	[tsbma + KPMTSBM_DTLBMISS], val				;\
	inc	val							;\
	st	val, [tsbma + KPMTSBM_DTLBMISS]				;\
label:
#else
/* Stats gathering disabled: expands to nothing. */
#define	KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label)
#endif	/* KPM_TLBMISS_STATS_GATHER */
16827c478bd9Sstevel@tonic-gate
16837c478bd9Sstevel@tonic-gate#if defined (lint)
/*
 * The following routines are jumped to from the mmu trap handlers to do
 * the setting up to call systrap.  They are separate routines instead of
 * being part of the handlers because the handlers would exceed 32
 * instructions and since this is part of the slow path the jump
 * cost is irrelevant.
 *
 * These empty bodies are lint stubs only; the real implementations
 * are the assembly routines below.
 */
void
sfmmu_pagefault(void)
{
}

void
sfmmu_mmu_trap(void)
{
}

void
sfmmu_window_trap(void)
{
}

void
sfmmu_kpm_exception(void)
{
}
17107c478bd9Sstevel@tonic-gate
17117c478bd9Sstevel@tonic-gate#else /* lint */
17127c478bd9Sstevel@tonic-gate
#ifdef	PTL1_PANIC_DEBUG
	.seg	".data"
	.global	test_ptl1_panic
/*
 * Debug knob: when set non-zero, sfmmu_pagefault reads and clears it
 * and forces a ptl1_panic(PTL1_BAD_DEBUG) for testing.
 */
test_ptl1_panic:
	.word	0
	.align	8

	.seg	".text"
	.align	4
#endif	/* PTL1_PANIC_DEBUG */
17237c478bd9Sstevel@tonic-gate
17247c478bd9Sstevel@tonic-gate
	ENTRY_NP(sfmmu_pagefault)
	/*
	 * Trampoline from the MMU trap handlers into sys_trap(trap).
	 * Classifies %tt into T_INSTR_MMU_MISS, T_DATA_MMU_MISS or
	 * T_DATA_PROT (in %g3) and selects the matching tag access
	 * register value (in %g2).
	 */
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g6, %g4)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3		! annulled unless taken
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3		! annulled unless taken
	mov	%g5, %g2			! data fault: use dtag
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */

#ifdef  PTL1_PANIC_DEBUG
	/* check if we want to test the tl1 panic */
	sethi	%hi(test_ptl1_panic), %g4
	ld	[%g4 + %lo(test_ptl1_panic)], %g1
	st	%g0, [%g4 + %lo(test_ptl1_panic)]	! one-shot: clear flag
	cmp	%g1, %g0
	bne,a,pn %icc, ptl1_panic
	  or	%g0, PTL1_BAD_DEBUG, %g1
#endif	/* PTL1_PANIC_DEBUG */
1:
	HAT_GLOBAL_STAT(HATSTAT_PAGEFAULT, %g6, %g4)
	/*
	 * g2 = tag access reg
	 * g3.l = type
	 * g3.h = 0
	 */
	sethi	%hi(trap), %g1
	or	%g1, %lo(trap), %g1		! %g1 = TL0 handler = trap()
2:
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4				! %g4 = -1 (PIL arg to sys_trap)
	SET_SIZE(sfmmu_pagefault)
17657c478bd9Sstevel@tonic-gate
	ENTRY_NP(sfmmu_mmu_trap)
	/*
	 * Trampoline into sys_trap(sfmmu_tsbmiss_exception).
	 * Classifies %tt into T_INSTR_MMU_MISS, T_DATA_MMU_MISS or
	 * T_DATA_PROT (in %g3) and selects the matching tag access
	 * register value (in %g2).
	 */
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g6)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3		! annulled unless taken
	cmp	%g6, T_INSTR_MMU_MISS
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3		! annulled unless taken
	mov	%g5, %g2			! data fault: use dtag
	mov	T_DATA_PROT, %g3		/* arg2 = traptype */
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
	cmp	%g6, T_DATA_MMU_MISS
	move	%icc, T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
1:
	/*
	 * g2 = tag access reg
	 * g3 = type
	 */
	sethi	%hi(sfmmu_tsbmiss_exception), %g1
	or	%g1, %lo(sfmmu_tsbmiss_exception), %g1	! %g1 = TL0 handler
	ba,pt	%xcc, sys_trap
	  mov	-1, %g4				! %g4 = -1 (PIL arg to sys_trap)
	/*NOTREACHED*/
	SET_SIZE(sfmmu_mmu_trap)
17947c478bd9Sstevel@tonic-gate
	ENTRY_NP(sfmmu_suspend_tl)
	/*
	 * Trampoline into sys_trap(sfmmu_tsbmiss_suspended) at PIL_15.
	 * Classifies %tt into T_INSTR_MMU_MISS, T_DATA_MMU_MISS or
	 * T_DATA_PROT (in %g3) and selects the matching tag access
	 * register value (in %g2).
	 */
	SET_GL_REG(1)
	USE_ALTERNATE_GLOBALS(%g5)
	GET_MMU_BOTH_TAGACC(%g5 /*dtag*/, %g2 /*itag*/, %g4, %g3)
	rdpr	%tt, %g6
	cmp	%g6, FAST_IMMU_MISS_TT
	be,a,pn	%icc, 1f
	  mov	T_INSTR_MMU_MISS, %g3		! annulled unless taken
	mov	%g5, %g2			! data fault: use dtag
	cmp	%g6, FAST_DMMU_MISS_TT
	move	%icc, T_DATA_MMU_MISS, %g3	! dtlb miss
	movne	%icc, T_DATA_PROT, %g3		! otherwise: protection fault
1:
	sethi	%hi(sfmmu_tsbmiss_suspended), %g1
	or	%g1, %lo(sfmmu_tsbmiss_suspended), %g1
	/* g1 = TL0 handler, g2 = tagacc, g3 = trap type */
	ba,pt	%xcc, sys_trap
	  mov	PIL_15, %g4			! run handler at PIL_15
	/*NOTREACHED*/
	SET_SIZE(sfmmu_suspend_tl)
18157c478bd9Sstevel@tonic-gate
18167c478bd9Sstevel@tonic-gate	/*
	 * sfmmu_window_trap: handles a user TSB miss taken at TL>1, which
	 * should only occur inside a window spill/fill handler or (sun4v)
	 * inside user_rtt.  Redirects %tnpc to the handler's fault branch
	 * so the miss is resolved there, or panics via ptl1_panic when
	 * %tpc is not where we expect it.
18177c478bd9Sstevel@tonic-gate	 * No %g registers in use at this point.
18187c478bd9Sstevel@tonic-gate	 */
18197c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_window_trap)
18207c478bd9Sstevel@tonic-gate	rdpr	%tpc, %g1
18217c478bd9Sstevel@tonic-gate#ifdef sun4v
18227c478bd9Sstevel@tonic-gate#ifdef DEBUG
18237c478bd9Sstevel@tonic-gate	/* We assume previous %gl was 1 */
18247c478bd9Sstevel@tonic-gate	rdpr	%tstate, %g4
18257c478bd9Sstevel@tonic-gate	srlx	%g4, TSTATE_GL_SHIFT, %g4
18267c478bd9Sstevel@tonic-gate	and	%g4, TSTATE_GL_MASK, %g4
18277c478bd9Sstevel@tonic-gate	cmp	%g4, 1
18287c478bd9Sstevel@tonic-gate	bne,a,pn %icc, ptl1_panic
18297c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_WTRAP, %g1
18307c478bd9Sstevel@tonic-gate#endif /* DEBUG */
18317c478bd9Sstevel@tonic-gate	/* user miss at tl>1. better be the window handler or user_rtt */
18327c478bd9Sstevel@tonic-gate	/* in user_rtt? */
18337c478bd9Sstevel@tonic-gate	set	rtt_fill_start, %g4
18347c478bd9Sstevel@tonic-gate	cmp	%g1, %g4
18357c478bd9Sstevel@tonic-gate	blu,pn %xcc, 6f
18367c478bd9Sstevel@tonic-gate	 .empty
18377c478bd9Sstevel@tonic-gate	set	rtt_fill_end, %g4
18387c478bd9Sstevel@tonic-gate	cmp	%g1, %g4
18397c478bd9Sstevel@tonic-gate	bgeu,pn %xcc, 6f
18407c478bd9Sstevel@tonic-gate	 nop
	/* %tpc inside [rtt_fill_start, rtt_fill_end): resume at fault_rtt_fn1 */
18417c478bd9Sstevel@tonic-gate	set	fault_rtt_fn1, %g1
18427c478bd9Sstevel@tonic-gate	wrpr	%g0, %g1, %tnpc
18437c478bd9Sstevel@tonic-gate	ba,a	7f
18447c478bd9Sstevel@tonic-gate6:
18457c478bd9Sstevel@tonic-gate	! must save this trap level before descending trap stack
18467c478bd9Sstevel@tonic-gate	! no need to save %tnpc, either overwritten or discarded
18477c478bd9Sstevel@tonic-gate	! already got it: rdpr	%tpc, %g1
18487c478bd9Sstevel@tonic-gate	rdpr	%tstate, %g6
18497c478bd9Sstevel@tonic-gate	rdpr	%tt, %g7
18507c478bd9Sstevel@tonic-gate	! trap level saved, go get underlying trap type
18517c478bd9Sstevel@tonic-gate	rdpr	%tl, %g5
18527c478bd9Sstevel@tonic-gate	sub	%g5, 1, %g3
18537c478bd9Sstevel@tonic-gate	wrpr	%g3, %tl
18547c478bd9Sstevel@tonic-gate	rdpr	%tt, %g2
18557c478bd9Sstevel@tonic-gate	wrpr	%g5, %tl
18567c478bd9Sstevel@tonic-gate	! restore saved trap level
18577c478bd9Sstevel@tonic-gate	wrpr	%g1, %tpc
18587c478bd9Sstevel@tonic-gate	wrpr	%g6, %tstate
18597c478bd9Sstevel@tonic-gate	wrpr	%g7, %tt
18607c478bd9Sstevel@tonic-gate#else /* sun4v */
18617c478bd9Sstevel@tonic-gate	/* user miss at tl>1. better be the window handler */
18627c478bd9Sstevel@tonic-gate	rdpr	%tl, %g5
18637c478bd9Sstevel@tonic-gate	sub	%g5, 1, %g3
18647c478bd9Sstevel@tonic-gate	wrpr	%g3, %tl
18657c478bd9Sstevel@tonic-gate	rdpr	%tt, %g2
18667c478bd9Sstevel@tonic-gate	wrpr	%g5, %tl
18677c478bd9Sstevel@tonic-gate#endif /* sun4v */
	/* %g2 = trap type at TL-1; check it is a window spill/fill trap */
18687c478bd9Sstevel@tonic-gate	and	%g2, WTRAP_TTMASK, %g4
18697c478bd9Sstevel@tonic-gate	cmp	%g4, WTRAP_TYPE
18707c478bd9Sstevel@tonic-gate	bne,pn	%xcc, 1f
18717c478bd9Sstevel@tonic-gate	 nop
18727c478bd9Sstevel@tonic-gate	/* tpc should be in the trap table */
18737c478bd9Sstevel@tonic-gate	set	trap_table, %g4
18747c478bd9Sstevel@tonic-gate	cmp	%g1, %g4
18757c478bd9Sstevel@tonic-gate	blt,pn %xcc, 1f
18767c478bd9Sstevel@tonic-gate	 .empty
18777c478bd9Sstevel@tonic-gate	set	etrap_table, %g4
18787c478bd9Sstevel@tonic-gate	cmp	%g1, %g4
18797c478bd9Sstevel@tonic-gate	bge,pn %xcc, 1f
18807c478bd9Sstevel@tonic-gate	 .empty
	/* redirect %tnpc to the fault branch of this window handler */
18817c478bd9Sstevel@tonic-gate	andn	%g1, WTRAP_ALIGN, %g1	/* 128 byte aligned */
18827c478bd9Sstevel@tonic-gate	add	%g1, WTRAP_FAULTOFF, %g1
18837c478bd9Sstevel@tonic-gate	wrpr	%g0, %g1, %tnpc
18847c478bd9Sstevel@tonic-gate7:
18857c478bd9Sstevel@tonic-gate	/*
18867c478bd9Sstevel@tonic-gate	 * some wbuf handlers will call systrap to resolve the fault
18877c478bd9Sstevel@tonic-gate	 * we pass the trap type so they figure out the correct parameters.
18887c478bd9Sstevel@tonic-gate	 * g5 = trap type, g6 = tag access reg
18897c478bd9Sstevel@tonic-gate	 */
18907c478bd9Sstevel@tonic-gate
18917c478bd9Sstevel@tonic-gate	/*
18927c478bd9Sstevel@tonic-gate	 * only use g5, g6, g7 registers after we have switched to alternate
18937c478bd9Sstevel@tonic-gate	 * globals.
18947c478bd9Sstevel@tonic-gate	 */
18957c478bd9Sstevel@tonic-gate	SET_GL_REG(1)
18967c478bd9Sstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
18977c478bd9Sstevel@tonic-gate	GET_MMU_D_TAGACC(%g6 /*dtag*/, %g5 /*scratch*/)
18987c478bd9Sstevel@tonic-gate	rdpr	%tt, %g7
	/* instruction misses can never be window traps: panic */
18997c478bd9Sstevel@tonic-gate	cmp	%g7, FAST_IMMU_MISS_TT
19007c478bd9Sstevel@tonic-gate	be,a,pn	%icc, ptl1_panic
19017c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_WTRAP, %g1
19027c478bd9Sstevel@tonic-gate	cmp	%g7, T_INSTR_MMU_MISS
19037c478bd9Sstevel@tonic-gate	be,a,pn	%icc, ptl1_panic
19047c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_WTRAP, %g1
19057c478bd9Sstevel@tonic-gate	mov	T_DATA_PROT, %g5
19067c478bd9Sstevel@tonic-gate	cmp	%g7, FAST_DMMU_MISS_TT
19077c478bd9Sstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g5
19087c478bd9Sstevel@tonic-gate	cmp	%g7, T_DATA_MMU_MISS
19097c478bd9Sstevel@tonic-gate	move	%icc, T_DATA_MMU_MISS, %g5
19107c478bd9Sstevel@tonic-gate	! XXXQ AGS re-check out this one
19117c478bd9Sstevel@tonic-gate	done
19127c478bd9Sstevel@tonic-gate1:
	/*
	 * %tpc not in a window handler: if the per-cpu CPU_TL1_HDLR flag
	 * is set, clear it and treat this as a normal mmu trap;
	 * otherwise panic.
	 */
191301bd5185Swh94709	CPU_PADDR(%g1, %g4)
191401bd5185Swh94709	add	%g1, CPU_TL1_HDLR, %g1
191501bd5185Swh94709	lda	[%g1]ASI_MEM, %g4
19167c478bd9Sstevel@tonic-gate	brnz,a,pt %g4, sfmmu_mmu_trap
191701bd5185Swh94709	  sta	%g0, [%g1]ASI_MEM
19187c478bd9Sstevel@tonic-gate	ba,pt	%icc, ptl1_panic
19197c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_TRAP, %g1
19207c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_window_trap)
19217c478bd9Sstevel@tonic-gate
19227c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_kpm_exception)
19237c478bd9Sstevel@tonic-gate	/*
19247c478bd9Sstevel@tonic-gate	 * We have accessed an unmapped segkpm address or a legal segkpm
19257c478bd9Sstevel@tonic-gate	 * address which is involved in a VAC alias conflict prevention.
19267c478bd9Sstevel@tonic-gate	 * Before we go to trap(), check to see if CPU_DTRACE_NOFAULT is
19277c478bd9Sstevel@tonic-gate	 * set. If it is, we will instead note that a fault has occurred
19287c478bd9Sstevel@tonic-gate	 * by setting CPU_DTRACE_BADADDR and issue a "done" (instead of
19297c478bd9Sstevel@tonic-gate	 * a "retry"). This will step over the faulting instruction.
19307c478bd9Sstevel@tonic-gate	 * Note that this means that a legal segkpm address involved in
19317c478bd9Sstevel@tonic-gate	 * a VAC alias conflict prevention (a rare case to begin with)
19327c478bd9Sstevel@tonic-gate	 * cannot be used in DTrace.
19337c478bd9Sstevel@tonic-gate	 */
	/* %g1 = &cpu_core[cpuid] */
19347c478bd9Sstevel@tonic-gate	CPU_INDEX(%g1, %g2)
19357c478bd9Sstevel@tonic-gate	set	cpu_core, %g2
19367c478bd9Sstevel@tonic-gate	sllx	%g1, CPU_CORE_SHIFT, %g1
19377c478bd9Sstevel@tonic-gate	add	%g1, %g2, %g1
19387c478bd9Sstevel@tonic-gate	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
19397c478bd9Sstevel@tonic-gate	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
19407c478bd9Sstevel@tonic-gate	bz	0f	/* not in DTrace no-fault mode: normal trap path */
19417c478bd9Sstevel@tonic-gate	or	%g2, CPU_DTRACE_BADADDR, %g2	/* delay slot runs on both paths; %g2 dead at 0f */
19427c478bd9Sstevel@tonic-gate	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
19437c478bd9Sstevel@tonic-gate	GET_MMU_D_ADDR(%g3, /*scratch*/ %g4)
19447c478bd9Sstevel@tonic-gate	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
19457c478bd9Sstevel@tonic-gate	done
19467c478bd9Sstevel@tonic-gate0:
19477c478bd9Sstevel@tonic-gate	TSTAT_CHECK_TL1(1f, %g1, %g2)
19487c478bd9Sstevel@tonic-gate1:
1949efaef81fSarao	SET_GL_REG(1)
19507c478bd9Sstevel@tonic-gate	USE_ALTERNATE_GLOBALS(%g5)
19517c478bd9Sstevel@tonic-gate	GET_MMU_D_TAGACC(%g2 /* tagacc */, %g4 /*scratch*/)
19527c478bd9Sstevel@tonic-gate	mov	T_DATA_MMU_MISS, %g3	/* arg2 = traptype */
19537c478bd9Sstevel@tonic-gate	/*
19547c478bd9Sstevel@tonic-gate	 * g2=tagacc g3.l=type g3.h=0
19557c478bd9Sstevel@tonic-gate	 */
19567c478bd9Sstevel@tonic-gate	sethi	%hi(trap), %g1
19577c478bd9Sstevel@tonic-gate	or	%g1, %lo(trap), %g1
19587c478bd9Sstevel@tonic-gate	ba,pt	%xcc, sys_trap
19597c478bd9Sstevel@tonic-gate	mov	-1, %g4	/* same -1 arg as the other sfmmu sys_trap callers */
19607c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_kpm_exception)
19617c478bd9Sstevel@tonic-gate
19627c478bd9Sstevel@tonic-gate#endif /* lint */
19637c478bd9Sstevel@tonic-gate
19647c478bd9Sstevel@tonic-gate#if defined (lint)
19657c478bd9Sstevel@tonic-gate
/*
 * Lint stubs: empty C definitions that stand in for the assembly
 * entry points in the !lint section below, so that lint sees C
 * definitions of these symbols.
 */
19667c478bd9Sstevel@tonic-gatevoid
19677c478bd9Sstevel@tonic-gatesfmmu_tsb_miss(void)
19687c478bd9Sstevel@tonic-gate{
19697c478bd9Sstevel@tonic-gate}
19707c478bd9Sstevel@tonic-gate
19717c478bd9Sstevel@tonic-gatevoid
19727c478bd9Sstevel@tonic-gatesfmmu_kpm_dtsb_miss(void)
19737c478bd9Sstevel@tonic-gate{
19747c478bd9Sstevel@tonic-gate}
19757c478bd9Sstevel@tonic-gate
19767c478bd9Sstevel@tonic-gatevoid
19777c478bd9Sstevel@tonic-gatesfmmu_kpm_dtsb_miss_small(void)
19787c478bd9Sstevel@tonic-gate{
19797c478bd9Sstevel@tonic-gate}
19807c478bd9Sstevel@tonic-gate
19817c478bd9Sstevel@tonic-gate#else /* lint */
19827c478bd9Sstevel@tonic-gate
19837c478bd9Sstevel@tonic-gate#if (IMAP_SEG != 0)
19847c478bd9Sstevel@tonic-gate#error - ism_map->ism_seg offset is not zero
19857c478bd9Sstevel@tonic-gate#endif
19867c478bd9Sstevel@tonic-gate
19877c478bd9Sstevel@tonic-gate/*
19887c478bd9Sstevel@tonic-gate * Copies ism mapping for this ctx in param "ism" if this is a ISM
19897c478bd9Sstevel@tonic-gate * tlb miss and branches to label "ismhit". If this is not an ISM
19907c478bd9Sstevel@tonic-gate * process or an ISM tlb miss it falls thru.
19917c478bd9Sstevel@tonic-gate *
19927c478bd9Sstevel@tonic-gate * Checks to see if the vaddr passed in via tagacc is in an ISM segment for
19937c478bd9Sstevel@tonic-gate * this process.
19947c478bd9Sstevel@tonic-gate * If so, it will branch to label "ismhit".  If not, it will fall through.
19957c478bd9Sstevel@tonic-gate *
19967c478bd9Sstevel@tonic-gate * Also hat_unshare() will set the context for this process to INVALID_CONTEXT
19977c478bd9Sstevel@tonic-gate * so that any other threads of this process will not try and walk the ism
19987c478bd9Sstevel@tonic-gate * maps while they are being changed.
19997c478bd9Sstevel@tonic-gate *
20007c478bd9Sstevel@tonic-gate * NOTE: We will never have any holes in our ISM maps. sfmmu_share/unshare
20017c478bd9Sstevel@tonic-gate *       will make sure of that. This means we can terminate our search on
20027c478bd9Sstevel@tonic-gate *       the first zero mapping we find.
20037c478bd9Sstevel@tonic-gate *
20047c478bd9Sstevel@tonic-gate * Parameters:
200560972f37Sjb145095 * tagacc	= (pseudo-)tag access register (vaddr + ctx) (in)
20067c478bd9Sstevel@tonic-gate * tsbmiss	= address of tsb miss area (in)
20077c478bd9Sstevel@tonic-gate * ismseg	= contents of ism_seg for this ism map (out)
20087c478bd9Sstevel@tonic-gate * ismhat	= physical address of imap_ismhat for this ism map (out)
20097c478bd9Sstevel@tonic-gate * tmp1		= scratch reg (CLOBBERED)
20107c478bd9Sstevel@tonic-gate * tmp2		= scratch reg (CLOBBERED)
20117c478bd9Sstevel@tonic-gate * tmp3		= scratch reg (CLOBBERED)
20127c478bd9Sstevel@tonic-gate * label:    temporary labels
20137c478bd9Sstevel@tonic-gate * ismhit:   label where to jump to if an ism dtlb miss
20147c478bd9Sstevel@tonic-gate * exitlabel:label where to jump if hat is busy due to hat_unshare.
 *
 * NOTE(review): the parameter list below appears to be missing a ','
 * between "tmp3" and "label" (the backslash continuation joins them on
 * one line) — confirm this is tolerated by the cpp mode used for .s
 * preprocessing.  Also, "exitlabel" is documented above but is not a
 * parameter of the macro.
20157c478bd9Sstevel@tonic-gate */
20167c478bd9Sstevel@tonic-gate#define ISM_CHECK(tagacc, tsbmiss, ismseg, ismhat, tmp1, tmp2, tmp3 \
20177c478bd9Sstevel@tonic-gate	label, ismhit)							\
20187c478bd9Sstevel@tonic-gate	ldx	[tsbmiss + TSBMISS_ISMBLKPA], tmp1 /* tmp1 = &ismblk */	;\
20197c478bd9Sstevel@tonic-gate	brlz,pt  tmp1, label/**/3		/* exit if -1 */	;\
20207c478bd9Sstevel@tonic-gate	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0] */	;\
20217c478bd9Sstevel@tonic-gatelabel/**/1:								;\
20227c478bd9Sstevel@tonic-gate	ldxa	[ismhat]ASI_MEM, ismseg	/* ismblk.map[0].ism_seg */	;\
20237c478bd9Sstevel@tonic-gate	mov	tmp1, tmp3	/* update current ismblkpa head */	;\
20247c478bd9Sstevel@tonic-gatelabel/**/2:								;\
20257c478bd9Sstevel@tonic-gate	brz,pt  ismseg, label/**/3		/* no mapping */	;\
20267c478bd9Sstevel@tonic-gate	  add	ismhat, IMAP_VB_SHIFT, tmp1 /* tmp1 = vb_shift addr */	;\
202705d3dc4bSpaulsan	lduba	[tmp1]ASI_MEM, tmp1 		/* tmp1 = vb shift*/	;\
20287c478bd9Sstevel@tonic-gate	srlx	ismseg, tmp1, tmp2		/* tmp2 = vbase */	;\
20297c478bd9Sstevel@tonic-gate	srlx	tagacc, tmp1, tmp1		/* tmp1 =  va seg*/	;\
20307c478bd9Sstevel@tonic-gate	sub	tmp1, tmp2, tmp2		/* tmp2 = va - vbase */	;\
20317c478bd9Sstevel@tonic-gate	add	ismhat, IMAP_SZ_MASK, tmp1 /* tmp1 = sz_mask addr */	;\
20327c478bd9Sstevel@tonic-gate	lda	[tmp1]ASI_MEM, tmp1		/* tmp1 = sz_mask */	;\
20337c478bd9Sstevel@tonic-gate	and	ismseg, tmp1, tmp1		/* tmp1 = size */	;\
20347c478bd9Sstevel@tonic-gate	cmp	tmp2, tmp1		 	/* check va <= offset*/	;\
20357c478bd9Sstevel@tonic-gate	blu,a,pt  %xcc, ismhit			/* ism hit */		;\
20367c478bd9Sstevel@tonic-gate	  add	ismhat, IMAP_ISMHAT, ismhat 	/* ismhat = &ism_sfmmu*/ ;\
20377c478bd9Sstevel@tonic-gate									;\
20387c478bd9Sstevel@tonic-gate	add	ismhat, ISM_MAP_SZ, ismhat /* ismhat += sizeof(map) */ 	;\
20397c478bd9Sstevel@tonic-gate	add	tmp3, (IBLK_MAPS + ISM_MAP_SLOTS * ISM_MAP_SZ), tmp1	;\
20407c478bd9Sstevel@tonic-gate	cmp	ismhat, tmp1						;\
20417c478bd9Sstevel@tonic-gate	bl,pt	%xcc, label/**/2		/* keep looking  */	;\
20427c478bd9Sstevel@tonic-gate	  ldxa	[ismhat]ASI_MEM, ismseg	/* ismseg = map[ismhat] */	;\
20437c478bd9Sstevel@tonic-gate									;\
20447c478bd9Sstevel@tonic-gate	add	tmp3, IBLK_NEXTPA, tmp1					;\
20457c478bd9Sstevel@tonic-gate	ldxa	[tmp1]ASI_MEM, tmp1		/* check blk->nextpa */	;\
20467c478bd9Sstevel@tonic-gate	brgez,pt tmp1, label/**/1		/* continue if not -1*/	;\
20477c478bd9Sstevel@tonic-gate	  add	tmp1, IBLK_MAPS, ismhat	/* ismhat = &ismblk.map[0]*/	;\
20487c478bd9Sstevel@tonic-gatelabel/**/3:
20497c478bd9Sstevel@tonic-gate
20507c478bd9Sstevel@tonic-gate/*
20517c478bd9Sstevel@tonic-gate * Returns the hme hash bucket (hmebp) given the vaddr, and the hatid
 * It also returns the virtual pg for vaddr (ie. vaddr >> hmeshift)
 *
 * A zero context in tagacc (kernel) selects the kernel hash table;
 * a non-zero context selects the user hash table.
 * NOTE(review): the hash size word is used directly as an AND mask,
 * so TSBMISS_[UK]HASHSZ presumably holds (number of buckets - 1) —
 * confirm against the tsbmiss area definition.
 *
20547c478bd9Sstevel@tonic-gate * Parameters:
205460972f37Sjb145095 * tagacc = reg containing virtual address
20557c478bd9Sstevel@tonic-gate * hatid = reg containing sfmmu pointer
20567c478bd9Sstevel@tonic-gate * hmeshift = constant/register to shift vaddr to obtain vapg
20577c478bd9Sstevel@tonic-gate * hmebp = register where bucket pointer will be stored
20587c478bd9Sstevel@tonic-gate * vapg = register where virtual page will be stored
20597c478bd9Sstevel@tonic-gate * tmp1, tmp2 = tmp registers
20607c478bd9Sstevel@tonic-gate */
20617c478bd9Sstevel@tonic-gate
20627c478bd9Sstevel@tonic-gate
20637c478bd9Sstevel@tonic-gate#define	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, hmebp,	\
20647c478bd9Sstevel@tonic-gate	vapg, label, tmp1, tmp2)					\
20657c478bd9Sstevel@tonic-gate	sllx	tagacc, TAGACC_CTX_LSHIFT, tmp1				;\
20667c478bd9Sstevel@tonic-gate	brnz,a,pt tmp1, label/**/1					;\
20677c478bd9Sstevel@tonic-gate	  ld    [tsbarea + TSBMISS_UHASHSZ], hmebp			;\
20687c478bd9Sstevel@tonic-gate	ld	[tsbarea + TSBMISS_KHASHSZ], hmebp			;\
20697c478bd9Sstevel@tonic-gate	ba,pt	%xcc, label/**/2					;\
20707c478bd9Sstevel@tonic-gate	  ldx	[tsbarea + TSBMISS_KHASHSTART], tmp1			;\
20717c478bd9Sstevel@tonic-gatelabel/**/1:								;\
20727c478bd9Sstevel@tonic-gate	ldx	[tsbarea + TSBMISS_UHASHSTART], tmp1			;\
20737c478bd9Sstevel@tonic-gatelabel/**/2:								;\
20747c478bd9Sstevel@tonic-gate	srlx	tagacc, hmeshift, vapg					;\
20757c478bd9Sstevel@tonic-gate	xor	vapg, hatid, tmp2	/* hatid ^ (vaddr >> shift) */	;\
20767c478bd9Sstevel@tonic-gate	and	tmp2, hmebp, hmebp	/* index into hme_hash */	;\
20777c478bd9Sstevel@tonic-gate	mulx	hmebp, HMEBUCK_SIZE, hmebp				;\
20787c478bd9Sstevel@tonic-gate	add	hmebp, tmp1, hmebp
20797c478bd9Sstevel@tonic-gate
20807c478bd9Sstevel@tonic-gate/*
20817c478bd9Sstevel@tonic-gate * hashtag includes bspage + hashno (64 bits).
 * Builds hblktag = (vapg << hmeshift) | (hashno << HTAG_REHASH_SHIFT).
 * NOTE(review): the hatid parameter is not referenced by the macro body.
20827c478bd9Sstevel@tonic-gate */
20837c478bd9Sstevel@tonic-gate
20847c478bd9Sstevel@tonic-gate#define	MAKE_HASHTAG(vapg, hatid, hmeshift, hashno, hblktag)		\
20857c478bd9Sstevel@tonic-gate	sllx	vapg, hmeshift, vapg					;\
208605d3dc4bSpaulsan	mov	hashno, hblktag						;\
208705d3dc4bSpaulsan	sllx	hblktag, HTAG_REHASH_SHIFT, hblktag			;\
208805d3dc4bSpaulsan	or	vapg, hblktag, hblktag
20897c478bd9Sstevel@tonic-gate
20907c478bd9Sstevel@tonic-gate/*
20917c478bd9Sstevel@tonic-gate * Function to traverse hmeblk hash link list and find corresponding match.
20927c478bd9Sstevel@tonic-gate * The search is done using physical pointers. It returns the physical address
20930a90a7fdSAmritpal Sandhu * pointer to the hmeblk that matches with the tag provided.
 *
 * The hmeblk tag is compared as two CLONGSIZE words: the first against
 * hmeblktag, the second against hatid.  Both the hit and miss paths
 * fall out at label/2; the caller must compare hmeblkpa against the
 * HMEBLK_ENDPA sentinel to tell "found" from "not found".
 *
20947c478bd9Sstevel@tonic-gate * Parameters:
20957c478bd9Sstevel@tonic-gate * hmebp	= register that points to hme hash bucket, also used as
20967c478bd9Sstevel@tonic-gate *		  tmp reg (clobbered)
20977c478bd9Sstevel@tonic-gate * hmeblktag	= register with hmeblk tag match
20987c478bd9Sstevel@tonic-gate * hatid	= register with hatid
20997c478bd9Sstevel@tonic-gate * hmeblkpa	= register where physical ptr will be stored
21007c478bd9Sstevel@tonic-gate * tmp1		= tmp reg
21017c478bd9Sstevel@tonic-gate * label: temporary label
21027c478bd9Sstevel@tonic-gate */
21037c478bd9Sstevel@tonic-gate
21040a90a7fdSAmritpal Sandhu#define	HMEHASH_SEARCH(hmebp, hmeblktag, hatid, hmeblkpa, tsbarea, 	\
21050a90a7fdSAmritpal Sandhu	tmp1, label)							\
21067c478bd9Sstevel@tonic-gate	add     hmebp, HMEBUCK_NEXTPA, hmeblkpa				;\
21077c478bd9Sstevel@tonic-gate	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
21087c478bd9Sstevel@tonic-gate	HAT_HSEARCH_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
21097c478bd9Sstevel@tonic-gatelabel/**/1:								;\
21100a90a7fdSAmritpal Sandhu	cmp	hmeblkpa, HMEBLK_ENDPA					;\
21110a90a7fdSAmritpal Sandhu	be,pn   %xcc, label/**/2					;\
21127c478bd9Sstevel@tonic-gate	HAT_HLINK_DBSTAT(hatid, tsbarea, hmebp, tmp1)			;\
21137c478bd9Sstevel@tonic-gate	add	hmeblkpa, HMEBLK_TAG, hmebp				;\
21147c478bd9Sstevel@tonic-gate	ldxa	[hmebp]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
21157c478bd9Sstevel@tonic-gate	add	hmebp, CLONGSIZE, hmebp					;\
21167c478bd9Sstevel@tonic-gate	ldxa	[hmebp]ASI_MEM, hmebp 	/* read 2nd part of tag */	;\
21177c478bd9Sstevel@tonic-gate	xor	tmp1, hmeblktag, tmp1					;\
21187c478bd9Sstevel@tonic-gate	xor	hmebp, hatid, hmebp					;\
21197c478bd9Sstevel@tonic-gate	or	hmebp, tmp1, hmebp					;\
21207c478bd9Sstevel@tonic-gate	brz,pn	hmebp, label/**/2	/* branch on hit */		;\
21217c478bd9Sstevel@tonic-gate	  add	hmeblkpa, HMEBLK_NEXTPA, hmebp				;\
21227c478bd9Sstevel@tonic-gate	ba,pt	%xcc, label/**/1					;\
21237c478bd9Sstevel@tonic-gate	  ldxa	[hmebp]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */	;\
21247c478bd9Sstevel@tonic-gatelabel/**/2:
21257c478bd9Sstevel@tonic-gate
212605d3dc4bSpaulsan/*
212705d3dc4bSpaulsan * Function to traverse hmeblk hash link list and find corresponding match.
212805d3dc4bSpaulsan * The search is done using physical pointers. It returns the physical address
21290a90a7fdSAmritpal Sandhu * pointer to the hmeblk that matches with the tag
213005d3dc4bSpaulsan * provided.
 *
 * On entry hmeblkpa must already hold the PA of the first hmeblk on
 * the bucket's chain (the caller loads it from HMEBUCK_NEXTPA).
 * On a raw tag match, the residual xor left in tmp1 is the hmeblk's
 * region id; the match is accepted only when rid < SFMMU_MAX_HME_REGIONS
 * and the rid's bit is set in the tsbmiss area's TSBMISS_SHMERMAP
 * bitmap — otherwise the search continues down the chain.  Exits at
 * label/4 with hmeblkpa == HMEBLK_ENDPA when no acceptable hmeblk is
 * found.
 *
213105d3dc4bSpaulsan * Parameters:
213205d3dc4bSpaulsan * hmeblktag	= register with hmeblk tag match (rid field is 0)
213305d3dc4bSpaulsan * hatid	= register with hatid (pointer to SRD)
213405d3dc4bSpaulsan * hmeblkpa	= register where physical ptr will be stored
213505d3dc4bSpaulsan * tmp1		= tmp reg
213605d3dc4bSpaulsan * tmp2		= tmp reg
213705d3dc4bSpaulsan * label: temporary label
213805d3dc4bSpaulsan */
213905d3dc4bSpaulsan
21400a90a7fdSAmritpal Sandhu#define	HMEHASH_SEARCH_SHME(hmeblktag, hatid, hmeblkpa, tsbarea,	\
21410a90a7fdSAmritpal Sandhu	tmp1, tmp2, label)			 			\
214205d3dc4bSpaulsanlabel/**/1:								;\
21430a90a7fdSAmritpal Sandhu	cmp	hmeblkpa, HMEBLK_ENDPA					;\
21440a90a7fdSAmritpal Sandhu	be,pn   %xcc, label/**/4					;\
214505d3dc4bSpaulsan	HAT_HLINK_DBSTAT(hatid, tsbarea, tmp1, tmp2)			;\
214605d3dc4bSpaulsan	add	hmeblkpa, HMEBLK_TAG, tmp2				;\
214705d3dc4bSpaulsan	ldxa	[tmp2]ASI_MEM, tmp1	 /* read 1st part of tag */	;\
214805d3dc4bSpaulsan	add	tmp2, CLONGSIZE, tmp2					;\
214905d3dc4bSpaulsan	ldxa	[tmp2]ASI_MEM, tmp2 	/* read 2nd part of tag */	;\
215005d3dc4bSpaulsan	xor	tmp1, hmeblktag, tmp1					;\
215105d3dc4bSpaulsan	xor	tmp2, hatid, tmp2					;\
215205d3dc4bSpaulsan	brz,pn	tmp2, label/**/3	/* branch on hit */		;\
215305d3dc4bSpaulsan	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
21540a90a7fdSAmritpal Sandhulabel/**/2:								;\
215505d3dc4bSpaulsan	ba,pt	%xcc, label/**/1					;\
215605d3dc4bSpaulsan	  ldxa	[tmp2]ASI_MEM, hmeblkpa	/* hmeblk ptr pa */		;\
215705d3dc4bSpaulsanlabel/**/3:								;\
215805d3dc4bSpaulsan	cmp	tmp1, SFMMU_MAX_HME_REGIONS				;\
215905d3dc4bSpaulsan	bgeu,pt	%xcc, label/**/2					;\
21600a90a7fdSAmritpal Sandhu	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
216105d3dc4bSpaulsan	and	tmp1, BT_ULMASK, tmp2					;\
216205d3dc4bSpaulsan	srlx	tmp1, BT_ULSHIFT, tmp1					;\
216305d3dc4bSpaulsan	sllx	tmp1, CLONGSHIFT, tmp1					;\
216405d3dc4bSpaulsan	add	tsbarea, tmp1, tmp1					;\
216505d3dc4bSpaulsan	ldx	[tmp1 + TSBMISS_SHMERMAP], tmp1				;\
216605d3dc4bSpaulsan	srlx	tmp1, tmp2, tmp1					;\
216705d3dc4bSpaulsan	btst	0x1, tmp1						;\
216805d3dc4bSpaulsan	bz,pn	%xcc, label/**/2					;\
21690a90a7fdSAmritpal Sandhu	  add	hmeblkpa, HMEBLK_NEXTPA, tmp2				;\
217005d3dc4bSpaulsanlabel/**/4:
21717c478bd9Sstevel@tonic-gate
21727c478bd9Sstevel@tonic-gate#if ((1 << SFHME_SHIFT) != SFHME_SIZE)
21737c478bd9Sstevel@tonic-gate#error HMEBLK_TO_HMENT assumes sf_hment is power of 2 in size
21747c478bd9Sstevel@tonic-gate#endif
21757c478bd9Sstevel@tonic-gate
21767c478bd9Sstevel@tonic-gate/*
21777c478bd9Sstevel@tonic-gate * HMEBLK_TO_HMENT is a macro that given an hmeblk and a vaddr returns
21787c478bd9Sstevel@tonic-gate * the offset for the corresponding hment.
 *
 * A non-8K (sz != TTE8K) hmeblk holds a single hment at HMEBLK_HME1;
 * an 8K hmeblk holds NHMENTS hments, indexed here by
 * (vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1).
 *
21797c478bd9Sstevel@tonic-gate * Parameters:
218005d3dc4bSpaulsan * In:
21817c478bd9Sstevel@tonic-gate *	vaddr = register with virtual address
21827c478bd9Sstevel@tonic-gate *	hmeblkpa = physical pointer to hme_blk
218305d3dc4bSpaulsan * Out:
21847c478bd9Sstevel@tonic-gate *	hmentoff = register where hment offset will be stored
218505d3dc4bSpaulsan *	hmemisc = hblk_misc
218605d3dc4bSpaulsan * Scratch:
218705d3dc4bSpaulsan *	tmp1
21887c478bd9Sstevel@tonic-gate */
218905d3dc4bSpaulsan#define	HMEBLK_TO_HMENT(vaddr, hmeblkpa, hmentoff, hmemisc, tmp1, label1)\
21907c478bd9Sstevel@tonic-gate	add	hmeblkpa, HMEBLK_MISC, hmentoff				;\
219105d3dc4bSpaulsan	lda	[hmentoff]ASI_MEM, hmemisc 				;\
219205d3dc4bSpaulsan	andcc	hmemisc, HBLK_SZMASK, %g0				;\
21937c478bd9Sstevel@tonic-gate	bnz,a,pn  %icc, label1		/* if sz != TTE8K branch */	;\
21947c478bd9Sstevel@tonic-gate	  or	%g0, HMEBLK_HME1, hmentoff				;\
21957c478bd9Sstevel@tonic-gate	srl	vaddr, MMU_PAGESHIFT, tmp1				;\
21967c478bd9Sstevel@tonic-gate	and	tmp1, NHMENTS - 1, tmp1		/* tmp1 = index */	;\
21977c478bd9Sstevel@tonic-gate	sllx	tmp1, SFHME_SHIFT, tmp1					;\
21987c478bd9Sstevel@tonic-gate	add	tmp1, HMEBLK_HME1, hmentoff				;\
21997c478bd9Sstevel@tonic-gatelabel1:
22007c478bd9Sstevel@tonic-gate
22017c478bd9Sstevel@tonic-gate/*
22027c478bd9Sstevel@tonic-gate * GET_TTE is a macro that returns a TTE given a tag and hatid.
22037c478bd9Sstevel@tonic-gate *
220460972f37Sjb145095 * tagacc	= (pseudo-)tag access register (in)
22057c478bd9Sstevel@tonic-gate * hatid	= sfmmu pointer for TSB miss (in)
22067c478bd9Sstevel@tonic-gate * tte		= tte for TLB miss if found, otherwise clobbered (out)
22077c478bd9Sstevel@tonic-gate * hmeblkpa	= PA of hment if found, otherwise clobbered (out)
22087c478bd9Sstevel@tonic-gate * tsbarea	= pointer to the tsbmiss area for this cpu. (in)
220905d3dc4bSpaulsan * hmemisc	= hblk_misc if TTE is found (out), otherwise clobbered
22107c478bd9Sstevel@tonic-gate * hmeshift	= constant/register to shift VA to obtain the virtual pfn
22117c478bd9Sstevel@tonic-gate *		  for this page size.
22127c478bd9Sstevel@tonic-gate * hashno	= constant/register hash number
22130a90a7fdSAmritpal Sandhu * tmp		= temp value - clobbered
22147c478bd9Sstevel@tonic-gate * label	= temporary label for branching within macro.
22157c478bd9Sstevel@tonic-gate * foundlabel	= label to jump to when tte is found.
22167c478bd9Sstevel@tonic-gate * suspendlabel= label to jump to when tte is suspended.
221705d3dc4bSpaulsan * exitlabel	= label to jump to when tte is not found.
22187c478bd9Sstevel@tonic-gate *
 * The caller's tagacc and hatid are preserved across the hash search
 * via the TSBMISS_SCRATCH slots of the tsbmiss area and restored
 * before exiting.  On the found path, hmeblkpa points at the matching
 * sf_hment's tte word (hmeblk PA + hment offset + SFHME_TTE).  The
 * hash tag is built with SFMMU_INVALID_SHMERID, so only non-shared
 * hmeblks can match; GET_SHME_TTE handles the shared case.
22197c478bd9Sstevel@tonic-gate */
22200a90a7fdSAmritpal Sandhu#define GET_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, hmeshift, \
22210a90a7fdSAmritpal Sandhu		 hashno, tmp, label, foundlabel, suspendlabel, exitlabel) \
22227c478bd9Sstevel@tonic-gate									;\
22237c478bd9Sstevel@tonic-gate	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
22247c478bd9Sstevel@tonic-gate	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
22257c478bd9Sstevel@tonic-gate	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
22260a90a7fdSAmritpal Sandhu		hmeblkpa, label/**/5, hmemisc, tmp)			;\
22277c478bd9Sstevel@tonic-gate									;\
22287c478bd9Sstevel@tonic-gate	/*								;\
22297c478bd9Sstevel@tonic-gate	 * tagacc = tagacc						;\
22307c478bd9Sstevel@tonic-gate	 * hatid = hatid						;\
22317c478bd9Sstevel@tonic-gate	 * tsbarea = tsbarea						;\
22327c478bd9Sstevel@tonic-gate	 * tte   = hmebp (hme bucket pointer)				;\
22337c478bd9Sstevel@tonic-gate	 * hmeblkpa  = vapg  (virtual page)				;\
22340a90a7fdSAmritpal Sandhu	 * hmemisc, tmp = scratch					;\
22357c478bd9Sstevel@tonic-gate	 */								;\
223605d3dc4bSpaulsan	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
223705d3dc4bSpaulsan	or	hmemisc, SFMMU_INVALID_SHMERID, hmemisc			;\
22387c478bd9Sstevel@tonic-gate									;\
22397c478bd9Sstevel@tonic-gate	/*								;\
22407c478bd9Sstevel@tonic-gate	 * tagacc = tagacc						;\
22417c478bd9Sstevel@tonic-gate	 * hatid = hatid						;\
22427c478bd9Sstevel@tonic-gate	 * tte   = hmebp						;\
22437c478bd9Sstevel@tonic-gate	 * hmeblkpa  = CLOBBERED					;\
224405d3dc4bSpaulsan	 * hmemisc  = htag_bspage+hashno+invalid_rid			;\
22450a90a7fdSAmritpal Sandhu	 * tmp  = scratch						;\
22467c478bd9Sstevel@tonic-gate	 */								;\
22477c478bd9Sstevel@tonic-gate	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
22480a90a7fdSAmritpal Sandhu	HMEHASH_SEARCH(tte, hmemisc, hatid, hmeblkpa, 	 		\
22497c478bd9Sstevel@tonic-gate		tsbarea, tagacc, label/**/1)				;\
22507c478bd9Sstevel@tonic-gate	/*								;\
22517c478bd9Sstevel@tonic-gate	 * tagacc = CLOBBERED						;\
22527c478bd9Sstevel@tonic-gate	 * tte = CLOBBERED						;\
22537c478bd9Sstevel@tonic-gate	 * hmeblkpa = hmeblkpa						;\
22540a90a7fdSAmritpal Sandhu	 * tmp = scratch						;\
22557c478bd9Sstevel@tonic-gate	 */								;\
22560a90a7fdSAmritpal Sandhu	cmp	hmeblkpa, HMEBLK_ENDPA					;\
22570a90a7fdSAmritpal Sandhu	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
22587c478bd9Sstevel@tonic-gate	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
22597c478bd9Sstevel@tonic-gate	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
22607c478bd9Sstevel@tonic-gate	  nop								;\
22617c478bd9Sstevel@tonic-gatelabel/**/4:								;\
22627c478bd9Sstevel@tonic-gate	/*								;\
22637c478bd9Sstevel@tonic-gate	 * We have found the hmeblk containing the hment.		;\
22647c478bd9Sstevel@tonic-gate	 * Now we calculate the corresponding tte.			;\
22657c478bd9Sstevel@tonic-gate	 *								;\
22667c478bd9Sstevel@tonic-gate	 * tagacc = tagacc						;\
226705d3dc4bSpaulsan	 * hatid = hatid						;\
226805d3dc4bSpaulsan	 * tte   = clobbered						;\
22697c478bd9Sstevel@tonic-gate	 * hmeblkpa  = hmeblkpa						;\
227005d3dc4bSpaulsan	 * hmemisc  = hblktag						;\
22710a90a7fdSAmritpal Sandhu	 * tmp = scratch						;\
22727c478bd9Sstevel@tonic-gate	 */								;\
227305d3dc4bSpaulsan	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
227405d3dc4bSpaulsan		label/**/2)						;\
22757c478bd9Sstevel@tonic-gate									;\
227605d3dc4bSpaulsan	/*								;\
227705d3dc4bSpaulsan	 * tagacc = tagacc						;\
227805d3dc4bSpaulsan	 * hatid = hmentoff						;\
227905d3dc4bSpaulsan	 * tte   = clobbered						;\
228005d3dc4bSpaulsan	 * hmeblkpa  = hmeblkpa						;\
228105d3dc4bSpaulsan	 * hmemisc  = hblk_misc						;\
22820a90a7fdSAmritpal Sandhu	 * tmp = scratch						;\
228305d3dc4bSpaulsan	 */								;\
228405d3dc4bSpaulsan									;\
228505d3dc4bSpaulsan	add	hatid, SFHME_TTE, hatid					;\
228605d3dc4bSpaulsan	add	hmeblkpa, hatid, hmeblkpa				;\
22877c478bd9Sstevel@tonic-gate	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
228805d3dc4bSpaulsan	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
228905d3dc4bSpaulsan	set	TTE_SUSPEND, hatid					;\
229005d3dc4bSpaulsan	TTE_SUSPEND_INT_SHIFT(hatid)					;\
229105d3dc4bSpaulsan	btst	tte, hatid						;\
22927c478bd9Sstevel@tonic-gate	bz,pt	%xcc, foundlabel					;\
229305d3dc4bSpaulsan	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
22947c478bd9Sstevel@tonic-gate									;\
229505d3dc4bSpaulsan	/*								;\
229605d3dc4bSpaulsan	 * Mapping is suspended, so goto suspend label.			;\
229705d3dc4bSpaulsan	 */								;\
229805d3dc4bSpaulsan	ba,pt	%xcc, suspendlabel					;\
229905d3dc4bSpaulsan	  nop
230005d3dc4bSpaulsan
230105d3dc4bSpaulsan/*
230205d3dc4bSpaulsan * GET_SHME_TTE is similar to GET_TTE() except it searches
230305d3dc4bSpaulsan * shared hmeblks via HMEHASH_SEARCH_SHME() macro.
230405d3dc4bSpaulsan * If valid tte is found, hmemisc = shctx flag, i.e., shme is
230505d3dc4bSpaulsan * either 0 (not part of scd) or 1 (part of scd).
230605d3dc4bSpaulsan */
23070a90a7fdSAmritpal Sandhu#define GET_SHME_TTE(tagacc, hatid, tte, hmeblkpa, tsbarea, hmemisc, 	\
23080a90a7fdSAmritpal Sandhu		hmeshift, hashno, tmp, label, foundlabel,		\
230905d3dc4bSpaulsan		suspendlabel, exitlabel)				\
231005d3dc4bSpaulsan									;\
231105d3dc4bSpaulsan	stn	tagacc, [tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)]	;\
231205d3dc4bSpaulsan	stn	hatid, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)]	;\
231305d3dc4bSpaulsan	HMEHASH_FUNC_ASM(tagacc, hatid, tsbarea, hmeshift, tte,		\
23140a90a7fdSAmritpal Sandhu		hmeblkpa, label/**/5, hmemisc, tmp)			;\
231505d3dc4bSpaulsan									;\
231605d3dc4bSpaulsan	/*								;\
231705d3dc4bSpaulsan	 * tagacc = tagacc						;\
231805d3dc4bSpaulsan	 * hatid = hatid						;\
231905d3dc4bSpaulsan	 * tsbarea = tsbarea						;\
232005d3dc4bSpaulsan	 * tte   = hmebp (hme bucket pointer)				;\
232105d3dc4bSpaulsan	 * hmeblkpa  = vapg  (virtual page)				;\
23220a90a7fdSAmritpal Sandhu	 * hmemisc, tmp = scratch					;\
232305d3dc4bSpaulsan	 */								;\
232405d3dc4bSpaulsan	MAKE_HASHTAG(hmeblkpa, hatid, hmeshift, hashno, hmemisc)	;\
232505d3dc4bSpaulsan									;\
232605d3dc4bSpaulsan	/*								;\
232705d3dc4bSpaulsan	 * tagacc = tagacc						;\
232805d3dc4bSpaulsan	 * hatid = hatid						;\
232905d3dc4bSpaulsan	 * tsbarea = tsbarea						;\
233005d3dc4bSpaulsan	 * tte   = hmebp						;\
233105d3dc4bSpaulsan	 * hmemisc  = htag_bspage + hashno + 0 (for rid)		;\
233205d3dc4bSpaulsan	 * hmeblkpa  = CLOBBERED					;\
23330a90a7fdSAmritpal Sandhu	 * tmp = scratch						;\
233405d3dc4bSpaulsan	 */								;\
233505d3dc4bSpaulsan	stn	tte, [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)]	;\
233605d3dc4bSpaulsan									;\
233705d3dc4bSpaulsan	add     tte, HMEBUCK_NEXTPA, hmeblkpa				;\
233805d3dc4bSpaulsan	ldxa    [hmeblkpa]ASI_MEM, hmeblkpa				;\
233905d3dc4bSpaulsan	HAT_HSEARCH_DBSTAT(hatid, tsbarea, tagacc, tte)			;\
234005d3dc4bSpaulsan									;\
234105d3dc4bSpaulsanlabel/**/8:								;\
23420a90a7fdSAmritpal Sandhu	HMEHASH_SEARCH_SHME(hmemisc, hatid, hmeblkpa,			\
234305d3dc4bSpaulsan		tsbarea, tagacc, tte, label/**/1)			;\
234405d3dc4bSpaulsan	/*								;\
234505d3dc4bSpaulsan	 * tagacc = CLOBBERED						;\
234605d3dc4bSpaulsan	 * tte = CLOBBERED						;\
234705d3dc4bSpaulsan	 * hmeblkpa = hmeblkpa						;\
23480a90a7fdSAmritpal Sandhu	 * tmp = scratch						;\
234905d3dc4bSpaulsan	 */								;\
23500a90a7fdSAmritpal Sandhu	cmp	hmeblkpa, HMEBLK_ENDPA					;\
23510a90a7fdSAmritpal Sandhu	bne,pn   %xcc, label/**/4       /* branch if hmeblk found */    ;\
235205d3dc4bSpaulsan	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSB_TAGACC)], tagacc	;\
235305d3dc4bSpaulsan	ba,pt	%xcc, exitlabel		/* exit if hblk not found */	;\
23547c478bd9Sstevel@tonic-gate	  nop								;\
235505d3dc4bSpaulsanlabel/**/4:								;\
235605d3dc4bSpaulsan	/*								;\
235705d3dc4bSpaulsan	 * We have found the hmeblk containing the hment.		;\
235805d3dc4bSpaulsan	 * Now we calculate the corresponding tte.			;\
235905d3dc4bSpaulsan	 *								;\
236005d3dc4bSpaulsan	 * tagacc = tagacc						;\
236105d3dc4bSpaulsan	 * hatid = hatid						;\
236205d3dc4bSpaulsan	 * tte   = clobbered						;\
236305d3dc4bSpaulsan	 * hmeblkpa  = hmeblkpa						;\
236405d3dc4bSpaulsan	 * hmemisc  = hblktag						;\
236505d3dc4bSpaulsan	 * tsbarea = tsbmiss area					;\
23660a90a7fdSAmritpal Sandhu	 * tmp = scratch						;\
236705d3dc4bSpaulsan	 */								;\
236805d3dc4bSpaulsan	HMEBLK_TO_HMENT(tagacc, hmeblkpa, hatid, hmemisc, tte,		\
236905d3dc4bSpaulsan		label/**/2)						;\
237005d3dc4bSpaulsan									;\
237105d3dc4bSpaulsan	/*								;\
237205d3dc4bSpaulsan	 * tagacc = tagacc						;\
237305d3dc4bSpaulsan	 * hatid = hmentoff						;\
237405d3dc4bSpaulsan	 * tte = clobbered						;\
237505d3dc4bSpaulsan	 * hmeblkpa  = hmeblkpa						;\
237605d3dc4bSpaulsan	 * hmemisc  = hblk_misc						;\
237705d3dc4bSpaulsan	 * tsbarea = tsbmiss area					;\
23780a90a7fdSAmritpal Sandhu	 * tmp = scratch						;\
237905d3dc4bSpaulsan	 */								;\
238005d3dc4bSpaulsan									;\
238105d3dc4bSpaulsan	add	hatid, SFHME_TTE, hatid					;\
238205d3dc4bSpaulsan	add	hmeblkpa, hatid, hmeblkpa				;\
238305d3dc4bSpaulsan	ldxa	[hmeblkpa]ASI_MEM, tte	/* MMU_READTTE through pa */	;\
238405d3dc4bSpaulsan	brlz,pt tte, label/**/6						;\
23850a90a7fdSAmritpal Sandhu	  nop								;\
238605d3dc4bSpaulsan	btst	HBLK_SZMASK, hmemisc					;\
238705d3dc4bSpaulsan	bnz,a,pt %icc, label/**/7					;\
238805d3dc4bSpaulsan	  ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
238905d3dc4bSpaulsan									;\
239005d3dc4bSpaulsan	/*								;\
239105d3dc4bSpaulsan 	 * We found an invalid 8K tte in shme.				;\
239205d3dc4bSpaulsan	 * it may not belong to shme's region since			;\
239305d3dc4bSpaulsan	 * region size/alignment granularity is 8K but different	;\
239405d3dc4bSpaulsan	 * regions don't share hmeblks. Continue the search.		;\
239505d3dc4bSpaulsan	 */								;\
239605d3dc4bSpaulsan	sub	hmeblkpa, hatid, hmeblkpa				;\
239705d3dc4bSpaulsan	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
239805d3dc4bSpaulsan	srlx	tagacc, hmeshift, tte					;\
239905d3dc4bSpaulsan	add	hmeblkpa, HMEBLK_NEXTPA, hmeblkpa			;\
240005d3dc4bSpaulsan	ldxa	[hmeblkpa]ASI_MEM, hmeblkpa				;\
240105d3dc4bSpaulsan	MAKE_HASHTAG(tte, hatid, hmeshift, hashno, hmemisc)		;\
240205d3dc4bSpaulsan	ba,a,pt	%xcc, label/**/8					;\
240305d3dc4bSpaulsanlabel/**/6:								;\
240405d3dc4bSpaulsan	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)		;\
240505d3dc4bSpaulsan	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid 	;\
240605d3dc4bSpaulsanlabel/**/7:								;\
240705d3dc4bSpaulsan	set	TTE_SUSPEND, hatid					;\
240805d3dc4bSpaulsan	TTE_SUSPEND_INT_SHIFT(hatid)					;\
240905d3dc4bSpaulsan	btst	tte, hatid						;\
241005d3dc4bSpaulsan	bz,pt	%xcc, foundlabel					;\
241105d3dc4bSpaulsan	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HATID)], hatid	;\
24127c478bd9Sstevel@tonic-gate									;\
24137c478bd9Sstevel@tonic-gate	/*								;\
24147c478bd9Sstevel@tonic-gate	 * Mapping is suspended, so goto suspend label.			;\
24157c478bd9Sstevel@tonic-gate	 */								;\
24167c478bd9Sstevel@tonic-gate	ba,pt	%xcc, suspendlabel					;\
24177c478bd9Sstevel@tonic-gate	  nop
24187c478bd9Sstevel@tonic-gate
24197c478bd9Sstevel@tonic-gate	/*
24207c478bd9Sstevel@tonic-gate	 * KERNEL PROTECTION HANDLER
24217c478bd9Sstevel@tonic-gate	 *
24227c478bd9Sstevel@tonic-gate	 * g1 = tsb8k pointer register (clobbered)
24237c478bd9Sstevel@tonic-gate	 * g2 = tag access register (ro)
24247c478bd9Sstevel@tonic-gate	 * g3 - g7 = scratch registers
24257c478bd9Sstevel@tonic-gate	 *
24267c478bd9Sstevel@tonic-gate	 * Note: This function is patched at runtime for performance reasons.
24277c478bd9Sstevel@tonic-gate	 * 	 Any changes here require sfmmu_patch_ktsb fixed.
24287c478bd9Sstevel@tonic-gate	 */
24297c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_kprot_trap)
24307c478bd9Sstevel@tonic-gate	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
24317c478bd9Sstevel@tonic-gatesfmmu_kprot_patch_ktsb_base:
24327c478bd9Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g1, %g6)
24337c478bd9Sstevel@tonic-gate	/* %g1 = contents of ktsb_base or ktsb_pbase */
24347c478bd9Sstevel@tonic-gatesfmmu_kprot_patch_ktsb_szcode:
24357c478bd9Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
24367c478bd9Sstevel@tonic-gate
24377c478bd9Sstevel@tonic-gate	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
24387c478bd9Sstevel@tonic-gate	! %g1 = First TSB entry pointer, as TSB miss handler expects
24397c478bd9Sstevel@tonic-gate
24407c478bd9Sstevel@tonic-gate	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
24417c478bd9Sstevel@tonic-gatesfmmu_kprot_patch_ktsb4m_base:
24427c478bd9Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g3, %g6)
24437c478bd9Sstevel@tonic-gate	/* %g3 = contents of ktsb4m_base or ktsb4m_pbase */
24447c478bd9Sstevel@tonic-gatesfmmu_kprot_patch_ktsb4m_szcode:
24457c478bd9Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
24467c478bd9Sstevel@tonic-gate
24477c478bd9Sstevel@tonic-gate	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
24487c478bd9Sstevel@tonic-gate	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
24497c478bd9Sstevel@tonic-gate
24507c478bd9Sstevel@tonic-gate        CPU_TSBMISS_AREA(%g6, %g7)
24517c478bd9Sstevel@tonic-gate        HAT_PERCPU_STAT16(%g6, TSBMISS_KPROTS, %g7)
24527c478bd9Sstevel@tonic-gate	ba,pt	%xcc, sfmmu_tsb_miss_tt
24537c478bd9Sstevel@tonic-gate	  nop
24547c478bd9Sstevel@tonic-gate
24557c478bd9Sstevel@tonic-gate	/*
24567c478bd9Sstevel@tonic-gate	 * USER PROTECTION HANDLER
24577c478bd9Sstevel@tonic-gate	 *
24587c478bd9Sstevel@tonic-gate	 * g1 = tsb8k pointer register (ro)
24597c478bd9Sstevel@tonic-gate	 * g2 = tag access register (ro)
24607c478bd9Sstevel@tonic-gate	 * g3 = faulting context (clobbered, currently not used)
24617c478bd9Sstevel@tonic-gate	 * g4 - g7 = scratch registers
24627c478bd9Sstevel@tonic-gate	 */
24637c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_uprot_trap)
24647c478bd9Sstevel@tonic-gate#ifdef sun4v
24657c478bd9Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
24667c478bd9Sstevel@tonic-gate	/* %g1 = first TSB entry ptr now, %g2 preserved */
24677c478bd9Sstevel@tonic-gate
24687c478bd9Sstevel@tonic-gate	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
24697c478bd9Sstevel@tonic-gate	brlz,pt %g3, 9f				/* check for 2nd TSB */
247005d3dc4bSpaulsan	  nop
24717c478bd9Sstevel@tonic-gate
24727c478bd9Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
24737c478bd9Sstevel@tonic-gate	/* %g3 = second TSB entry ptr now, %g2 preserved */
24747c478bd9Sstevel@tonic-gate
24757c478bd9Sstevel@tonic-gate#else /* sun4v */
247625cf1a30Sjl139090#ifdef UTSB_PHYS
247725cf1a30Sjl139090	/* g1 = first TSB entry ptr */
24781426d65aSsm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
247905d3dc4bSpaulsan	brlz,pt %g3, 9f			/* check for 2nd TSB */
248005d3dc4bSpaulsan	  nop
24817c478bd9Sstevel@tonic-gate
248225cf1a30Sjl139090	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
248325cf1a30Sjl139090	/* %g3 = second TSB entry ptr now, %g2 preserved */
248425cf1a30Sjl139090#else /* UTSB_PHYS */
24857c478bd9Sstevel@tonic-gate	brgez,pt %g1, 9f		/* check for 2nd TSB */
248605d3dc4bSpaulsan	  mov	-1, %g3			/* set second tsbe ptr to -1 */
24877c478bd9Sstevel@tonic-gate
24887c478bd9Sstevel@tonic-gate	mov	%g2, %g7
24897c478bd9Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g7, %g1, %g3, %g4, %g5, sfmmu_uprot)
24907c478bd9Sstevel@tonic-gate	/* %g3 = second TSB entry ptr now, %g7 clobbered */
24917c478bd9Sstevel@tonic-gate	mov	%g1, %g7
24927c478bd9Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
249325cf1a30Sjl139090#endif /* UTSB_PHYS */
24947c478bd9Sstevel@tonic-gate#endif /* sun4v */
24957c478bd9Sstevel@tonic-gate9:
24967c478bd9Sstevel@tonic-gate	CPU_TSBMISS_AREA(%g6, %g7)
24977c478bd9Sstevel@tonic-gate	HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
24987c478bd9Sstevel@tonic-gate	ba,pt	%xcc, sfmmu_tsb_miss_tt		/* branch TSB miss handler */
24997c478bd9Sstevel@tonic-gate	  nop
25007c478bd9Sstevel@tonic-gate
25017c478bd9Sstevel@tonic-gate	/*
25027c478bd9Sstevel@tonic-gate	 * Kernel 8K page iTLB miss.  We also get here if we took a
25037c478bd9Sstevel@tonic-gate	 * fast instruction access mmu miss trap while running in
25047c478bd9Sstevel@tonic-gate	 * invalid context.
25057c478bd9Sstevel@tonic-gate	 *
25067c478bd9Sstevel@tonic-gate	 * %g1 = 8K TSB pointer register (not used, clobbered)
25077c478bd9Sstevel@tonic-gate	 * %g2 = tag access register (used)
25087c478bd9Sstevel@tonic-gate	 * %g3 = faulting context id (used)
250905d3dc4bSpaulsan	 * %g7 = TSB tag to match (used)
25107c478bd9Sstevel@tonic-gate	 */
25117c478bd9Sstevel@tonic-gate	.align	64
25127c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_kitlb_miss)
25137c478bd9Sstevel@tonic-gate	brnz,pn %g3, tsb_tl0_noctxt
25147c478bd9Sstevel@tonic-gate	  nop
25157c478bd9Sstevel@tonic-gate
25167c478bd9Sstevel@tonic-gate	/* kernel miss */
25177c478bd9Sstevel@tonic-gate	/* get kernel tsb pointer */
25187c478bd9Sstevel@tonic-gate	/* we patch the next set of instructions at run time */
25197c478bd9Sstevel@tonic-gate	/* NOTE: any changes here require sfmmu_patch_ktsb fixed */
25207c478bd9Sstevel@tonic-gateiktsbbase:
25217c478bd9Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g4, %g5)
25227c478bd9Sstevel@tonic-gate	/* %g4 = contents of ktsb_base or ktsb_pbase */
25237c478bd9Sstevel@tonic-gate
25247c478bd9Sstevel@tonic-gateiktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
25257c478bd9Sstevel@tonic-gate	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
25267c478bd9Sstevel@tonic-gate	or	%g4, %g1, %g1			! form tsb ptr
25277c478bd9Sstevel@tonic-gate	ldda	[%g1]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
25287c478bd9Sstevel@tonic-gate	cmp	%g4, %g7
252905d3dc4bSpaulsan	bne,pn	%xcc, iktsb4mbase		! check 4m ktsb
253005d3dc4bSpaulsan	  srlx    %g2, MMU_PAGESHIFT4M, %g3	! use 4m virt-page as TSB index
253105d3dc4bSpaulsan
253205d3dc4bSpaulsan	andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
253305d3dc4bSpaulsan	bz,pn	%icc, exec_fault
253405d3dc4bSpaulsan	  nop
253505d3dc4bSpaulsan	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
253605d3dc4bSpaulsan	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
253705d3dc4bSpaulsan	retry
253805d3dc4bSpaulsan
253905d3dc4bSpaulsaniktsb4mbase:
254005d3dc4bSpaulsan        RUNTIME_PATCH_SETX(%g4, %g6)
254105d3dc4bSpaulsan        /* %g4 = contents of ktsb4m_base or ktsb4m_pbase */
254205d3dc4bSpaulsaniktsb4m:
254305d3dc4bSpaulsan	sllx    %g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
254405d3dc4bSpaulsan        srlx    %g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
254505d3dc4bSpaulsan	add	%g4, %g3, %g3			! %g3 = 4m tsbe ptr
254605d3dc4bSpaulsan	ldda	[%g3]RUNTIME_PATCH, %g4		! %g4 = tag, %g5 = data
254705d3dc4bSpaulsan	cmp	%g4, %g7
25487c478bd9Sstevel@tonic-gate	bne,pn	%xcc, sfmmu_tsb_miss_tt		! branch on miss
25497c478bd9Sstevel@tonic-gate	  andcc %g5, TTE_EXECPRM_INT, %g0		! check exec bit
25507c478bd9Sstevel@tonic-gate	bz,pn	%icc, exec_fault
25517c478bd9Sstevel@tonic-gate	  nop
25527c478bd9Sstevel@tonic-gate	TT_TRACE(trace_tsbhit)			! 2 instr traptrace
25537c478bd9Sstevel@tonic-gate	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
25547c478bd9Sstevel@tonic-gate	retry
25557c478bd9Sstevel@tonic-gate
25567c478bd9Sstevel@tonic-gate	/*
25577c478bd9Sstevel@tonic-gate	 * Kernel dTLB miss.  We also get here if we took a fast data
25587c478bd9Sstevel@tonic-gate	 * access mmu miss trap while running in invalid context.
25597c478bd9Sstevel@tonic-gate	 *
25607c478bd9Sstevel@tonic-gate	 * Note: for now we store kpm TTEs in the kernel TSB as usual.
25617c478bd9Sstevel@tonic-gate	 *	We select the TSB miss handler to branch to depending on
25627c478bd9Sstevel@tonic-gate	 *	the virtual address of the access.  In the future it may
25637c478bd9Sstevel@tonic-gate	 *	be desirable to separate kpm TTEs into their own TSB,
25647c478bd9Sstevel@tonic-gate	 *	in which case all that needs to be done is to set
25657c478bd9Sstevel@tonic-gate	 *	kpm_tsbbase/kpm_tsbsz to point to the new TSB and branch
25667c478bd9Sstevel@tonic-gate	 *	early in the miss if we detect a kpm VA to a new handler.
25677c478bd9Sstevel@tonic-gate	 *
25687c478bd9Sstevel@tonic-gate	 * %g1 = 8K TSB pointer register (not used, clobbered)
25697c478bd9Sstevel@tonic-gate	 * %g2 = tag access register (used)
25707c478bd9Sstevel@tonic-gate	 * %g3 = faulting context id (used)
25717c478bd9Sstevel@tonic-gate	 */
25727c478bd9Sstevel@tonic-gate	.align	64
25737c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_kdtlb_miss)
25747c478bd9Sstevel@tonic-gate	brnz,pn	%g3, tsb_tl0_noctxt		/* invalid context? */
25757c478bd9Sstevel@tonic-gate	  nop
25767c478bd9Sstevel@tonic-gate
25777c478bd9Sstevel@tonic-gate	/* Gather some stats for kpm misses in the TLB. */
25787c478bd9Sstevel@tonic-gate	/* KPM_TLBMISS_STAT_INCR(tagacc, val, tsbma, tmp1, label) */
25797c478bd9Sstevel@tonic-gate	KPM_TLBMISS_STAT_INCR(%g2, %g4, %g5, %g6, kpmtlbm_stat_out)
25807c478bd9Sstevel@tonic-gate
25817c478bd9Sstevel@tonic-gate	/*
25827c478bd9Sstevel@tonic-gate	 * Get first TSB offset and look for 8K/64K/512K mapping
25837c478bd9Sstevel@tonic-gate	 * using the 8K virtual page as the index.
25847c478bd9Sstevel@tonic-gate	 *
25857c478bd9Sstevel@tonic-gate	 * We patch the next set of instructions at run time;
25867c478bd9Sstevel@tonic-gate	 * any changes here require sfmmu_patch_ktsb changes too.
25877c478bd9Sstevel@tonic-gate	 */
25887c478bd9Sstevel@tonic-gatedktsbbase:
25897c478bd9Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g7, %g6)
25907c478bd9Sstevel@tonic-gate	/* %g7 = contents of ktsb_base or ktsb_pbase */
25917c478bd9Sstevel@tonic-gate
25927c478bd9Sstevel@tonic-gatedktsb:	sllx	%g2, 64-(TAGACC_SHIFT + TSB_START_SIZE + RUNTIME_PATCH), %g1
25937c478bd9Sstevel@tonic-gate	srlx	%g1, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g1
25947c478bd9Sstevel@tonic-gate
25957c478bd9Sstevel@tonic-gate	/*
25967c478bd9Sstevel@tonic-gate	 * At this point %g1 is our index into the TSB.
25977c478bd9Sstevel@tonic-gate	 * We just masked off enough bits of the VA depending
25987c478bd9Sstevel@tonic-gate	 * on our TSB size code.
25997c478bd9Sstevel@tonic-gate	 */
26007c478bd9Sstevel@tonic-gate	ldda	[%g7 + %g1]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
26017c478bd9Sstevel@tonic-gate	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
26027c478bd9Sstevel@tonic-gate	cmp	%g6, %g4			! compare tag
26037c478bd9Sstevel@tonic-gate	bne,pn	%xcc, dktsb4m_kpmcheck_small
26047c478bd9Sstevel@tonic-gate	  add	%g7, %g1, %g1			/* form tsb ptr */
26057c478bd9Sstevel@tonic-gate	TT_TRACE(trace_tsbhit)
26067c478bd9Sstevel@tonic-gate	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
26077c478bd9Sstevel@tonic-gate	/* trapstat expects tte in %g5 */
26087c478bd9Sstevel@tonic-gate	retry
26097c478bd9Sstevel@tonic-gate
26107c478bd9Sstevel@tonic-gate	/*
26117c478bd9Sstevel@tonic-gate	 * If kpm is using large pages, the following instruction needs
26127c478bd9Sstevel@tonic-gate	 * to be patched to a nop at boot time (by sfmmu_kpm_patch_tsbm)
26137c478bd9Sstevel@tonic-gate	 * so that we will probe the 4M TSB regardless of the VA.  In
26147c478bd9Sstevel@tonic-gate	 * the case kpm is using small pages, we know no large kernel
26157c478bd9Sstevel@tonic-gate	 * mappings are located above 0x80000000.00000000 so we skip the
26167c478bd9Sstevel@tonic-gate	 * probe as an optimization.
26177c478bd9Sstevel@tonic-gate	 */
26187c478bd9Sstevel@tonic-gatedktsb4m_kpmcheck_small:
26197c478bd9Sstevel@tonic-gate	brlz,pn %g2, sfmmu_kpm_dtsb_miss_small
26207c478bd9Sstevel@tonic-gate	  /* delay slot safe, below */
26217c478bd9Sstevel@tonic-gate
26227c478bd9Sstevel@tonic-gate	/*
26237c478bd9Sstevel@tonic-gate	 * Get second TSB offset and look for 4M mapping
26247c478bd9Sstevel@tonic-gate	 * using 4M virtual page as the TSB index.
26257c478bd9Sstevel@tonic-gate	 *
26267c478bd9Sstevel@tonic-gate	 * Here:
26277c478bd9Sstevel@tonic-gate	 * %g1 = 8K TSB pointer.  Don't squash it.
26287c478bd9Sstevel@tonic-gate	 * %g2 = tag access register (we still need it)
26297c478bd9Sstevel@tonic-gate	 */
26307c478bd9Sstevel@tonic-gate	srlx	%g2, MMU_PAGESHIFT4M, %g3
26317c478bd9Sstevel@tonic-gate
26327c478bd9Sstevel@tonic-gate	/*
26337c478bd9Sstevel@tonic-gate	 * We patch the next set of instructions at run time;
26347c478bd9Sstevel@tonic-gate	 * any changes here require sfmmu_patch_ktsb changes too.
26357c478bd9Sstevel@tonic-gate	 */
26367c478bd9Sstevel@tonic-gatedktsb4mbase:
26377c478bd9Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g7, %g6)
26387c478bd9Sstevel@tonic-gate	/* %g7 = contents of ktsb4m_base or ktsb4m_pbase */
26397c478bd9Sstevel@tonic-gatedktsb4m:
26407c478bd9Sstevel@tonic-gate	sllx	%g3, 64-(TSB_START_SIZE + RUNTIME_PATCH), %g3
26417c478bd9Sstevel@tonic-gate	srlx	%g3, 64-(TSB_START_SIZE + TSB_ENTRY_SHIFT + RUNTIME_PATCH), %g3
26427c478bd9Sstevel@tonic-gate
26437c478bd9Sstevel@tonic-gate	/*
26447c478bd9Sstevel@tonic-gate	 * At this point %g3 is our index into the TSB.
26457c478bd9Sstevel@tonic-gate	 * We just masked off enough bits of the VA depending
26467c478bd9Sstevel@tonic-gate	 * on our TSB size code.
26477c478bd9Sstevel@tonic-gate	 */
26487c478bd9Sstevel@tonic-gate	ldda	[%g7 + %g3]RUNTIME_PATCH, %g4	! %g4 = tag, %g5 = data
26497c478bd9Sstevel@tonic-gate	srlx	%g2, TAG_VALO_SHIFT, %g6	! make tag to compare
26507c478bd9Sstevel@tonic-gate	cmp	%g6, %g4			! compare tag
26517c478bd9Sstevel@tonic-gate
26527c478bd9Sstevel@tonic-gatedktsb4m_tsbmiss:
26537c478bd9Sstevel@tonic-gate	bne,pn	%xcc, dktsb4m_kpmcheck
26547c478bd9Sstevel@tonic-gate	  add	%g7, %g3, %g3			! %g3 = kernel second TSB ptr
26557c478bd9Sstevel@tonic-gate	TT_TRACE(trace_tsbhit)
26567c478bd9Sstevel@tonic-gate	/* we don't check TTE size here since we assume 4M TSB is separate */
26577c478bd9Sstevel@tonic-gate	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
26587c478bd9Sstevel@tonic-gate	/* trapstat expects tte in %g5 */
26597c478bd9Sstevel@tonic-gate	retry
26607c478bd9Sstevel@tonic-gate
26617c478bd9Sstevel@tonic-gate	/*
26627c478bd9Sstevel@tonic-gate	 * So, we failed to find a valid TTE to match the faulting
26637c478bd9Sstevel@tonic-gate	 * address in either TSB.  There are a few cases that could land
26647c478bd9Sstevel@tonic-gate	 * us here:
26657c478bd9Sstevel@tonic-gate	 *
26667c478bd9Sstevel@tonic-gate	 * 1) This is a kernel VA below 0x80000000.00000000.  We branch
26677c478bd9Sstevel@tonic-gate	 *    to sfmmu_tsb_miss_tt to handle the miss.
26687c478bd9Sstevel@tonic-gate	 * 2) We missed on a kpm VA, and we didn't find the mapping in the
26697c478bd9Sstevel@tonic-gate	 *    4M TSB.  Let segkpm handle it.
26707c478bd9Sstevel@tonic-gate	 *
26717c478bd9Sstevel@tonic-gate	 * Note that we shouldn't land here in the case of a kpm VA when
26727c478bd9Sstevel@tonic-gate	 * kpm_smallpages is active -- we handled that case earlier at
26737c478bd9Sstevel@tonic-gate	 * dktsb4m_kpmcheck_small.
26747c478bd9Sstevel@tonic-gate	 *
26757c478bd9Sstevel@tonic-gate	 * At this point:
26767c478bd9Sstevel@tonic-gate	 *  g1 = 8K-indexed primary TSB pointer
26777c478bd9Sstevel@tonic-gate	 *  g2 = tag access register
26787c478bd9Sstevel@tonic-gate	 *  g3 = 4M-indexed secondary TSB pointer
26797c478bd9Sstevel@tonic-gate	 */
26807c478bd9Sstevel@tonic-gatedktsb4m_kpmcheck:
26817c478bd9Sstevel@tonic-gate	cmp	%g2, %g0
26827c478bd9Sstevel@tonic-gate	bl,pn	%xcc, sfmmu_kpm_dtsb_miss
26837c478bd9Sstevel@tonic-gate	  nop
26847c478bd9Sstevel@tonic-gate	ba,a,pt	%icc, sfmmu_tsb_miss_tt
26857c478bd9Sstevel@tonic-gate	  nop
26867c478bd9Sstevel@tonic-gate
26877c478bd9Sstevel@tonic-gate#ifdef sun4v
26887c478bd9Sstevel@tonic-gate	/*
26897c478bd9Sstevel@tonic-gate	 * User instruction miss w/ single TSB.
26907c478bd9Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
26917c478bd9Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off 8K
26927c478bd9Sstevel@tonic-gate	 * pointer.
26937c478bd9Sstevel@tonic-gate	 *
26947c478bd9Sstevel@tonic-gate	 * g1 = tsb8k pointer register
26957c478bd9Sstevel@tonic-gate	 * g2 = tag access register
26967c478bd9Sstevel@tonic-gate	 * g3 - g6 = scratch registers
26977c478bd9Sstevel@tonic-gate	 * g7 = TSB tag to match
26987c478bd9Sstevel@tonic-gate	 */
26997c478bd9Sstevel@tonic-gate	.align	64
27007c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_uitlb_fastpath)
27017c478bd9Sstevel@tonic-gate
27027c478bd9Sstevel@tonic-gate	PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
27037c478bd9Sstevel@tonic-gate	/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
27047c478bd9Sstevel@tonic-gate	ba,pn	%xcc, sfmmu_tsb_miss_tt
270505d3dc4bSpaulsan	  mov	-1, %g3
27067c478bd9Sstevel@tonic-gate
27077c478bd9Sstevel@tonic-gate	/*
27087c478bd9Sstevel@tonic-gate	 * User data miss w/ single TSB.
27097c478bd9Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
27107c478bd9Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off 8K
27117c478bd9Sstevel@tonic-gate	 * pointer.
27127c478bd9Sstevel@tonic-gate	 *
27137c478bd9Sstevel@tonic-gate	 * g1 = tsb8k pointer register
27147c478bd9Sstevel@tonic-gate	 * g2 = tag access register
27157c478bd9Sstevel@tonic-gate	 * g3 - g6 = scratch registers
27167c478bd9Sstevel@tonic-gate	 * g7 = TSB tag to match
27177c478bd9Sstevel@tonic-gate	 */
27187c478bd9Sstevel@tonic-gate	.align 64
27197c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_udtlb_fastpath)
27207c478bd9Sstevel@tonic-gate
27217c478bd9Sstevel@tonic-gate	PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
27227c478bd9Sstevel@tonic-gate	/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
27237c478bd9Sstevel@tonic-gate	ba,pn	%xcc, sfmmu_tsb_miss_tt
272405d3dc4bSpaulsan	  mov	-1, %g3
27257c478bd9Sstevel@tonic-gate
27267c478bd9Sstevel@tonic-gate	/*
272725cf1a30Sjl139090	 * User instruction miss w/ multiple TSBs (sun4v).
27287c478bd9Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
27297c478bd9Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off 8K
27307c478bd9Sstevel@tonic-gate	 * pointer.  Second probe covers 4M page size only.
27317c478bd9Sstevel@tonic-gate	 *
27327c478bd9Sstevel@tonic-gate	 * Just like sfmmu_udtlb_slowpath, except:
27337c478bd9Sstevel@tonic-gate	 *   o Uses ASI_ITLB_IN
27347c478bd9Sstevel@tonic-gate	 *   o checks for execute permission
27357c478bd9Sstevel@tonic-gate	 *   o No ISM prediction.
27367c478bd9Sstevel@tonic-gate	 *
27377c478bd9Sstevel@tonic-gate	 * g1 = tsb8k pointer register
27387c478bd9Sstevel@tonic-gate	 * g2 = tag access register
27397c478bd9Sstevel@tonic-gate	 * g3 - g6 = scratch registers
27407c478bd9Sstevel@tonic-gate	 * g7 = TSB tag to match
27417c478bd9Sstevel@tonic-gate	 */
27427c478bd9Sstevel@tonic-gate	.align	64
27437c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_uitlb_slowpath)
27447c478bd9Sstevel@tonic-gate
27457c478bd9Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
27467c478bd9Sstevel@tonic-gate	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
27477c478bd9Sstevel@tonic-gate	/* g4 - g5 = clobbered here */
27487c478bd9Sstevel@tonic-gate
27497c478bd9Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
27507c478bd9Sstevel@tonic-gate	/* g1 = first TSB pointer, g3 = second TSB pointer */
27517c478bd9Sstevel@tonic-gate	srlx	%g2, TAG_VALO_SHIFT, %g7
27527c478bd9Sstevel@tonic-gate	PROBE_2ND_ITSB(%g3, %g7)
27537c478bd9Sstevel@tonic-gate	/* NOT REACHED */
27547c478bd9Sstevel@tonic-gate
275525cf1a30Sjl139090#else /* sun4v */
275625cf1a30Sjl139090
275725cf1a30Sjl139090	/*
275825cf1a30Sjl139090	 * User instruction miss w/ multiple TSBs (sun4u).
275925cf1a30Sjl139090	 * The first probe covers 8K, 64K, and 512K page sizes,
276025cf1a30Sjl139090	 * because 64K and 512K mappings are replicated off 8K
27611426d65aSsm142603	 * pointer.  Probe of 1st TSB has already been done prior to entry
27621426d65aSsm142603	 * into this routine. For the UTSB_PHYS case we probe up to 3
27631426d65aSsm142603	 * valid other TSBs in the following order:
27641426d65aSsm142603	 * 1) shared TSB for 4M-256M pages
27651426d65aSsm142603	 * 2) private TSB for 4M-256M pages
27661426d65aSsm142603	 * 3) shared TSB for 8K-512K pages
27671426d65aSsm142603	 *
27681426d65aSsm142603	 * For the non UTSB_PHYS case we probe the 2nd TSB here that backs
27691426d65aSsm142603	 * 4M-256M pages.
277025cf1a30Sjl139090	 *
277125cf1a30Sjl139090	 * Just like sfmmu_udtlb_slowpath, except:
277225cf1a30Sjl139090	 *   o Uses ASI_ITLB_IN
277325cf1a30Sjl139090	 *   o checks for execute permission
277425cf1a30Sjl139090	 *   o No ISM prediction.
277525cf1a30Sjl139090	 *
277625cf1a30Sjl139090	 * g1 = tsb8k pointer register
277725cf1a30Sjl139090	 * g2 = tag access register
277825cf1a30Sjl139090	 * g4 - g6 = scratch registers
277925cf1a30Sjl139090	 * g7 = TSB tag to match
278025cf1a30Sjl139090	 */
278125cf1a30Sjl139090	.align	64
278225cf1a30Sjl139090	ALTENTRY(sfmmu_uitlb_slowpath)
278325cf1a30Sjl139090
278425cf1a30Sjl139090#ifdef UTSB_PHYS
278525cf1a30Sjl139090
27861426d65aSsm142603       GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
27871426d65aSsm142603        brlz,pt %g6, 1f
27881426d65aSsm142603          nop
27891426d65aSsm142603        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
27901426d65aSsm142603        PROBE_4TH_ITSB(%g6, %g7, uitlb_4m_scd_probefail)
27911426d65aSsm1426031:
27921426d65aSsm142603        GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
27931426d65aSsm142603        brlz,pt %g3, 2f
27941426d65aSsm142603          nop
279525cf1a30Sjl139090        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
27961426d65aSsm142603        PROBE_2ND_ITSB(%g3, %g7, uitlb_4m_probefail)
27971426d65aSsm1426032:
27981426d65aSsm142603        GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
27991426d65aSsm142603        brlz,pt %g6, sfmmu_tsb_miss_tt
28001426d65aSsm142603          nop
28011426d65aSsm142603        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
28021426d65aSsm142603        PROBE_3RD_ITSB(%g6, %g7, uitlb_8K_scd_probefail)
28031426d65aSsm142603        ba,pn   %xcc, sfmmu_tsb_miss_tt
28041426d65aSsm142603          nop
28051426d65aSsm142603
280625cf1a30Sjl139090#else /* UTSB_PHYS */
280725cf1a30Sjl139090	mov	%g1, %g3	/* save tsb8k reg in %g3 */
280825cf1a30Sjl139090	GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
280925cf1a30Sjl139090	PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
28107c478bd9Sstevel@tonic-gate	mov	%g2, %g6	/* GET_2ND_TSBE_PTR clobbers tagacc */
28117c478bd9Sstevel@tonic-gate	mov	%g3, %g7	/* copy tsb8k reg in %g7 */
28127c478bd9Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
28137c478bd9Sstevel@tonic-gate       /* g1 = first TSB pointer, g3 = second TSB pointer */
28147c478bd9Sstevel@tonic-gate        srlx    %g2, TAG_VALO_SHIFT, %g7
28151bd453f3Ssusans        PROBE_2ND_ITSB(%g3, %g7, isynth)
28161426d65aSsm142603	ba,pn	%xcc, sfmmu_tsb_miss_tt
28171426d65aSsm142603	  nop
28181426d65aSsm142603
28191426d65aSsm142603#endif /* UTSB_PHYS */
28207c478bd9Sstevel@tonic-gate#endif /* sun4v */
28217c478bd9Sstevel@tonic-gate
28221426d65aSsm142603#if defined(sun4u) && defined(UTSB_PHYS)
28231426d65aSsm142603
28247c478bd9Sstevel@tonic-gate        /*
28251426d65aSsm142603	 * We come here for ism predict DTLB_MISS case or if
28261426d65aSsm142603	 * if probe in first TSB failed.
28271426d65aSsm142603         */
28281426d65aSsm142603
28291426d65aSsm142603        .align 64
28301426d65aSsm142603        ALTENTRY(sfmmu_udtlb_slowpath_noismpred)
28311426d65aSsm142603
28321426d65aSsm142603	/*
28337c478bd9Sstevel@tonic-gate         * g1 = tsb8k pointer register
28347c478bd9Sstevel@tonic-gate         * g2 = tag access register
28351426d65aSsm142603         * g4 - %g6 = scratch registers
28361426d65aSsm142603         * g7 = TSB tag to match
28371426d65aSsm142603	 */
28381426d65aSsm142603
28391426d65aSsm142603	/*
28401426d65aSsm142603	 * ISM non-predict probe order
28411426d65aSsm142603         * probe 1ST_TSB (8K index)
28421426d65aSsm142603         * probe 2ND_TSB (4M index)
28431426d65aSsm142603         * probe 4TH_TSB (4M index)
28441426d65aSsm142603         * probe 3RD_TSB (8K index)
28451426d65aSsm142603	 *
28461426d65aSsm142603	 * We already probed first TSB in DTLB_MISS handler.
28471426d65aSsm142603	 */
28481426d65aSsm142603
28491426d65aSsm142603        /*
28501426d65aSsm142603         * Private 2ND TSB 4M-256 pages
28511426d65aSsm142603         */
28521426d65aSsm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
28531426d65aSsm142603	brlz,pt %g3, 1f
28541426d65aSsm142603	  nop
28551426d65aSsm142603        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
28561426d65aSsm142603        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
28571426d65aSsm142603
28581426d65aSsm142603	/*
28591426d65aSsm142603	 * Shared Context 4TH TSB 4M-256 pages
28601426d65aSsm142603	 */
28611426d65aSsm1426031:
28621426d65aSsm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
28631426d65aSsm142603	brlz,pt %g6, 2f
28641426d65aSsm142603	  nop
28651426d65aSsm142603        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
28661426d65aSsm142603        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail)
28671426d65aSsm142603
28681426d65aSsm142603        /*
28691426d65aSsm142603         * Shared Context 3RD TSB 8K-512K pages
28701426d65aSsm142603         */
28711426d65aSsm1426032:
28721426d65aSsm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
28731426d65aSsm142603	brlz,pt %g6, sfmmu_tsb_miss_tt
28741426d65aSsm142603	  nop
28751426d65aSsm142603        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
28761426d65aSsm142603        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail)
28771426d65aSsm142603	ba,pn	%xcc, sfmmu_tsb_miss_tt
28781426d65aSsm142603	  nop
28791426d65aSsm142603
28801426d65aSsm142603	.align 64
28811426d65aSsm142603        ALTENTRY(sfmmu_udtlb_slowpath_ismpred)
28821426d65aSsm142603
28831426d65aSsm142603	/*
28841426d65aSsm142603         * g1 = tsb8k pointer register
28851426d65aSsm142603         * g2 = tag access register
288625cf1a30Sjl139090         * g4 - g6 = scratch registers
28877c478bd9Sstevel@tonic-gate         * g7 = TSB tag to match
28887c478bd9Sstevel@tonic-gate	 */
28891426d65aSsm142603
28901426d65aSsm142603	/*
28911426d65aSsm142603	 * ISM predict probe order
28921426d65aSsm142603	 * probe 4TH_TSB (4M index)
28931426d65aSsm142603	 * probe 2ND_TSB (4M index)
28941426d65aSsm142603	 * probe 1ST_TSB (8K index)
28951426d65aSsm142603	 * probe 3RD_TSB (8K index)
28961426d65aSsm142603
28971426d65aSsm142603	/*
28981426d65aSsm142603	 * Shared Context 4TH TSB 4M-256 pages
28991426d65aSsm142603	 */
29001426d65aSsm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG4, %g6)
29011426d65aSsm142603	brlz,pt %g6, 4f
29021426d65aSsm142603	  nop
29031426d65aSsm142603        GET_4TH_TSBE_PTR(%g2, %g6, %g4, %g5)
29041426d65aSsm142603        PROBE_4TH_DTSB(%g6, %g7, udtlb_4m_shctx_probefail2)
29051426d65aSsm142603
29061426d65aSsm142603        /*
29071426d65aSsm142603         * Private 2ND TSB 4M-256 pages
29081426d65aSsm142603         */
29091426d65aSsm1426034:
29101426d65aSsm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)
29111426d65aSsm142603	brlz,pt %g3, 5f
29121426d65aSsm142603	  nop
29131426d65aSsm142603        GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
29141426d65aSsm142603        PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail2)
29151426d65aSsm142603
29161426d65aSsm1426035:
29171426d65aSsm142603        PROBE_1ST_DTSB(%g1, %g7, udtlb_8k_first_probefail2)
29181426d65aSsm142603
29191426d65aSsm142603        /*
29201426d65aSsm142603         * Shared Context 3RD TSB 8K-512K pages
29211426d65aSsm142603         */
29221426d65aSsm142603	GET_UTSBREG(SCRATCHPAD_UTSBREG3, %g6)
29231426d65aSsm142603	brlz,pt %g6, 6f
29241426d65aSsm142603	  nop
29251426d65aSsm142603        GET_3RD_TSBE_PTR(%g2, %g6, %g4, %g5)
29261426d65aSsm142603        PROBE_3RD_DTSB(%g6, %g7, udtlb_8k_shctx_probefail2)
29271426d65aSsm1426036:
29281426d65aSsm142603	ba,pn	%xcc, sfmmu_tsb_miss_tt /* ISM Predict and ISM non-predict path */
29291426d65aSsm142603	  nop
29301426d65aSsm142603
29311426d65aSsm142603#else /* sun4u && UTSB_PHYS */
29321426d65aSsm142603
29337c478bd9Sstevel@tonic-gate       .align 64
29347c478bd9Sstevel@tonic-gate        ALTENTRY(sfmmu_udtlb_slowpath)
29357c478bd9Sstevel@tonic-gate
	/*
	 * User DTLB miss slow path (TSBs accessed by VA).  Probe order is
	 * chosen by an ISM-prediction heuristic on the tag-access vaddr:
	 * predicted-ISM misses probe the second (4M-indexed) TSB first,
	 * all others probe the first (8K-indexed) TSB first.
	 */
29367c478bd9Sstevel@tonic-gate	srax	%g2, PREDISM_BASESHIFT, %g6	/* g6 > 0 : ISM predicted */
29377c478bd9Sstevel@tonic-gate	brgz,pn %g6, udtlb_miss_probesecond	/* check for ISM */
29387c478bd9Sstevel@tonic-gate	  mov	%g1, %g3
29397c478bd9Sstevel@tonic-gate
29407c478bd9Sstevel@tonic-gateudtlb_miss_probefirst:
29417c478bd9Sstevel@tonic-gate	/*
29427c478bd9Sstevel@tonic-gate	 * g1 = 8K TSB pointer register
29437c478bd9Sstevel@tonic-gate	 * g2 = tag access register
29447c478bd9Sstevel@tonic-gate	 * g3 = (potentially) second TSB entry ptr
29457c478bd9Sstevel@tonic-gate	 * g6 = ism pred.
29467c478bd9Sstevel@tonic-gate	 * g7 = vpg_4m
29477c478bd9Sstevel@tonic-gate	 */
29487c478bd9Sstevel@tonic-gate#ifdef sun4v
29497c478bd9Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
29507c478bd9Sstevel@tonic-gate	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
29517c478bd9Sstevel@tonic-gate
29527c478bd9Sstevel@tonic-gate	/*
29537c478bd9Sstevel@tonic-gate	 * Here:
29547c478bd9Sstevel@tonic-gate	 *   g1 = first TSB pointer
29557c478bd9Sstevel@tonic-gate	 *   g2 = tag access reg
29567c478bd9Sstevel@tonic-gate	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
29577c478bd9Sstevel@tonic-gate	 */
29587c478bd9Sstevel@tonic-gate	brgz,pn	%g6, sfmmu_tsb_miss_tt
29597c478bd9Sstevel@tonic-gate	  nop
29607c478bd9Sstevel@tonic-gate#else /* sun4v */
29617c478bd9Sstevel@tonic-gate	mov	%g1, %g4
29627c478bd9Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
29637c478bd9Sstevel@tonic-gate	PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
29647c478bd9Sstevel@tonic-gate
29657c478bd9Sstevel@tonic-gate	/*
29667c478bd9Sstevel@tonic-gate	 * Here:
29677c478bd9Sstevel@tonic-gate	 *   g1 = first TSB pointer
29687c478bd9Sstevel@tonic-gate	 *   g2 = tag access reg
29697c478bd9Sstevel@tonic-gate	 *   g3 = second TSB ptr IFF ISM pred. (else don't care)
29707c478bd9Sstevel@tonic-gate	 */
29717c478bd9Sstevel@tonic-gate	brgz,pn	%g6, sfmmu_tsb_miss_tt
29727c478bd9Sstevel@tonic-gate	  nop
29737c478bd9Sstevel@tonic-gate	ldxa	[%g0]ASI_DMMU_TSB_8K, %g3
29747c478bd9Sstevel@tonic-gate	/* fall through in 8K->4M probe order */
29757c478bd9Sstevel@tonic-gate#endif /* sun4v */
29767c478bd9Sstevel@tonic-gate
29777c478bd9Sstevel@tonic-gateudtlb_miss_probesecond:
29787c478bd9Sstevel@tonic-gate	/*
29797c478bd9Sstevel@tonic-gate	 * Look in the second TSB for the TTE
29807c478bd9Sstevel@tonic-gate	 * g1 = First TSB entry ptr if !ISM pred, TSB8K ptr reg if ISM pred.
29817c478bd9Sstevel@tonic-gate	 * g2 = tag access reg
29827c478bd9Sstevel@tonic-gate	 * g3 = 8K TSB pointer register
29837c478bd9Sstevel@tonic-gate	 * g6 = ism pred.
29847c478bd9Sstevel@tonic-gate	 * g7 = vpg_4m
29857c478bd9Sstevel@tonic-gate	 */
29867c478bd9Sstevel@tonic-gate#ifdef sun4v
29877c478bd9Sstevel@tonic-gate	/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
29887c478bd9Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
29897c478bd9Sstevel@tonic-gate	/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
299025cf1a30Sjl139090#else /* sun4v */
29917c478bd9Sstevel@tonic-gate	mov	%g3, %g7
29927c478bd9Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
29937c478bd9Sstevel@tonic-gate	/* %g2 clobbered, %g3 =second tsbe ptr */
29947c478bd9Sstevel@tonic-gate	mov	MMU_TAG_ACCESS, %g2
29957c478bd9Sstevel@tonic-gate	ldxa	[%g2]ASI_DMMU, %g2	/* reload tag access (%g2 was clobbered) */
299625cf1a30Sjl139090#endif /* sun4v */
29977c478bd9Sstevel@tonic-gate
29987c478bd9Sstevel@tonic-gate	srlx	%g2, TAG_VALO_SHIFT, %g7
29997c478bd9Sstevel@tonic-gate	PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
30007c478bd9Sstevel@tonic-gate	/* g4 - g5 = clobbered here; %g7 still vpg_4m at this point */
30017c478bd9Sstevel@tonic-gate	brgz,pn	%g6, udtlb_miss_probefirst
30027c478bd9Sstevel@tonic-gate	  nop
30037c478bd9Sstevel@tonic-gate
30047c478bd9Sstevel@tonic-gate	/* fall through to sfmmu_tsb_miss_tt */
30051426d65aSsm142603#endif /* sun4u && UTSB_PHYS */
30061426d65aSsm142603
30077c478bd9Sstevel@tonic-gate
30087c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_tsb_miss_tt)
30097c478bd9Sstevel@tonic-gate	TT_TRACE(trace_tsbmiss)
30107c478bd9Sstevel@tonic-gate	/*
30117c478bd9Sstevel@tonic-gate	 * We get here if there is a TSB miss OR a write protect trap.
30127c478bd9Sstevel@tonic-gate	 *
30137c478bd9Sstevel@tonic-gate	 * g1 = First TSB entry pointer
30147c478bd9Sstevel@tonic-gate	 * g2 = tag access register
301505d3dc4bSpaulsan	 * g3 = 4M TSB entry pointer; -1 if no 2nd TSB
30167c478bd9Sstevel@tonic-gate	 * g4 - g7 = scratch registers
30177c478bd9Sstevel@tonic-gate	 */
30187c478bd9Sstevel@tonic-gate
30197c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_tsb_miss)
30207c478bd9Sstevel@tonic-gate
30217c478bd9Sstevel@tonic-gate	/*
30227c478bd9Sstevel@tonic-gate	 * If trapstat is running, we need to shift the %tpc and %tnpc to
30237c478bd9Sstevel@tonic-gate	 * point to trapstat's TSB miss return code (note that trapstat
30247c478bd9Sstevel@tonic-gate	 * itself will patch the correct offset to add).
30257c478bd9Sstevel@tonic-gate	 */
	/* only when trapped from TL>1 with %tpc below KERNELBASE */
30267c478bd9Sstevel@tonic-gate	rdpr	%tl, %g7
30277c478bd9Sstevel@tonic-gate	cmp	%g7, 1
30287c478bd9Sstevel@tonic-gate	ble,pt	%xcc, 0f
30297c478bd9Sstevel@tonic-gate	  sethi	%hi(KERNELBASE), %g6
30307c478bd9Sstevel@tonic-gate	rdpr	%tpc, %g7
30317c478bd9Sstevel@tonic-gate	or	%g6, %lo(KERNELBASE), %g6
30327c478bd9Sstevel@tonic-gate	cmp	%g7, %g6
30337c478bd9Sstevel@tonic-gate	bgeu,pt	%xcc, 0f
30347c478bd9Sstevel@tonic-gate	/* delay slot safe */
30357c478bd9Sstevel@tonic-gate
30367c478bd9Sstevel@tonic-gate	ALTENTRY(tsbmiss_trapstat_patch_point)
30377c478bd9Sstevel@tonic-gate	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
30387c478bd9Sstevel@tonic-gate	wrpr	%g7, %tpc
30397c478bd9Sstevel@tonic-gate	add	%g7, 4, %g7
30407c478bd9Sstevel@tonic-gate	wrpr	%g7, %tnpc
30417c478bd9Sstevel@tonic-gate0:
30427c478bd9Sstevel@tonic-gate	CPU_TSBMISS_AREA(%g6, %g7)	/* %g6 = this CPU's tsbmiss area */
30431426d65aSsm142603	stn	%g1, [%g6 + TSBMISS_TSBPTR]	/* save 1ST tsb pointer */
30441426d65aSsm142603	stn	%g3, [%g6 + TSBMISS_TSBPTR4M]	/* save 2ND tsb pointer */
30457c478bd9Sstevel@tonic-gate
30467c478bd9Sstevel@tonic-gate	sllx	%g2, TAGACC_CTX_LSHIFT, %g3
30477c478bd9Sstevel@tonic-gate	brz,a,pn %g3, 1f			/* skip ahead if kernel */
30487c478bd9Sstevel@tonic-gate	  ldn	[%g6 + TSBMISS_KHATID], %g7
30497c478bd9Sstevel@tonic-gate	srlx	%g3, TAGACC_CTX_LSHIFT, %g3	/* g3 = ctxnum */
30507c478bd9Sstevel@tonic-gate	ldn	[%g6 + TSBMISS_UHATID], %g7     /* g7 = hatid */
30517c478bd9Sstevel@tonic-gate
30527c478bd9Sstevel@tonic-gate	HAT_PERCPU_STAT32(%g6, TSBMISS_UTSBMISS, %g5)
30537c478bd9Sstevel@tonic-gate
30547c478bd9Sstevel@tonic-gate	cmp	%g3, INVALID_CONTEXT
30557c478bd9Sstevel@tonic-gate	be,pn	%icc, tsb_tl0_noctxt		/* no ctx miss exception */
30567c478bd9Sstevel@tonic-gate	  stn	%g7, [%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)]
30577c478bd9Sstevel@tonic-gate
30581426d65aSsm142603#if defined(sun4v) || defined(UTSB_PHYS)
305905d3dc4bSpaulsan        ldub    [%g6 + TSBMISS_URTTEFLAGS], %g7	/* clear ctx1 flag set from */
306005d3dc4bSpaulsan        andn    %g7, HAT_CHKCTX1_FLAG, %g7	/* the previous tsb miss    */
306105d3dc4bSpaulsan        stub    %g7, [%g6 + TSBMISS_URTTEFLAGS]
30621426d65aSsm142603#endif /* sun4v || UTSB_PHYS */
306305d3dc4bSpaulsan
	/* branches to tsb_ism if the vaddr lies within an ISM segment */
30647c478bd9Sstevel@tonic-gate	ISM_CHECK(%g2, %g6, %g3, %g4, %g5, %g7, %g1, tsb_l1, tsb_ism)
30657c478bd9Sstevel@tonic-gate	/*
30667c478bd9Sstevel@tonic-gate	 * The miss wasn't in an ISM segment.
30677c478bd9Sstevel@tonic-gate	 *
30687c478bd9Sstevel@tonic-gate	 * %g1 %g3, %g4, %g5, %g7 all clobbered
306960972f37Sjb145095	 * %g2 = (pseudo) tag access
30707c478bd9Sstevel@tonic-gate	 */
30717c478bd9Sstevel@tonic-gate
30727c478bd9Sstevel@tonic-gate	ba,pt	%icc, 2f
30737c478bd9Sstevel@tonic-gate	  ldn	[%g6 + (TSBMISS_SCRATCH + TSBMISS_HATID)], %g7
30747c478bd9Sstevel@tonic-gate
30757c478bd9Sstevel@tonic-gate1:
30767c478bd9Sstevel@tonic-gate	HAT_PERCPU_STAT32(%g6, TSBMISS_KTSBMISS, %g5)
30777c478bd9Sstevel@tonic-gate	/*
30787c478bd9Sstevel@tonic-gate	 * 8K and 64K hash.
30797c478bd9Sstevel@tonic-gate	 */
30807c478bd9Sstevel@tonic-gate2:
30817c478bd9Sstevel@tonic-gate
	/* on a valid tte, continues at tsb_checktte; otherwise tsb_512K */
30820a90a7fdSAmritpal Sandhu	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
30830a90a7fdSAmritpal Sandhu		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_l8K, tsb_checktte,
30847c478bd9Sstevel@tonic-gate		sfmmu_suspend_tl, tsb_512K)
30857c478bd9Sstevel@tonic-gate	/* NOT REACHED */
30867c478bd9Sstevel@tonic-gate
30877c478bd9Sstevel@tonic-gatetsb_512K:
	/*
	 * No match in the 8K/64K hash: walk the larger page-size hash
	 * lists in turn.  The per-sfmmu HAT_*_FLAG bits let us skip page
	 * sizes not in use; kernel misses (ctx bits == 0) always search.
	 */
308805d3dc4bSpaulsan	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
30897c478bd9Sstevel@tonic-gate	brz,pn	%g5, 3f
309005d3dc4bSpaulsan	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
30907c478bd9Sstevel@tonic-gate	and	%g4, HAT_512K_FLAG, %g5
30917c478bd9Sstevel@tonic-gate
30927c478bd9Sstevel@tonic-gate	/*
30937c478bd9Sstevel@tonic-gate	 * Note that there is a small window here where we may have
30947c478bd9Sstevel@tonic-gate	 * a 512k page in the hash list but have not set the HAT_512K_FLAG
30957c478bd9Sstevel@tonic-gate	 * flag yet, so we will skip searching the 512k hash list.
30967c478bd9Sstevel@tonic-gate	 * In this case we will end up in pagefault which will find
30977c478bd9Sstevel@tonic-gate	 * the mapping and return.  So, in this instance we will end up
30987c478bd9Sstevel@tonic-gate	 * spending a bit more time resolving this TSB miss, but it can
30997c478bd9Sstevel@tonic-gate	 * only happen once per process and even then, the chances of that
31007c478bd9Sstevel@tonic-gate	 * are very small, so it's not worth the extra overhead it would
31017c478bd9Sstevel@tonic-gate	 * take to close this window.
31027c478bd9Sstevel@tonic-gate	 */
31037c478bd9Sstevel@tonic-gate	brz,pn	%g5, tsb_4M
31047c478bd9Sstevel@tonic-gate	  nop
31057c478bd9Sstevel@tonic-gate3:
31067c478bd9Sstevel@tonic-gate	/*
31077c478bd9Sstevel@tonic-gate	 * 512K hash
31087c478bd9Sstevel@tonic-gate	 */
31097c478bd9Sstevel@tonic-gate
31100a90a7fdSAmritpal Sandhu	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
31110a90a7fdSAmritpal Sandhu		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_l512K, tsb_checktte,
31127c478bd9Sstevel@tonic-gate		sfmmu_suspend_tl, tsb_4M)
31137c478bd9Sstevel@tonic-gate	/* NOT REACHED */
31147c478bd9Sstevel@tonic-gate
31157c478bd9Sstevel@tonic-gatetsb_4M:
311605d3dc4bSpaulsan	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
31177c478bd9Sstevel@tonic-gate	brz,pn	%g5, 4f
311805d3dc4bSpaulsan	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
31197c478bd9Sstevel@tonic-gate	and	%g4, HAT_4M_FLAG, %g5
31207c478bd9Sstevel@tonic-gate	brz,pn	%g5, tsb_32M
31217c478bd9Sstevel@tonic-gate	  nop
31227c478bd9Sstevel@tonic-gate4:
31237c478bd9Sstevel@tonic-gate	/*
31247c478bd9Sstevel@tonic-gate	 * 4M hash
31257c478bd9Sstevel@tonic-gate	 */
31267c478bd9Sstevel@tonic-gate
31270a90a7fdSAmritpal Sandhu	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
31280a90a7fdSAmritpal Sandhu		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_l4M, tsb_checktte,
31297c478bd9Sstevel@tonic-gate		sfmmu_suspend_tl, tsb_32M)
31307c478bd9Sstevel@tonic-gate	/* NOT REACHED */
31317c478bd9Sstevel@tonic-gate
31327c478bd9Sstevel@tonic-gatetsb_32M:
	/* kernel (ctx == 0): no 32M/256M kernel pages on sun4u => pagefault */
313305d3dc4bSpaulsan	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
31347c478bd9Sstevel@tonic-gate#ifdef	sun4v
31357c478bd9Sstevel@tonic-gate        brz,pn	%g5, 6f
31367c478bd9Sstevel@tonic-gate#else
31377c478bd9Sstevel@tonic-gate	brz,pn  %g5, tsb_pagefault
31387c478bd9Sstevel@tonic-gate#endif
313905d3dc4bSpaulsan	  ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
31407c478bd9Sstevel@tonic-gate	and	%g4, HAT_32M_FLAG, %g5
31417c478bd9Sstevel@tonic-gate	brz,pn	%g5, tsb_256M
31427c478bd9Sstevel@tonic-gate	  nop
31437c478bd9Sstevel@tonic-gate5:
31447c478bd9Sstevel@tonic-gate	/*
31457c478bd9Sstevel@tonic-gate	 * 32M hash
31467c478bd9Sstevel@tonic-gate	 */
31477c478bd9Sstevel@tonic-gate
31480a90a7fdSAmritpal Sandhu	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
31490a90a7fdSAmritpal Sandhu		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_l32M, tsb_checktte,
31507c478bd9Sstevel@tonic-gate		sfmmu_suspend_tl, tsb_256M)
31517c478bd9Sstevel@tonic-gate	/* NOT REACHED */
31527c478bd9Sstevel@tonic-gate
31531426d65aSsm142603#if defined(sun4u) && !defined(UTSB_PHYS)
	/* no shared-context support here: shme probes fall through to pagefault */
315405d3dc4bSpaulsan#define tsb_shme        tsb_pagefault
315505d3dc4bSpaulsan#endif
31567c478bd9Sstevel@tonic-gatetsb_256M:
315705d3dc4bSpaulsan	ldub	[%g6 + TSBMISS_UTTEFLAGS], %g4
31587c478bd9Sstevel@tonic-gate	and	%g4, HAT_256M_FLAG, %g5
315905d3dc4bSpaulsan	brz,pn	%g5, tsb_shme
31607c478bd9Sstevel@tonic-gate	  nop
31617c478bd9Sstevel@tonic-gate6:
31627c478bd9Sstevel@tonic-gate	/*
31637c478bd9Sstevel@tonic-gate	 * 256M hash
31647c478bd9Sstevel@tonic-gate	 */
31657c478bd9Sstevel@tonic-gate
31660a90a7fdSAmritpal Sandhu	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
31670a90a7fdSAmritpal Sandhu	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_l256M, tsb_checktte,
316805d3dc4bSpaulsan	    sfmmu_suspend_tl, tsb_shme)
31697c478bd9Sstevel@tonic-gate	/* NOT REACHED */
31717c478bd9Sstevel@tonic-gate
31727c478bd9Sstevel@tonic-gatetsb_checktte:
31737c478bd9Sstevel@tonic-gate	/*
317405d3dc4bSpaulsan	 * g1 = hblk_misc
317505d3dc4bSpaulsan	 * g2 = tagacc
317605d3dc4bSpaulsan	 * g3 = tte
317705d3dc4bSpaulsan	 * g4 = tte pa
317805d3dc4bSpaulsan	 * g6 = tsbmiss area
317905d3dc4bSpaulsan	 * g7 = hatid
318005d3dc4bSpaulsan	 */
	/* a negative tte is valid (cf. the brgez test in tsb_shme_checktte) */
318105d3dc4bSpaulsan	brlz,a,pt %g3, tsb_validtte
318205d3dc4bSpaulsan	  rdpr	%tt, %g7
318305d3dc4bSpaulsan
31841426d65aSsm142603#if defined(sun4u) && !defined(UTSB_PHYS)
318505d3dc4bSpaulsan#undef tsb_shme
	/* no shared-context support: an invalid tte means pagefault */
318605d3dc4bSpaulsan	ba      tsb_pagefault
318705d3dc4bSpaulsan	  nop
31881426d65aSsm142603#else /* sun4u && !UTSB_PHYS */
318905d3dc4bSpaulsan
319005d3dc4bSpaulsantsb_shme:
319105d3dc4bSpaulsan	/*
319205d3dc4bSpaulsan	 * g2 = tagacc
319305d3dc4bSpaulsan	 * g6 = tsbmiss area
319405d3dc4bSpaulsan	 */
	/* kernel misses (ctx == 0) and hats with no srdp can't be shme */
319505d3dc4bSpaulsan	sllx	%g2, TAGACC_CTX_LSHIFT, %g5
319605d3dc4bSpaulsan	brz,pn	%g5, tsb_pagefault
319705d3dc4bSpaulsan	  nop
319805d3dc4bSpaulsan	ldx	[%g6 + TSBMISS_SHARED_UHATID], %g7	/* g7 = srdp */
319905d3dc4bSpaulsan	brz,pn	%g7, tsb_pagefault
320005d3dc4bSpaulsan	  nop
320105d3dc4bSpaulsan
32020a90a7fdSAmritpal Sandhu	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32030a90a7fdSAmritpal Sandhu		MMU_PAGESHIFT64K, TTE64K, %g5, tsb_shme_l8K, tsb_shme_checktte,
320405d3dc4bSpaulsan		sfmmu_suspend_tl, tsb_shme_512K)
320505d3dc4bSpaulsan	/* NOT REACHED */
320605d3dc4bSpaulsan
320705d3dc4bSpaulsantsb_shme_512K:
320805d3dc4bSpaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
320905d3dc4bSpaulsan	and	%g4, HAT_512K_FLAG, %g5
321005d3dc4bSpaulsan	brz,pn	%g5, tsb_shme_4M
321105d3dc4bSpaulsan	  nop
321205d3dc4bSpaulsan
321305d3dc4bSpaulsan	/*
321405d3dc4bSpaulsan	 * 512K hash
321505d3dc4bSpaulsan	 */
321605d3dc4bSpaulsan
32170a90a7fdSAmritpal Sandhu	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32180a90a7fdSAmritpal Sandhu		MMU_PAGESHIFT512K, TTE512K, %g5, tsb_shme_l512K, tsb_shme_checktte,
321905d3dc4bSpaulsan		sfmmu_suspend_tl, tsb_shme_4M)
322005d3dc4bSpaulsan	/* NOT REACHED */
322105d3dc4bSpaulsan
322205d3dc4bSpaulsantsb_shme_4M:
322305d3dc4bSpaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
322405d3dc4bSpaulsan	and	%g4, HAT_4M_FLAG, %g5
322505d3dc4bSpaulsan	brz,pn	%g5, tsb_shme_32M
322605d3dc4bSpaulsan	  nop
322705d3dc4bSpaulsan4:
322805d3dc4bSpaulsan	/*
322905d3dc4bSpaulsan	 * 4M hash
323005d3dc4bSpaulsan	 */
32310a90a7fdSAmritpal Sandhu	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32320a90a7fdSAmritpal Sandhu		MMU_PAGESHIFT4M, TTE4M, %g5, tsb_shme_l4M, tsb_shme_checktte,
323305d3dc4bSpaulsan		sfmmu_suspend_tl, tsb_shme_32M)
323405d3dc4bSpaulsan	/* NOT REACHED */
323505d3dc4bSpaulsan
323605d3dc4bSpaulsantsb_shme_32M:
323705d3dc4bSpaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
323805d3dc4bSpaulsan	and	%g4, HAT_32M_FLAG, %g5
323905d3dc4bSpaulsan	brz,pn	%g5, tsb_shme_256M
324005d3dc4bSpaulsan	  nop
324105d3dc4bSpaulsan
324205d3dc4bSpaulsan	/*
324305d3dc4bSpaulsan	 * 32M hash
324405d3dc4bSpaulsan	 */
324505d3dc4bSpaulsan
32460a90a7fdSAmritpal Sandhu	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32470a90a7fdSAmritpal Sandhu		MMU_PAGESHIFT32M, TTE32M, %g5, tsb_shme_l32M, tsb_shme_checktte,
324805d3dc4bSpaulsan		sfmmu_suspend_tl, tsb_shme_256M)
324905d3dc4bSpaulsan	/* NOT REACHED */
325005d3dc4bSpaulsan
325105d3dc4bSpaulsantsb_shme_256M:
325205d3dc4bSpaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g4
325305d3dc4bSpaulsan	and	%g4, HAT_256M_FLAG, %g5
325405d3dc4bSpaulsan	brz,pn	%g5, tsb_pagefault
325505d3dc4bSpaulsan	  nop
325605d3dc4bSpaulsan
325705d3dc4bSpaulsan	/*
325805d3dc4bSpaulsan	 * 256M hash
325905d3dc4bSpaulsan	 */
326005d3dc4bSpaulsan
32610a90a7fdSAmritpal Sandhu	GET_SHME_TTE(%g2, %g7, %g3, %g4, %g6, %g1,
32620a90a7fdSAmritpal Sandhu	    MMU_PAGESHIFT256M, TTE256M, %g5, tsb_shme_l256M, tsb_shme_checktte,
326305d3dc4bSpaulsan	    sfmmu_suspend_tl, tsb_pagefault)
326405d3dc4bSpaulsan	/* NOT REACHED */
326505d3dc4bSpaulsan
326605d3dc4bSpaulsantsb_shme_checktte:
326705d3dc4bSpaulsan
326805d3dc4bSpaulsan	brgez,pn %g3, tsb_pagefault
326905d3dc4bSpaulsan	  rdpr	%tt, %g7
327005d3dc4bSpaulsan	/*
327105d3dc4bSpaulsan	 * g1 = ctx1 flag
327205d3dc4bSpaulsan	 * g3 = tte
327305d3dc4bSpaulsan	 * g4 = tte pa
327405d3dc4bSpaulsan	 * g6 = tsbmiss area
327505d3dc4bSpaulsan	 * g7 = tt
327605d3dc4bSpaulsan	 */
327705d3dc4bSpaulsan
327805d3dc4bSpaulsan	brz,pt  %g1, tsb_validtte
327905d3dc4bSpaulsan	  nop
	/* remember this mapping came from the shared (ctx1) context */
328005d3dc4bSpaulsan	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g1
328105d3dc4bSpaulsan	  or	%g1, HAT_CHKCTX1_FLAG, %g1
328205d3dc4bSpaulsan	stub    %g1, [%g6 + TSBMISS_URTTEFLAGS]
328305d3dc4bSpaulsan
328405d3dc4bSpaulsan	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
32851426d65aSsm142603#endif /* sun4u && !UTSB_PHYS */
328605d3dc4bSpaulsan
328705d3dc4bSpaulsantsb_validtte:
	/*
	 * A valid tte was found in a hash list.  Set ref/mod bits as the
	 * trap type requires, build the tag target (%g2) and fault vaddr
	 * (%g5), then continue at tsb_update_tl1.
	 */
328805d3dc4bSpaulsan	/*
32897c478bd9Sstevel@tonic-gate	 * g3 = tte
32907c478bd9Sstevel@tonic-gate	 * g4 = tte pa
32917c478bd9Sstevel@tonic-gate	 * g6 = tsbmiss area
329205d3dc4bSpaulsan	 * g7 = tt
32937c478bd9Sstevel@tonic-gate	 */
32947c478bd9Sstevel@tonic-gate
32957c478bd9Sstevel@tonic-gate	/*
32967c478bd9Sstevel@tonic-gate	 * Set ref/mod bits if this is a prot trap.  Usually, it isn't.
32977c478bd9Sstevel@tonic-gate	 */
32987c478bd9Sstevel@tonic-gate	cmp	%g7, FAST_PROT_TT
32997c478bd9Sstevel@tonic-gate	bne,pt	%icc, 4f
33007c478bd9Sstevel@tonic-gate	  nop
33017c478bd9Sstevel@tonic-gate
33020a90a7fdSAmritpal Sandhu	TTE_SET_REFMOD_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_refmod,
33037c478bd9Sstevel@tonic-gate	    tsb_protfault)
33047c478bd9Sstevel@tonic-gate
33057c478bd9Sstevel@tonic-gate	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */
330605d3dc4bSpaulsan#ifdef sun4v
330705d3dc4bSpaulsan	MMU_FAULT_STATUS_AREA(%g7)
33081426d65aSsm142603	ldx	[%g7 + MMFSA_D_ADDR], %g5	/* load fault addr for later */
33091426d65aSsm142603#else /* sun4v */
33101426d65aSsm142603	mov     MMU_TAG_ACCESS, %g5
33111426d65aSsm142603	ldxa    [%g5]ASI_DMMU, %g5
33121426d65aSsm142603#endif /* sun4v */
33137c478bd9Sstevel@tonic-gate	ba,pt	%xcc, tsb_update_tl1
33147c478bd9Sstevel@tonic-gate	  nop
33157c478bd9Sstevel@tonic-gate4:
33167c478bd9Sstevel@tonic-gate	/*
33179d0d62adSJason Beloro	 * If ITLB miss check exec bit.
33189d0d62adSJason Beloro	 * If not set treat as invalid TTE.
33197c478bd9Sstevel@tonic-gate	 */
33207c478bd9Sstevel@tonic-gate	cmp     %g7, T_INSTR_MMU_MISS
33217c478bd9Sstevel@tonic-gate	be,pn	%icc, 5f
33227c478bd9Sstevel@tonic-gate	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
33237c478bd9Sstevel@tonic-gate	cmp     %g7, FAST_IMMU_MISS_TT
33247c478bd9Sstevel@tonic-gate	bne,pt %icc, 3f
33257c478bd9Sstevel@tonic-gate	  andcc   %g3, TTE_EXECPRM_INT, %g0	/* check execute bit is set */
33267c478bd9Sstevel@tonic-gate5:
33277c478bd9Sstevel@tonic-gate	bz,pn %icc, tsb_protfault
33287c478bd9Sstevel@tonic-gate	  nop
33299d0d62adSJason Beloro
33307c478bd9Sstevel@tonic-gate3:
33317c478bd9Sstevel@tonic-gate	/*
33327c478bd9Sstevel@tonic-gate	 * Set reference bit if not already set
33337c478bd9Sstevel@tonic-gate	 */
33340a90a7fdSAmritpal Sandhu	TTE_SET_REF_ML(%g3, %g4, %g6, %g7, %g5, tsb_lset_ref)
33357c478bd9Sstevel@tonic-gate
33367c478bd9Sstevel@tonic-gate	/*
33377c478bd9Sstevel@tonic-gate	 * Now, load into TSB/TLB.  At this point:
33387c478bd9Sstevel@tonic-gate	 * g3 = tte
33397c478bd9Sstevel@tonic-gate	 * g4 = patte
33407c478bd9Sstevel@tonic-gate	 * g6 = tsbmiss area
33417c478bd9Sstevel@tonic-gate	 */
33421426d65aSsm142603	rdpr	%tt, %g7
33437c478bd9Sstevel@tonic-gate#ifdef sun4v
	/* use the D-side MMFSA unless this trap is an I-side miss */
33447c478bd9Sstevel@tonic-gate	MMU_FAULT_STATUS_AREA(%g2)
33451426d65aSsm142603	cmp	%g7, T_INSTR_MMU_MISS
33467c478bd9Sstevel@tonic-gate	be,a,pt	%icc, 9f
33477c478bd9Sstevel@tonic-gate	  nop
33481426d65aSsm142603	cmp	%g7, FAST_IMMU_MISS_TT
33497c478bd9Sstevel@tonic-gate	be,a,pt	%icc, 9f
33507c478bd9Sstevel@tonic-gate	  nop
33517c478bd9Sstevel@tonic-gate	add	%g2, MMFSA_D_, %g2
33527c478bd9Sstevel@tonic-gate9:
33537c478bd9Sstevel@tonic-gate	ldx	[%g2 + MMFSA_CTX_], %g7
33547c478bd9Sstevel@tonic-gate	sllx	%g7, TTARGET_CTX_SHIFT, %g7
33557c478bd9Sstevel@tonic-gate	ldx	[%g2 + MMFSA_ADDR_], %g2
33561426d65aSsm142603	mov	%g2, %g5		! load the fault addr for later use
33577c478bd9Sstevel@tonic-gate	srlx	%g2, TTARGET_VA_SHIFT, %g2
33587c478bd9Sstevel@tonic-gate	or	%g2, %g7, %g2
33591426d65aSsm142603#else /* sun4v */
33601426d65aSsm142603	mov     MMU_TAG_ACCESS, %g5
33611426d65aSsm142603	cmp     %g7, FAST_IMMU_MISS_TT
33621426d65aSsm142603	be,a,pt %icc, 9f
33637c478bd9Sstevel@tonic-gate	   ldxa  [%g0]ASI_IMMU, %g2
33647c478bd9Sstevel@tonic-gate	ldxa    [%g0]ASI_DMMU, %g2
33651426d65aSsm142603	ba,pt   %icc, tsb_update_tl1
33661426d65aSsm142603	   ldxa  [%g5]ASI_DMMU, %g5
33671426d65aSsm1426039:
33681426d65aSsm142603	ldxa    [%g5]ASI_IMMU, %g5
33691426d65aSsm142603#endif /* sun4v */
33701426d65aSsm142603
33717c478bd9Sstevel@tonic-gatetsb_update_tl1:
	/*
	 * %g2 = tag target, %g3 = tte, %g4 = tte pa, %g5 = fault vaddr.
	 * A zero context in the tag target means a kernel miss.
	 */
33727c478bd9Sstevel@tonic-gate	srlx	%g2, TTARGET_CTX_SHIFT, %g7
33737c478bd9Sstevel@tonic-gate	brz,pn	%g7, tsb_kernel
33747c478bd9Sstevel@tonic-gate#ifdef sun4v
33757c478bd9Sstevel@tonic-gate	  and	%g3, TTE_SZ_BITS, %g7	! assumes TTE_SZ_SHFT is 0
33761426d65aSsm142603#else  /* sun4v */
33777c478bd9Sstevel@tonic-gate	  srlx	%g3, TTE_SZ_SHFT, %g7
33781426d65aSsm142603#endif /* sun4v */
33797c478bd9Sstevel@tonic-gate
33807c478bd9Sstevel@tonic-gatetsb_user:
	/* %g7 = tte size field; 4M (and larger) ttes go to tsb_user4m */
33817c478bd9Sstevel@tonic-gate#ifdef sun4v
33827c478bd9Sstevel@tonic-gate	cmp	%g7, TTE4M
33837c478bd9Sstevel@tonic-gate	bge,pn	%icc, tsb_user4m
33847c478bd9Sstevel@tonic-gate	  nop
33850066ea64Sjimand#else /* sun4v */
33867c478bd9Sstevel@tonic-gate	cmp	%g7, TTESZ_VALID | TTE4M
33877c478bd9Sstevel@tonic-gate	be,pn	%icc, tsb_user4m
33887c478bd9Sstevel@tonic-gate	  srlx	%g3, TTE_SZ2_SHFT, %g7
33897c478bd9Sstevel@tonic-gate	andcc	%g7, TTE_SZ2_BITS, %g7		! check 32/256MB
33900066ea64Sjimand#ifdef ITLB_32M_256M_SUPPORT
33910066ea64Sjimand	bnz,pn	%icc, tsb_user4m
33920066ea64Sjimand	  nop
33930066ea64Sjimand#else /* ITLB_32M_256M_SUPPORT */
33941bd453f3Ssusans	bnz,a,pn %icc, tsb_user_pn_synth
33951426d65aSsm142603	 nop
33960066ea64Sjimand#endif /* ITLB_32M_256M_SUPPORT */
33970066ea64Sjimand#endif /* sun4v */
33997c478bd9Sstevel@tonic-gatetsb_user8k:
	/*
	 * Program the tte into the 8K-indexed TSB: the shared-context (3RD)
	 * TSB when HAT_CHKCTX1_FLAG is set, otherwise the private 1ST TSB.
	 */
34001426d65aSsm142603#if defined(sun4v) || defined(UTSB_PHYS)
340105d3dc4bSpaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
340205d3dc4bSpaulsan	and	%g7, HAT_CHKCTX1_FLAG, %g1
340305d3dc4bSpaulsan	brz,a,pn %g1, 1f
34041426d65aSsm142603	  ldn	[%g6 + TSBMISS_TSBPTR], %g1		! g1 = 1ST TSB ptr
340505d3dc4bSpaulsan	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR, %g1)
34061426d65aSsm142603	brlz,a,pn %g1, ptl1_panic			! if no shared 3RD tsb
340705d3dc4bSpaulsan	  mov PTL1_NO_SCDTSB8K, %g1			! panic
340805d3dc4bSpaulsan        GET_3RD_TSBE_PTR(%g5, %g1, %g6, %g7)
340905d3dc4bSpaulsan1:
34101426d65aSsm142603#else /* defined(sun4v) || defined(UTSB_PHYS) */
34111426d65aSsm142603	ldn   [%g6 + TSBMISS_TSBPTR], %g1             ! g1 = 1ST TSB ptr
34121426d65aSsm142603#endif /* defined(sun4v) || defined(UTSB_PHYS) */
34137c478bd9Sstevel@tonic-gate
341425cf1a30Sjl139090#ifndef UTSB_PHYS
341525cf1a30Sjl139090	mov	ASI_N, %g7	! user TSBs accessed by VA
34167c478bd9Sstevel@tonic-gate	mov	%g7, %asi
34171426d65aSsm142603#endif /* !UTSB_PHYS */
341805d3dc4bSpaulsan
34190a90a7fdSAmritpal Sandhu	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l3)
34207c478bd9Sstevel@tonic-gate
	/* I-side misses stuff the ITLB at 9:, everything else the DTLB */
342105d3dc4bSpaulsan	rdpr    %tt, %g5
34221426d65aSsm142603#ifdef sun4v
34237c478bd9Sstevel@tonic-gate	cmp	%g5, T_INSTR_MMU_MISS
34247c478bd9Sstevel@tonic-gate	be,a,pn	%xcc, 9f
34257c478bd9Sstevel@tonic-gate	  mov	%g3, %g5
34267c478bd9Sstevel@tonic-gate#endif /* sun4v */
34277c478bd9Sstevel@tonic-gate	cmp	%g5, FAST_IMMU_MISS_TT
34287c478bd9Sstevel@tonic-gate	be,pn	%xcc, 9f
34297c478bd9Sstevel@tonic-gate	  mov	%g3, %g5
34307c478bd9Sstevel@tonic-gate
34317c478bd9Sstevel@tonic-gate	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
34327c478bd9Sstevel@tonic-gate	! trapstat wants TTE in %g5
34337c478bd9Sstevel@tonic-gate	retry
34347c478bd9Sstevel@tonic-gate9:
34357c478bd9Sstevel@tonic-gate	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
34367c478bd9Sstevel@tonic-gate	! trapstat wants TTE in %g5
34377c478bd9Sstevel@tonic-gate	retry
34387c478bd9Sstevel@tonic-gate
34397c478bd9Sstevel@tonic-gatetsb_user4m:
	/*
	 * Program the tte into the 4M-indexed TSB: the shared-context (4TH)
	 * TSB when HAT_CHKCTX1_FLAG is set, otherwise the private 2ND TSB.
	 * The TSB update is skipped when no 4M-indexed TSB exists (ptr < 0).
	 */
34401426d65aSsm142603#if defined(sun4v) || defined(UTSB_PHYS)
344105d3dc4bSpaulsan	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g7
344205d3dc4bSpaulsan	and	%g7, HAT_CHKCTX1_FLAG, %g1
344305d3dc4bSpaulsan	brz,a,pn %g1, 4f
34441426d65aSsm142603	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		! g1 = 2ND TSB ptr
34451426d65aSsm142603	GET_UTSBREG_SHCTX(%g6, TSBMISS_TSBSCDPTR4M, %g1)! g1 = 4TH TSB ptr
34461426d65aSsm142603	brlz,a,pn %g1, 5f				! if no shared 4TH TSB
344705d3dc4bSpaulsan	  nop
344805d3dc4bSpaulsan        GET_4TH_TSBE_PTR(%g5, %g1, %g6, %g7)
3449a9425c25Ssm142603
34501426d65aSsm142603#else /* defined(sun4v) || defined(UTSB_PHYS) */
34511426d65aSsm142603	ldn   [%g6 + TSBMISS_TSBPTR4M], %g1             ! g1 = 2ND TSB ptr
34521426d65aSsm142603#endif /* defined(sun4v) || defined(UTSB_PHYS) */
3453a9425c25Ssm1426034:
345405d3dc4bSpaulsan	brlz,pn %g1, 5f	/* Check to see if we have 2nd TSB programmed */
34557c478bd9Sstevel@tonic-gate	  nop
34567c478bd9Sstevel@tonic-gate
345725cf1a30Sjl139090#ifndef UTSB_PHYS
345825cf1a30Sjl139090	mov	ASI_N, %g7	! user TSBs accessed by VA
34597c478bd9Sstevel@tonic-gate	mov	%g7, %asi
346025cf1a30Sjl139090#endif /* UTSB_PHYS */
34617c478bd9Sstevel@tonic-gate
34620a90a7fdSAmritpal Sandhu        TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l4)
34637c478bd9Sstevel@tonic-gate
34647c478bd9Sstevel@tonic-gate5:
	/* I-side misses stuff the ITLB at 9:, everything else the DTLB */
346505d3dc4bSpaulsan	rdpr    %tt, %g5
34661426d65aSsm142603#ifdef sun4v
34677c478bd9Sstevel@tonic-gate        cmp     %g5, T_INSTR_MMU_MISS
34687c478bd9Sstevel@tonic-gate        be,a,pn %xcc, 9f
34697c478bd9Sstevel@tonic-gate          mov   %g3, %g5
34707c478bd9Sstevel@tonic-gate#endif /* sun4v */
34717c478bd9Sstevel@tonic-gate        cmp     %g5, FAST_IMMU_MISS_TT
34727c478bd9Sstevel@tonic-gate        be,pn   %xcc, 9f
34737c478bd9Sstevel@tonic-gate        mov     %g3, %g5
34747c478bd9Sstevel@tonic-gate
34757c478bd9Sstevel@tonic-gate        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
34767c478bd9Sstevel@tonic-gate        ! trapstat wants TTE in %g5
34777c478bd9Sstevel@tonic-gate        retry
34787c478bd9Sstevel@tonic-gate9:
34797c478bd9Sstevel@tonic-gate        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
34807c478bd9Sstevel@tonic-gate        ! trapstat wants TTE in %g5
34817c478bd9Sstevel@tonic-gate        retry
34827c478bd9Sstevel@tonic-gate
34830066ea64Sjimand#if !defined(sun4v) && !defined(ITLB_32M_256M_SUPPORT)
34841bd453f3Ssusans	/*
34851bd453f3Ssusans	 * Panther ITLB synthesis.
34861bd453f3Ssusans	 * The Panther 32M and 256M ITLB code simulates these two large page
34871bd453f3Ssusans	 * sizes with 4M pages, to provide support for programs, for example
34881bd453f3Ssusans	 * Java, that may copy instructions into a 32M or 256M data page and
34891bd453f3Ssusans	 * then execute them. The code below generates the 4M pfn bits and
34901bd453f3Ssusans	 * saves them in the modified 32M/256M ttes in the TSB. If the tte is
34911bd453f3Ssusans	 * stored in the DTLB to map a 32M/256M page, the 4M pfn offset bits
34921bd453f3Ssusans	 * are ignored by the hardware.
34931bd453f3Ssusans	 *
34941bd453f3Ssusans	 * Now, load into TSB/TLB.  At this point:
34951bd453f3Ssusans	 * g2 = tagtarget
34961bd453f3Ssusans	 * g3 = tte
34971bd453f3Ssusans	 * g4 = patte
34981bd453f3Ssusans	 * g5 = tt
34991bd453f3Ssusans	 * g6 = tsbmiss area
35001bd453f3Ssusans	 */
35011bd453f3Ssusanstsb_user_pn_synth:
35021426d65aSsm142603	rdpr %tt, %g5
35031426d65aSsm142603	cmp    %g5, FAST_IMMU_MISS_TT
35041bd453f3Ssusans	be,pt	%xcc, tsb_user_itlb_synth	/* ITLB miss */
35051bd453f3Ssusans	  andcc %g3, TTE_EXECPRM_INT, %g0	/* is execprm bit set */
35061bd453f3Ssusans	bz,pn %icc, 4b				/* if not, been here before */
35071bd453f3Ssusans	  ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	/* g1 = tsbp */
350805d3dc4bSpaulsan	brlz,a,pn %g1, 5f			/* no 2nd tsb */
35091bd453f3Ssusans	  mov	%g3, %g5
35101bd453f3Ssusans
35111bd453f3Ssusans	mov	MMU_TAG_ACCESS, %g7
35121bd453f3Ssusans	ldxa	[%g7]ASI_DMMU, %g6		/* get tag access va */
35131bd453f3Ssusans	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 1)	/* make 4M pfn offset */
35141bd453f3Ssusans
35151bd453f3Ssusans	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
35161bd453f3Ssusans	mov	%g7, %asi
35170a90a7fdSAmritpal Sandhu	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l5) /* update TSB */
35181bd453f3Ssusans5:
35191bd453f3Ssusans        DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
35201bd453f3Ssusans        retry
35211bd453f3Ssusans
35221bd453f3Ssusanstsb_user_itlb_synth:
35231426d65aSsm142603	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1		/* g1 =  2ND TSB */
35241bd453f3Ssusans
35251bd453f3Ssusans	mov	MMU_TAG_ACCESS, %g7
35261bd453f3Ssusans	ldxa	[%g7]ASI_IMMU, %g6		/* get tag access va */
35271bd453f3Ssusans	GET_4M_PFN_OFF(%g3, %g6, %g5, %g7, 2)	/* make 4M pfn offset */
35281bd453f3Ssusans	brlz,a,pn %g1, 7f	/* Check to see if we have 2nd TSB programmed */
35291bd453f3Ssusans	  or	%g5, %g3, %g5			/* add 4M bits to TTE */
35301bd453f3Ssusans
35311bd453f3Ssusans	mov	ASI_N, %g7	/* user TSBs always accessed by VA */
35321bd453f3Ssusans	mov	%g7, %asi
35330a90a7fdSAmritpal Sandhu	TSB_UPDATE_TL_PN(%g1, %g5, %g2, %g4, %g7, %g3, locked_tsb_l6) /* update TSB */
35341bd453f3Ssusans7:
35351bd453f3Ssusans	SET_TTE4M_PN(%g5, %g7)			/* add TTE4M pagesize to TTE */
35361bd453f3Ssusans        ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
35371bd453f3Ssusans        retry
35380066ea64Sjimand#endif /* !sun4v && !ITLB_32M_256M_SUPPORT */
35391bd453f3Ssusans
354012b2e3e7Seg155566tsb_kernel:
	/*
	 * Kernel-context TSB update: select the 8K- or 4M-indexed kernel
	 * TSB from the tte size in %g7, program the entry, then stuff
	 * the appropriate TLB and retry.
	 */
354105d3dc4bSpaulsan	rdpr	%tt, %g5
35421426d65aSsm142603#ifdef sun4v
35437c478bd9Sstevel@tonic-gate	cmp	%g7, TTE4M
354412b2e3e7Seg155566	bge,pn	%icc, 5f
35457c478bd9Sstevel@tonic-gate#else
354612b2e3e7Seg155566	cmp	%g7, TTESZ_VALID | TTE4M	! no 32M or 256M support
35477c478bd9Sstevel@tonic-gate	be,pn	%icc, 5f
35481426d65aSsm142603#endif /* sun4v */
35497c478bd9Sstevel@tonic-gate	  nop
35501426d65aSsm142603	ldn	[%g6 + TSBMISS_TSBPTR], %g1	! g1 = 8K TSB ptr
35517c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 6f
35527c478bd9Sstevel@tonic-gate	  nop
35537c478bd9Sstevel@tonic-gate5:
35541426d65aSsm142603	ldn	[%g6 + TSBMISS_TSBPTR4M], %g1	! g1 = 4M TSB ptr
35551426d65aSsm142603	brlz,pn	%g1, 3f		/* skip programming if 4M TSB ptr is -1 */
35567c478bd9Sstevel@tonic-gate	  nop
35577c478bd9Sstevel@tonic-gate6:
35587c478bd9Sstevel@tonic-gate#ifndef sun4v
35597c478bd9Sstevel@tonic-gatetsb_kernel_patch_asi:
35607c478bd9Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g6
35617c478bd9Sstevel@tonic-gate	mov	%g6, %asi	! XXX avoid writing to %asi !!
35627c478bd9Sstevel@tonic-gate#endif
35630a90a7fdSAmritpal Sandhu	TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, locked_tsb_l7)
35647c478bd9Sstevel@tonic-gate3:
35657c478bd9Sstevel@tonic-gate#ifdef sun4v
35667c478bd9Sstevel@tonic-gate	cmp	%g5, T_INSTR_MMU_MISS
35677c478bd9Sstevel@tonic-gate	be,a,pn	%icc, 1f
35687c478bd9Sstevel@tonic-gate	  mov	%g3, %g5			! trapstat wants TTE in %g5
35697c478bd9Sstevel@tonic-gate#endif /* sun4v */
35707c478bd9Sstevel@tonic-gate	cmp	%g5, FAST_IMMU_MISS_TT
35717c478bd9Sstevel@tonic-gate	be,pn	%icc, 1f
35727c478bd9Sstevel@tonic-gate	  mov	%g3, %g5			! trapstat wants TTE in %g5
35737c478bd9Sstevel@tonic-gate	DTLB_STUFF(%g5, %g1, %g2, %g3, %g4)
35747c478bd9Sstevel@tonic-gate	! trapstat wants TTE in %g5
35757c478bd9Sstevel@tonic-gate	retry
35767c478bd9Sstevel@tonic-gate1:
35777c478bd9Sstevel@tonic-gate	ITLB_STUFF(%g5, %g1, %g2, %g3, %g4)
35787c478bd9Sstevel@tonic-gate	! trapstat wants TTE in %g5
35797c478bd9Sstevel@tonic-gate	retry
35807c478bd9Sstevel@tonic-gate
35817c478bd9Sstevel@tonic-gatetsb_ism:
35827c478bd9Sstevel@tonic-gate	/*
35837c478bd9Sstevel@tonic-gate	 * This is an ISM [i|d]tlb miss.  We optimize for largest
35847c478bd9Sstevel@tonic-gate	 * page size down to smallest.
35857c478bd9Sstevel@tonic-gate	 *
358660972f37Sjb145095	 * g2 = vaddr + ctx(or ctxtype (sun4v)) aka (pseudo-)tag access
358760972f37Sjb145095	 *	register
35887c478bd9Sstevel@tonic-gate	 * g3 = ismmap->ism_seg
35897c478bd9Sstevel@tonic-gate	 * g4 = physical address of ismmap->ism_sfmmu
35907c478bd9Sstevel@tonic-gate	 * g6 = tsbmiss area
35917c478bd9Sstevel@tonic-gate	 */
35927c478bd9Sstevel@tonic-gate	ldna	[%g4]ASI_MEM, %g7		/* g7 = ism hatid */
35937c478bd9Sstevel@tonic-gate	brz,a,pn %g7, ptl1_panic		/* if zero jmp ahead */
35947c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_ISM, %g1
35957c478bd9Sstevel@tonic-gate						/* g5 = pa of imap_vb_shift */
35967c478bd9Sstevel@tonic-gate	sub	%g4, (IMAP_ISMHAT - IMAP_VB_SHIFT), %g5
359705d3dc4bSpaulsan	lduba	[%g5]ASI_MEM, %g4		/* g4 = imap_vb_shift */
35987c478bd9Sstevel@tonic-gate	srlx	%g3, %g4, %g3			/* clr size field */
359905d3dc4bSpaulsan	set	TAGACC_CTX_MASK, %g1		/* mask off ctx number */
36007c478bd9Sstevel@tonic-gate	sllx    %g3, %g4, %g3                   /* g3 = ism vbase */
36017c478bd9Sstevel@tonic-gate	and     %g2, %g1, %g4                   /* g4 = ctx number */
36027c478bd9Sstevel@tonic-gate	andn    %g2, %g1, %g1                   /* g1 = tlb miss vaddr */
36037c478bd9Sstevel@tonic-gate	sub     %g1, %g3, %g2                   /* g2 = offset in ISM seg */
360460972f37Sjb145095	or      %g2, %g4, %g2                   /* g2 = (pseudo-)tagacc */
360505d3dc4bSpaulsan	sub     %g5, (IMAP_VB_SHIFT - IMAP_HATFLAGS), %g5
360605d3dc4bSpaulsan	lduha   [%g5]ASI_MEM, %g4               /* g5 = pa of imap_hatflags */
36071426d65aSsm142603#if defined(sun4v) || defined(UTSB_PHYS)
360805d3dc4bSpaulsan	and     %g4, HAT_CTX1_FLAG, %g5         /* g5 = imap_hatflags */
360905d3dc4bSpaulsan	brz,pt %g5, tsb_chk4M_ism
361005d3dc4bSpaulsan	  nop
361105d3dc4bSpaulsan	ldub    [%g6 + TSBMISS_URTTEFLAGS], %g5
361205d3dc4bSpaulsan	or      %g5, HAT_CHKCTX1_FLAG, %g5
361305d3dc4bSpaulsan	stub    %g5, [%g6 + TSBMISS_URTTEFLAGS]
36149d0d62adSJason Beloro	rdpr    %tt, %g5
36159d0d62adSJason Beloro	SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
36161426d65aSsm142603#endif /* defined(sun4v) || defined(UTSB_PHYS) */
36179d0d62adSJason Beloro
36187c478bd9Sstevel@tonic-gate	/*
36197c478bd9Sstevel@tonic-gate	 * ISM pages are always locked down.
36207c478bd9Sstevel@tonic-gate	 * If we can't find the tte then pagefault
362105d3dc4bSpaulsan	 * and let the spt segment driver resolve it.
36227c478bd9Sstevel@tonic-gate	 *
362305d3dc4bSpaulsan	 * g2 = tagacc w/ISM vaddr (offset in ISM seg)
362405d3dc4bSpaulsan	 * g4 = imap_hatflags
36257c478bd9Sstevel@tonic-gate	 * g6 = tsb miss area
36267c478bd9Sstevel@tonic-gate	 * g7 = ISM hatid
36277c478bd9Sstevel@tonic-gate	 */
362805d3dc4bSpaulsan
362905d3dc4bSpaulsantsb_chk4M_ism:
36307c478bd9Sstevel@tonic-gate	and	%g4, HAT_4M_FLAG, %g5		/* g4 = imap_hatflags */
36317c478bd9Sstevel@tonic-gate	brnz,pt	%g5, tsb_ism_4M			/* branch if 4M pages */
36327c478bd9Sstevel@tonic-gate	  nop
36337c478bd9Sstevel@tonic-gate
36347c478bd9Sstevel@tonic-gatetsb_ism_32M:
36357c478bd9Sstevel@tonic-gate	and	%g4, HAT_32M_FLAG, %g5		/* check default 32M next */
36367c478bd9Sstevel@tonic-gate	brz,pn	%g5, tsb_ism_256M
36377c478bd9Sstevel@tonic-gate	  nop
36387c478bd9Sstevel@tonic-gate
36397c478bd9Sstevel@tonic-gate	/*
36407c478bd9Sstevel@tonic-gate	 * 32M hash.
36417c478bd9Sstevel@tonic-gate	 */
36427c478bd9Sstevel@tonic-gate
36430a90a7fdSAmritpal Sandhu	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT32M,
36440a90a7fdSAmritpal Sandhu	    TTE32M, %g5, tsb_ism_l32M, tsb_ism_32M_found, sfmmu_suspend_tl,
36457c478bd9Sstevel@tonic-gate	    tsb_ism_4M)
36467c478bd9Sstevel@tonic-gate	/* NOT REACHED */
36477c478bd9Sstevel@tonic-gate
36487c478bd9Sstevel@tonic-gatetsb_ism_32M_found:
36499d0d62adSJason Beloro	brlz,a,pt %g3, tsb_validtte
365005d3dc4bSpaulsan	  rdpr	%tt, %g7
36517c478bd9Sstevel@tonic-gate	ba,pt	%xcc, tsb_ism_4M
36527c478bd9Sstevel@tonic-gate	  nop
36537c478bd9Sstevel@tonic-gate
36547c478bd9Sstevel@tonic-gatetsb_ism_256M:
36557c478bd9Sstevel@tonic-gate	and	%g4, HAT_256M_FLAG, %g5		/* 256M is last resort */
36567c478bd9Sstevel@tonic-gate	brz,a,pn %g5, ptl1_panic
36577c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_ISM, %g1
36587c478bd9Sstevel@tonic-gate
36597c478bd9Sstevel@tonic-gate	/*
36607c478bd9Sstevel@tonic-gate	 * 256M hash.
36617c478bd9Sstevel@tonic-gate	 */
36620a90a7fdSAmritpal Sandhu	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT256M,
36630a90a7fdSAmritpal Sandhu	    TTE256M, %g5, tsb_ism_l256M, tsb_ism_256M_found, sfmmu_suspend_tl,
36647c478bd9Sstevel@tonic-gate	    tsb_ism_4M)
36657c478bd9Sstevel@tonic-gate
36667c478bd9Sstevel@tonic-gatetsb_ism_256M_found:
36679d0d62adSJason Beloro	brlz,a,pt %g3, tsb_validtte
366805d3dc4bSpaulsan	  rdpr	%tt, %g7
36697c478bd9Sstevel@tonic-gate
36707c478bd9Sstevel@tonic-gatetsb_ism_4M:
36717c478bd9Sstevel@tonic-gate	/*
36727c478bd9Sstevel@tonic-gate	 * 4M hash.
36737c478bd9Sstevel@tonic-gate	 */
36740a90a7fdSAmritpal Sandhu	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT4M,
36750a90a7fdSAmritpal Sandhu	    TTE4M, %g5, tsb_ism_l4M, tsb_ism_4M_found, sfmmu_suspend_tl,
36767c478bd9Sstevel@tonic-gate	    tsb_ism_8K)
36777c478bd9Sstevel@tonic-gate	/* NOT REACHED */
36787c478bd9Sstevel@tonic-gate
36797c478bd9Sstevel@tonic-gatetsb_ism_4M_found:
36809d0d62adSJason Beloro	brlz,a,pt %g3, tsb_validtte
368105d3dc4bSpaulsan	  rdpr	%tt, %g7
36827c478bd9Sstevel@tonic-gate
36837c478bd9Sstevel@tonic-gatetsb_ism_8K:
36847c478bd9Sstevel@tonic-gate	/*
36857c478bd9Sstevel@tonic-gate	 * 8K and 64K hash.
36867c478bd9Sstevel@tonic-gate	 */
36877c478bd9Sstevel@tonic-gate
36880a90a7fdSAmritpal Sandhu	GET_TTE(%g2, %g7, %g3, %g4, %g6, %g1, MMU_PAGESHIFT64K,
36890a90a7fdSAmritpal Sandhu	    TTE64K, %g5, tsb_ism_l8K, tsb_ism_8K_found, sfmmu_suspend_tl,
36907c478bd9Sstevel@tonic-gate	    tsb_pagefault)
36917c478bd9Sstevel@tonic-gate	/* NOT REACHED */
36927c478bd9Sstevel@tonic-gate
36937c478bd9Sstevel@tonic-gatetsb_ism_8K_found:
36949d0d62adSJason Beloro	brlz,a,pt %g3, tsb_validtte
369505d3dc4bSpaulsan	  rdpr	%tt, %g7
36967c478bd9Sstevel@tonic-gate
36977c478bd9Sstevel@tonic-gatetsb_pagefault:
36987c478bd9Sstevel@tonic-gate	rdpr	%tt, %g7
36997c478bd9Sstevel@tonic-gate	cmp	%g7, FAST_PROT_TT
37007c478bd9Sstevel@tonic-gate	be,a,pn	%icc, tsb_protfault
37017c478bd9Sstevel@tonic-gate	  wrpr	%g0, FAST_DMMU_MISS_TT, %tt
37027c478bd9Sstevel@tonic-gate
37037c478bd9Sstevel@tonic-gatetsb_protfault:
37047c478bd9Sstevel@tonic-gate	/*
37057c478bd9Sstevel@tonic-gate	 * we get here if we couldn't find a valid tte in the hash.
37067c478bd9Sstevel@tonic-gate	 *
37077c478bd9Sstevel@tonic-gate	 * If user and we are at tl>1 we go to window handling code.
37087c478bd9Sstevel@tonic-gate	 *
37097c478bd9Sstevel@tonic-gate	 * If kernel and the fault is on the same page as our stack
37107c478bd9Sstevel@tonic-gate	 * pointer, then we know the stack is bad and the trap handler
37117c478bd9Sstevel@tonic-gate	 * will fail, so we call ptl1_panic with PTL1_BAD_STACK.
37127c478bd9Sstevel@tonic-gate	 *
37137c478bd9Sstevel@tonic-gate	 * If this is a kernel trap and tl>1, panic.
37147c478bd9Sstevel@tonic-gate	 *
37157c478bd9Sstevel@tonic-gate	 * Otherwise we call pagefault.
37167c478bd9Sstevel@tonic-gate	 */
37177c478bd9Sstevel@tonic-gate	cmp	%g7, FAST_IMMU_MISS_TT
37187c478bd9Sstevel@tonic-gate#ifdef sun4v
37197c478bd9Sstevel@tonic-gate	MMU_FAULT_STATUS_AREA(%g4)
37207c478bd9Sstevel@tonic-gate	ldx	[%g4 + MMFSA_I_CTX], %g5
37217c478bd9Sstevel@tonic-gate	ldx	[%g4 + MMFSA_D_CTX], %g4
37227c478bd9Sstevel@tonic-gate	move	%icc, %g5, %g4
37237c478bd9Sstevel@tonic-gate	cmp	%g7, T_INSTR_MMU_MISS
37247c478bd9Sstevel@tonic-gate	move	%icc, %g5, %g4
37257c478bd9Sstevel@tonic-gate#else
37267c478bd9Sstevel@tonic-gate	mov	MMU_TAG_ACCESS, %g4
37277c478bd9Sstevel@tonic-gate	ldxa	[%g4]ASI_DMMU, %g2
37287c478bd9Sstevel@tonic-gate	ldxa	[%g4]ASI_IMMU, %g5
37297c478bd9Sstevel@tonic-gate	move	%icc, %g5, %g2
37307c478bd9Sstevel@tonic-gate	cmp	%g7, T_INSTR_MMU_MISS
37317c478bd9Sstevel@tonic-gate	move	%icc, %g5, %g2
37327c478bd9Sstevel@tonic-gate	sllx	%g2, TAGACC_CTX_LSHIFT, %g4
37331426d65aSsm142603#endif /* sun4v */
37347c478bd9Sstevel@tonic-gate	brnz,pn	%g4, 3f				/* skip if not kernel */
37357c478bd9Sstevel@tonic-gate	  rdpr	%tl, %g5
37367c478bd9Sstevel@tonic-gate
37377c478bd9Sstevel@tonic-gate	add	%sp, STACK_BIAS, %g3
37387c478bd9Sstevel@tonic-gate	srlx	%g3, MMU_PAGESHIFT, %g3
37397c478bd9Sstevel@tonic-gate	srlx	%g2, MMU_PAGESHIFT, %g4
37407c478bd9Sstevel@tonic-gate	cmp	%g3, %g4
37417c478bd9Sstevel@tonic-gate	be,a,pn	%icc, ptl1_panic		/* panic if bad %sp */
37427c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_STACK, %g1
37437c478bd9Sstevel@tonic-gate
37447c478bd9Sstevel@tonic-gate	cmp	%g5, 1
37457c478bd9Sstevel@tonic-gate	ble,pt	%icc, 2f
37467c478bd9Sstevel@tonic-gate	  nop
37477c478bd9Sstevel@tonic-gate	TSTAT_CHECK_TL1(2f, %g1, %g2)
37487c478bd9Sstevel@tonic-gate	rdpr	%tt, %g2
37497c478bd9Sstevel@tonic-gate	cmp	%g2, FAST_PROT_TT
37507c478bd9Sstevel@tonic-gate	mov	PTL1_BAD_KPROT_FAULT, %g1
37517c478bd9Sstevel@tonic-gate	movne	%icc, PTL1_BAD_KMISS, %g1
37527c478bd9Sstevel@tonic-gate	ba,pt	%icc, ptl1_panic
37537c478bd9Sstevel@tonic-gate	  nop
37547c478bd9Sstevel@tonic-gate
37557c478bd9Sstevel@tonic-gate2:
37567c478bd9Sstevel@tonic-gate	/*
37577c478bd9Sstevel@tonic-gate	 * We are taking a pagefault in the kernel on a kernel address.  If
37587c478bd9Sstevel@tonic-gate	 * CPU_DTRACE_NOFAULT is set in the cpuc_dtrace_flags, we don't actually
37597c478bd9Sstevel@tonic-gate	 * want to call sfmmu_pagefault -- we will instead note that a fault
37607c478bd9Sstevel@tonic-gate	 * has occurred by setting CPU_DTRACE_BADADDR and issue a "done"
37617c478bd9Sstevel@tonic-gate	 * (instead of a "retry").  This will step over the faulting
37627c478bd9Sstevel@tonic-gate	 * instruction.
37637c478bd9Sstevel@tonic-gate	 */
37647c478bd9Sstevel@tonic-gate	CPU_INDEX(%g1, %g2)
37657c478bd9Sstevel@tonic-gate	set	cpu_core, %g2
37667c478bd9Sstevel@tonic-gate	sllx	%g1, CPU_CORE_SHIFT, %g1
37677c478bd9Sstevel@tonic-gate	add	%g1, %g2, %g1
37687c478bd9Sstevel@tonic-gate	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
37697c478bd9Sstevel@tonic-gate	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
37707c478bd9Sstevel@tonic-gate	bz	sfmmu_pagefault
37717c478bd9Sstevel@tonic-gate	or	%g2, CPU_DTRACE_BADADDR, %g2
37727c478bd9Sstevel@tonic-gate	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
37737c478bd9Sstevel@tonic-gate	GET_MMU_D_ADDR(%g3, %g4)
37747c478bd9Sstevel@tonic-gate	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
37757c478bd9Sstevel@tonic-gate	done
37767c478bd9Sstevel@tonic-gate
37777c478bd9Sstevel@tonic-gate3:
37787c478bd9Sstevel@tonic-gate	cmp	%g5, 1
37797c478bd9Sstevel@tonic-gate	ble,pt	%icc, 4f
37807c478bd9Sstevel@tonic-gate	  nop
37817c478bd9Sstevel@tonic-gate	TSTAT_CHECK_TL1(4f, %g1, %g2)
37827c478bd9Sstevel@tonic-gate	ba,pt	%icc, sfmmu_window_trap
37837c478bd9Sstevel@tonic-gate	  nop
37847c478bd9Sstevel@tonic-gate
37857c478bd9Sstevel@tonic-gate4:
37867c478bd9Sstevel@tonic-gate	/*
37877c478bd9Sstevel@tonic-gate	 * We are taking a pagefault on a non-kernel address.  If we are in
37887c478bd9Sstevel@tonic-gate	 * the kernel (e.g., due to a copyin()), we will check cpuc_dtrace_flags
37897c478bd9Sstevel@tonic-gate	 * and (if CPU_DTRACE_NOFAULT is set) will proceed as outlined above.
37907c478bd9Sstevel@tonic-gate	 */
37917c478bd9Sstevel@tonic-gate	CPU_INDEX(%g1, %g2)
37927c478bd9Sstevel@tonic-gate	set	cpu_core, %g2
37937c478bd9Sstevel@tonic-gate	sllx	%g1, CPU_CORE_SHIFT, %g1
37947c478bd9Sstevel@tonic-gate	add	%g1, %g2, %g1
37957c478bd9Sstevel@tonic-gate	lduh	[%g1 + CPUC_DTRACE_FLAGS], %g2
37967c478bd9Sstevel@tonic-gate	andcc	%g2, CPU_DTRACE_NOFAULT, %g0
379705d3dc4bSpaulsan	bz	sfmmu_mmu_trap
37987c478bd9Sstevel@tonic-gate	or	%g2, CPU_DTRACE_BADADDR, %g2
37997c478bd9Sstevel@tonic-gate	stuh	%g2, [%g1 + CPUC_DTRACE_FLAGS]
38007c478bd9Sstevel@tonic-gate	GET_MMU_D_ADDR(%g3, %g4)
38017c478bd9Sstevel@tonic-gate	stx	%g3, [%g1 + CPUC_DTRACE_ILLVAL]
38027c478bd9Sstevel@tonic-gate
38037c478bd9Sstevel@tonic-gate	/*
38047c478bd9Sstevel@tonic-gate	 * Be sure that we're actually taking this miss from the kernel --
38057c478bd9Sstevel@tonic-gate	 * otherwise we have managed to return to user-level with
38067c478bd9Sstevel@tonic-gate	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
38077c478bd9Sstevel@tonic-gate	 */
38087c478bd9Sstevel@tonic-gate	rdpr	%tstate, %g2
38097c478bd9Sstevel@tonic-gate	btst	TSTATE_PRIV, %g2
38107c478bd9Sstevel@tonic-gate	bz,a	ptl1_panic
38117c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
38127c478bd9Sstevel@tonic-gate	done
38137c478bd9Sstevel@tonic-gate
38147c478bd9Sstevel@tonic-gate	ALTENTRY(tsb_tl0_noctxt)
38157c478bd9Sstevel@tonic-gate	/*
38167c478bd9Sstevel@tonic-gate	 * If we have no context, check to see if CPU_DTRACE_NOFAULT is set;
38177c478bd9Sstevel@tonic-gate	 * if it is, indicated that we have faulted and issue a done.
38187c478bd9Sstevel@tonic-gate	 */
38197c478bd9Sstevel@tonic-gate	CPU_INDEX(%g5, %g6)
38207c478bd9Sstevel@tonic-gate	set	cpu_core, %g6
38217c478bd9Sstevel@tonic-gate	sllx	%g5, CPU_CORE_SHIFT, %g5
38227c478bd9Sstevel@tonic-gate	add	%g5, %g6, %g5
38237c478bd9Sstevel@tonic-gate	lduh	[%g5 + CPUC_DTRACE_FLAGS], %g6
38247c478bd9Sstevel@tonic-gate	andcc	%g6, CPU_DTRACE_NOFAULT, %g0
38257c478bd9Sstevel@tonic-gate	bz	1f
38267c478bd9Sstevel@tonic-gate	or	%g6, CPU_DTRACE_BADADDR, %g6
38277c478bd9Sstevel@tonic-gate	stuh	%g6, [%g5 + CPUC_DTRACE_FLAGS]
38287c478bd9Sstevel@tonic-gate	GET_MMU_D_ADDR(%g3, %g4)
38297c478bd9Sstevel@tonic-gate	stx	%g3, [%g5 + CPUC_DTRACE_ILLVAL]
38307c478bd9Sstevel@tonic-gate
38317c478bd9Sstevel@tonic-gate	/*
38327c478bd9Sstevel@tonic-gate	 * Be sure that we're actually taking this miss from the kernel --
38337c478bd9Sstevel@tonic-gate	 * otherwise we have managed to return to user-level with
38347c478bd9Sstevel@tonic-gate	 * CPU_DTRACE_NOFAULT set in cpuc_dtrace_flags.
38357c478bd9Sstevel@tonic-gate	 */
38367c478bd9Sstevel@tonic-gate	rdpr	%tstate, %g5
38377c478bd9Sstevel@tonic-gate	btst	TSTATE_PRIV, %g5
38387c478bd9Sstevel@tonic-gate	bz,a	ptl1_panic
38397c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_DTRACE_FLAGS, %g1
3840ac20c57dSjhaslam	TSTAT_CHECK_TL1(2f, %g1, %g2);
3841ac20c57dSjhaslam2:
38427c478bd9Sstevel@tonic-gate	done
38437c478bd9Sstevel@tonic-gate
38447c478bd9Sstevel@tonic-gate1:
38457c478bd9Sstevel@tonic-gate	rdpr	%tt, %g5
38467c478bd9Sstevel@tonic-gate	cmp	%g5, FAST_IMMU_MISS_TT
38477c478bd9Sstevel@tonic-gate#ifdef sun4v
38487c478bd9Sstevel@tonic-gate	MMU_FAULT_STATUS_AREA(%g2)
38497c478bd9Sstevel@tonic-gate	be,a,pt	%icc, 2f
38507c478bd9Sstevel@tonic-gate	  ldx	[%g2 + MMFSA_I_CTX], %g3
38517c478bd9Sstevel@tonic-gate	cmp	%g5, T_INSTR_MMU_MISS
38527c478bd9Sstevel@tonic-gate	be,a,pt	%icc, 2f
38537c478bd9Sstevel@tonic-gate	  ldx	[%g2 + MMFSA_I_CTX], %g3
38547c478bd9Sstevel@tonic-gate	ldx	[%g2 + MMFSA_D_CTX], %g3
38557c478bd9Sstevel@tonic-gate2:
38567c478bd9Sstevel@tonic-gate#else
38577c478bd9Sstevel@tonic-gate	mov	MMU_TAG_ACCESS, %g2
38587c478bd9Sstevel@tonic-gate	be,a,pt	%icc, 2f
38597c478bd9Sstevel@tonic-gate	  ldxa	[%g2]ASI_IMMU, %g3
38607c478bd9Sstevel@tonic-gate	ldxa	[%g2]ASI_DMMU, %g3
38617c478bd9Sstevel@tonic-gate2:	sllx	%g3, TAGACC_CTX_LSHIFT, %g3
38621426d65aSsm142603#endif /* sun4v */
38637c478bd9Sstevel@tonic-gate	brz,a,pn %g3, ptl1_panic		! panic if called for kernel
38647c478bd9Sstevel@tonic-gate	  mov	PTL1_BAD_CTX_STEAL, %g1		! since kernel ctx was stolen
38657c478bd9Sstevel@tonic-gate	rdpr	%tl, %g5
38667c478bd9Sstevel@tonic-gate	cmp	%g5, 1
38677c478bd9Sstevel@tonic-gate	ble,pt	%icc, sfmmu_mmu_trap
38687c478bd9Sstevel@tonic-gate	  nop
38697c478bd9Sstevel@tonic-gate	TSTAT_CHECK_TL1(sfmmu_mmu_trap, %g1, %g2)
38707c478bd9Sstevel@tonic-gate	ba,pt	%icc, sfmmu_window_trap
38717c478bd9Sstevel@tonic-gate	  nop
38727c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_tsb_miss)
38737c478bd9Sstevel@tonic-gate#endif  /* lint */
38747c478bd9Sstevel@tonic-gate
38757c478bd9Sstevel@tonic-gate#if defined (lint)
38767c478bd9Sstevel@tonic-gate/*
38777c478bd9Sstevel@tonic-gate * This routine will look for a user or kernel vaddr in the hash
38787c478bd9Sstevel@tonic-gate * structure.  It returns a valid pfn or PFN_INVALID.  It doesn't
38797c478bd9Sstevel@tonic-gate * grab any locks.  It should only be used by other sfmmu routines.
38807c478bd9Sstevel@tonic-gate */
38817c478bd9Sstevel@tonic-gate/* ARGSUSED */
38827c478bd9Sstevel@tonic-gatepfn_t
38837c478bd9Sstevel@tonic-gatesfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
38847c478bd9Sstevel@tonic-gate{
38857c478bd9Sstevel@tonic-gate	return(0);
38867c478bd9Sstevel@tonic-gate}
38877c478bd9Sstevel@tonic-gate
3888081a94b0Saguzovsk/* ARGSUSED */
3889081a94b0Saguzovskpfn_t
3890081a94b0Saguzovsksfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
3891081a94b0Saguzovsk{
3892081a94b0Saguzovsk	return(0);
3893081a94b0Saguzovsk}
3894081a94b0Saguzovsk
38957c478bd9Sstevel@tonic-gate#else /* lint */
38967c478bd9Sstevel@tonic-gate
38977c478bd9Sstevel@tonic-gate	ENTRY_NP(sfmmu_vatopfn)
38987c478bd9Sstevel@tonic-gate 	/*
38997c478bd9Sstevel@tonic-gate 	 * disable interrupts
39007c478bd9Sstevel@tonic-gate 	 */
39017c478bd9Sstevel@tonic-gate 	rdpr	%pstate, %o3
39027c478bd9Sstevel@tonic-gate#ifdef DEBUG
39031e2e7a75Shuah	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l5, %g1)
39047c478bd9Sstevel@tonic-gate#endif
39057c478bd9Sstevel@tonic-gate	/*
39067c478bd9Sstevel@tonic-gate	 * disable interrupts to protect the TSBMISS area
39077c478bd9Sstevel@tonic-gate	 */
39087c478bd9Sstevel@tonic-gate	andn    %o3, PSTATE_IE, %o5
39097c478bd9Sstevel@tonic-gate	wrpr    %o5, 0, %pstate
39107c478bd9Sstevel@tonic-gate
39117c478bd9Sstevel@tonic-gate	/*
39127c478bd9Sstevel@tonic-gate	 * o0 = vaddr
39137c478bd9Sstevel@tonic-gate	 * o1 = sfmmup
39147c478bd9Sstevel@tonic-gate	 * o2 = ttep
39157c478bd9Sstevel@tonic-gate	 */
39167c478bd9Sstevel@tonic-gate	CPU_TSBMISS_AREA(%g1, %o5)
39177c478bd9Sstevel@tonic-gate	ldn	[%g1 + TSBMISS_KHATID], %o4
39187c478bd9Sstevel@tonic-gate	cmp	%o4, %o1
39197c478bd9Sstevel@tonic-gate	bne,pn	%ncc, vatopfn_nokernel
39207c478bd9Sstevel@tonic-gate	  mov	TTE64K, %g5			/* g5 = rehash # */
39217c478bd9Sstevel@tonic-gate	mov %g1,%o5				/* o5 = tsbmiss_area */
39227c478bd9Sstevel@tonic-gate	/*
39237c478bd9Sstevel@tonic-gate	 * o0 = vaddr
39247c478bd9Sstevel@tonic-gate	 * o1 & o4 = hatid
39257c478bd9Sstevel@tonic-gate	 * o2 = ttep
39267c478bd9Sstevel@tonic-gate	 * o5 = tsbmiss area
39277c478bd9Sstevel@tonic-gate	 */
39287c478bd9Sstevel@tonic-gate	mov	HBLK_RANGE_SHIFT, %g6
39297c478bd9Sstevel@tonic-gate1:
39307c478bd9Sstevel@tonic-gate
39317c478bd9Sstevel@tonic-gate	/*
39327c478bd9Sstevel@tonic-gate	 * o0 = vaddr
39337c478bd9Sstevel@tonic-gate	 * o1 = sfmmup
39347c478bd9Sstevel@tonic-gate	 * o2 = ttep
39357c478bd9Sstevel@tonic-gate	 * o3 = old %pstate
39367c478bd9Sstevel@tonic-gate	 * o4 = hatid
39377c478bd9Sstevel@tonic-gate	 * o5 = tsbmiss
39387c478bd9Sstevel@tonic-gate	 * g5 = rehash #
39397c478bd9Sstevel@tonic-gate	 * g6 = hmeshift
39407c478bd9Sstevel@tonic-gate	 *
39417c478bd9Sstevel@tonic-gate	 * The first arg to GET_TTE is actually tagaccess register
39427c478bd9Sstevel@tonic-gate	 * not just vaddr. Since this call is for kernel we need to clear
39437c478bd9Sstevel@tonic-gate	 * any lower vaddr bits that would be interpreted as ctx bits.
39447c478bd9Sstevel@tonic-gate	 */
39457c478bd9Sstevel@tonic-gate	set     TAGACC_CTX_MASK, %g1
39467c478bd9Sstevel@tonic-gate	andn    %o0, %g1, %o0
39470a90a7fdSAmritpal Sandhu	GET_TTE(%o0, %o4, %g1, %g2, %o5, %g4, %g6, %g5, %g3,
39487c478bd9Sstevel@tonic-gate		vatopfn_l1, kvtop_hblk_found, tsb_suspend, kvtop_nohblk)
39497c478bd9Sstevel@tonic-gate
39507c478bd9Sstevel@tonic-gatekvtop_hblk_found:
39517c478bd9Sstevel@tonic-gate	/*
39527c478bd9Sstevel@tonic-gate	 * o0 = vaddr
39537c478bd9Sstevel@tonic-gate	 * o1 = sfmmup
39547c478bd9Sstevel@tonic-gate	 * o2 = ttep
39557c478bd9Sstevel@tonic-gate	 * g1 = tte
39567c478bd9Sstevel@tonic-gate	 * g2 = tte pa
39570a90a7fdSAmritpal Sandhu	 * g3 = scratch
39587c478bd9Sstevel@tonic-gate	 * o2 = tsbmiss area
39597c478bd9Sstevel@tonic-gate	 * o1 = hat id
39607c478bd9Sstevel@tonic-gate	 */
39617c478bd9Sstevel@tonic-gate	brgez,a,pn %g1, 6f			/* if tte invalid goto tl0 */
39627c478bd9Sstevel@tonic-gate	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
39637c478bd9Sstevel@tonic-gate	stx %g1,[%o2]				/* put tte into *ttep */
39647c478bd9Sstevel@tonic-gate	TTETOPFN(%g1, %o0, vatopfn_l2, %g2, %g3, %g4)
39657c478bd9Sstevel@tonic-gate	/*
39667c478bd9Sstevel@tonic-gate	 * o0 = vaddr
39677c478bd9Sstevel@tonic-gate	 * o1 = sfmmup
39687c478bd9Sstevel@tonic-gate	 * o2 = ttep
39697c478bd9Sstevel@tonic-gate	 * g1 = pfn
39707c478bd9Sstevel@tonic-gate	 */
39717c478bd9Sstevel@tonic-gate	ba,pt	%xcc, 6f
39727c478bd9Sstevel@tonic-gate	  mov	%g1, %o0
39737c478bd9Sstevel@tonic-gate
39747c478bd9Sstevel@tonic-gatekvtop_nohblk:
39757c478bd9Sstevel@tonic-gate	/*
39767c478bd9Sstevel@tonic-gate	 * we get here if we couldn't find valid hblk in hash.  We rehash
39777c478bd9Sstevel@tonic-gate	 * if neccesary.
39787c478bd9Sstevel@tonic-gate	 */
39797c478bd9Sstevel@tonic-gate	ldn	[%o5 + (TSBMISS_SCRATCH + TSB_TAGACC)], %o0
39807c478bd9Sstevel@tonic-gate#ifdef sun4v
39817c478bd9Sstevel@tonic-gate	cmp	%g5, MAX_HASHCNT
39827c478bd9Sstevel@tonic-gate#else
39837c478bd9Sstevel@tonic-gate	cmp	%g5, DEFAULT_MAX_HASHCNT	/* no 32/256M kernel pages */
39841426d65aSsm142603#endif /* sun4v */
39857c478bd9Sstevel@tonic-gate	be,a,pn	%icc, 6f
39867c478bd9Sstevel@tonic-gate	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
39877c478bd9Sstevel@tonic-gate	mov	%o1, %o4			/* restore hatid */
39887c478bd9Sstevel@tonic-gate#ifdef sun4v
39897c478bd9Sstevel@tonic-gate        add	%g5, 2, %g5
39907c478bd9Sstevel@tonic-gate	cmp	%g5, 3
39917c478bd9Sstevel@tonic-gate	move	%icc, MMU_PAGESHIFT4M, %g6
39927c478bd9Sstevel@tonic-gate	ba,pt	%icc, 1b
39937c478bd9Sstevel@tonic-gate	movne	%icc, MMU_PAGESHIFT256M, %g6
39947c478bd9Sstevel@tonic-gate#else
39957c478bd9Sstevel@tonic-gate        inc	%g5
39967c478bd9Sstevel@tonic-gate	cmp	%g5, 2
39977c478bd9Sstevel@tonic-gate	move	%icc, MMU_PAGESHIFT512K, %g6
39987c478bd9Sstevel@tonic-gate	ba,pt	%icc, 1b
39997c478bd9Sstevel@tonic-gate	movne	%icc, MMU_PAGESHIFT4M, %g6
40001426d65aSsm142603#endif /* sun4v */
40017c478bd9Sstevel@tonic-gate6:
40027c478bd9Sstevel@tonic-gate	retl
40037c478bd9Sstevel@tonic-gate 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
40047c478bd9Sstevel@tonic-gate
40057c478bd9Sstevel@tonic-gatetsb_suspend:
40067c478bd9Sstevel@tonic-gate	/*
40077c478bd9Sstevel@tonic-gate	 * o0 = vaddr
40087c478bd9Sstevel@tonic-gate	 * o1 = sfmmup
40097c478bd9Sstevel@tonic-gate	 * o2 = ttep
40107c478bd9Sstevel@tonic-gate	 * g1 = tte
40117c478bd9Sstevel@tonic-gate	 * g2 = tte pa
40127c478bd9Sstevel@tonic-gate	 * g3 = tte va
40137c478bd9Sstevel@tonic-gate	 * o2 = tsbmiss area  use o5 instead of o2 for tsbmiss
40147c478bd9Sstevel@tonic-gate	 */
40157c478bd9Sstevel@tonic-gate	stx %g1,[%o2]				/* put tte into *ttep */
40167c478bd9Sstevel@tonic-gate	brgez,a,pn %g1, 8f			/* if tte invalid goto 8: */
4017affff3f8Sjj204856	  sub	%g0, 1, %o0			/* output = PFN_INVALID */
40187c478bd9Sstevel@tonic-gate	sub	%g0, 2, %o0			/* output = PFN_SUSPENDED */
40197c478bd9Sstevel@tonic-gate8:
40207c478bd9Sstevel@tonic-gate	retl
40217c478bd9Sstevel@tonic-gate	 wrpr	%g0, %o3, %pstate		/* enable interrupts */
40227c478bd9Sstevel@tonic-gate
40237c478bd9Sstevel@tonic-gatevatopfn_nokernel:
40247c478bd9Sstevel@tonic-gate	/*
40257c478bd9Sstevel@tonic-gate	 * This routine does NOT support user addresses
40267c478bd9Sstevel@tonic-gate	 * There is a routine in C that supports this.
40277c478bd9Sstevel@tonic-gate	 * The only reason why we don't have the C routine
40287c478bd9Sstevel@tonic-gate	 * support kernel addresses as well is because
40297c478bd9Sstevel@tonic-gate	 * we do va_to_pa while holding the hashlock.
40307c478bd9Sstevel@tonic-gate	 */
40317c478bd9Sstevel@tonic-gate 	wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
40327c478bd9Sstevel@tonic-gate	save	%sp, -SA(MINFRAME), %sp
40337c478bd9Sstevel@tonic-gate	sethi	%hi(sfmmu_panic3), %o0
40347c478bd9Sstevel@tonic-gate	call	panic
40357c478bd9Sstevel@tonic-gate	 or	%o0, %lo(sfmmu_panic3), %o0
40367c478bd9Sstevel@tonic-gate
40377c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_vatopfn)
4038081a94b0Saguzovsk
4039081a94b0Saguzovsk	/*
4040081a94b0Saguzovsk	 * %o0 = vaddr
4041081a94b0Saguzovsk	 * %o1 = hashno (aka szc)
4042081a94b0Saguzovsk	 *
4043081a94b0Saguzovsk	 *
4044081a94b0Saguzovsk	 * This routine is similar to sfmmu_vatopfn() but will only look for
4045081a94b0Saguzovsk	 * a kernel vaddr in the hash structure for the specified rehash value.
4046081a94b0Saguzovsk	 * It's just an optimization for the case when pagesize for a given
4047081a94b0Saguzovsk	 * va range is already known (e.g. large page heap) and we don't want
4048081a94b0Saguzovsk	 * to start the search with rehash value 1 as sfmmu_vatopfn() does.
4049081a94b0Saguzovsk	 *
4050081a94b0Saguzovsk	 * Returns valid pfn or PFN_INVALID if
4051081a94b0Saguzovsk	 * tte for specified rehash # is not found, invalid or suspended.
4052081a94b0Saguzovsk	 */
4053081a94b0Saguzovsk	ENTRY_NP(sfmmu_kvaszc2pfn)
4054081a94b0Saguzovsk 	/*
4055081a94b0Saguzovsk 	 * disable interrupts
4056081a94b0Saguzovsk 	 */
4057081a94b0Saguzovsk 	rdpr	%pstate, %o3
4058081a94b0Saguzovsk#ifdef DEBUG
4059081a94b0Saguzovsk	PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
4060081a94b0Saguzovsk#endif
4061081a94b0Saguzovsk	/*
4062081a94b0Saguzovsk	 * disable interrupts to protect the TSBMISS area
4063081a94b0Saguzovsk	 */
4064081a94b0Saguzovsk	andn    %o3, PSTATE_IE, %o5
4065081a94b0Saguzovsk	wrpr    %o5, 0, %pstate
4066081a94b0Saguzovsk
4067081a94b0Saguzovsk	CPU_TSBMISS_AREA(%g1, %o5)
4068081a94b0Saguzovsk	ldn	[%g1 + TSBMISS_KHATID], %o4
4069081a94b0Saguzovsk	sll	%o1, 1, %g6
4070081a94b0Saguzovsk	add	%g6, %o1, %g6
4071081a94b0Saguzovsk	add	%g6, MMU_PAGESHIFT, %g6
4072081a94b0Saguzovsk	/*
4073081a94b0Saguzovsk	 * %o0 = vaddr
4074081a94b0Saguzovsk	 * %o1 = hashno
4075081a94b0Saguzovsk	 * %o3 = old %pstate
4076081a94b0Saguzovsk	 * %o4 = ksfmmup
4077081a94b0Saguzovsk	 * %g1 = tsbmiss area
4078081a94b0Saguzovsk	 * %g6 = hmeshift
4079081a94b0Saguzovsk	 */
4080081a94b0Saguzovsk
4081081a94b0Saguzovsk	/*
4082081a94b0Saguzovsk	 * The first arg to GET_TTE is actually tagaccess register
4083081a94b0Saguzovsk	 * not just vaddr. Since this call is for kernel we need to clear
4084081a94b0Saguzovsk	 * any lower vaddr bits that would be interpreted as ctx bits.
4085081a94b0Saguzovsk	 */
4086081a94b0Saguzovsk	srlx	%o0, MMU_PAGESHIFT, %o0
4087081a94b0Saguzovsk	sllx	%o0, MMU_PAGESHIFT, %o0
40880a90a7fdSAmritpal Sandhu	GET_TTE(%o0, %o4, %g3, %g4, %g1, %o5, %g6, %o1, %g5,
4089081a94b0Saguzovsk		kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
4090081a94b0Saguzovsk		kvaszc2pfn_nohblk)
4091081a94b0Saguzovsk
4092081a94b0Saguzovskkvaszc2pfn_hblk_found:
4093081a94b0Saguzovsk	/*
4094081a94b0Saguzovsk	 * %g3 = tte
4095081a94b0Saguzovsk	 * %o0 = vaddr
4096081a94b0Saguzovsk	 */
4097081a94b0Saguzovsk	brgez,a,pn %g3, 1f			/* check if tte is invalid */
4098081a94b0Saguzovsk	  mov	-1, %o0				/* output = -1 (PFN_INVALID) */
4099081a94b0Saguzovsk	TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
4100081a94b0Saguzovsk	/*
4101081a94b0Saguzovsk	 * g3 = pfn
4102081a94b0Saguzovsk	 */
4103081a94b0Saguzovsk	ba,pt	%xcc, 1f
4104081a94b0Saguzovsk	  mov	%g3, %o0
4105081a94b0Saguzovsk
4106081a94b0Saguzovskkvaszc2pfn_nohblk:
4107081a94b0Saguzovsk	mov	-1, %o0
4108081a94b0Saguzovsk
4109081a94b0Saguzovsk1:
4110081a94b0Saguzovsk	retl
4111081a94b0Saguzovsk 	  wrpr	%g0, %o3, %pstate		/* re-enable interrupts */
4112081a94b0Saguzovsk
4113081a94b0Saguzovsk	SET_SIZE(sfmmu_kvaszc2pfn)
4114081a94b0Saguzovsk
41157c478bd9Sstevel@tonic-gate#endif /* lint */
41167c478bd9Sstevel@tonic-gate
41177c478bd9Sstevel@tonic-gate
41187c478bd9Sstevel@tonic-gate
41197c478bd9Sstevel@tonic-gate#if !defined(lint)
41207c478bd9Sstevel@tonic-gate
41217c478bd9Sstevel@tonic-gate/*
41227c478bd9Sstevel@tonic-gate * kpm lock used between trap level tsbmiss handler and kpm C level.
41237c478bd9Sstevel@tonic-gate */
41247c478bd9Sstevel@tonic-gate#define KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi)			\
41257c478bd9Sstevel@tonic-gate	mov     0xff, tmp1						;\
41267c478bd9Sstevel@tonic-gatelabel1:									;\
41277c478bd9Sstevel@tonic-gate	casa    [kpmlckp]asi, %g0, tmp1					;\
41287c478bd9Sstevel@tonic-gate	brnz,pn tmp1, label1						;\
41297c478bd9Sstevel@tonic-gate	mov     0xff, tmp1						;\
41307c478bd9Sstevel@tonic-gate	membar  #LoadLoad
41317c478bd9Sstevel@tonic-gate
41327c478bd9Sstevel@tonic-gate#define KPMLOCK_EXIT(kpmlckp, asi)					\
41337c478bd9Sstevel@tonic-gate	membar  #LoadStore|#StoreStore					;\
41347c478bd9Sstevel@tonic-gate	sta     %g0, [kpmlckp]asi
41357c478bd9Sstevel@tonic-gate
41367c478bd9Sstevel@tonic-gate/*
41377c478bd9Sstevel@tonic-gate * Lookup a memseg for a given pfn and if found, return the physical
41387c478bd9Sstevel@tonic-gate * address of the corresponding struct memseg in mseg, otherwise
41397c478bd9Sstevel@tonic-gate * return MSEG_NULLPTR_PA. The kpmtsbm pointer must be provided in
41407c478bd9Sstevel@tonic-gate * tsbmp, %asi is assumed to be ASI_MEM.
41417c478bd9Sstevel@tonic-gate * This lookup is done by strictly traversing only the physical memseg
41427c478bd9Sstevel@tonic-gate * linkage. The more generic approach, to check the virtual linkage
41437c478bd9Sstevel@tonic-gate * before using the physical (used e.g. with hmehash buckets), cannot
41447c478bd9Sstevel@tonic-gate * be used here. Memory DR operations can run in parallel to this
41457c478bd9Sstevel@tonic-gate * lookup w/o any locks and updates of the physical and virtual linkage
41467c478bd9Sstevel@tonic-gate * cannot be done atomically wrt. to each other. Because physical
41477c478bd9Sstevel@tonic-gate * address zero can be valid physical address, MSEG_NULLPTR_PA acts
41487c478bd9Sstevel@tonic-gate * as "physical NULL" pointer.
41497c478bd9Sstevel@tonic-gate */
41507c478bd9Sstevel@tonic-gate#define	PAGE_NUM2MEMSEG_NOLOCK_PA(pfn, mseg, tsbmp, tmp1, tmp2, tmp3, label) \
41517c478bd9Sstevel@tonic-gate	sethi	%hi(mhash_per_slot), tmp3 /* no tsbmp use due to DR */	;\
41527c478bd9Sstevel@tonic-gate	ldx	[tmp3 + %lo(mhash_per_slot)], mseg			;\
41537c478bd9Sstevel@tonic-gate	udivx	pfn, mseg, mseg						;\
41547c478bd9Sstevel@tonic-gate	ldx	[tsbmp + KPMTSBM_MSEGPHASHPA], tmp1			;\
41557c478bd9Sstevel@tonic-gate	and	mseg, SFMMU_N_MEM_SLOTS - 1, mseg			;\
41567c478bd9Sstevel@tonic-gate	sllx	mseg, SFMMU_MEM_HASH_ENTRY_SHIFT, mseg			;\
41577c478bd9Sstevel@tonic-gate	add	tmp1, mseg, tmp1					;\
41587c478bd9Sstevel@tonic-gate	ldxa	[tmp1]%asi, mseg					;\
41597c478bd9Sstevel@tonic-gate	cmp	mseg, MSEG_NULLPTR_PA					;\
41607c478bd9Sstevel@tonic-gate	be,pn	%xcc, label/**/1		/* if not found */	;\
41617c478bd9Sstevel@tonic-gate	  nop								;\
41627c478bd9Sstevel@tonic-gate	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
41637c478bd9Sstevel@tonic-gate	cmp	pfn, tmp1			/* pfn - pages_base */	;\
41647c478bd9Sstevel@tonic-gate	blu,pn	%xcc, label/**/1					;\
41657c478bd9Sstevel@tonic-gate	  ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
41667c478bd9Sstevel@tonic-gate	cmp	pfn, tmp2			/* pfn - pages_end */	;\
41677c478bd9Sstevel@tonic-gate	bgeu,pn	%xcc, label/**/1					;\
41687c478bd9Sstevel@tonic-gate	  sub	pfn, tmp1, tmp1			/* pfn - pages_base */	;\
41697c478bd9Sstevel@tonic-gate	mulx	tmp1, PAGE_SIZE, tmp1					;\
41707c478bd9Sstevel@tonic-gate	ldxa	[mseg + MEMSEG_PAGESPA]%asi, tmp2	/* pages */	;\
41717c478bd9Sstevel@tonic-gate	add	tmp2, tmp1, tmp1			/* pp */	;\
41727c478bd9Sstevel@tonic-gate	lduwa	[tmp1 + PAGE_PAGENUM]%asi, tmp2				;\
41737c478bd9Sstevel@tonic-gate	cmp	tmp2, pfn						;\
41747c478bd9Sstevel@tonic-gate	be,pt	%xcc, label/**/_ok			/* found */	;\
41757c478bd9Sstevel@tonic-gatelabel/**/1:								;\
41767c478bd9Sstevel@tonic-gate	/* brute force lookup */					;\
41777c478bd9Sstevel@tonic-gate	sethi	%hi(memsegspa), tmp3 /* no tsbmp use due to DR */	;\
41787c478bd9Sstevel@tonic-gate	ldx	[tmp3 + %lo(memsegspa)], mseg				;\
41797c478bd9Sstevel@tonic-gatelabel/**/2:								;\
41807c478bd9Sstevel@tonic-gate	cmp	mseg, MSEG_NULLPTR_PA					;\
41817c478bd9Sstevel@tonic-gate	be,pn	%xcc, label/**/_ok		/* if not found */	;\
41827c478bd9Sstevel@tonic-gate	  nop								;\
41837c478bd9Sstevel@tonic-gate	ldxa	[mseg + MEMSEG_PAGES_BASE]%asi, tmp1			;\
41847c478bd9Sstevel@tonic-gate	cmp	pfn, tmp1			/* pfn - pages_base */	;\
41857c478bd9Sstevel@tonic-gate	blu,a,pt %xcc, label/**/2					;\
41867c478bd9Sstevel@tonic-gate	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
41877c478bd9Sstevel@tonic-gate	ldxa	[mseg + MEMSEG_PAGES_END]%asi, tmp2			;\
41887c478bd9Sstevel@tonic-gate	cmp	pfn, tmp2			/* pfn - pages_end */	;\
41897c478bd9Sstevel@tonic-gate	bgeu,a,pt %xcc, label/**/2					;\
41907c478bd9Sstevel@tonic-gate	  ldxa	[mseg + MEMSEG_NEXTPA]%asi, mseg			;\
41917c478bd9Sstevel@tonic-gatelabel/**/_ok:
41927c478bd9Sstevel@tonic-gate
	/*
	 * kpm tsb miss handler large pages
	 * g1 = 8K kpm TSB entry pointer
	 * g2 = tag access register
	 * g3 = 4M kpm TSB entry pointer
	 *
	 * Resolves the miss entirely at trap level when possible: branches
	 * to sfmmu_tsb_miss when the address is not a kpm address, and to
	 * the C handler sfmmu_kpm_exception for all conflict/error cases.
	 * On success a 4M TTE is dropped into the kpm TSB and the TLB.
	 */
	ALTENTRY(sfmmu_kpm_dtsb_miss)
	TT_TRACE(trace_tsbmiss)

	CPU_INDEX(%g7, %g6)
	sethi	%hi(kpmtsbm_area), %g6
	sllx	%g7, KPMTSBM_SHIFT, %g7
	or	%g6, %lo(kpmtsbm_area), %g6
	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */

	/* check enable flag */
	ldub	[%g6 + KPMTSBM_FLAGS], %g4
	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
	  nop

	/* VA range check */
	ldx	[%g6 + KPMTSBM_VBASE], %g7
	cmp	%g2, %g7
	blu,pn	%xcc, sfmmu_tsb_miss
	  ldx	[%g6 + KPMTSBM_VEND], %g5
	cmp	%g2, %g5
	bgeu,pn	%xcc, sfmmu_tsb_miss
	  stx	%g3, [%g6 + KPMTSBM_TSBPTR]	/* save 4M kpm TSB pointer */

	/*
	 * check TL tsbmiss handling flag
	 * bump tsbmiss counter
	 */
	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
#ifdef	DEBUG
	and	%g4, KPMTSBM_TLTSBM_FLAG, %g3
	inc	%g5
	brz,pn	%g3, sfmmu_kpm_exception
	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
#else
	inc	%g5
	st	%g5, [%g6 + KPMTSBM_TSBMISS]
#endif
	/*
	 * At this point:
	 *  g1 = 8K kpm TSB pointer (not used)
	 *  g2 = tag access register
	 *  g3 = clobbered
	 *  g6 = per-CPU kpm tsbmiss area
	 *  g7 = kpm_vbase
	 */

	/* vaddr2pfn */
	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3
	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
	srax    %g4, %g3, %g2			/* which alias range (r) */
	brnz,pn	%g2, sfmmu_kpm_exception	/* if (r != 0) goto C handler */
	  srlx	%g4, MMU_PAGESHIFT, %g2		/* %g2 = pfn */

	/*
	 * Setup %asi
	 * mseg_pa = page_numtomemseg_nolock(pfn)
	 * if (mseg_pa == NULL) sfmmu_kpm_exception
	 * g2=pfn
	 */
	mov	ASI_MEM, %asi
	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmp2m)
	cmp	%g3, MSEG_NULLPTR_PA
	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
	  nop

	/*
	 * inx = ptokpmp((kpmptop((ptopkpmp(pfn))) - mseg_pa->kpm_pbase));
	 * g2=pfn g3=mseg_pa
	 */
	ldub	[%g6 + KPMTSBM_KPMP2PSHFT], %g5
	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
	srlx	%g2, %g5, %g4			/* ptopkpmp(pfn) */
	sllx	%g4, %g5, %g4			/* kpmptop() */
	sub	%g4, %g7, %g4			/* - kpm_pbase */
	srlx	%g4, %g5, %g4			/* ptokpmp() */

	/*
	 * Validate inx value
	 * g2=pfn g3=mseg_pa g4=inx
	 */
#ifdef	DEBUG
	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
	cmp	%g4, %g5			/* inx - nkpmpgs */
	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
#else
	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
#endif
	/*
	 * kp = &mseg_pa->kpm_pages[inx]
	 */
	sllx	%g4, KPMPAGE_SHIFT, %g4		/* kpm_pages offset */
	ldxa	[%g3 + MEMSEG_KPM_PAGES]%asi, %g5 /* kpm_pages */
	add	%g5, %g4, %g5			/* kp */

	/*
	 * KPMP_HASH(kp)
	 * g2=pfn g3=mseg_pa g4=offset g5=kp g7=kpmp_table_sz
	 */
	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
	sub	%g7, 1, %g7			/* mask */
	srlx	%g5, %g1, %g1			/* x = ksp >> kpmp_shift */
	add	%g5, %g1, %g5			/* y = ksp + x */
	and 	%g5, %g7, %g5			/* hashinx = y & mask */

	/*
	 * Calculate physical kpm_page pointer
	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
	 */
	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_pagespa */
	add	%g1, %g4, %g1			/* kp_pa */

	/*
	 * Calculate physical hash lock address
	 * g1=kp_refcntc_pa g2=pfn g5=hashinx
	 */
	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_tablepa */
	sllx	%g5, KPMHLK_SHIFT, %g5
	add	%g4, %g5, %g3
	add	%g3, KPMHLK_LOCK, %g3		/* hlck_pa */

	/*
	 * Assemble tte
	 * g1=kp_pa g2=pfn g3=hlck_pa
	 */
#ifdef sun4v
	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
	sllx	%g5, 32, %g5
	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
	or	%g4, TTE4M, %g4
	or	%g5, %g4, %g5
#else
	sethi	%hi(TTE_VALID_INT), %g4
	mov	TTE4M, %g5
	sllx	%g5, TTE_SZ_SHFT_INT, %g5
	or	%g5, %g4, %g5			/* upper part */
	sllx	%g5, 32, %g5
	mov	(TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
	or	%g5, %g4, %g5
#endif
	sllx	%g2, MMU_PAGESHIFT, %g4
	or	%g5, %g4, %g5			/* tte */
	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */

	/*
	 * tsb dropin
	 * g1=kp_pa g2=ttarget g3=hlck_pa g4=kpmtsbp4m g5=tte g6=kpmtsbm_area
	 */

	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
	KPMLOCK_ENTER(%g3, %g7, kpmtsbmhdlr1, ASI_MEM)

	/* use C-handler if there's no go for dropin */
	ldsha	[%g1 + KPMPAGE_REFCNTC]%asi, %g7 /* kp_refcntc */
	cmp	%g7, -1		/* -1 is the TL "go", set by sfmmu_kpm_tsbmtl */
	bne,pn	%xcc, 5f	/* use C-handler if there's no go for dropin */
	  nop

#ifdef	DEBUG
	/* double check refcnt */
	ldsha	[%g1 + KPMPAGE_REFCNT]%asi, %g7
	brz,pn	%g7, 5f			/* let C-handler deal with this */
	  nop
#endif

#ifndef sun4v
	/* select %asi for TSB access: ASI_MEM if the TSB is physical */
	ldub	[%g6 + KPMTSBM_FLAGS], %g7
	mov	ASI_N, %g1
	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
	movnz	%icc, ASI_MEM, %g1
	mov	%g1, %asi
#endif

	/*
	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
	 * If we fail to lock the TSB entry then just load the tte into the
	 * TLB.
	 */
	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l1)

	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
locked_tsb_l1:
	DTLB_STUFF(%g5, %g1, %g2, %g4, %g6)

	/* KPMLOCK_EXIT(kpmlckp, asi) */
	KPMLOCK_EXIT(%g3, ASI_MEM)

	/*
	 * If trapstat is running, we need to shift the %tpc and %tnpc to
	 * point to trapstat's TSB miss return code (note that trapstat
	 * itself will patch the correct offset to add).
	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
	 */
	rdpr	%tl, %g7
	cmp	%g7, 1
	ble	%icc, 0f
	sethi	%hi(KERNELBASE), %g6
	rdpr	%tpc, %g7
	or	%g6, %lo(KERNELBASE), %g6
	cmp	%g7, %g6
	bgeu	%xcc, 0f
	ALTENTRY(tsbmiss_trapstat_patch_point_kpm)
	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
	wrpr	%g7, %tpc
	add	%g7, 4, %g7
	wrpr	%g7, %tnpc
0:
	retry
5:
	/* no-go: drop the hash lock and let the C handler finish */
	/* g3=hlck_pa */
	KPMLOCK_EXIT(%g3, ASI_MEM)
	ba,pt	%icc, sfmmu_kpm_exception
	  nop
	SET_SIZE(sfmmu_kpm_dtsb_miss)
44167c478bd9Sstevel@tonic-gate
	/*
	 * kpm tsbmiss handler for smallpages
	 * g1 = 8K kpm TSB pointer
	 * g2 = tag access register
	 * g3 = 4M kpm TSB pointer
	 *
	 * Small-page variant of sfmmu_kpm_dtsb_miss: drops an 8K TTE
	 * (cacheable only if the kpm_spage state is KPM_MAPPEDS) into the
	 * 8K kpm TSB and the TLB.  Bails out to sfmmu_tsb_miss (not a kpm
	 * address) or to the C handler sfmmu_kpm_exception.
	 */
	ALTENTRY(sfmmu_kpm_dtsb_miss_small)
	TT_TRACE(trace_tsbmiss)
	CPU_INDEX(%g7, %g6)
	sethi	%hi(kpmtsbm_area), %g6
	sllx	%g7, KPMTSBM_SHIFT, %g7
	or	%g6, %lo(kpmtsbm_area), %g6
	add	%g6, %g7, %g6			/* g6 = kpmtsbm ptr */

	/* check enable flag */
	ldub	[%g6 + KPMTSBM_FLAGS], %g4
	and	%g4, KPMTSBM_ENABLE_FLAG, %g5
	brz,pn	%g5, sfmmu_tsb_miss		/* if kpm not enabled */
	  nop

	/*
	 * VA range check
	 * On fail: goto sfmmu_tsb_miss
	 */
	ldx	[%g6 + KPMTSBM_VBASE], %g7
	cmp	%g2, %g7
	blu,pn	%xcc, sfmmu_tsb_miss
	  ldx	[%g6 + KPMTSBM_VEND], %g5
	cmp	%g2, %g5
	bgeu,pn	%xcc, sfmmu_tsb_miss
	  stx	%g1, [%g6 + KPMTSBM_TSBPTR]	/* save 8K kpm TSB pointer */

	/*
	 * check TL tsbmiss handling flag
	 * bump tsbmiss counter
	 */
	lduw	[%g6 + KPMTSBM_TSBMISS], %g5
#ifdef	DEBUG
	and	%g4, KPMTSBM_TLTSBM_FLAG, %g1
	inc	%g5
	brz,pn	%g1, sfmmu_kpm_exception
	  st	%g5, [%g6 + KPMTSBM_TSBMISS]
#else
	inc	%g5
	st	%g5, [%g6 + KPMTSBM_TSBMISS]
#endif
	/*
	 * At this point:
	 *  g1 = clobbered
	 *  g2 = tag access register
	 *  g3 = 4M kpm TSB pointer (not used)
	 *  g6 = per-CPU kpm tsbmiss area
	 *  g7 = kpm_vbase
	 */

	/*
	 * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
	 * which is defined in mach_kpm.h. Any changes in that macro
	 * should also be ported back to this assembly code.
	 */
	ldub	[%g6 + KPMTSBM_SZSHIFT], %g3	/* g3 = kpm_size_shift */
	sub	%g2, %g7, %g4			/* paddr = vaddr-kpm_vbase */
	srax    %g4, %g3, %g7			/* which alias range (r) */
	brz,pt	%g7, 2f
	  sethi   %hi(vac_colors_mask), %g5
	ld	[%g5 + %lo(vac_colors_mask)], %g5

	srlx	%g2, MMU_PAGESHIFT, %g1		/* vaddr >> MMU_PAGESHIFT */
	and	%g1, %g5, %g1			/* g1 = v */
	sllx	%g7, %g3, %g5			/* g5 = r << kpm_size_shift */
	cmp	%g7, %g1			/* if (r > v) */
	bleu,pn %xcc, 1f
	  sub   %g4, %g5, %g4			/* paddr -= r << kpm_size_shift */
	sub	%g7, %g1, %g5			/* g5 = r - v */
	sllx	%g5, MMU_PAGESHIFT, %g7		/* (r-v) << MMU_PAGESHIFT */
	add	%g4, %g7, %g4			/* paddr += (r-v)<<MMU_PAGESHIFT */
	ba	2f
	  nop
1:
	sllx	%g7, MMU_PAGESHIFT, %g5		/* else */
	sub	%g4, %g5, %g4			/* paddr -= r << MMU_PAGESHIFT */

	/*
	 * paddr2pfn
	 *  g1 = vcolor (not used)
	 *  g2 = tag access register
	 *  g3 = clobbered
	 *  g4 = paddr
	 *  g5 = clobbered
	 *  g6 = per-CPU kpm tsbmiss area
	 *  g7 = clobbered
	 */
2:
	srlx	%g4, MMU_PAGESHIFT, %g2		/* g2 = pfn */

	/*
	 * Setup %asi
	 * mseg_pa = page_numtomemseg_nolock_pa(pfn)
	 * if (mseg not found) sfmmu_kpm_exception
	 * g2=pfn g6=per-CPU kpm tsbmiss area
	 * g4 g5 g7 for scratch use.
	 */
	mov	ASI_MEM, %asi
	PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
	cmp	%g3, MSEG_NULLPTR_PA
	be,pn	%xcc, sfmmu_kpm_exception	/* if mseg not found */
	  nop

	/*
	 * inx = pfn - mseg_pa->kpm_pbase
	 * g2=pfn  g3=mseg_pa  g6=per-CPU kpm tsbmiss area
	 */
	ldxa	[%g3 + MEMSEG_KPM_PBASE]%asi, %g7
	sub	%g2, %g7, %g4

#ifdef	DEBUG
	/*
	 * Validate inx value
	 * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
	 */
	ldxa	[%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
	cmp	%g4, %g5			/* inx - nkpmpgs */
	bgeu,pn	%xcc, sfmmu_kpm_exception	/* if out of range */
	  ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
#else
	ld	[%g6 + KPMTSBM_KPMPTABLESZ], %g7
#endif
	/* ksp = &mseg_pa->kpm_spages[inx] */
	ldxa	[%g3 + MEMSEG_KPM_SPAGES]%asi, %g5
	add	%g5, %g4, %g5			/* ksp */

	/*
	 * KPMP_SHASH(kp)
	 * Note: uses sllx here where KPMP_HASH above uses srlx; this
	 * mirrors the C KPMP_SHASH macro (confirm against the kpm header).
	 * g2=pfn g3=mseg_pa g4=inx g5=ksp
	 * g6=per-CPU kpm tsbmiss area  g7=kpmp_stable_sz
	 */
	ldub	[%g6 + KPMTSBM_KPMPSHIFT], %g1	/* kpmp_shift */
	sub	%g7, 1, %g7			/* mask */
	sllx	%g5, %g1, %g1			/* x = ksp << kpmp_shift */
	add	%g5, %g1, %g5			/* y = ksp + x */
	and 	%g5, %g7, %g5			/* hashinx = y & mask */

	/*
	 * Calculate physical kpm_spage pointer
	 * g2=pfn g3=mseg_pa g4=offset g5=hashinx
	 * g6=per-CPU kpm tsbmiss area
	 */
	ldxa	[%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
	add	%g1, %g4, %g1			/* ksp_pa */

	/*
	 * Calculate physical hash lock address.
	 * Note: Changes in kpm_shlk_t must be reflected here.
	 * g1=ksp_pa g2=pfn g5=hashinx
	 * g6=per-CPU kpm tsbmiss area
	 */
	ldx	[%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
	sllx	%g5, KPMSHLK_SHIFT, %g5
	add	%g4, %g5, %g3			/* hlck_pa */

	/*
	 * Assemble non-cacheable tte initially
	 * (TTE_CV_INT is or'ed in below only for the KPM_MAPPEDS case)
	 * g1=ksp_pa g2=pfn g3=hlck_pa
	 * g6=per-CPU kpm tsbmiss area
	 */
	sethi	%hi(TTE_VALID_INT), %g5		/* upper part */
	sllx	%g5, 32, %g5
	mov	(TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
	or	%g5, %g4, %g5
	sllx	%g2, MMU_PAGESHIFT, %g4
	or	%g5, %g4, %g5			/* tte */
	ldx	[%g6 + KPMTSBM_TSBPTR], %g4
	GET_MMU_D_TTARGET(%g2, %g7)		/* %g2 = ttarget */

	/*
	 * tsb dropin
	 * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
	 * g6=per-CPU kpm tsbmiss area  g7=scratch register
	 */

	/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
	KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)

	/* use C-handler if there's no go for dropin */
	ldsba	[%g1 + KPMSPAGE_MAPPED]%asi, %g7	/* kp_mapped */
	andcc	%g7, KPM_MAPPED_GO, %g0			/* go or no go ? */
	bz,pt	%icc, 5f				/* no go */
	  nop
	and	%g7, KPM_MAPPED_MASK, %g7		/* go */
	cmp	%g7, KPM_MAPPEDS			/* cacheable ? */
	be,a,pn	%xcc, 3f
	  or	%g5, TTE_CV_INT, %g5			/* cacheable */
3:
#ifndef sun4v
	/* select %asi for TSB access: ASI_MEM if the TSB is physical */
	ldub	[%g6 + KPMTSBM_FLAGS], %g7
	mov	ASI_N, %g1
	andcc	%g7, KPMTSBM_TSBPHYS_FLAG, %g0
	movnz	%icc, ASI_MEM, %g1
	mov	%g1, %asi
#endif

	/*
	 * TSB_LOCK_ENTRY(tsbp, tmp1, tmp2, label) (needs %asi set)
	 * If we fail to lock the TSB entry then just load the tte into the
	 * TLB.
	 */
	TSB_LOCK_ENTRY(%g4, %g1, %g7, locked_tsb_l2)

	/* TSB_INSERT_UNLOCK_ENTRY(tsbp, tte, tagtarget, tmp) */
	TSB_INSERT_UNLOCK_ENTRY(%g4, %g5, %g2, %g7)
locked_tsb_l2:
	DTLB_STUFF(%g5, %g2, %g4, %g5, %g6)

	/* KPMLOCK_EXIT(kpmlckp, asi) */
	KPMLOCK_EXIT(%g3, ASI_MEM)

	/*
	 * If trapstat is running, we need to shift the %tpc and %tnpc to
	 * point to trapstat's TSB miss return code (note that trapstat
	 * itself will patch the correct offset to add).
	 * Note: TTE is expected in %g5 (allows per pagesize reporting).
	 */
	rdpr	%tl, %g7
	cmp	%g7, 1
	ble	%icc, 0f
	sethi	%hi(KERNELBASE), %g6
	rdpr	%tpc, %g7
	or	%g6, %lo(KERNELBASE), %g6
	cmp	%g7, %g6
	bgeu	%xcc, 0f
	ALTENTRY(tsbmiss_trapstat_patch_point_kpm_small)
	add	%g7, RUNTIME_PATCH, %g7	/* must match TSTAT_TSBMISS_INSTR */
	wrpr	%g7, %tpc
	add	%g7, 4, %g7
	wrpr	%g7, %tnpc
0:
	retry
5:
	/* no-go: drop the hash lock and let the C handler finish */
	/* g3=hlck_pa */
	KPMLOCK_EXIT(%g3, ASI_MEM)
	ba,pt	%icc, sfmmu_kpm_exception
	  nop
	SET_SIZE(sfmmu_kpm_dtsb_miss_small)
46607c478bd9Sstevel@tonic-gate
46617c478bd9Sstevel@tonic-gate#if (1<< KPMTSBM_SHIFT) != KPMTSBM_SIZE
46627c478bd9Sstevel@tonic-gate#error - KPMTSBM_SHIFT does not correspond to size of kpmtsbm struct
46637c478bd9Sstevel@tonic-gate#endif
46647c478bd9Sstevel@tonic-gate
46657c478bd9Sstevel@tonic-gate#endif /* lint */
46667c478bd9Sstevel@tonic-gate
46677c478bd9Sstevel@tonic-gate#ifdef	lint
/*
 * Enable/disable tsbmiss handling at trap level for a kpm (large) page.
 * Called from C-level, sets/clears "go" indication for trap level handler.
 * khl_lock is a low level spin lock to protect the kp_tsbmtl field.
 * Assumed that &kp->kp_refcntc is checked for zero or -1 at C-level.
 * Assumes khl_mutex is held when called from C-level.
 *
 * Lint stub only; the real implementation is the assembly routine of
 * the same name in the non-lint section of this file.
 */
/* ARGSUSED */
void
sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
{
}
46807c478bd9Sstevel@tonic-gate
/*
 * kpm_smallpages: stores val to byte at address mapped within
 * low level lock brackets. The old value (masked with
 * KPM_MAPPED_MASK) is returned.
 * Called from C-level.
 *
 * Lint stub only; the real implementation is the assembly routine of
 * the same name in the non-lint section of this file.
 */
/* ARGSUSED */
int
sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
{
	return (0);
}
46927c478bd9Sstevel@tonic-gate
46937c478bd9Sstevel@tonic-gate#else /* lint */
46947c478bd9Sstevel@tonic-gate
	.seg	".data"
	/* panic() message strings for the DEBUG interrupt checks below */
sfmmu_kpm_tsbmtl_panic:
	.ascii	"sfmmu_kpm_tsbmtl: interrupts disabled"
	.byte	0
sfmmu_kpm_stsbmtl_panic:
	.ascii	"sfmmu_kpm_stsbmtl: interrupts disabled"
	.byte	0
	.align	4
	.seg	".text"
47047c478bd9Sstevel@tonic-gate
	/*
	 * void sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
	 *
	 * Under the khl_lock low-level spin lock and with interrupts
	 * disabled, store -1 (cmd != 0, "go") or 0 (cmd == 0, "no go")
	 * into *kp_refcntc.  The value is read at trap level by
	 * sfmmu_kpm_dtsb_miss.
	 */
	ENTRY_NP(sfmmu_kpm_tsbmtl)
	rdpr	%pstate, %o3
	/*
	 * %o0 = &kp_refcntc
	 * %o1 = &khl_lock
	 * %o2 = 0/1 (off/on)
	 * %o3 = pstate save
	 */
#ifdef DEBUG
	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt %icc, 1f				/* disabled, panic	 */
	  nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(sfmmu_kpm_tsbmtl_panic), %o0
	call	panic
	 or	%o0, %lo(sfmmu_kpm_tsbmtl_panic), %o0
	ret
	restore
1:
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */

	KPMLOCK_ENTER(%o1, %o4, kpmtsbmtl1, ASI_N)
	mov	-1, %o5				/* "on": kp_refcntc = -1 */
	brz,a	%o2, 2f
	  mov	0, %o5				/* "off": kp_refcntc = 0 */
2:
	sth	%o5, [%o0]
	KPMLOCK_EXIT(%o1, ASI_N)

	retl
	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_kpm_tsbmtl)
47387c478bd9Sstevel@tonic-gate
	/*
	 * int sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
	 *
	 * Under the kshl_lock low-level spin lock and with interrupts
	 * disabled, store val to the byte *mapped and return the previous
	 * value masked with KPM_MAPPED_MASK.
	 */
	ENTRY_NP(sfmmu_kpm_stsbmtl)
	rdpr	%pstate, %o3
	/*
	 * %o0 = &mapped
	 * %o1 = &kshl_lock
	 * %o2 = val
	 * %o3 = pstate save
	 */
#ifdef DEBUG
	andcc	%o3, PSTATE_IE, %g0		/* if interrupts already */
	bnz,pt %icc, 1f				/* disabled, panic	 */
	  nop
	save	%sp, -SA(MINFRAME), %sp
	sethi	%hi(sfmmu_kpm_stsbmtl_panic), %o0
	call	panic
	  or	%o0, %lo(sfmmu_kpm_stsbmtl_panic), %o0
	ret
	restore
1:
#endif /* DEBUG */
	wrpr	%o3, PSTATE_IE, %pstate		/* disable interrupts */

	KPMLOCK_ENTER(%o1, %o4, kpmstsbmtl1, ASI_N)
	ldsb	[%o0], %o5			/* old value */
	stb	%o2, [%o0]			/* store new value */
	KPMLOCK_EXIT(%o1, ASI_N)

	and	%o5, KPM_MAPPED_MASK, %o0	/* return old val */
	retl
	  wrpr	%g0, %o3, %pstate		/* enable interrupts */
	SET_SIZE(sfmmu_kpm_stsbmtl)
47707c478bd9Sstevel@tonic-gate
47717c478bd9Sstevel@tonic-gate#endif /* lint */
47727c478bd9Sstevel@tonic-gate
47737c478bd9Sstevel@tonic-gate#ifndef lint
47747c478bd9Sstevel@tonic-gate#ifdef sun4v
47757c478bd9Sstevel@tonic-gate	/*
47767c478bd9Sstevel@tonic-gate	 * User/kernel data miss w// multiple TSBs
47777c478bd9Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
47787c478bd9Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off 8K
47797c478bd9Sstevel@tonic-gate	 * pointer.  Second probe covers 4M page size only.
47807c478bd9Sstevel@tonic-gate	 *
47817c478bd9Sstevel@tonic-gate	 * MMU fault area contains miss address and context.
47827c478bd9Sstevel@tonic-gate	 */
47837c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_slow_dmmu_miss)
478460972f37Sjb145095	GET_MMU_D_PTAGACC_CTXTYPE(%g2, %g3)	! %g2 = ptagacc, %g3 = ctx type
47857c478bd9Sstevel@tonic-gate
47867c478bd9Sstevel@tonic-gateslow_miss_common:
47877c478bd9Sstevel@tonic-gate	/*
47887c478bd9Sstevel@tonic-gate	 *  %g2 = tagacc register (needed for sfmmu_tsb_miss_tt)
47897c478bd9Sstevel@tonic-gate	 *  %g3 = ctx (cannot be INVALID_CONTEXT)
47907c478bd9Sstevel@tonic-gate	 */
47917c478bd9Sstevel@tonic-gate	brnz,pt	%g3, 8f			! check for user context
47927c478bd9Sstevel@tonic-gate	  nop
47937c478bd9Sstevel@tonic-gate
47947c478bd9Sstevel@tonic-gate	/*
47957c478bd9Sstevel@tonic-gate	 * Kernel miss
47967c478bd9Sstevel@tonic-gate	 * Get 8K and 4M TSB pointers in %g1 and %g3 and
47977c478bd9Sstevel@tonic-gate	 * branch to sfmmu_tsb_miss_tt to handle it.
47987c478bd9Sstevel@tonic-gate	 */
47997c478bd9Sstevel@tonic-gate	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
48007c478bd9Sstevel@tonic-gatesfmmu_dslow_patch_ktsb_base:
48017c478bd9Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g1, %g6)	! %g1 = contents of ktsb_pbase
48027c478bd9Sstevel@tonic-gatesfmmu_dslow_patch_ktsb_szcode:
48037c478bd9Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g3	! ktsb_szcode (hot patched)
48047c478bd9Sstevel@tonic-gate
48057c478bd9Sstevel@tonic-gate	GET_TSBE_POINTER(MMU_PAGESHIFT, %g1, %g7, %g3, %g5)
48067c478bd9Sstevel@tonic-gate	! %g1 = First TSB entry pointer, as TSB miss handler expects
48077c478bd9Sstevel@tonic-gate
48087c478bd9Sstevel@tonic-gate	mov	%g2, %g7		! TSB pointer macro clobbers tagacc
48097c478bd9Sstevel@tonic-gatesfmmu_dslow_patch_ktsb4m_base:
48107c478bd9Sstevel@tonic-gate	RUNTIME_PATCH_SETX(%g3, %g6)	! %g3 = contents of ktsb4m_pbase
48117c478bd9Sstevel@tonic-gatesfmmu_dslow_patch_ktsb4m_szcode:
48127c478bd9Sstevel@tonic-gate	or	%g0, RUNTIME_PATCH, %g6	! ktsb4m_szcode (hot patched)
48137c478bd9Sstevel@tonic-gate
48147c478bd9Sstevel@tonic-gate	GET_TSBE_POINTER(MMU_PAGESHIFT4M, %g3, %g7, %g6, %g5)
48157c478bd9Sstevel@tonic-gate	! %g3 = 4M tsb entry pointer, as TSB miss handler expects
48167c478bd9Sstevel@tonic-gate	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
48177c478bd9Sstevel@tonic-gate	.empty
48187c478bd9Sstevel@tonic-gate
48197c478bd9Sstevel@tonic-gate8:
48207c478bd9Sstevel@tonic-gate	/*
48217c478bd9Sstevel@tonic-gate	 * User miss
48227c478bd9Sstevel@tonic-gate	 * Get first TSB pointer in %g1
48237c478bd9Sstevel@tonic-gate	 * Get second TSB pointer (or NULL if no second TSB) in %g3
48247c478bd9Sstevel@tonic-gate	 * Branch to sfmmu_tsb_miss_tt to handle it
48257c478bd9Sstevel@tonic-gate	 */
48267c478bd9Sstevel@tonic-gate	GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
48277c478bd9Sstevel@tonic-gate	/* %g1 = first TSB entry ptr now, %g2 preserved */
48287c478bd9Sstevel@tonic-gate
48297c478bd9Sstevel@tonic-gate	GET_UTSBREG(SCRATCHPAD_UTSBREG2, %g3)	/* get 2nd utsbreg */
483005d3dc4bSpaulsan	brlz,pt %g3, sfmmu_tsb_miss_tt		/* done if no 2nd TSB */
483105d3dc4bSpaulsan	  nop
48327c478bd9Sstevel@tonic-gate
48337c478bd9Sstevel@tonic-gate	GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
48347c478bd9Sstevel@tonic-gate	/* %g3 = second TSB entry ptr now, %g2 preserved */
48357c478bd9Sstevel@tonic-gate9:
48367c478bd9Sstevel@tonic-gate	ba,a,pt	%xcc, sfmmu_tsb_miss_tt
48377c478bd9Sstevel@tonic-gate	.empty
48387c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_slow_dmmu_miss)
48397c478bd9Sstevel@tonic-gate
48407c478bd9Sstevel@tonic-gate
48417c478bd9Sstevel@tonic-gate	/*
48427c478bd9Sstevel@tonic-gate	 * User/kernel instruction miss w/ multiple TSBs
48437c478bd9Sstevel@tonic-gate	 * The first probe covers 8K, 64K, and 512K page sizes,
48447c478bd9Sstevel@tonic-gate	 * because 64K and 512K mappings are replicated off 8K
48457c478bd9Sstevel@tonic-gate	 * pointer.  Second probe covers 4M page size only.
48467c478bd9Sstevel@tonic-gate	 *
48477c478bd9Sstevel@tonic-gate	 * MMU fault area contains miss address and context.
48487c478bd9Sstevel@tonic-gate	 */
48497c478bd9Sstevel@tonic-gate	ALTENTRY(sfmmu_slow_immu_miss)
485060972f37Sjb145095	GET_MMU_I_PTAGACC_CTXTYPE(%g2, %g3)
485160972f37Sjb145095	ba,a,pt	%xcc, slow_miss_common
48527c478bd9Sstevel@tonic-gate	SET_SIZE(sfmmu_slow_immu_miss)
48537c478bd9Sstevel@tonic-gate
48547c478bd9Sstevel@tonic-gate#endif /* sun4v */
48557c478bd9Sstevel@tonic-gate#endif	/* lint */
48567c478bd9Sstevel@tonic-gate
48577c478bd9Sstevel@tonic-gate#ifndef lint
48587c478bd9Sstevel@tonic-gate
48597c478bd9Sstevel@tonic-gate/*
48607c478bd9Sstevel@tonic-gate * Per-CPU tsbmiss areas to avoid cache misses in TSB miss handlers.
48617c478bd9Sstevel@tonic-gate */
48627c478bd9Sstevel@tonic-gate	.seg	".data"
48637c478bd9Sstevel@tonic-gate	.align	64
48647c478bd9Sstevel@tonic-gate	.global tsbmiss_area
48657c478bd9Sstevel@tonic-gatetsbmiss_area:
48667c478bd9Sstevel@tonic-gate	.skip	(TSBMISS_SIZE * NCPU)
48677c478bd9Sstevel@tonic-gate
48687c478bd9Sstevel@tonic-gate	.align	64
48697c478bd9Sstevel@tonic-gate	.global kpmtsbm_area
48707c478bd9Sstevel@tonic-gatekpmtsbm_area:
48717c478bd9Sstevel@tonic-gate	.skip	(KPMTSBM_SIZE * NCPU)
48727c478bd9Sstevel@tonic-gate#endif	/* lint */
4873