xref: /titanic_51/usr/src/uts/sfmmu/vm/hat_sfmmu.c (revision 0d928757379972073af9fb22bdc827b74e8ba6ac)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 /*
25  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
26  */
27 
28 /*
29  * VM - Hardware Address Translation management for Spitfire MMU.
30  *
31  * This file implements the machine specific hardware translation
32  * needed by the VM system.  The machine independent interface is
33  * described in <vm/hat.h> while the machine dependent interface
34  * and data structures are described in <vm/hat_sfmmu.h>.
35  *
36  * The hat layer manages the address translation hardware as a cache
37  * driven by calls from the higher levels in the VM system.
38  */
39 
40 #include <sys/types.h>
41 #include <sys/kstat.h>
42 #include <vm/hat.h>
43 #include <vm/hat_sfmmu.h>
44 #include <vm/page.h>
45 #include <sys/pte.h>
46 #include <sys/systm.h>
47 #include <sys/mman.h>
48 #include <sys/sysmacros.h>
49 #include <sys/machparam.h>
50 #include <sys/vtrace.h>
51 #include <sys/kmem.h>
52 #include <sys/mmu.h>
53 #include <sys/cmn_err.h>
54 #include <sys/cpu.h>
55 #include <sys/cpuvar.h>
56 #include <sys/debug.h>
57 #include <sys/lgrp.h>
58 #include <sys/archsystm.h>
59 #include <sys/machsystm.h>
60 #include <sys/vmsystm.h>
61 #include <vm/as.h>
62 #include <vm/seg.h>
63 #include <vm/seg_kp.h>
64 #include <vm/seg_kmem.h>
65 #include <vm/seg_kpm.h>
66 #include <vm/rm.h>
67 #include <sys/t_lock.h>
68 #include <sys/obpdefs.h>
69 #include <sys/vm_machparam.h>
70 #include <sys/var.h>
71 #include <sys/trap.h>
72 #include <sys/machtrap.h>
73 #include <sys/scb.h>
74 #include <sys/bitmap.h>
75 #include <sys/machlock.h>
76 #include <sys/membar.h>
77 #include <sys/atomic.h>
78 #include <sys/cpu_module.h>
79 #include <sys/prom_debug.h>
80 #include <sys/ksynch.h>
81 #include <sys/mem_config.h>
82 #include <sys/mem_cage.h>
83 #include <vm/vm_dep.h>
84 #include <vm/xhat_sfmmu.h>
85 #include <sys/fpu/fpusystm.h>
86 #include <vm/mach_kpm.h>
87 #include <sys/callb.h>
88 
89 #ifdef	DEBUG
90 #define	SFMMU_VALIDATE_HMERID(hat, rid, saddr, len)			\
91 	if (SFMMU_IS_SHMERID_VALID(rid)) {				\
92 		caddr_t _eaddr = (saddr) + (len);			\
93 		sf_srd_t *_srdp;					\
94 		sf_region_t *_rgnp;					\
95 		ASSERT((rid) < SFMMU_MAX_HME_REGIONS);			\
96 		ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid));	\
97 		ASSERT((hat) != ksfmmup);				\
98 		_srdp = (hat)->sfmmu_srdp;				\
99 		ASSERT(_srdp != NULL);					\
100 		ASSERT(_srdp->srd_refcnt != 0);				\
101 		_rgnp = _srdp->srd_hmergnp[(rid)];			\
102 		ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid);		\
103 		ASSERT(_rgnp->rgn_refcnt != 0);				\
104 		ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE));	\
105 		ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) ==	\
106 		    SFMMU_REGION_HME);					\
107 		ASSERT((saddr) >= _rgnp->rgn_saddr);			\
108 		ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size);	\
109 		ASSERT(_eaddr > _rgnp->rgn_saddr);			\
110 		ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size);	\
111 	}
112 
113 #define	SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 	 	 \
114 {						 			 \
115 		caddr_t _hsva;						 \
116 		caddr_t _heva;						 \
117 		caddr_t _rsva;					 	 \
118 		caddr_t _reva;					 	 \
119 		int	_ttesz = get_hblk_ttesz(hmeblkp);		 \
120 		int	_flagtte;					 \
121 		ASSERT((srdp)->srd_refcnt != 0);			 \
122 		ASSERT((rid) < SFMMU_MAX_HME_REGIONS);			 \
123 		ASSERT((rgnp)->rgn_id == rid);				 \
124 		ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE));	 \
125 		ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) ==	 \
126 		    SFMMU_REGION_HME);					 \
127 		ASSERT(_ttesz <= (rgnp)->rgn_pgszc);			 \
128 		_hsva = (caddr_t)get_hblk_base(hmeblkp);		 \
129 		_heva = get_hblk_endaddr(hmeblkp);			 \
130 		_rsva = (caddr_t)P2ALIGN(				 \
131 		    (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES);	 \
132 		_reva = (caddr_t)P2ROUNDUP(				 \
133 		    (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size),	 \
134 		    HBLK_MIN_BYTES);					 \
135 		ASSERT(_hsva >= _rsva);				 	 \
136 		ASSERT(_hsva < _reva);				 	 \
137 		ASSERT(_heva > _rsva);				 	 \
138 		ASSERT(_heva <= _reva);				 	 \
139 		_flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ :  \
140 			_ttesz;						 \
141 		ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte));		 \
142 }
143 
144 #else /* DEBUG */
145 #define	SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
146 #define	SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
147 #endif /* DEBUG */
148 
149 #if defined(SF_ERRATA_57)
150 extern caddr_t errata57_limit;
151 #endif
152 
153 #define	HME8BLK_SZ_RND		((roundup(HME8BLK_SZ, sizeof (int64_t))) /  \
154 				(sizeof (int64_t)))
155 #define	HBLK_RESERVE		((struct hme_blk *)hblk_reserve)
156 
157 #define	HBLK_RESERVE_CNT	128
158 #define	HBLK_RESERVE_MIN	20
159 
160 static struct hme_blk		*freehblkp;
161 static kmutex_t			freehblkp_lock;
162 static int			freehblkcnt;
163 
164 static int64_t			hblk_reserve[HME8BLK_SZ_RND];
165 static kmutex_t			hblk_reserve_lock;
166 static kthread_t		*hblk_reserve_thread;
167 
168 static nucleus_hblk8_info_t	nucleus_hblk8;
169 static nucleus_hblk1_info_t	nucleus_hblk1;
170 
171 /*
172  * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here
173  * after the initial phase of removing an hmeblk from the hash chain, see
174  * the detailed comment in sfmmu_hblk_hash_rm() for further details.
175  */
176 static cpu_hme_pend_t		*cpu_hme_pend;
177 static uint_t			cpu_hme_pend_thresh;
178 /*
179  * SFMMU specific hat functions
180  */
181 void	hat_pagecachectl(struct page *, int);
182 
183 /* flags for hat_pagecachectl */
184 #define	HAT_CACHE	0x1
185 #define	HAT_UNCACHE	0x2
186 #define	HAT_TMPNC	0x4
187 
188 /*
189  * Flag to allow the creation of non-cacheable translations
190  * to system memory. It is off by default. At the moment this
191  * flag is used by the ecache error injector. The error injector
192  * will turn it on when creating such a translation then shut it
193  * off when it's finished.
194  */
195 
196 int	sfmmu_allow_nc_trans = 0;
197 
198 /*
199  * Flag to disable large page support.
200  * 	value of 1 => disable all large pages.
201  *	bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
202  *
203  * For example, use the value 0x4 to disable 512K pages.
204  *
205  */
206 #define	LARGE_PAGES_OFF		0x1
207 
208 /*
209  * The disable_large_pages and disable_ism_large_pages variables control
210  * hat_memload_array and the page sizes to be used by ISM and the kernel.
211  *
212  * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
213  * are only used to control which OOB pages to use at upper VM segment creation
214  * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
215  * Their values may come from platform or CPU specific code to disable page
216  * sizes that should not be used.
217  *
218  * WARNING: 512K pages are currently not supported for ISM/DISM.
219  */
220 uint_t	disable_large_pages = 0;
221 uint_t	disable_ism_large_pages = (1 << TTE512K);
222 uint_t	disable_auto_data_large_pages = 0;
223 uint_t	disable_auto_text_large_pages = 0;
224 
225 /*
226  * Private sfmmu data structures for hat management
227  */
228 static struct kmem_cache *sfmmuid_cache;
229 static struct kmem_cache *mmuctxdom_cache;
230 
231 /*
232  * Private sfmmu data structures for tsb management
233  */
234 static struct kmem_cache *sfmmu_tsbinfo_cache;
235 static struct kmem_cache *sfmmu_tsb8k_cache;
236 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
237 static vmem_t *kmem_bigtsb_arena;
238 static vmem_t *kmem_tsb_arena;
239 
240 /*
241  * sfmmu static variables for hmeblk resource management.
242  */
243 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
244 static struct kmem_cache *sfmmu8_cache;
245 static struct kmem_cache *sfmmu1_cache;
246 static struct kmem_cache *pa_hment_cache;
247 
248 static kmutex_t 	ism_mlist_lock;	/* mutex for ism mapping list */
249 /*
250  * private data for ism
251  */
252 static struct kmem_cache *ism_blk_cache;
253 static struct kmem_cache *ism_ment_cache;
254 #define	ISMID_STARTADDR	NULL
255 
256 /*
257  * Region management data structures and function declarations.
258  */
259 
260 static void	sfmmu_leave_srd(sfmmu_t *);
261 static int	sfmmu_srdcache_constructor(void *, void *, int);
262 static void	sfmmu_srdcache_destructor(void *, void *);
263 static int	sfmmu_rgncache_constructor(void *, void *, int);
264 static void	sfmmu_rgncache_destructor(void *, void *);
265 static int	sfrgnmap_isnull(sf_region_map_t *);
266 static int	sfhmergnmap_isnull(sf_hmeregion_map_t *);
267 static int	sfmmu_scdcache_constructor(void *, void *, int);
268 static void	sfmmu_scdcache_destructor(void *, void *);
269 static void	sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
270     size_t, void *, u_offset_t);
271 
272 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
273 static sf_srd_bucket_t *srd_buckets;
274 static struct kmem_cache *srd_cache;
275 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
276 static struct kmem_cache *region_cache;
277 static struct kmem_cache *scd_cache;
278 
279 #ifdef sun4v
280 int use_bigtsb_arena = 1;
281 #else
282 int use_bigtsb_arena = 0;
283 #endif
284 
285 /* External /etc/system tunable, for turning on&off the shctx support */
286 int disable_shctx = 0;
287 /* Internal variable, set by MD if the HW supports shctx feature */
288 int shctx_on = 0;
289 
290 #ifdef DEBUG
291 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
292 #endif
293 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
294 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
295 
296 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
297 static void sfmmu_find_scd(sfmmu_t *);
298 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
299 static void sfmmu_finish_join_scd(sfmmu_t *);
300 static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
301 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
302 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
303 static void sfmmu_free_scd_tsbs(sfmmu_t *);
304 static void sfmmu_tsb_inv_ctx(sfmmu_t *);
305 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
306 static void sfmmu_ism_hatflags(sfmmu_t *, int);
307 static int sfmmu_srd_lock_held(sf_srd_t *);
308 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
309 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
310 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
311 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
312 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
313 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
314 
315 /*
316  * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
317  * HAT flags, synchronizing TLB/TSB coherency, and context management.
318  * The lock is hashed on the sfmmup since the case where we need to lock
319  * all processes is rare but does occur (e.g. we need to unload a shared
320  * mapping from all processes using the mapping).  We have a lot of buckets,
321  * and each slab of sfmmu_t's can use about a quarter of them, giving us
322  * a fairly good distribution without wasting too much space and overhead
323  * when we have to grab them all.
324  */
325 #define	SFMMU_NUM_LOCK	128		/* must be power of two */
326 hatlock_t	hat_lock[SFMMU_NUM_LOCK];
327 
328 /*
329  * Hash algorithm optimized for a small number of slabs.
330  *  7 is (highbit((sizeof sfmmu_t)) - 1)
331  * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
332  * kmem_cache, and thus they will be sequential within that cache.  In
333  * addition, each new slab will have a different "color" up to cache_maxcolor
334  * which will skew the hashing for each successive slab which is allocated.
335  * If the size of sfmmu_t changed to a larger size, this algorithm may need
336  * to be revisited.
337  */
338 #define	TSB_HASH_SHIFT_BITS (7)
339 #define	PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
340 
341 #ifdef DEBUG
342 int tsb_hash_debug = 0;
343 #define	TSB_HASH(sfmmup)	\
344 	(tsb_hash_debug ? &hat_lock[0] : \
345 	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
346 #else	/* DEBUG */
347 #define	TSB_HASH(sfmmup)	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
348 #endif	/* DEBUG */
349 
350 
351 /* sfmmu_replace_tsb() return codes. */
352 typedef enum tsb_replace_rc {
353 	TSB_SUCCESS,
354 	TSB_ALLOCFAIL,
355 	TSB_LOSTRACE,
356 	TSB_ALREADY_SWAPPED,
357 	TSB_CANTGROW
358 } tsb_replace_rc_t;
359 
360 /*
361  * Flags for TSB allocation routines.
362  */
363 #define	TSB_ALLOC	0x01
364 #define	TSB_FORCEALLOC	0x02
365 #define	TSB_GROW	0x04
366 #define	TSB_SHRINK	0x08
367 #define	TSB_SWAPIN	0x10
368 
369 /*
370  * Support for HAT callbacks.
371  */
372 #define	SFMMU_MAX_RELOC_CALLBACKS	10
373 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
374 static id_t sfmmu_cb_nextid = 0;
375 static id_t sfmmu_tsb_cb_id;
376 struct sfmmu_callback *sfmmu_cb_table;
377 
378 kmutex_t	kpr_mutex;
379 kmutex_t	kpr_suspendlock;
380 kthread_t	*kreloc_thread;
381 
382 /*
383  * Enable VA->PA translation sanity checking on DEBUG kernels.
384  * Disabled by default.  This is incompatible with some
385  * drivers (error injector, RSM) so if it breaks you get
386  * to keep both pieces.
387  */
388 int hat_check_vtop = 0;
389 
390 /*
391  * Private sfmmu routines (prototypes)
392  */
393 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
394 static struct 	hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
395 			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
396 			uint_t);
397 static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
398 			caddr_t, demap_range_t *, uint_t);
399 static caddr_t	sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
400 			caddr_t, int);
401 static void	sfmmu_hblk_free(struct hme_blk **);
402 static void	sfmmu_hblks_list_purge(struct hme_blk **, int);
403 static uint_t	sfmmu_get_free_hblk(struct hme_blk **, uint_t);
404 static uint_t	sfmmu_put_free_hblk(struct hme_blk *, uint_t);
405 static struct hme_blk *sfmmu_hblk_steal(int);
406 static int	sfmmu_steal_this_hblk(struct hmehash_bucket *,
407 			struct hme_blk *, uint64_t, struct hme_blk *);
408 static caddr_t	sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
409 
410 static void	hat_do_memload_array(struct hat *, caddr_t, size_t,
411 		    struct page **, uint_t, uint_t, uint_t);
412 static void	hat_do_memload(struct hat *, caddr_t, struct page *,
413 		    uint_t, uint_t, uint_t);
414 static void	sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
415 		    uint_t, uint_t, pgcnt_t, uint_t);
416 void		sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
417 			uint_t);
418 static int	sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
419 			uint_t, uint_t);
420 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
421 					caddr_t, int, uint_t);
422 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
423 			struct hmehash_bucket *, caddr_t, uint_t, uint_t,
424 			uint_t);
425 static int	sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
426 			caddr_t, page_t **, uint_t, uint_t);
427 static void	sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
428 
429 static int	sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
430 static pfn_t	sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
431 void		sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
432 #ifdef VAC
433 static void	sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
434 static int	sfmmu_vacconflict_array(caddr_t, page_t *, int *);
435 int	tst_tnc(page_t *pp, pgcnt_t);
436 void	conv_tnc(page_t *pp, int);
437 #endif
438 
439 static void	sfmmu_get_ctx(sfmmu_t *);
440 static void	sfmmu_free_sfmmu(sfmmu_t *);
441 
442 static void	sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
443 static void	sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
444 
445 cpuset_t	sfmmu_pageunload(page_t *, struct sf_hment *, int);
446 static void	hat_pagereload(struct page *, struct page *);
447 static cpuset_t	sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
448 #ifdef VAC
449 void	sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
450 static void	sfmmu_page_cache(page_t *, int, int, int);
451 #endif
452 
453 cpuset_t	sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
454     struct hme_blk *, int);
455 static void	sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
456 			pfn_t, int, int, int, int);
457 static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
458 			pfn_t, int);
459 static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
460 static void	sfmmu_tlb_range_demap(demap_range_t *);
461 static void	sfmmu_invalidate_ctx(sfmmu_t *);
462 static void	sfmmu_sync_mmustate(sfmmu_t *);
463 
464 static void 	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
465 static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
466 			sfmmu_t *);
467 static void	sfmmu_tsb_free(struct tsb_info *);
468 static void	sfmmu_tsbinfo_free(struct tsb_info *);
469 static int	sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
470 			sfmmu_t *);
471 static void	sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
472 static void	sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
473 static int	sfmmu_select_tsb_szc(pgcnt_t);
474 static void	sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
475 #define		sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
476 	sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
477 #define		sfmmu_unload_tsb(sfmmup, vaddr, szc)    \
478 	sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
479 static void	sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
480 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
481     hatlock_t *, uint_t);
482 static void	sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
483 
484 #ifdef VAC
485 void	sfmmu_cache_flush(pfn_t, int);
486 void	sfmmu_cache_flushcolor(int, pfn_t);
487 #endif
488 static caddr_t	sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
489 			caddr_t, demap_range_t *, uint_t, int);
490 
491 static uint64_t	sfmmu_vtop_attr(uint_t, int mode, tte_t *);
492 static uint_t	sfmmu_ptov_attr(tte_t *);
493 static caddr_t	sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
494 			caddr_t, demap_range_t *, uint_t);
495 static uint_t	sfmmu_vtop_prot(uint_t, uint_t *);
496 static int	sfmmu_idcache_constructor(void *, void *, int);
497 static void	sfmmu_idcache_destructor(void *, void *);
498 static int	sfmmu_hblkcache_constructor(void *, void *, int);
499 static void	sfmmu_hblkcache_destructor(void *, void *);
500 static void	sfmmu_hblkcache_reclaim(void *);
501 static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
502 			struct hmehash_bucket *);
503 static void	sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
504 			struct hme_blk *, struct hme_blk **, int);
505 static void	sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
506 			uint64_t);
507 static struct hme_blk *sfmmu_check_pending_hblks(int);
508 static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
509 static void	sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
510 static void	sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
511 			int, caddr_t *);
512 static void	sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
513 
514 static void	sfmmu_rm_large_mappings(page_t *, int);
515 
516 static void	hat_lock_init(void);
517 static void	hat_kstat_init(void);
518 static int	sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
519 static void	sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
520 static	int	sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
521 static void	sfmmu_check_page_sizes(sfmmu_t *, int);
522 int	fnd_mapping_sz(page_t *);
523 static void	iment_add(struct ism_ment *,  struct hat *);
524 static void	iment_sub(struct ism_ment *, struct hat *);
525 static pgcnt_t	ism_tsb_entries(sfmmu_t *, int szc);
526 extern void	sfmmu_setup_tsbinfo(sfmmu_t *);
527 extern void	sfmmu_clear_utsbinfo(void);
528 
529 static void		sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
530 
531 extern int vpm_enable;
532 
533 /* kpm globals */
534 #ifdef	DEBUG
535 /*
536  * Enable trap level tsbmiss handling
537  */
538 int	kpm_tsbmtl = 1;
539 
540 /*
541  * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
542  * required TLB shootdowns in this case, so handle w/ care. Off by default.
543  */
544 int	kpm_tlb_flush;
545 #endif	/* DEBUG */
546 
547 static void	*sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
548 
549 #ifdef DEBUG
550 static void	sfmmu_check_hblk_flist();
551 #endif
552 
553 /*
554  * Semi-private sfmmu data structures.  Some of them are initialize in
555  * startup or in hat_init. Some of them are private but accessed by
556  * assembly code or mach_sfmmu.c
557  */
558 struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
559 struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
560 uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
561 uint64_t	khme_hash_pa;		/* PA of khme_hash */
562 int 		uhmehash_num;		/* # of buckets in user hash table */
563 int 		khmehash_num;		/* # of buckets in kernel hash table */
564 
565 uint_t		max_mmu_ctxdoms = 0;	/* max context domains in the system */
566 mmu_ctx_t	**mmu_ctxs_tbl;		/* global array of context domains */
567 uint64_t	mmu_saved_gnum = 0;	/* to init incoming MMUs' gnums */
568 
569 #define	DEFAULT_NUM_CTXS_PER_MMU 8192
570 static uint_t	nctxs = DEFAULT_NUM_CTXS_PER_MMU;
571 
572 int		cache;			/* describes system cache */
573 
574 caddr_t		ktsb_base;		/* kernel 8k-indexed tsb base address */
575 uint64_t	ktsb_pbase;		/* kernel 8k-indexed tsb phys address */
576 int		ktsb_szcode;		/* kernel 8k-indexed tsb size code */
577 int		ktsb_sz;		/* kernel 8k-indexed tsb size */
578 
579 caddr_t		ktsb4m_base;		/* kernel 4m-indexed tsb base address */
580 uint64_t	ktsb4m_pbase;		/* kernel 4m-indexed tsb phys address */
581 int		ktsb4m_szcode;		/* kernel 4m-indexed tsb size code */
582 int		ktsb4m_sz;		/* kernel 4m-indexed tsb size */
583 
584 uint64_t	kpm_tsbbase;		/* kernel seg_kpm 4M TSB base address */
585 int		kpm_tsbsz;		/* kernel seg_kpm 4M TSB size code */
586 uint64_t	kpmsm_tsbbase;		/* kernel seg_kpm 8K TSB base address */
587 int		kpmsm_tsbsz;		/* kernel seg_kpm 8K TSB size code */
588 
589 #ifndef sun4v
590 int		utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
591 int		utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
592 int		dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
593 caddr_t		utsb_vabase;		/* reserved kernel virtual memory */
594 caddr_t		utsb4m_vabase;		/* for trap handler TSB accesses */
595 #endif /* sun4v */
596 uint64_t	tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
597 vmem_t		*kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */
598 vmem_t		*kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
599 
600 /*
601  * Size to use for TSB slabs.  Future platforms that support page sizes
602  * larger than 4M may wish to change these values, and provide their own
603  * assembly macros for building and decoding the TSB base register contents.
604  * Note disable_large_pages will override the value set here.
605  */
606 static	uint_t tsb_slab_ttesz = TTE4M;
607 size_t	tsb_slab_size = MMU_PAGESIZE4M;
608 uint_t	tsb_slab_shift = MMU_PAGESHIFT4M;
609 /* PFN mask for TTE */
610 size_t	tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
611 
612 /*
613  * Size to use for TSB slabs.  These are used only when 256M tsb arenas
614  * exist.
615  */
616 static uint_t	bigtsb_slab_ttesz = TTE256M;
617 static size_t	bigtsb_slab_size = MMU_PAGESIZE256M;
618 static uint_t	bigtsb_slab_shift = MMU_PAGESHIFT256M;
619 /* 256M page alignment for 8K pfn */
620 static size_t	bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
621 
622 /* largest TSB size to grow to, will be smaller on smaller memory systems */
623 static int	tsb_max_growsize = 0;
624 
625 /*
626  * Tunable parameters dealing with TSB policies.
627  */
628 
629 /*
630  * This undocumented tunable forces all 8K TSBs to be allocated from
631  * the kernel heap rather than from the kmem_tsb_default_arena arenas.
632  */
633 #ifdef	DEBUG
634 int	tsb_forceheap = 0;
635 #endif	/* DEBUG */
636 
637 /*
638  * Decide whether to use per-lgroup arenas, or one global set of
639  * TSB arenas.  The default is not to break up per-lgroup, since
640  * most platforms don't recognize any tangible benefit from it.
641  */
642 int	tsb_lgrp_affinity = 0;
643 
644 /*
645  * Used for growing the TSB based on the process RSS.
646  * tsb_rss_factor is based on the smallest TSB, and is
647  * shifted by the TSB size to determine if we need to grow.
648  * The default will grow the TSB if the number of TTEs for
649  * this page size exceeds 75% of the number of TSB entries,
650  * which should _almost_ eliminate all conflict misses
651  * (at the expense of using up lots and lots of memory).
652  */
653 #define	TSB_RSS_FACTOR		(TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
654 #define	SFMMU_RSS_TSBSIZE(tsbszc)	(tsb_rss_factor << tsbszc)
655 #define	SELECT_TSB_SIZECODE(pgcnt) ( \
656 	(enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
657 	default_tsb_size)
658 #define	TSB_OK_SHRINK()	\
659 	(tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
660 #define	TSB_OK_GROW()	\
661 	(tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
662 
663 int	enable_tsb_rss_sizing = 1;
664 int	tsb_rss_factor	= (int)TSB_RSS_FACTOR;
665 
666 /* which TSB size code to use for new address spaces or if rss sizing off */
667 int default_tsb_size = TSB_8K_SZCODE;
668 
669 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
670 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
671 #define	TSB_ALLOC_HIWATER_FACTOR_DEFAULT	32
672 
673 #ifdef DEBUG
674 static int tsb_random_size = 0;	/* set to 1 to test random tsb sizes on alloc */
675 static int tsb_grow_stress = 0;	/* if set to 1, keep replacing TSB w/ random */
676 static int tsb_alloc_mtbf = 0;	/* fail allocation every n attempts */
677 static int tsb_alloc_fail_mtbf = 0;
678 static int tsb_alloc_count = 0;
679 #endif /* DEBUG */
680 
681 /* if set to 1, will remap valid TTEs when growing TSB. */
682 int tsb_remap_ttes = 1;
683 
684 /*
685  * If we have more than this many mappings, allocate a second TSB.
686  * This default is chosen because the I/D fully associative TLBs are
687  * assumed to have at least 8 available entries. Platforms with a
688  * larger fully-associative TLB could probably override the default.
689  */
690 
691 #ifdef sun4v
692 int tsb_sectsb_threshold = 0;
693 #else
694 int tsb_sectsb_threshold = 8;
695 #endif
696 
697 /*
698  * kstat data
699  */
700 struct sfmmu_global_stat sfmmu_global_stat;
701 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
702 
703 /*
704  * Global data
705  */
706 sfmmu_t 	*ksfmmup;		/* kernel's hat id */
707 
708 #ifdef DEBUG
709 static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
710 #endif
711 
712 /* sfmmu locking operations */
713 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
714 static int	sfmmu_mlspl_held(struct page *, int);
715 
716 kmutex_t *sfmmu_page_enter(page_t *);
717 void	sfmmu_page_exit(kmutex_t *);
718 int	sfmmu_page_spl_held(struct page *);
719 
720 /* sfmmu internal locking operations - accessed directly */
721 static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
722 				kmutex_t **, kmutex_t **);
723 static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
724 static hatlock_t *
725 		sfmmu_hat_enter(sfmmu_t *);
726 static hatlock_t *
727 		sfmmu_hat_tryenter(sfmmu_t *);
728 static void	sfmmu_hat_exit(hatlock_t *);
729 static void	sfmmu_hat_lock_all(void);
730 static void	sfmmu_hat_unlock_all(void);
731 static void	sfmmu_ismhat_enter(sfmmu_t *, int);
732 static void	sfmmu_ismhat_exit(sfmmu_t *, int);
733 
734 kpm_hlk_t	*kpmp_table;
735 uint_t		kpmp_table_sz;	/* must be a power of 2 */
736 uchar_t		kpmp_shift;
737 
738 kpm_shlk_t	*kpmp_stable;
739 uint_t		kpmp_stable_sz;	/* must be a power of 2 */
740 
741 /*
742  * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
743  * SPL_SHIFT is log2(SPL_TABLE_SIZE).
744  */
745 #if ((2*NCPU_P2) > 128)
746 #define	SPL_SHIFT	((unsigned)(NCPU_LOG2 + 1))
747 #else
748 #define	SPL_SHIFT	7U
749 #endif
750 #define	SPL_TABLE_SIZE	(1U << SPL_SHIFT)
751 #define	SPL_MASK	(SPL_TABLE_SIZE - 1)
752 
753 /*
754  * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
755  * and by multiples of SPL_SHIFT to get as many varied bits as we can.
756  */
757 #define	SPL_INDEX(pp) \
758 	((((uintptr_t)(pp) >> PP_SHIFT) ^ \
759 	((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
760 	((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
761 	((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
762 	SPL_MASK)
763 
764 #define	SPL_HASH(pp)    \
765 	(&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
766 
767 static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];
768 
769 /* Array of mutexes protecting a page's mapping list and p_nrm field. */
770 
771 #define	MML_TABLE_SIZE	SPL_TABLE_SIZE
772 #define	MLIST_HASH(pp)	(&mml_table[SPL_INDEX(pp)].pad_mutex)
773 
774 static pad_mutex_t	mml_table[MML_TABLE_SIZE];
775 
776 /*
777  * hat_unload_callback() will group together callbacks in order
778  * to avoid xt_sync() calls.  This is the maximum size of the group.
779  */
780 #define	MAX_CB_ADDR	32
781 
782 tte_t	hw_tte;
783 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
784 
785 static char	*mmu_ctx_kstat_names[] = {
786 	"mmu_ctx_tsb_exceptions",
787 	"mmu_ctx_tsb_raise_exception",
788 	"mmu_ctx_wrap_around",
789 };
790 
791 /*
792  * Wrapper for vmem_xalloc since vmem_create only allows limited
793  * parameters for vm_source_alloc functions.  This function allows us
794  * to specify alignment consistent with the size of the object being
795  * allocated.
796  */
797 static void *
798 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
799 {
800 	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
801 }
802 
803 /* Common code for setting tsb_alloc_hiwater. */
804 #define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
805 		ptob(pages) / tsb_alloc_hiwater_factor
806 
807 /*
808  * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
809  * a single TSB.  physmem is the number of physical pages so we need physmem 8K
810  * TTEs to represent all those physical pages.  We round this up by using
811  * 1<<highbit().  To figure out which size code to use, remember that the size
812  * code is just an amount to shift the smallest TSB size to get the size of
813  * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
814  * highbit() - 1) to get the size code for the smallest TSB that can represent
815  * all of physical memory, while erring on the side of too much.
816  *
817  * Restrict tsb_max_growsize to make sure that:
818  *	1) TSBs can't grow larger than the TSB slab size
819  *	2) TSBs can't grow larger than UTSB_MAX_SZCODE.
820  */
821 #define	SFMMU_SET_TSB_MAX_GROWSIZE(pages) {				\
822 	int	_i, _szc, _slabszc, _tsbszc;				\
823 									\
824 	_i = highbit(pages);						\
825 	if ((1 << (_i - 1)) == (pages))					\
826 		_i--;		/* 2^n case, round down */              \
827 	_szc = _i - TSB_START_SIZE;					\
828 	_slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
829 	_tsbszc = MIN(_szc, _slabszc);                                  \
830 	tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE);               \
831 }
832 
833 /*
834  * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
835  * tsb_info which handles that TTE size.
836  */
837 #define	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) {			\
838 	(tsbinfop) = (sfmmup)->sfmmu_tsb;				\
839 	ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) ||		\
840 	    sfmmu_hat_lock_held(sfmmup));				\
841 	if ((tte_szc) >= TTE4M)	{					\
842 		ASSERT((tsbinfop) != NULL);				\
843 		(tsbinfop) = (tsbinfop)->tsb_next;			\
844 	}								\
845 }
846 
847 /*
848  * Macro to use to unload entries from the TSB.
849  * It has knowledge of which page sizes get replicated in the TSB
850  * and will call the appropriate unload routine for the appropriate size.
851  */
852 #define	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat)		\
853 {									\
854 	int ttesz = get_hblk_ttesz(hmeblkp);				\
855 	if (ttesz == TTE8K || ttesz == TTE4M) {				\
856 		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
857 	} else {							\
858 		caddr_t sva = ismhat ? addr : 				\
859 		    (caddr_t)get_hblk_base(hmeblkp);			\
860 		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
861 		ASSERT(addr >= sva && addr < eva);			\
862 		sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);	\
863 	}								\
864 }
865 
866 
867 /* Update tsb_alloc_hiwater after memory is configured. */
868 /*ARGSUSED*/
869 static void
870 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
871 {
872 	/* Assumes physmem has already been updated. */
873 	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
874 	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
875 }
876 
877 /*
878  * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
879  * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
880  * deleted.
881  */
882 /*ARGSUSED*/
883 static int
884 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
885 {
886 	return (0);
887 }
888 
889 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
890 /*ARGSUSED*/
891 static void
892 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
893 {
894 	/*
895 	 * Whether the delete was cancelled or not, just go ahead and update
896 	 * tsb_alloc_hiwater and tsb_max_growsize.
897 	 */
898 	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
899 	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
900 }
901 
902 static kphysm_setup_vector_t sfmmu_update_vec = {
903 	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
904 	sfmmu_update_post_add,		/* post_add */
905 	sfmmu_update_pre_del,		/* pre_del */
906 	sfmmu_update_post_del		/* post_del */
907 };
908 
909 
910 /*
911  * HME_BLK HASH PRIMITIVES
912  */
913 
914 /*
915  * Enter a hme on the mapping list for page pp.
916  * When large pages are more prevalent in the system we might want to
917  * keep the mapping list in ascending order by the hment size. For now,
918  * small pages are more frequent, so don't slow it down.
919  */
920 #define	HME_ADD(hme, pp)					\
921 {								\
922 	ASSERT(sfmmu_mlist_held(pp));				\
923 								\
924 	hme->hme_prev = NULL;					\
925 	hme->hme_next = pp->p_mapping;				\
926 	hme->hme_page = pp;					\
927 	if (pp->p_mapping) {					\
928 		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
929 		ASSERT(pp->p_share > 0);			\
930 	} else  {						\
931 		/* EMPTY */					\
932 		ASSERT(pp->p_share == 0);			\
933 	}							\
934 	pp->p_mapping = hme;					\
935 	pp->p_share++;						\
936 }
937 
938 /*
939  * Enter a hme on the mapping list for page pp.
940  * If we are unmapping a large translation, we need to make sure that the
941  * change is reflect in the corresponding bit of the p_index field.
942  */
943 #define	HME_SUB(hme, pp)					\
944 {								\
945 	ASSERT(sfmmu_mlist_held(pp));				\
946 	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
947 								\
948 	if (pp->p_mapping == NULL) {				\
949 		panic("hme_remove - no mappings");		\
950 	}							\
951 								\
952 	membar_stst();	/* ensure previous stores finish */	\
953 								\
954 	ASSERT(pp->p_share > 0);				\
955 	pp->p_share--;						\
956 								\
957 	if (hme->hme_prev) {					\
958 		ASSERT(pp->p_mapping != hme);			\
959 		ASSERT(hme->hme_prev->hme_page == pp ||		\
960 			IS_PAHME(hme->hme_prev));		\
961 		hme->hme_prev->hme_next = hme->hme_next;	\
962 	} else {						\
963 		ASSERT(pp->p_mapping == hme);			\
964 		pp->p_mapping = hme->hme_next;			\
965 		ASSERT((pp->p_mapping == NULL) ?		\
966 			(pp->p_share == 0) : 1);		\
967 	}							\
968 								\
969 	if (hme->hme_next) {					\
970 		ASSERT(hme->hme_next->hme_page == pp ||		\
971 			IS_PAHME(hme->hme_next));		\
972 		hme->hme_next->hme_prev = hme->hme_prev;	\
973 	}							\
974 								\
975 	/* zero out the entry */				\
976 	hme->hme_next = NULL;					\
977 	hme->hme_prev = NULL;					\
978 	hme->hme_page = NULL;					\
979 								\
980 	if (hme_size(hme) > TTE8K) {				\
981 		/* remove mappings for remainder of large pg */	\
982 		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
983 	}							\
984 }
985 
986 /*
987  * This function returns the hment given the hme_blk and a vaddr.
988  * It assumes addr has already been checked to belong to hme_blk's
989  * range.
990  */
991 #define	HBLKTOHME(hment, hmeblkp, addr)					\
992 {									\
993 	int index;							\
994 	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
995 }
996 
997 /*
998  * Version of HBLKTOHME that also returns the index in hmeblkp
999  * of the hment.
1000  */
1001 #define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
1002 {									\
1003 	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
1004 									\
1005 	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
1006 		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1007 	} else								\
1008 		idx = 0;						\
1009 									\
1010 	(hment) = &(hmeblkp)->hblk_hme[idx];				\
1011 }
1012 
1013 /*
1014  * Disable any page sizes not supported by the CPU
1015  */
1016 void
1017 hat_init_pagesizes()
1018 {
1019 	int 		i;
1020 
1021 	mmu_exported_page_sizes = 0;
1022 	for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1023 
1024 		szc_2_userszc[i] = (uint_t)-1;
1025 		userszc_2_szc[i] = (uint_t)-1;
1026 
1027 		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1028 			disable_large_pages |= (1 << i);
1029 		} else {
1030 			szc_2_userszc[i] = mmu_exported_page_sizes;
1031 			userszc_2_szc[mmu_exported_page_sizes] = i;
1032 			mmu_exported_page_sizes++;
1033 		}
1034 	}
1035 
1036 	disable_ism_large_pages |= disable_large_pages;
1037 	disable_auto_data_large_pages = disable_large_pages;
1038 	disable_auto_text_large_pages = disable_large_pages;
1039 
1040 	/*
1041 	 * Initialize mmu-specific large page sizes.
1042 	 */
1043 	if (&mmu_large_pages_disabled) {
1044 		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1045 		disable_ism_large_pages |=
1046 		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
1047 		disable_auto_data_large_pages |=
1048 		    mmu_large_pages_disabled(HAT_AUTO_DATA);
1049 		disable_auto_text_large_pages |=
1050 		    mmu_large_pages_disabled(HAT_AUTO_TEXT);
1051 	}
1052 }
1053 
1054 /*
1055  * Initialize the hardware address translation structures.
1056  */
1057 void
1058 hat_init(void)
1059 {
1060 	int 		i;
1061 	uint_t		sz;
1062 	size_t		size;
1063 
1064 	hat_lock_init();
1065 	hat_kstat_init();
1066 
1067 	/*
1068 	 * Hardware-only bits in a TTE
1069 	 */
1070 	MAKE_TTE_MASK(&hw_tte);
1071 
1072 	hat_init_pagesizes();
1073 
1074 	/* Initialize the hash locks */
1075 	for (i = 0; i < khmehash_num; i++) {
1076 		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1077 		    MUTEX_DEFAULT, NULL);
1078 		khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1079 	}
1080 	for (i = 0; i < uhmehash_num; i++) {
1081 		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1082 		    MUTEX_DEFAULT, NULL);
1083 		uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1084 	}
1085 	khmehash_num--;		/* make sure counter starts from 0 */
1086 	uhmehash_num--;		/* make sure counter starts from 0 */
1087 
1088 	/*
1089 	 * Allocate context domain structures.
1090 	 *
1091 	 * A platform may choose to modify max_mmu_ctxdoms in
1092 	 * set_platform_defaults(). If a platform does not define
1093 	 * a set_platform_defaults() or does not choose to modify
1094 	 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1095 	 *
1096 	 * For all platforms that have CPUs sharing MMUs, this
1097 	 * value must be defined.
1098 	 */
1099 	if (max_mmu_ctxdoms == 0)
1100 		max_mmu_ctxdoms = max_ncpus;
1101 
1102 	size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1103 	mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1104 
1105 	/* mmu_ctx_t is 64 bytes aligned */
1106 	mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1107 	    sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1108 	/*
1109 	 * MMU context domain initialization for the Boot CPU.
1110 	 * This needs the context domains array allocated above.
1111 	 */
1112 	mutex_enter(&cpu_lock);
1113 	sfmmu_cpu_init(CPU);
1114 	mutex_exit(&cpu_lock);
1115 
1116 	/*
1117 	 * Intialize ism mapping list lock.
1118 	 */
1119 
1120 	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1121 
1122 	/*
1123 	 * Each sfmmu structure carries an array of MMU context info
1124 	 * structures, one per context domain. The size of this array depends
1125 	 * on the maximum number of context domains. So, the size of the
1126 	 * sfmmu structure varies per platform.
1127 	 *
1128 	 * sfmmu is allocated from static arena, because trap
1129 	 * handler at TL > 0 is not allowed to touch kernel relocatable
1130 	 * memory. sfmmu's alignment is changed to 64 bytes from
1131 	 * default 8 bytes, as the lower 6 bits will be used to pass
1132 	 * pgcnt to vtag_flush_pgcnt_tl1.
1133 	 */
1134 	size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1135 
1136 	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1137 	    64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1138 	    NULL, NULL, static_arena, 0);
1139 
1140 	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1141 	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1142 
1143 	/*
1144 	 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1145 	 * from the heap when low on memory or when TSB_FORCEALLOC is
1146 	 * specified, don't use magazines to cache them--we want to return
1147 	 * them to the system as quickly as possible.
1148 	 */
1149 	sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1150 	    MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1151 	    static_arena, KMC_NOMAGAZINE);
1152 
1153 	/*
1154 	 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1155 	 * memory, which corresponds to the old static reserve for TSBs.
1156 	 * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
1157 	 * memory we'll allocate for TSB slabs; beyond this point TSB
1158 	 * allocations will be taken from the kernel heap (via
1159 	 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1160 	 * consumer.
1161 	 */
1162 	if (tsb_alloc_hiwater_factor == 0) {
1163 		tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1164 	}
1165 	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1166 
1167 	for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1168 		if (!(disable_large_pages & (1 << sz)))
1169 			break;
1170 	}
1171 
1172 	if (sz < tsb_slab_ttesz) {
1173 		tsb_slab_ttesz = sz;
1174 		tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1175 		tsb_slab_size = 1 << tsb_slab_shift;
1176 		tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1177 		use_bigtsb_arena = 0;
1178 	} else if (use_bigtsb_arena &&
1179 	    (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1180 		use_bigtsb_arena = 0;
1181 	}
1182 
1183 	if (!use_bigtsb_arena) {
1184 		bigtsb_slab_shift = tsb_slab_shift;
1185 	}
1186 	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1187 
1188 	/*
1189 	 * On smaller memory systems, allocate TSB memory in smaller chunks
1190 	 * than the default 4M slab size. We also honor disable_large_pages
1191 	 * here.
1192 	 *
1193 	 * The trap handlers need to be patched with the final slab shift,
1194 	 * since they need to be able to construct the TSB pointer at runtime.
1195 	 */
1196 	if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1197 	    !(disable_large_pages & (1 << TTE512K))) {
1198 		tsb_slab_ttesz = TTE512K;
1199 		tsb_slab_shift = MMU_PAGESHIFT512K;
1200 		tsb_slab_size = MMU_PAGESIZE512K;
1201 		tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1202 		use_bigtsb_arena = 0;
1203 	}
1204 
1205 	if (!use_bigtsb_arena) {
1206 		bigtsb_slab_ttesz = tsb_slab_ttesz;
1207 		bigtsb_slab_shift = tsb_slab_shift;
1208 		bigtsb_slab_size = tsb_slab_size;
1209 		bigtsb_slab_mask = tsb_slab_mask;
1210 	}
1211 
1212 
1213 	/*
1214 	 * Set up memory callback to update tsb_alloc_hiwater and
1215 	 * tsb_max_growsize.
1216 	 */
1217 	i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1218 	ASSERT(i == 0);
1219 
1220 	/*
1221 	 * kmem_tsb_arena is the source from which large TSB slabs are
1222 	 * drawn.  The quantum of this arena corresponds to the largest
1223 	 * TSB size we can dynamically allocate for user processes.
1224 	 * Currently it must also be a supported page size since we
1225 	 * use exactly one translation entry to map each slab page.
1226 	 *
1227 	 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1228 	 * which most TSBs are allocated.  Since most TSB allocations are
1229 	 * typically 8K we have a kmem cache we stack on top of each
1230 	 * kmem_tsb_default_arena to speed up those allocations.
1231 	 *
1232 	 * Note the two-level scheme of arenas is required only
1233 	 * because vmem_create doesn't allow us to specify alignment
1234 	 * requirements.  If this ever changes the code could be
1235 	 * simplified to use only one level of arenas.
1236 	 *
1237 	 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1238 	 * will be provided in addition to the 4M kmem_tsb_arena.
1239 	 */
1240 	if (use_bigtsb_arena) {
1241 		kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1242 		    bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1243 		    vmem_xfree, heap_arena, 0, VM_SLEEP);
1244 	}
1245 
1246 	kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1247 	    sfmmu_vmem_xalloc_aligned_wrapper,
1248 	    vmem_xfree, heap_arena, 0, VM_SLEEP);
1249 
1250 	if (tsb_lgrp_affinity) {
1251 		char s[50];
1252 		for (i = 0; i < NLGRPS_MAX; i++) {
1253 			if (use_bigtsb_arena) {
1254 				(void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1255 				kmem_bigtsb_default_arena[i] = vmem_create(s,
1256 				    NULL, 0, 2 * tsb_slab_size,
1257 				    sfmmu_tsb_segkmem_alloc,
1258 				    sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1259 				    0, VM_SLEEP | VM_BESTFIT);
1260 			}
1261 
1262 			(void) sprintf(s, "kmem_tsb_lgrp%d", i);
1263 			kmem_tsb_default_arena[i] = vmem_create(s,
1264 			    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1265 			    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1266 			    VM_SLEEP | VM_BESTFIT);
1267 
1268 			(void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1269 			sfmmu_tsb_cache[i] = kmem_cache_create(s,
1270 			    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1271 			    kmem_tsb_default_arena[i], 0);
1272 		}
1273 	} else {
1274 		if (use_bigtsb_arena) {
1275 			kmem_bigtsb_default_arena[0] =
1276 			    vmem_create("kmem_bigtsb_default", NULL, 0,
1277 			    2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1278 			    sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1279 			    VM_SLEEP | VM_BESTFIT);
1280 		}
1281 
1282 		kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1283 		    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1284 		    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1285 		    VM_SLEEP | VM_BESTFIT);
1286 		sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1287 		    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1288 		    kmem_tsb_default_arena[0], 0);
1289 	}
1290 
1291 	sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1292 	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1293 	    sfmmu_hblkcache_destructor,
1294 	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1295 	    hat_memload_arena, KMC_NOHASH);
1296 
1297 	hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1298 	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1299 	    VMC_DUMPSAFE | VM_SLEEP);
1300 
1301 	sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1302 	    HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1303 	    sfmmu_hblkcache_destructor,
1304 	    NULL, (void *)HME1BLK_SZ,
1305 	    hat_memload1_arena, KMC_NOHASH);
1306 
1307 	pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1308 	    0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1309 
1310 	ism_blk_cache = kmem_cache_create("ism_blk_cache",
1311 	    sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1312 	    NULL, NULL, static_arena, KMC_NOHASH);
1313 
1314 	ism_ment_cache = kmem_cache_create("ism_ment_cache",
1315 	    sizeof (ism_ment_t), 0, NULL, NULL,
1316 	    NULL, NULL, NULL, 0);
1317 
1318 	/*
1319 	 * We grab the first hat for the kernel,
1320 	 */
1321 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
1322 	kas.a_hat = hat_alloc(&kas);
1323 	AS_LOCK_EXIT(&kas, &kas.a_lock);
1324 
1325 	/*
1326 	 * Initialize hblk_reserve.
1327 	 */
1328 	((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1329 	    va_to_pa((caddr_t)hblk_reserve);
1330 
1331 #ifndef UTSB_PHYS
1332 	/*
1333 	 * Reserve some kernel virtual address space for the locked TTEs
1334 	 * that allow us to probe the TSB from TL>0.
1335 	 */
1336 	utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1337 	    0, 0, NULL, NULL, VM_SLEEP);
1338 	utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1339 	    0, 0, NULL, NULL, VM_SLEEP);
1340 #endif
1341 
1342 #ifdef VAC
1343 	/*
1344 	 * The big page VAC handling code assumes VAC
1345 	 * will not be bigger than the smallest big
1346 	 * page- which is 64K.
1347 	 */
1348 	if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1349 		cmn_err(CE_PANIC, "VAC too big!");
1350 	}
1351 #endif
1352 
1353 	(void) xhat_init();
1354 
1355 	uhme_hash_pa = va_to_pa(uhme_hash);
1356 	khme_hash_pa = va_to_pa(khme_hash);
1357 
1358 	/*
1359 	 * Initialize relocation locks. kpr_suspendlock is held
1360 	 * at PIL_MAX to prevent interrupts from pinning the holder
1361 	 * of a suspended TTE which may access it leading to a
1362 	 * deadlock condition.
1363 	 */
1364 	mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1365 	mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1366 
1367 	/*
1368 	 * If Shared context support is disabled via /etc/system
1369 	 * set shctx_on to 0 here if it was set to 1 earlier in boot
1370 	 * sequence by cpu module initialization code.
1371 	 */
1372 	if (shctx_on && disable_shctx) {
1373 		shctx_on = 0;
1374 	}
1375 
1376 	if (shctx_on) {
1377 		srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1378 		    sizeof (srd_buckets[0]), KM_SLEEP);
1379 		for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1380 			mutex_init(&srd_buckets[i].srdb_lock, NULL,
1381 			    MUTEX_DEFAULT, NULL);
1382 		}
1383 
1384 		srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1385 		    0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1386 		    NULL, NULL, NULL, 0);
1387 		region_cache = kmem_cache_create("region_cache",
1388 		    sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1389 		    sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1390 		scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1391 		    0, sfmmu_scdcache_constructor,  sfmmu_scdcache_destructor,
1392 		    NULL, NULL, NULL, 0);
1393 	}
1394 
1395 	/*
1396 	 * Pre-allocate hrm_hashtab before enabling the collection of
1397 	 * refmod statistics.  Allocating on the fly would mean us
1398 	 * running the risk of suffering recursive mutex enters or
1399 	 * deadlocks.
1400 	 */
1401 	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1402 	    KM_SLEEP);
1403 
1404 	/* Allocate per-cpu pending freelist of hmeblks */
1405 	cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1406 	    KM_SLEEP);
1407 	cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1408 	    (uintptr_t)cpu_hme_pend, 64);
1409 
1410 	for (i = 0; i < NCPU; i++) {
1411 		mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1412 		    NULL);
1413 	}
1414 
1415 	if (cpu_hme_pend_thresh == 0) {
1416 		cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1417 	}
1418 }
1419 
1420 /*
1421  * Initialize locking for the hat layer, called early during boot.
1422  */
1423 static void
1424 hat_lock_init()
1425 {
1426 	int i;
1427 
1428 	/*
1429 	 * initialize the array of mutexes protecting a page's mapping
1430 	 * list and p_nrm field.
1431 	 */
1432 	for (i = 0; i < MML_TABLE_SIZE; i++)
1433 		mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1434 
1435 	if (kpm_enable) {
1436 		for (i = 0; i < kpmp_table_sz; i++) {
1437 			mutex_init(&kpmp_table[i].khl_mutex, NULL,
1438 			    MUTEX_DEFAULT, NULL);
1439 		}
1440 	}
1441 
1442 	/*
1443 	 * Initialize array of mutex locks that protects sfmmu fields and
1444 	 * TSB lists.
1445 	 */
1446 	for (i = 0; i < SFMMU_NUM_LOCK; i++)
1447 		mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1448 		    NULL);
1449 }
1450 
1451 #define	SFMMU_KERNEL_MAXVA \
1452 	(kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1453 
1454 /*
1455  * Allocate a hat structure.
1456  * Called when an address space first uses a hat.
1457  */
1458 struct hat *
1459 hat_alloc(struct as *as)
1460 {
1461 	sfmmu_t *sfmmup;
1462 	int i;
1463 	uint64_t cnum;
1464 	extern uint_t get_color_start(struct as *);
1465 
1466 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
1467 	sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1468 	sfmmup->sfmmu_as = as;
1469 	sfmmup->sfmmu_flags = 0;
1470 	sfmmup->sfmmu_tteflags = 0;
1471 	sfmmup->sfmmu_rtteflags = 0;
1472 	LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1473 
1474 	if (as == &kas) {
1475 		ksfmmup = sfmmup;
1476 		sfmmup->sfmmu_cext = 0;
1477 		cnum = KCONTEXT;
1478 
1479 		sfmmup->sfmmu_clrstart = 0;
1480 		sfmmup->sfmmu_tsb = NULL;
1481 		/*
1482 		 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1483 		 * to setup tsb_info for ksfmmup.
1484 		 */
1485 	} else {
1486 
1487 		/*
1488 		 * Just set to invalid ctx. When it faults, it will
1489 		 * get a valid ctx. This would avoid the situation
1490 		 * where we get a ctx, but it gets stolen and then
1491 		 * we fault when we try to run and so have to get
1492 		 * another ctx.
1493 		 */
1494 		sfmmup->sfmmu_cext = 0;
1495 		cnum = INVALID_CONTEXT;
1496 
1497 		/* initialize original physical page coloring bin */
1498 		sfmmup->sfmmu_clrstart = get_color_start(as);
1499 #ifdef DEBUG
1500 		if (tsb_random_size) {
1501 			uint32_t randval = (uint32_t)gettick() >> 4;
1502 			int size = randval % (tsb_max_growsize + 1);
1503 
1504 			/* chose a random tsb size for stress testing */
1505 			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1506 			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
1507 		} else
1508 #endif /* DEBUG */
1509 			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1510 			    default_tsb_size,
1511 			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
1512 		sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1513 		ASSERT(sfmmup->sfmmu_tsb != NULL);
1514 	}
1515 
1516 	ASSERT(max_mmu_ctxdoms > 0);
1517 	for (i = 0; i < max_mmu_ctxdoms; i++) {
1518 		sfmmup->sfmmu_ctxs[i].cnum = cnum;
1519 		sfmmup->sfmmu_ctxs[i].gnum = 0;
1520 	}
1521 
1522 	for (i = 0; i < max_mmu_page_sizes; i++) {
1523 		sfmmup->sfmmu_ttecnt[i] = 0;
1524 		sfmmup->sfmmu_scdrttecnt[i] = 0;
1525 		sfmmup->sfmmu_ismttecnt[i] = 0;
1526 		sfmmup->sfmmu_scdismttecnt[i] = 0;
1527 		sfmmup->sfmmu_pgsz[i] = TTE8K;
1528 	}
1529 	sfmmup->sfmmu_tsb0_4minflcnt = 0;
1530 	sfmmup->sfmmu_iblk = NULL;
1531 	sfmmup->sfmmu_ismhat = 0;
1532 	sfmmup->sfmmu_scdhat = 0;
1533 	sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1534 	if (sfmmup == ksfmmup) {
1535 		CPUSET_ALL(sfmmup->sfmmu_cpusran);
1536 	} else {
1537 		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1538 	}
1539 	sfmmup->sfmmu_free = 0;
1540 	sfmmup->sfmmu_rmstat = 0;
1541 	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1542 	sfmmup->sfmmu_xhat_provider = NULL;
1543 	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1544 	sfmmup->sfmmu_srdp = NULL;
1545 	SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1546 	bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1547 	sfmmup->sfmmu_scdp = NULL;
1548 	sfmmup->sfmmu_scd_link.next = NULL;
1549 	sfmmup->sfmmu_scd_link.prev = NULL;
1550 	return (sfmmup);
1551 }
1552 
1553 /*
1554  * Create per-MMU context domain kstats for a given MMU ctx.
1555  */
1556 static void
1557 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1558 {
1559 	mmu_ctx_stat_t	stat;
1560 	kstat_t		*mmu_kstat;
1561 
1562 	ASSERT(MUTEX_HELD(&cpu_lock));
1563 	ASSERT(mmu_ctxp->mmu_kstat == NULL);
1564 
1565 	mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1566 	    "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1567 
1568 	if (mmu_kstat == NULL) {
1569 		cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1570 		    mmu_ctxp->mmu_idx);
1571 	} else {
1572 		mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1573 		for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1574 			kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1575 			    mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1576 		mmu_ctxp->mmu_kstat = mmu_kstat;
1577 		kstat_install(mmu_kstat);
1578 	}
1579 }
1580 
1581 /*
1582  * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1583  * context domain information for a given CPU. If a platform does not
1584  * specify that interface, then the function below is used instead to return
1585  * default information. The defaults are as follows:
1586  *
1587  *	- The number of MMU context IDs supported on any CPU in the
1588  *	  system is 8K.
1589  *	- There is one MMU context domain per CPU.
1590  */
1591 /*ARGSUSED*/
1592 static void
1593 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1594 {
1595 	infop->mmu_nctxs = nctxs;
1596 	infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1597 }
1598 
1599 /*
1600  * Called during CPU initialization to set the MMU context-related information
1601  * for a CPU.
1602  *
1603  * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1604  */
1605 void
1606 sfmmu_cpu_init(cpu_t *cp)
1607 {
1608 	mmu_ctx_info_t	info;
1609 	mmu_ctx_t	*mmu_ctxp;
1610 
1611 	ASSERT(MUTEX_HELD(&cpu_lock));
1612 
1613 	if (&plat_cpuid_to_mmu_ctx_info == NULL)
1614 		sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1615 	else
1616 		plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1617 
1618 	ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1619 
1620 	if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1621 		/* Each mmu_ctx is cacheline aligned. */
1622 		mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1623 		bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1624 
1625 		mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1626 		    (void *)ipltospl(DISP_LEVEL));
1627 		mmu_ctxp->mmu_idx = info.mmu_idx;
1628 		mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1629 		/*
1630 		 * Globally for lifetime of a system,
1631 		 * gnum must always increase.
1632 		 * mmu_saved_gnum is protected by the cpu_lock.
1633 		 */
1634 		mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1635 		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1636 
1637 		sfmmu_mmu_kstat_create(mmu_ctxp);
1638 
1639 		mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1640 	} else {
1641 		ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1642 		ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1643 	}
1644 
1645 	/*
1646 	 * The mmu_lock is acquired here to prevent races with
1647 	 * the wrap-around code.
1648 	 */
1649 	mutex_enter(&mmu_ctxp->mmu_lock);
1650 
1651 
1652 	mmu_ctxp->mmu_ncpus++;
1653 	CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1654 	CPU_MMU_IDX(cp) = info.mmu_idx;
1655 	CPU_MMU_CTXP(cp) = mmu_ctxp;
1656 
1657 	mutex_exit(&mmu_ctxp->mmu_lock);
1658 }
1659 
1660 static void
1661 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1662 {
1663 	ASSERT(MUTEX_HELD(&cpu_lock));
1664 	ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1665 
1666 	mutex_destroy(&mmu_ctxp->mmu_lock);
1667 
1668 	if (mmu_ctxp->mmu_kstat)
1669 		kstat_delete(mmu_ctxp->mmu_kstat);
1670 
1671 	/* mmu_saved_gnum is protected by the cpu_lock. */
1672 	if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1673 		mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1674 
1675 	kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1676 }
1677 
1678 /*
1679  * Called to perform MMU context-related cleanup for a CPU.
1680  */
1681 void
1682 sfmmu_cpu_cleanup(cpu_t *cp)
1683 {
1684 	mmu_ctx_t	*mmu_ctxp;
1685 
1686 	ASSERT(MUTEX_HELD(&cpu_lock));
1687 
1688 	mmu_ctxp = CPU_MMU_CTXP(cp);
1689 	ASSERT(mmu_ctxp != NULL);
1690 
1691 	/*
1692 	 * The mmu_lock is acquired here to prevent races with
1693 	 * the wrap-around code.
1694 	 */
1695 	mutex_enter(&mmu_ctxp->mmu_lock);
1696 
1697 	CPU_MMU_CTXP(cp) = NULL;
1698 
1699 	CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1700 	if (--mmu_ctxp->mmu_ncpus == 0) {
1701 		mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1702 		mutex_exit(&mmu_ctxp->mmu_lock);
1703 		sfmmu_ctxdom_free(mmu_ctxp);
1704 		return;
1705 	}
1706 
1707 	mutex_exit(&mmu_ctxp->mmu_lock);
1708 }
1709 
1710 uint_t
1711 sfmmu_ctxdom_nctxs(int idx)
1712 {
1713 	return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1714 }
1715 
1716 #ifdef sun4v
1717 /*
1718  * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1719  * consistant after suspend/resume on system that can resume on a different
1720  * hardware than it was suspended.
1721  *
1722  * sfmmu_ctxdom_lock(void) locks all context domains and prevents new contexts
1723  * from being allocated.  It acquires all hat_locks, which blocks most access to
1724  * context data, except for a few cases that are handled separately or are
1725  * harmless.  It wraps each domain to increment gnum and invalidate on-CPU
1726  * contexts, and forces cnum to its max.  As a result of this call all user
1727  * threads that are running on CPUs trap and try to perform wrap around but
1728  * can't because hat_locks are taken.  Threads that were not on CPUs but started
1729  * by scheduler go to sfmmu_alloc_ctx() to aquire context without checking
1730  * hat_lock, but fail, because cnum == nctxs, and therefore also trap and block
1731  * on hat_lock trying to wrap.  sfmmu_ctxdom_lock() must be called before CPUs
1732  * are paused, else it could deadlock acquiring locks held by paused CPUs.
1733  *
1734  * sfmmu_ctxdoms_remove() removes context domains from every CPUs and records
1735  * the CPUs that had them.  It must be called after CPUs have been paused. This
1736  * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1737  * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1738  * runs with interrupts disabled.  When CPUs are later resumed, they may enter
1739  * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately
1740  * return failure.  Or, they will be blocked trying to acquire hat_lock. Thus
1741  * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1742  * accessing the old context domains.
1743  *
1744  * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1745  * allocates new context domains based on hardware layout.  It initializes
1746  * every CPU that had context domain before migration to have one again.
1747  * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1748  * could deadlock acquiring locks held by paused CPUs.
1749  *
1750  * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1751  * acquire new context ids and continue execution.
1752  *
1753  * Therefore functions should be called in the following order:
1754  *       suspend_routine()
1755  *		sfmmu_ctxdom_lock()
1756  *		pause_cpus()
1757  *		suspend()
1758  *			if (suspend failed)
1759  *				sfmmu_ctxdom_unlock()
1760  *		...
1761  *		sfmmu_ctxdom_remove()
1762  *		resume_cpus()
1763  *		sfmmu_ctxdom_update()
1764  *		sfmmu_ctxdom_unlock()
1765  */
1766 static cpuset_t sfmmu_ctxdoms_pset;
1767 
1768 void
1769 sfmmu_ctxdoms_remove()
1770 {
1771 	processorid_t	id;
1772 	cpu_t		*cp;
1773 
1774 	/*
1775 	 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1776 	 * be restored post-migration. A CPU may be powered off and not have a
1777 	 * domain, for example.
1778 	 */
1779 	CPUSET_ZERO(sfmmu_ctxdoms_pset);
1780 
1781 	for (id = 0; id < NCPU; id++) {
1782 		if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1783 			CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1784 			CPU_MMU_CTXP(cp) = NULL;
1785 		}
1786 	}
1787 }
1788 
1789 void
1790 sfmmu_ctxdoms_lock(void)
1791 {
1792 	int		idx;
1793 	mmu_ctx_t	*mmu_ctxp;
1794 
1795 	sfmmu_hat_lock_all();
1796 
1797 	/*
1798 	 * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1799 	 * hat_lock is always taken before calling it.
1800 	 *
1801 	 * For each domain, set mmu_cnum to max so no more contexts can be
1802 	 * allocated, and wrap to flush on-CPU contexts and force threads to
1803 	 * acquire a new context when we later drop hat_lock after migration.
1804 	 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1805 	 * but the latter uses CAS and will miscompare and not overwrite it.
1806 	 */
1807 	kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1808 	for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1809 		if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1810 			mutex_enter(&mmu_ctxp->mmu_lock);
1811 			mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1812 			/* make sure updated cnum visible */
1813 			membar_enter();
1814 			mutex_exit(&mmu_ctxp->mmu_lock);
1815 			sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1816 		}
1817 	}
1818 	kpreempt_enable();
1819 }
1820 
1821 void
1822 sfmmu_ctxdoms_unlock(void)
1823 {
1824 	sfmmu_hat_unlock_all();
1825 }
1826 
1827 void
1828 sfmmu_ctxdoms_update(void)
1829 {
1830 	processorid_t	id;
1831 	cpu_t		*cp;
1832 	uint_t		idx;
1833 	mmu_ctx_t	*mmu_ctxp;
1834 
1835 	/*
1836 	 * Free all context domains.  As side effect, this increases
1837 	 * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1838 	 * init gnum in the new domains, which therefore will be larger than the
1839 	 * sfmmu gnum for any process, guaranteeing that every process will see
1840 	 * a new generation and allocate a new context regardless of what new
1841 	 * domain it runs in.
1842 	 */
1843 	mutex_enter(&cpu_lock);
1844 
1845 	for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1846 		if (mmu_ctxs_tbl[idx] != NULL) {
1847 			mmu_ctxp = mmu_ctxs_tbl[idx];
1848 			mmu_ctxs_tbl[idx] = NULL;
1849 			sfmmu_ctxdom_free(mmu_ctxp);
1850 		}
1851 	}
1852 
1853 	for (id = 0; id < NCPU; id++) {
1854 		if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1855 		    (cp = cpu[id]) != NULL)
1856 			sfmmu_cpu_init(cp);
1857 	}
1858 	mutex_exit(&cpu_lock);
1859 }
1860 #endif
1861 
1862 /*
1863  * Hat_setup, makes an address space context the current active one.
1864  * In sfmmu this translates to setting the secondary context with the
1865  * corresponding context.
1866  */
1867 void
1868 hat_setup(struct hat *sfmmup, int allocflag)
1869 {
1870 	hatlock_t *hatlockp;
1871 
1872 	/* Init needs some special treatment. */
1873 	if (allocflag == HAT_INIT) {
1874 		/*
1875 		 * Make sure that we have
1876 		 * 1. a TSB
1877 		 * 2. a valid ctx that doesn't get stolen after this point.
1878 		 */
1879 		hatlockp = sfmmu_hat_enter(sfmmup);
1880 
1881 		/*
1882 		 * Swap in the TSB.  hat_init() allocates tsbinfos without
1883 		 * TSBs, but we need one for init, since the kernel does some
1884 		 * special things to set up its stack and needs the TSB to
1885 		 * resolve page faults.
1886 		 */
1887 		sfmmu_tsb_swapin(sfmmup, hatlockp);
1888 
1889 		sfmmu_get_ctx(sfmmup);
1890 
1891 		sfmmu_hat_exit(hatlockp);
1892 	} else {
1893 		ASSERT(allocflag == HAT_ALLOC);
1894 
1895 		hatlockp = sfmmu_hat_enter(sfmmup);
1896 		kpreempt_disable();
1897 
1898 		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1899 		/*
1900 		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter,
1901 		 * pagesize bits don't matter in this case since we are passing
1902 		 * INVALID_CONTEXT to it.
1903 		 * Compatibility Note: hw takes care of MMU_SCONTEXT1
1904 		 */
1905 		sfmmu_setctx_sec(INVALID_CONTEXT);
1906 		sfmmu_clear_utsbinfo();
1907 
1908 		kpreempt_enable();
1909 		sfmmu_hat_exit(hatlockp);
1910 	}
1911 }
1912 
1913 /*
1914  * Free all the translation resources for the specified address space.
1915  * Called from as_free when an address space is being destroyed.
1916  */
1917 void
1918 hat_free_start(struct hat *sfmmup)
1919 {
1920 	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
1921 	ASSERT(sfmmup != ksfmmup);
1922 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1923 
1924 	sfmmup->sfmmu_free = 1;
1925 	if (sfmmup->sfmmu_scdp != NULL) {
1926 		sfmmu_leave_scd(sfmmup, 0);
1927 	}
1928 
1929 	ASSERT(sfmmup->sfmmu_scdp == NULL);
1930 }
1931 
1932 void
1933 hat_free_end(struct hat *sfmmup)
1934 {
1935 	int i;
1936 
1937 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1938 	ASSERT(sfmmup->sfmmu_free == 1);
1939 	ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1940 	ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1941 	ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1942 	ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1943 	ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1944 	ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1945 
1946 	if (sfmmup->sfmmu_rmstat) {
1947 		hat_freestat(sfmmup->sfmmu_as, NULL);
1948 	}
1949 
1950 	while (sfmmup->sfmmu_tsb != NULL) {
1951 		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1952 		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1953 		sfmmup->sfmmu_tsb = next;
1954 	}
1955 
1956 	if (sfmmup->sfmmu_srdp != NULL) {
1957 		sfmmu_leave_srd(sfmmup);
1958 		ASSERT(sfmmup->sfmmu_srdp == NULL);
1959 		for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1960 			if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1961 				kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1962 				    SFMMU_L2_HMERLINKS_SIZE);
1963 				sfmmup->sfmmu_hmeregion_links[i] = NULL;
1964 			}
1965 		}
1966 	}
1967 	sfmmu_free_sfmmu(sfmmup);
1968 
1969 #ifdef DEBUG
1970 	for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1971 		ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1972 	}
1973 #endif
1974 
1975 	kmem_cache_free(sfmmuid_cache, sfmmup);
1976 }
1977 
1978 /*
1979  * Set up any translation structures, for the specified address space,
1980  * that are needed or preferred when the process is being swapped in.
1981  */
1982 /* ARGSUSED */
1983 void
1984 hat_swapin(struct hat *hat)
1985 {
1986 	ASSERT(hat->sfmmu_xhat_provider == NULL);
1987 }
1988 
1989 /*
1990  * Free all of the translation resources, for the specified address space,
1991  * that can be freed while the process is swapped out. Called from as_swapout.
1992  * Also, free up the ctx that this process was using.
1993  */
1994 void
1995 hat_swapout(struct hat *sfmmup)
1996 {
1997 	struct hmehash_bucket *hmebp;
1998 	struct hme_blk *hmeblkp;
1999 	struct hme_blk *pr_hblk = NULL;
2000 	struct hme_blk *nx_hblk;
2001 	int i;
2002 	struct hme_blk *list = NULL;
2003 	hatlock_t *hatlockp;
2004 	struct tsb_info *tsbinfop;
2005 	struct free_tsb {
2006 		struct free_tsb *next;
2007 		struct tsb_info *tsbinfop;
2008 	};			/* free list of TSBs */
2009 	struct free_tsb *freelist, *last, *next;
2010 
2011 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
2012 	SFMMU_STAT(sf_swapout);
2013 
2014 	/*
2015 	 * There is no way to go from an as to all its translations in sfmmu.
2016 	 * Here is one of the times when we take the big hit and traverse
2017 	 * the hash looking for hme_blks to free up.  Not only do we free up
2018 	 * this as hme_blks but all those that are free.  We are obviously
2019 	 * swapping because we need memory so let's free up as much
2020 	 * as we can.
2021 	 *
2022 	 * Note that we don't flush TLB/TSB here -- it's not necessary
2023 	 * because:
2024 	 *  1) we free the ctx we're using and throw away the TSB(s);
2025 	 *  2) processes aren't runnable while being swapped out.
2026 	 */
2027 	ASSERT(sfmmup != KHATID);
2028 	for (i = 0; i <= UHMEHASH_SZ; i++) {
2029 		hmebp = &uhme_hash[i];
2030 		SFMMU_HASH_LOCK(hmebp);
2031 		hmeblkp = hmebp->hmeblkp;
2032 		pr_hblk = NULL;
2033 		while (hmeblkp) {
2034 
2035 			ASSERT(!hmeblkp->hblk_xhat_bit);
2036 
2037 			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2038 			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2039 				ASSERT(!hmeblkp->hblk_shared);
2040 				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2041 				    (caddr_t)get_hblk_base(hmeblkp),
2042 				    get_hblk_endaddr(hmeblkp),
2043 				    NULL, HAT_UNLOAD);
2044 			}
2045 			nx_hblk = hmeblkp->hblk_next;
2046 			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2047 				ASSERT(!hmeblkp->hblk_lckcnt);
2048 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2049 				    &list, 0);
2050 			} else {
2051 				pr_hblk = hmeblkp;
2052 			}
2053 			hmeblkp = nx_hblk;
2054 		}
2055 		SFMMU_HASH_UNLOCK(hmebp);
2056 	}
2057 
2058 	sfmmu_hblks_list_purge(&list, 0);
2059 
2060 	/*
2061 	 * Now free up the ctx so that others can reuse it.
2062 	 */
2063 	hatlockp = sfmmu_hat_enter(sfmmup);
2064 
2065 	sfmmu_invalidate_ctx(sfmmup);
2066 
2067 	/*
2068 	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2069 	 * If TSBs were never swapped in, just return.
2070 	 * This implies that we don't support partial swapping
2071 	 * of TSBs -- either all are swapped out, or none are.
2072 	 *
2073 	 * We must hold the HAT lock here to prevent racing with another
2074 	 * thread trying to unmap TTEs from the TSB or running the post-
2075 	 * relocator after relocating the TSB's memory.  Unfortunately, we
2076 	 * can't free memory while holding the HAT lock or we could
2077 	 * deadlock, so we build a list of TSBs to be freed after marking
2078 	 * the tsbinfos as swapped out and free them after dropping the
2079 	 * lock.
2080 	 */
2081 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2082 		sfmmu_hat_exit(hatlockp);
2083 		return;
2084 	}
2085 
2086 	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2087 	last = freelist = NULL;
2088 	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2089 	    tsbinfop = tsbinfop->tsb_next) {
2090 		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2091 
2092 		/*
2093 		 * Cast the TSB into a struct free_tsb and put it on the free
2094 		 * list.
2095 		 */
2096 		if (freelist == NULL) {
2097 			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2098 		} else {
2099 			last->next = (struct free_tsb *)tsbinfop->tsb_va;
2100 			last = last->next;
2101 		}
2102 		last->next = NULL;
2103 		last->tsbinfop = tsbinfop;
2104 		tsbinfop->tsb_flags |= TSB_SWAPPED;
2105 		/*
2106 		 * Zero out the TTE to clear the valid bit.
2107 		 * Note we can't use a value like 0xbad because we want to
2108 		 * ensure diagnostic bits are NEVER set on TTEs that might
2109 		 * be loaded.  The intent is to catch any invalid access
2110 		 * to the swapped TSB, such as a thread running with a valid
2111 		 * context without first calling sfmmu_tsb_swapin() to
2112 		 * allocate TSB memory.
2113 		 */
2114 		tsbinfop->tsb_tte.ll = 0;
2115 	}
2116 
2117 	/* Now we can drop the lock and free the TSB memory. */
2118 	sfmmu_hat_exit(hatlockp);
2119 	for (; freelist != NULL; freelist = next) {
2120 		next = freelist->next;
2121 		sfmmu_tsb_free(freelist->tsbinfop);
2122 	}
2123 }
2124 
2125 /*
2126  * Duplicate the translations of an as into another newas
2127  */
2128 /* ARGSUSED */
2129 int
2130 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2131 	uint_t flag)
2132 {
2133 	sf_srd_t *srdp;
2134 	sf_scd_t *scdp;
2135 	int i;
2136 	extern uint_t get_color_start(struct as *);
2137 
2138 	ASSERT(hat->sfmmu_xhat_provider == NULL);
2139 	ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2140 	    (flag == HAT_DUP_SRD));
2141 	ASSERT(hat != ksfmmup);
2142 	ASSERT(newhat != ksfmmup);
2143 	ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2144 
2145 	if (flag == HAT_DUP_COW) {
2146 		panic("hat_dup: HAT_DUP_COW not supported");
2147 	}
2148 
2149 	if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2150 		ASSERT(srdp->srd_evp != NULL);
2151 		VN_HOLD(srdp->srd_evp);
2152 		ASSERT(srdp->srd_refcnt > 0);
2153 		newhat->sfmmu_srdp = srdp;
2154 		atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
2155 	}
2156 
2157 	/*
2158 	 * HAT_DUP_ALL flag is used after as duplication is done.
2159 	 */
2160 	if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2161 		ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2162 		newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2163 		if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2164 			newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2165 		}
2166 
2167 		/* check if need to join scd */
2168 		if ((scdp = hat->sfmmu_scdp) != NULL &&
2169 		    newhat->sfmmu_scdp != scdp) {
2170 			int ret;
2171 			SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2172 			    &scdp->scd_region_map, ret);
2173 			ASSERT(ret);
2174 			sfmmu_join_scd(scdp, newhat);
2175 			ASSERT(newhat->sfmmu_scdp == scdp &&
2176 			    scdp->scd_refcnt >= 2);
2177 			for (i = 0; i < max_mmu_page_sizes; i++) {
2178 				newhat->sfmmu_ismttecnt[i] =
2179 				    hat->sfmmu_ismttecnt[i];
2180 				newhat->sfmmu_scdismttecnt[i] =
2181 				    hat->sfmmu_scdismttecnt[i];
2182 			}
2183 		}
2184 
2185 		sfmmu_check_page_sizes(newhat, 1);
2186 	}
2187 
2188 	if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2189 	    update_proc_pgcolorbase_after_fork != 0) {
2190 		hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2191 	}
2192 	return (0);
2193 }
2194 
2195 void
2196 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2197 	uint_t attr, uint_t flags)
2198 {
2199 	hat_do_memload(hat, addr, pp, attr, flags,
2200 	    SFMMU_INVALID_SHMERID);
2201 }
2202 
2203 void
2204 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2205 	uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2206 {
2207 	uint_t rid;
2208 	if (rcookie == HAT_INVALID_REGION_COOKIE ||
2209 	    hat->sfmmu_xhat_provider != NULL) {
2210 		hat_do_memload(hat, addr, pp, attr, flags,
2211 		    SFMMU_INVALID_SHMERID);
2212 		return;
2213 	}
2214 	rid = (uint_t)((uint64_t)rcookie);
2215 	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2216 	hat_do_memload(hat, addr, pp, attr, flags, rid);
2217 }
2218 
2219 /*
2220  * Set up addr to map to page pp with protection prot.
2221  * As an optimization we also load the TSB with the
2222  * corresponding tte but it is no big deal if  the tte gets kicked out.
2223  */
2224 static void
2225 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2226 	uint_t attr, uint_t flags, uint_t rid)
2227 {
2228 	tte_t tte;
2229 
2230 
2231 	ASSERT(hat != NULL);
2232 	ASSERT(PAGE_LOCKED(pp));
2233 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2234 	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2235 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2236 	SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2237 
2238 	if (PP_ISFREE(pp)) {
2239 		panic("hat_memload: loading a mapping to free page %p",
2240 		    (void *)pp);
2241 	}
2242 
2243 	if (hat->sfmmu_xhat_provider) {
2244 		/* no regions for xhats */
2245 		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2246 		XHAT_MEMLOAD(hat, addr, pp, attr, flags);
2247 		return;
2248 	}
2249 
2250 	ASSERT((hat == ksfmmup) ||
2251 	    AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2252 
2253 	if (flags & ~SFMMU_LOAD_ALLFLAG)
2254 		cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2255 		    flags & ~SFMMU_LOAD_ALLFLAG);
2256 
2257 	if (hat->sfmmu_rmstat)
2258 		hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2259 
2260 #if defined(SF_ERRATA_57)
2261 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2262 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
2263 	    !(flags & HAT_LOAD_SHARE)) {
2264 		cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2265 		    " page executable");
2266 		attr &= ~PROT_EXEC;
2267 	}
2268 #endif
2269 
2270 	sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2271 	(void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2272 
2273 	/*
2274 	 * Check TSB and TLB page sizes.
2275 	 */
2276 	if ((flags & HAT_LOAD_SHARE) == 0) {
2277 		sfmmu_check_page_sizes(hat, 1);
2278 	}
2279 }
2280 
2281 /*
2282  * hat_devload can be called to map real memory (e.g.
2283  * /dev/kmem) and even though hat_devload will determine pf is
2284  * for memory, it will be unable to get a shared lock on the
2285  * page (because someone else has it exclusively) and will
2286  * pass dp = NULL.  If tteload doesn't get a non-NULL
2287  * page pointer it can't cache memory.
2288  */
2289 void
2290 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2291 	uint_t attr, int flags)
2292 {
2293 	tte_t tte;
2294 	struct page *pp = NULL;
2295 	int use_lgpg = 0;
2296 
2297 	ASSERT(hat != NULL);
2298 
2299 	if (hat->sfmmu_xhat_provider) {
2300 		XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
2301 		return;
2302 	}
2303 
2304 	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2305 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2306 	ASSERT((hat == ksfmmup) ||
2307 	    AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2308 	if (len == 0)
2309 		panic("hat_devload: zero len");
2310 	if (flags & ~SFMMU_LOAD_ALLFLAG)
2311 		cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2312 		    flags & ~SFMMU_LOAD_ALLFLAG);
2313 
2314 #if defined(SF_ERRATA_57)
2315 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2316 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
2317 	    !(flags & HAT_LOAD_SHARE)) {
2318 		cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2319 		    " page executable");
2320 		attr &= ~PROT_EXEC;
2321 	}
2322 #endif
2323 
2324 	/*
2325 	 * If it's a memory page find its pp
2326 	 */
2327 	if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2328 		pp = page_numtopp_nolock(pfn);
2329 		if (pp == NULL) {
2330 			flags |= HAT_LOAD_NOCONSIST;
2331 		} else {
2332 			if (PP_ISFREE(pp)) {
2333 				panic("hat_memload: loading "
2334 				    "a mapping to free page %p",
2335 				    (void *)pp);
2336 			}
2337 			if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2338 				panic("hat_memload: loading a mapping "
2339 				    "to unlocked relocatable page %p",
2340 				    (void *)pp);
2341 			}
2342 			ASSERT(len == MMU_PAGESIZE);
2343 		}
2344 	}
2345 
2346 	if (hat->sfmmu_rmstat)
2347 		hat_resvstat(len, hat->sfmmu_as, addr);
2348 
2349 	if (flags & HAT_LOAD_NOCONSIST) {
2350 		attr |= SFMMU_UNCACHEVTTE;
2351 		use_lgpg = 1;
2352 	}
2353 	if (!pf_is_memory(pfn)) {
2354 		attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2355 		use_lgpg = 1;
2356 		switch (attr & HAT_ORDER_MASK) {
2357 			case HAT_STRICTORDER:
2358 			case HAT_UNORDERED_OK:
2359 				/*
2360 				 * we set the side effect bit for all non
2361 				 * memory mappings unless merging is ok
2362 				 */
2363 				attr |= SFMMU_SIDEFFECT;
2364 				break;
2365 			case HAT_MERGING_OK:
2366 			case HAT_LOADCACHING_OK:
2367 			case HAT_STORECACHING_OK:
2368 				break;
2369 			default:
2370 				panic("hat_devload: bad attr");
2371 				break;
2372 		}
2373 	}
2374 	while (len) {
2375 		if (!use_lgpg) {
2376 			sfmmu_memtte(&tte, pfn, attr, TTE8K);
2377 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2378 			    flags, SFMMU_INVALID_SHMERID);
2379 			len -= MMU_PAGESIZE;
2380 			addr += MMU_PAGESIZE;
2381 			pfn++;
2382 			continue;
2383 		}
2384 		/*
2385 		 *  try to use large pages, check va/pa alignments
2386 		 *  Note that 32M/256M page sizes are not (yet) supported.
2387 		 */
2388 		if ((len >= MMU_PAGESIZE4M) &&
2389 		    !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2390 		    !(disable_large_pages & (1 << TTE4M)) &&
2391 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2392 			sfmmu_memtte(&tte, pfn, attr, TTE4M);
2393 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2394 			    flags, SFMMU_INVALID_SHMERID);
2395 			len -= MMU_PAGESIZE4M;
2396 			addr += MMU_PAGESIZE4M;
2397 			pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2398 		} else if ((len >= MMU_PAGESIZE512K) &&
2399 		    !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2400 		    !(disable_large_pages & (1 << TTE512K)) &&
2401 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2402 			sfmmu_memtte(&tte, pfn, attr, TTE512K);
2403 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2404 			    flags, SFMMU_INVALID_SHMERID);
2405 			len -= MMU_PAGESIZE512K;
2406 			addr += MMU_PAGESIZE512K;
2407 			pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2408 		} else if ((len >= MMU_PAGESIZE64K) &&
2409 		    !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2410 		    !(disable_large_pages & (1 << TTE64K)) &&
2411 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2412 			sfmmu_memtte(&tte, pfn, attr, TTE64K);
2413 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2414 			    flags, SFMMU_INVALID_SHMERID);
2415 			len -= MMU_PAGESIZE64K;
2416 			addr += MMU_PAGESIZE64K;
2417 			pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2418 		} else {
2419 			sfmmu_memtte(&tte, pfn, attr, TTE8K);
2420 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2421 			    flags, SFMMU_INVALID_SHMERID);
2422 			len -= MMU_PAGESIZE;
2423 			addr += MMU_PAGESIZE;
2424 			pfn++;
2425 		}
2426 	}
2427 
2428 	/*
2429 	 * Check TSB and TLB page sizes.
2430 	 */
2431 	if ((flags & HAT_LOAD_SHARE) == 0) {
2432 		sfmmu_check_page_sizes(hat, 1);
2433 	}
2434 }
2435 
2436 void
2437 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2438 	struct page **pps, uint_t attr, uint_t flags)
2439 {
2440 	hat_do_memload_array(hat, addr, len, pps, attr, flags,
2441 	    SFMMU_INVALID_SHMERID);
2442 }
2443 
2444 void
2445 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2446 	struct page **pps, uint_t attr, uint_t flags,
2447 	hat_region_cookie_t rcookie)
2448 {
2449 	uint_t rid;
2450 	if (rcookie == HAT_INVALID_REGION_COOKIE ||
2451 	    hat->sfmmu_xhat_provider != NULL) {
2452 		hat_do_memload_array(hat, addr, len, pps, attr, flags,
2453 		    SFMMU_INVALID_SHMERID);
2454 		return;
2455 	}
2456 	rid = (uint_t)((uint64_t)rcookie);
2457 	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2458 	hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2459 }
2460 
2461 /*
2462  * Map the largest extend possible out of the page array. The array may NOT
2463  * be in order.  The largest possible mapping a page can have
2464  * is specified in the p_szc field.  The p_szc field
2465  * cannot change as long as there any mappings (large or small)
2466  * to any of the pages that make up the large page. (ie. any
2467  * promotion/demotion of page size is not up to the hat but up to
2468  * the page free list manager).  The array
2469  * should consist of properly aligned contigous pages that are
2470  * part of a big page for a large mapping to be created.
2471  */
2472 static void
2473 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2474 	struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2475 {
2476 	int  ttesz;
2477 	size_t mapsz;
2478 	pgcnt_t	numpg, npgs;
2479 	tte_t tte;
2480 	page_t *pp;
2481 	uint_t large_pages_disable;
2482 
2483 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2484 	SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2485 
2486 	if (hat->sfmmu_xhat_provider) {
2487 		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2488 		XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
2489 		return;
2490 	}
2491 
2492 	if (hat->sfmmu_rmstat)
2493 		hat_resvstat(len, hat->sfmmu_as, addr);
2494 
2495 #if defined(SF_ERRATA_57)
2496 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2497 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
2498 	    !(flags & HAT_LOAD_SHARE)) {
2499 		cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2500 		    "user page executable");
2501 		attr &= ~PROT_EXEC;
2502 	}
2503 #endif
2504 
2505 	/* Get number of pages */
2506 	npgs = len >> MMU_PAGESHIFT;
2507 
2508 	if (flags & HAT_LOAD_SHARE) {
2509 		large_pages_disable = disable_ism_large_pages;
2510 	} else {
2511 		large_pages_disable = disable_large_pages;
2512 	}
2513 
2514 	if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2515 		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2516 		    rid);
2517 		return;
2518 	}
2519 
2520 	while (npgs >= NHMENTS) {
2521 		pp = *pps;
2522 		for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2523 			/*
2524 			 * Check if this page size is disabled.
2525 			 */
2526 			if (large_pages_disable & (1 << ttesz))
2527 				continue;
2528 
2529 			numpg = TTEPAGES(ttesz);
2530 			mapsz = numpg << MMU_PAGESHIFT;
2531 			if ((npgs >= numpg) &&
2532 			    IS_P2ALIGNED(addr, mapsz) &&
2533 			    IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2534 				/*
2535 				 * At this point we have enough pages and
2536 				 * we know the virtual address and the pfn
2537 				 * are properly aligned.  We still need
2538 				 * to check for physical contiguity but since
2539 				 * it is very likely that this is the case
2540 				 * we will assume they are so and undo
2541 				 * the request if necessary.  It would
2542 				 * be great if we could get a hint flag
2543 				 * like HAT_CONTIG which would tell us
2544 				 * the pages are contigous for sure.
2545 				 */
2546 				sfmmu_memtte(&tte, (*pps)->p_pagenum,
2547 				    attr, ttesz);
2548 				if (!sfmmu_tteload_array(hat, &tte, addr,
2549 				    pps, flags, rid)) {
2550 					break;
2551 				}
2552 			}
2553 		}
2554 		if (ttesz == TTE8K) {
2555 			/*
2556 			 * We were not able to map array using a large page
2557 			 * batch a hmeblk or fraction at a time.
2558 			 */
2559 			numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2560 			    & (NHMENTS-1);
2561 			numpg = NHMENTS - numpg;
2562 			ASSERT(numpg <= npgs);
2563 			mapsz = numpg * MMU_PAGESIZE;
2564 			sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2565 			    numpg, rid);
2566 		}
2567 		addr += mapsz;
2568 		npgs -= numpg;
2569 		pps += numpg;
2570 	}
2571 
2572 	if (npgs) {
2573 		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2574 		    rid);
2575 	}
2576 
2577 	/*
2578 	 * Check TSB and TLB page sizes.
2579 	 */
2580 	if ((flags & HAT_LOAD_SHARE) == 0) {
2581 		sfmmu_check_page_sizes(hat, 1);
2582 	}
2583 }
2584 
2585 /*
2586  * Function tries to batch 8K pages into the same hme blk.
2587  */
2588 static void
2589 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2590 		    uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2591 {
2592 	tte_t	tte;
2593 	page_t *pp;
2594 	struct hmehash_bucket *hmebp;
2595 	struct hme_blk *hmeblkp;
2596 	int	index;
2597 
2598 	while (npgs) {
2599 		/*
2600 		 * Acquire the hash bucket.
2601 		 */
2602 		hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2603 		    rid);
2604 		ASSERT(hmebp);
2605 
2606 		/*
2607 		 * Find the hment block.
2608 		 */
2609 		hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2610 		    TTE8K, flags, rid);
2611 		ASSERT(hmeblkp);
2612 
2613 		do {
2614 			/*
2615 			 * Make the tte.
2616 			 */
2617 			pp = *pps;
2618 			sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2619 
2620 			/*
2621 			 * Add the translation.
2622 			 */
2623 			(void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2624 			    vaddr, pps, flags, rid);
2625 
2626 			/*
2627 			 * Goto next page.
2628 			 */
2629 			pps++;
2630 			npgs--;
2631 
2632 			/*
2633 			 * Goto next address.
2634 			 */
2635 			vaddr += MMU_PAGESIZE;
2636 
2637 			/*
2638 			 * Don't crossover into a different hmentblk.
2639 			 */
2640 			index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2641 			    (NHMENTS-1));
2642 
2643 		} while (index != 0 && npgs != 0);
2644 
2645 		/*
2646 		 * Release the hash bucket.
2647 		 */
2648 
2649 		sfmmu_tteload_release_hashbucket(hmebp);
2650 	}
2651 }
2652 
2653 /*
2654  * Construct a tte for a page:
2655  *
2656  * tte_valid = 1
2657  * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2658  * tte_size = size
2659  * tte_nfo = attr & HAT_NOFAULT
2660  * tte_ie = attr & HAT_STRUCTURE_LE
2661  * tte_hmenum = hmenum
2662  * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2663  * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2664  * tte_ref = 1 (optimization)
2665  * tte_wr_perm = attr & PROT_WRITE;
2666  * tte_no_sync = attr & HAT_NOSYNC
2667  * tte_lock = attr & SFMMU_LOCKTTE
2668  * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2669  * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2670  * tte_e = attr & SFMMU_SIDEFFECT
2671  * tte_priv = !(attr & PROT_USER)
2672  * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2673  * tte_glb = 0
2674  */
2675 void
2676 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2677 {
2678 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2679 
2680 	ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2681 	ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2682 
2683 	if (TTE_IS_NOSYNC(ttep)) {
2684 		TTE_SET_REF(ttep);
2685 		if (TTE_IS_WRITABLE(ttep)) {
2686 			TTE_SET_MOD(ttep);
2687 		}
2688 	}
2689 	if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2690 		panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2691 	}
2692 }
2693 
2694 /*
2695  * This function will add a translation to the hme_blk and allocate the
2696  * hme_blk if one does not exist.
2697  * If a page structure is specified then it will add the
2698  * corresponding hment to the mapping list.
2699  * It will also update the hmenum field for the tte.
2700  *
2701  * Currently this function is only used for kernel mappings.
2702  * So pass invalid region to sfmmu_tteload_array().
2703  */
2704 void
2705 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2706 	uint_t flags)
2707 {
2708 	ASSERT(sfmmup == ksfmmup);
2709 	(void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2710 	    SFMMU_INVALID_SHMERID);
2711 }
2712 
2713 /*
2714  * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2715  * Assumes that a particular page size may only be resident in one TSB.
2716  */
2717 static void
2718 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2719 {
2720 	struct tsb_info *tsbinfop = NULL;
2721 	uint64_t tag;
2722 	struct tsbe *tsbe_addr;
2723 	uint64_t tsb_base;
2724 	uint_t tsb_size;
2725 	int vpshift = MMU_PAGESHIFT;
2726 	int phys = 0;
2727 
2728 	if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2729 		phys = ktsb_phys;
2730 		if (ttesz >= TTE4M) {
2731 #ifndef sun4v
2732 			ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2733 #endif
2734 			tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2735 			tsb_size = ktsb4m_szcode;
2736 		} else {
2737 			tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2738 			tsb_size = ktsb_szcode;
2739 		}
2740 	} else {
2741 		SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2742 
2743 		/*
2744 		 * If there isn't a TSB for this page size, or the TSB is
2745 		 * swapped out, there is nothing to do.  Note that the latter
2746 		 * case seems impossible but can occur if hat_pageunload()
2747 		 * is called on an ISM mapping while the process is swapped
2748 		 * out.
2749 		 */
2750 		if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2751 			return;
2752 
2753 		/*
2754 		 * If another thread is in the middle of relocating a TSB
2755 		 * we can't unload the entry so set a flag so that the
2756 		 * TSB will be flushed before it can be accessed by the
2757 		 * process.
2758 		 */
2759 		if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2760 			if (ttep == NULL)
2761 				tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2762 			return;
2763 		}
2764 #if defined(UTSB_PHYS)
2765 		phys = 1;
2766 		tsb_base = (uint64_t)tsbinfop->tsb_pa;
2767 #else
2768 		tsb_base = (uint64_t)tsbinfop->tsb_va;
2769 #endif
2770 		tsb_size = tsbinfop->tsb_szc;
2771 	}
2772 	if (ttesz >= TTE4M)
2773 		vpshift = MMU_PAGESHIFT4M;
2774 
2775 	tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2776 	tag = sfmmu_make_tsbtag(vaddr);
2777 
2778 	if (ttep == NULL) {
2779 		sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2780 	} else {
2781 		if (ttesz >= TTE4M) {
2782 			SFMMU_STAT(sf_tsb_load4m);
2783 		} else {
2784 			SFMMU_STAT(sf_tsb_load8k);
2785 		}
2786 
2787 		sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2788 	}
2789 }
2790 
2791 /*
2792  * Unmap all entries from [start, end) matching the given page size.
2793  *
2794  * This function is used primarily to unmap replicated 64K or 512K entries
2795  * from the TSB that are inserted using the base page size TSB pointer, but
2796  * it may also be called to unmap a range of addresses from the TSB.
2797  */
2798 void
2799 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2800 {
2801 	struct tsb_info *tsbinfop;
2802 	uint64_t tag;
2803 	struct tsbe *tsbe_addr;
2804 	caddr_t vaddr;
2805 	uint64_t tsb_base;
2806 	int vpshift, vpgsz;
2807 	uint_t tsb_size;
2808 	int phys = 0;
2809 
2810 	/*
2811 	 * Assumptions:
2812 	 *  If ttesz == 8K, 64K or 512K, we walk through the range 8K
2813 	 *  at a time shooting down any valid entries we encounter.
2814 	 *
2815 	 *  If ttesz >= 4M we walk the range 4M at a time shooting
2816 	 *  down any valid mappings we find.
2817 	 */
2818 	if (sfmmup == ksfmmup) {
2819 		phys = ktsb_phys;
2820 		if (ttesz >= TTE4M) {
2821 #ifndef sun4v
2822 			ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2823 #endif
2824 			tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2825 			tsb_size = ktsb4m_szcode;
2826 		} else {
2827 			tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2828 			tsb_size = ktsb_szcode;
2829 		}
2830 	} else {
2831 		SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2832 
2833 		/*
2834 		 * If there isn't a TSB for this page size, or the TSB is
2835 		 * swapped out, there is nothing to do.  Note that the latter
2836 		 * case seems impossible but can occur if hat_pageunload()
2837 		 * is called on an ISM mapping while the process is swapped
2838 		 * out.
2839 		 */
2840 		if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2841 			return;
2842 
2843 		/*
2844 		 * If another thread is in the middle of relocating a TSB
2845 		 * we can't unload the entry so set a flag so that the
2846 		 * TSB will be flushed before it can be accessed by the
2847 		 * process.
2848 		 */
2849 		if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2850 			tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2851 			return;
2852 		}
2853 #if defined(UTSB_PHYS)
2854 		phys = 1;
2855 		tsb_base = (uint64_t)tsbinfop->tsb_pa;
2856 #else
2857 		tsb_base = (uint64_t)tsbinfop->tsb_va;
2858 #endif
2859 		tsb_size = tsbinfop->tsb_szc;
2860 	}
2861 	if (ttesz >= TTE4M) {
2862 		vpshift = MMU_PAGESHIFT4M;
2863 		vpgsz = MMU_PAGESIZE4M;
2864 	} else {
2865 		vpshift = MMU_PAGESHIFT;
2866 		vpgsz = MMU_PAGESIZE;
2867 	}
2868 
2869 	for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2870 		tag = sfmmu_make_tsbtag(vaddr);
2871 		tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2872 		sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2873 	}
2874 }
2875 
2876 /*
2877  * Select the optimum TSB size given the number of mappings
2878  * that need to be cached.
2879  */
2880 static int
2881 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2882 {
2883 	int szc = 0;
2884 
2885 #ifdef DEBUG
2886 	if (tsb_grow_stress) {
2887 		uint32_t randval = (uint32_t)gettick() >> 4;
2888 		return (randval % (tsb_max_growsize + 1));
2889 	}
2890 #endif	/* DEBUG */
2891 
2892 	while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2893 		szc++;
2894 	return (szc);
2895 }
2896 
2897 /*
2898  * This function will add a translation to the hme_blk and allocate the
2899  * hme_blk if one does not exist.
2900  * If a page structure is specified then it will add the
2901  * corresponding hment to the mapping list.
2902  * It will also update the hmenum field for the tte.
2903  * Furthermore, it attempts to create a large page translation
2904  * for <addr,hat> at page array pps.  It assumes addr and first
2905  * pp is correctly aligned.  It returns 0 if successful and 1 otherwise.
2906  */
2907 static int
2908 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2909 	page_t **pps, uint_t flags, uint_t rid)
2910 {
2911 	struct hmehash_bucket *hmebp;
2912 	struct hme_blk *hmeblkp;
2913 	int 	ret;
2914 	uint_t	size;
2915 
2916 	/*
2917 	 * Get mapping size.
2918 	 */
2919 	size = TTE_CSZ(ttep);
2920 	ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2921 
2922 	/*
2923 	 * Acquire the hash bucket.
2924 	 */
2925 	hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2926 	ASSERT(hmebp);
2927 
2928 	/*
2929 	 * Find the hment block.
2930 	 */
2931 	hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2932 	    rid);
2933 	ASSERT(hmeblkp);
2934 
2935 	/*
2936 	 * Add the translation.
2937 	 */
2938 	ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2939 	    rid);
2940 
2941 	/*
2942 	 * Release the hash bucket.
2943 	 */
2944 	sfmmu_tteload_release_hashbucket(hmebp);
2945 
2946 	return (ret);
2947 }
2948 
2949 /*
2950  * Function locks and returns a pointer to the hash bucket for vaddr and size.
2951  */
2952 static struct hmehash_bucket *
2953 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2954     uint_t rid)
2955 {
2956 	struct hmehash_bucket *hmebp;
2957 	int hmeshift;
2958 	void *htagid = sfmmutohtagid(sfmmup, rid);
2959 
2960 	ASSERT(htagid != NULL);
2961 
2962 	hmeshift = HME_HASH_SHIFT(size);
2963 
2964 	hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2965 
2966 	SFMMU_HASH_LOCK(hmebp);
2967 
2968 	return (hmebp);
2969 }
2970 
2971 /*
2972  * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2973  * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is
2974  * allocated.
2975  */
2976 static struct hme_blk *
2977 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2978 	caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2979 {
2980 	hmeblk_tag hblktag;
2981 	int hmeshift;
2982 	struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2983 
2984 	SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2985 
2986 	hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2987 	ASSERT(hblktag.htag_id != NULL);
2988 	hmeshift = HME_HASH_SHIFT(size);
2989 	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2990 	hblktag.htag_rehash = HME_HASH_REHASH(size);
2991 	hblktag.htag_rid = rid;
2992 
2993 ttearray_realloc:
2994 
2995 	HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2996 
2997 	/*
2998 	 * We block until hblk_reserve_lock is released; it's held by
2999 	 * the thread, temporarily using hblk_reserve, until hblk_reserve is
3000 	 * replaced by a hblk from sfmmu8_cache.
3001 	 */
3002 	if (hmeblkp == (struct hme_blk *)hblk_reserve &&
3003 	    hblk_reserve_thread != curthread) {
3004 		SFMMU_HASH_UNLOCK(hmebp);
3005 		mutex_enter(&hblk_reserve_lock);
3006 		mutex_exit(&hblk_reserve_lock);
3007 		SFMMU_STAT(sf_hblk_reserve_hit);
3008 		SFMMU_HASH_LOCK(hmebp);
3009 		goto ttearray_realloc;
3010 	}
3011 
3012 	if (hmeblkp == NULL) {
3013 		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3014 		    hblktag, flags, rid);
3015 		ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3016 		ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3017 	} else {
3018 		/*
3019 		 * It is possible for 8k and 64k hblks to collide since they
3020 		 * have the same rehash value. This is because we
3021 		 * lazily free hblks and 8K/64K blks could be lingering.
3022 		 * If we find size mismatch we free the block and & try again.
3023 		 */
3024 		if (get_hblk_ttesz(hmeblkp) != size) {
3025 			ASSERT(!hmeblkp->hblk_vcnt);
3026 			ASSERT(!hmeblkp->hblk_hmecnt);
3027 			sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3028 			    &list, 0);
3029 			goto ttearray_realloc;
3030 		}
3031 		if (hmeblkp->hblk_shw_bit) {
3032 			/*
3033 			 * if the hblk was previously used as a shadow hblk then
3034 			 * we will change it to a normal hblk
3035 			 */
3036 			ASSERT(!hmeblkp->hblk_shared);
3037 			if (hmeblkp->hblk_shw_mask) {
3038 				sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3039 				ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3040 				goto ttearray_realloc;
3041 			} else {
3042 				hmeblkp->hblk_shw_bit = 0;
3043 			}
3044 		}
3045 		SFMMU_STAT(sf_hblk_hit);
3046 	}
3047 
3048 	/*
3049 	 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
3050 	 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
3051 	 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
3052 	 * just add these hmeblks to the per-cpu pending queue.
3053 	 */
3054 	sfmmu_hblks_list_purge(&list, 1);
3055 
3056 	ASSERT(get_hblk_ttesz(hmeblkp) == size);
3057 	ASSERT(!hmeblkp->hblk_shw_bit);
3058 	ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3059 	ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3060 	ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3061 
3062 	return (hmeblkp);
3063 }
3064 
3065 /*
3066  * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3067  * otherwise.
3068  */
3069 static int
3070 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3071 	caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3072 {
3073 	page_t *pp = *pps;
3074 	int hmenum, size, remap;
3075 	tte_t tteold, flush_tte;
3076 #ifdef DEBUG
3077 	tte_t orig_old;
3078 #endif /* DEBUG */
3079 	struct sf_hment *sfhme;
3080 	kmutex_t *pml, *pmtx;
3081 	hatlock_t *hatlockp;
3082 	int myflt;
3083 
3084 	/*
3085 	 * remove this panic when we decide to let user virtual address
3086 	 * space be >= USERLIMIT.
3087 	 */
3088 	if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
3089 		panic("user addr %p in kernel space", (void *)vaddr);
3090 #if defined(TTE_IS_GLOBAL)
3091 	if (TTE_IS_GLOBAL(ttep))
3092 		panic("sfmmu_tteload: creating global tte");
3093 #endif
3094 
3095 #ifdef DEBUG
3096 	if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
3097 	    !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
3098 		panic("sfmmu_tteload: non cacheable memory tte");
3099 #endif /* DEBUG */
3100 
3101 	/* don't simulate dirty bit for writeable ISM/DISM mappings */
3102 	if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
3103 		TTE_SET_REF(ttep);
3104 		TTE_SET_MOD(ttep);
3105 	}
3106 
3107 	if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
3108 	    !TTE_IS_MOD(ttep)) {
3109 		/*
3110 		 * Don't load TSB for dummy as in ISM.  Also don't preload
3111 		 * the TSB if the TTE isn't writable since we're likely to
3112 		 * fault on it again -- preloading can be fairly expensive.
3113 		 */
3114 		flags |= SFMMU_NO_TSBLOAD;
3115 	}
3116 
3117 	size = TTE_CSZ(ttep);
3118 	switch (size) {
3119 	case TTE8K:
3120 		SFMMU_STAT(sf_tteload8k);
3121 		break;
3122 	case TTE64K:
3123 		SFMMU_STAT(sf_tteload64k);
3124 		break;
3125 	case TTE512K:
3126 		SFMMU_STAT(sf_tteload512k);
3127 		break;
3128 	case TTE4M:
3129 		SFMMU_STAT(sf_tteload4m);
3130 		break;
3131 	case (TTE32M):
3132 		SFMMU_STAT(sf_tteload32m);
3133 		ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3134 		break;
3135 	case (TTE256M):
3136 		SFMMU_STAT(sf_tteload256m);
3137 		ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3138 		break;
3139 	}
3140 
3141 	ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
3142 	SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
3143 	ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3144 	ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3145 
3146 	HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3147 
3148 	/*
3149 	 * Need to grab mlist lock here so that pageunload
3150 	 * will not change tte behind us.
3151 	 */
3152 	if (pp) {
3153 		pml = sfmmu_mlist_enter(pp);
3154 	}
3155 
3156 	sfmmu_copytte(&sfhme->hme_tte, &tteold);
3157 	/*
3158 	 * Look for corresponding hment and if valid verify
3159 	 * pfns are equal.
3160 	 */
3161 	remap = TTE_IS_VALID(&tteold);
3162 	if (remap) {
3163 		pfn_t	new_pfn, old_pfn;
3164 
3165 		old_pfn = TTE_TO_PFN(vaddr, &tteold);
3166 		new_pfn = TTE_TO_PFN(vaddr, ttep);
3167 
3168 		if (flags & HAT_LOAD_REMAP) {
3169 			/* make sure we are remapping same type of pages */
3170 			if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
3171 				panic("sfmmu_tteload - tte remap io<->memory");
3172 			}
3173 			if (old_pfn != new_pfn &&
3174 			    (pp != NULL || sfhme->hme_page != NULL)) {
3175 				panic("sfmmu_tteload - tte remap pp != NULL");
3176 			}
3177 		} else if (old_pfn != new_pfn) {
3178 			panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3179 			    (void *)hmeblkp);
3180 		}
3181 		ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3182 	}
3183 
3184 	if (pp) {
3185 		if (size == TTE8K) {
3186 #ifdef VAC
3187 			/*
3188 			 * Handle VAC consistency
3189 			 */
3190 			if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3191 				sfmmu_vac_conflict(sfmmup, vaddr, pp);
3192 			}
3193 #endif
3194 
3195 			if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3196 				pmtx = sfmmu_page_enter(pp);
3197 				PP_CLRRO(pp);
3198 				sfmmu_page_exit(pmtx);
3199 			} else if (!PP_ISMAPPED(pp) &&
3200 			    (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3201 				pmtx = sfmmu_page_enter(pp);
3202 				if (!(PP_ISMOD(pp))) {
3203 					PP_SETRO(pp);
3204 				}
3205 				sfmmu_page_exit(pmtx);
3206 			}
3207 
3208 		} else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3209 			/*
3210 			 * sfmmu_pagearray_setup failed so return
3211 			 */
3212 			sfmmu_mlist_exit(pml);
3213 			return (1);
3214 		}
3215 	}
3216 
3217 	/*
3218 	 * Make sure hment is not on a mapping list.
3219 	 */
3220 	ASSERT(remap || (sfhme->hme_page == NULL));
3221 
3222 	/* if it is not a remap then hme->next better be NULL */
3223 	ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3224 
3225 	if (flags & HAT_LOAD_LOCK) {
3226 		if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3227 			panic("too high lckcnt-hmeblk %p",
3228 			    (void *)hmeblkp);
3229 		}
3230 		atomic_add_32(&hmeblkp->hblk_lckcnt, 1);
3231 
3232 		HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3233 	}
3234 
3235 #ifdef VAC
3236 	if (pp && PP_ISNC(pp)) {
3237 		/*
3238 		 * If the physical page is marked to be uncacheable, like
3239 		 * by a vac conflict, make sure the new mapping is also
3240 		 * uncacheable.
3241 		 */
3242 		TTE_CLR_VCACHEABLE(ttep);
3243 		ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3244 	}
3245 #endif
3246 	ttep->tte_hmenum = hmenum;
3247 
3248 #ifdef DEBUG
3249 	orig_old = tteold;
3250 #endif /* DEBUG */
3251 
3252 	while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3253 		if ((sfmmup == KHATID) &&
3254 		    (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3255 			sfmmu_copytte(&sfhme->hme_tte, &tteold);
3256 		}
3257 #ifdef DEBUG
3258 		chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3259 #endif /* DEBUG */
3260 	}
3261 	ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3262 
3263 	if (!TTE_IS_VALID(&tteold)) {
3264 
3265 		atomic_add_16(&hmeblkp->hblk_vcnt, 1);
3266 		if (rid == SFMMU_INVALID_SHMERID) {
3267 			atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1);
3268 		} else {
3269 			sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3270 			sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3271 			/*
3272 			 * We already accounted for region ttecnt's in sfmmu
3273 			 * during hat_join_region() processing. Here we
3274 			 * only update ttecnt's in region struture.
3275 			 */
3276 			atomic_add_long(&rgnp->rgn_ttecnt[size], 1);
3277 		}
3278 	}
3279 
3280 	myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3281 	if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3282 	    sfmmup != ksfmmup) {
3283 		uchar_t tteflag = 1 << size;
3284 		if (rid == SFMMU_INVALID_SHMERID) {
3285 			if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3286 				hatlockp = sfmmu_hat_enter(sfmmup);
3287 				sfmmup->sfmmu_tteflags |= tteflag;
3288 				sfmmu_hat_exit(hatlockp);
3289 			}
3290 		} else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3291 			hatlockp = sfmmu_hat_enter(sfmmup);
3292 			sfmmup->sfmmu_rtteflags |= tteflag;
3293 			sfmmu_hat_exit(hatlockp);
3294 		}
3295 		/*
3296 		 * Update the current CPU tsbmiss area, so the current thread
3297 		 * won't need to take the tsbmiss for the new pagesize.
3298 		 * The other threads in the process will update their tsb
3299 		 * miss area lazily in sfmmu_tsbmiss_exception() when they
3300 		 * fail to find the translation for a newly added pagesize.
3301 		 */
3302 		if (size > TTE64K && myflt) {
3303 			struct tsbmiss *tsbmp;
3304 			kpreempt_disable();
3305 			tsbmp = &tsbmiss_area[CPU->cpu_id];
3306 			if (rid == SFMMU_INVALID_SHMERID) {
3307 				if (!(tsbmp->uhat_tteflags & tteflag)) {
3308 					tsbmp->uhat_tteflags |= tteflag;
3309 				}
3310 			} else {
3311 				if (!(tsbmp->uhat_rtteflags & tteflag)) {
3312 					tsbmp->uhat_rtteflags |= tteflag;
3313 				}
3314 			}
3315 			kpreempt_enable();
3316 		}
3317 	}
3318 
3319 	if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3320 	    !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3321 		hatlockp = sfmmu_hat_enter(sfmmup);
3322 		SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3323 		sfmmu_hat_exit(hatlockp);
3324 	}
3325 
3326 	flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3327 	    hw_tte.tte_intlo;
3328 	flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3329 	    hw_tte.tte_inthi;
3330 
3331 	if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3332 		/*
3333 		 * If remap and new tte differs from old tte we need
3334 		 * to sync the mod bit and flush TLB/TSB.  We don't
3335 		 * need to sync ref bit because we currently always set
3336 		 * ref bit in tteload.
3337 		 */
3338 		ASSERT(TTE_IS_REF(ttep));
3339 		if (TTE_IS_MOD(&tteold)) {
3340 			sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3341 		}
3342 		/*
3343 		 * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3344 		 * hmes are only used for read only text. Adding this code for
3345 		 * completeness and future use of shared hmeblks with writable
3346 		 * mappings of VMODSORT vnodes.
3347 		 */
3348 		if (hmeblkp->hblk_shared) {
3349 			cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3350 			    sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3351 			xt_sync(cpuset);
3352 			SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3353 		} else {
3354 			sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3355 			xt_sync(sfmmup->sfmmu_cpusran);
3356 		}
3357 	}
3358 
3359 	if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3360 		/*
3361 		 * We only preload 8K and 4M mappings into the TSB, since
3362 		 * 64K and 512K mappings are replicated and hence don't
3363 		 * have a single, unique TSB entry. Ditto for 32M/256M.
3364 		 */
3365 		if (size == TTE8K || size == TTE4M) {
3366 			sf_scd_t *scdp;
3367 			hatlockp = sfmmu_hat_enter(sfmmup);
3368 			/*
3369 			 * Don't preload private TSB if the mapping is used
3370 			 * by the shctx in the SCD.
3371 			 */
3372 			scdp = sfmmup->sfmmu_scdp;
3373 			if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3374 			    !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3375 				sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3376 				    size);
3377 			}
3378 			sfmmu_hat_exit(hatlockp);
3379 		}
3380 	}
3381 	if (pp) {
3382 		if (!remap) {
3383 			HME_ADD(sfhme, pp);
3384 			atomic_add_16(&hmeblkp->hblk_hmecnt, 1);
3385 			ASSERT(hmeblkp->hblk_hmecnt > 0);
3386 
3387 			/*
3388 			 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3389 			 * see pageunload() for comment.
3390 			 */
3391 		}
3392 		sfmmu_mlist_exit(pml);
3393 	}
3394 
3395 	return (0);
3396 }
3397 /*
3398  * Function unlocks hash bucket.
3399  */
3400 static void
3401 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3402 {
3403 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3404 	SFMMU_HASH_UNLOCK(hmebp);
3405 }
3406 
3407 /*
3408  * function which checks and sets up page array for a large
3409  * translation.  Will set p_vcolor, p_index, p_ro fields.
3410  * Assumes addr and pfnum of first page are properly aligned.
3411  * Will check for physical contiguity. If check fails it return
3412  * non null.
3413  */
3414 static int
3415 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3416 {
3417 	int 	i, index, ttesz;
3418 	pfn_t	pfnum;
3419 	pgcnt_t	npgs;
3420 	page_t *pp, *pp1;
3421 	kmutex_t *pmtx;
3422 #ifdef VAC
3423 	int osz;
3424 	int cflags = 0;
3425 	int vac_err = 0;
3426 #endif
3427 	int newidx = 0;
3428 
3429 	ttesz = TTE_CSZ(ttep);
3430 
3431 	ASSERT(ttesz > TTE8K);
3432 
3433 	npgs = TTEPAGES(ttesz);
3434 	index = PAGESZ_TO_INDEX(ttesz);
3435 
3436 	pfnum = (*pps)->p_pagenum;
3437 	ASSERT(IS_P2ALIGNED(pfnum, npgs));
3438 
3439 	/*
3440 	 * Save the first pp so we can do HAT_TMPNC at the end.
3441 	 */
3442 	pp1 = *pps;
3443 #ifdef VAC
3444 	osz = fnd_mapping_sz(pp1);
3445 #endif
3446 
3447 	for (i = 0; i < npgs; i++, pps++) {
3448 		pp = *pps;
3449 		ASSERT(PAGE_LOCKED(pp));
3450 		ASSERT(pp->p_szc >= ttesz);
3451 		ASSERT(pp->p_szc == pp1->p_szc);
3452 		ASSERT(sfmmu_mlist_held(pp));
3453 
3454 		/*
3455 		 * XXX is it possible to maintain P_RO on the root only?
3456 		 */
3457 		if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3458 			pmtx = sfmmu_page_enter(pp);
3459 			PP_CLRRO(pp);
3460 			sfmmu_page_exit(pmtx);
3461 		} else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3462 		    !PP_ISMOD(pp)) {
3463 			pmtx = sfmmu_page_enter(pp);
3464 			if (!(PP_ISMOD(pp))) {
3465 				PP_SETRO(pp);
3466 			}
3467 			sfmmu_page_exit(pmtx);
3468 		}
3469 
3470 		/*
3471 		 * If this is a remap we skip vac & contiguity checks.
3472 		 */
3473 		if (remap)
3474 			continue;
3475 
3476 		/*
3477 		 * set p_vcolor and detect any vac conflicts.
3478 		 */
3479 #ifdef VAC
3480 		if (vac_err == 0) {
3481 			vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3482 
3483 		}
3484 #endif
3485 
3486 		/*
3487 		 * Save current index in case we need to undo it.
3488 		 * Note: "PAGESZ_TO_INDEX(sz)	(1 << (sz))"
3489 		 *	"SFMMU_INDEX_SHIFT	6"
3490 		 *	 "SFMMU_INDEX_MASK	((1 << SFMMU_INDEX_SHIFT) - 1)"
3491 		 *	 "PP_MAPINDEX(p_index)	(p_index & SFMMU_INDEX_MASK)"
3492 		 *
3493 		 * So:	index = PAGESZ_TO_INDEX(ttesz);
3494 		 *	if ttesz == 1 then index = 0x2
3495 		 *		    2 then index = 0x4
3496 		 *		    3 then index = 0x8
3497 		 *		    4 then index = 0x10
3498 		 *		    5 then index = 0x20
3499 		 * The code below checks if it's a new pagesize (ie, newidx)
3500 		 * in case we need to take it back out of p_index,
3501 		 * and then or's the new index into the existing index.
3502 		 */
3503 		if ((PP_MAPINDEX(pp) & index) == 0)
3504 			newidx = 1;
3505 		pp->p_index = (PP_MAPINDEX(pp) | index);
3506 
3507 		/*
3508 		 * contiguity check
3509 		 */
3510 		if (pp->p_pagenum != pfnum) {
3511 			/*
3512 			 * If we fail the contiguity test then
3513 			 * the only thing we need to fix is the p_index field.
3514 			 * We might get a few extra flushes but since this
3515 			 * path is rare that is ok.  The p_ro field will
3516 			 * get automatically fixed on the next tteload to
3517 			 * the page.  NO TNC bit is set yet.
3518 			 */
3519 			while (i >= 0) {
3520 				pp = *pps;
3521 				if (newidx)
3522 					pp->p_index = (PP_MAPINDEX(pp) &
3523 					    ~index);
3524 				pps--;
3525 				i--;
3526 			}
3527 			return (1);
3528 		}
3529 		pfnum++;
3530 		addr += MMU_PAGESIZE;
3531 	}
3532 
3533 #ifdef VAC
3534 	if (vac_err) {
3535 		if (ttesz > osz) {
3536 			/*
3537 			 * There are some smaller mappings that causes vac
3538 			 * conflicts. Convert all existing small mappings to
3539 			 * TNC.
3540 			 */
3541 			SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3542 			sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3543 			    npgs);
3544 		} else {
3545 			/* EMPTY */
3546 			/*
3547 			 * If there exists an big page mapping,
3548 			 * that means the whole existing big page
3549 			 * has TNC setting already. No need to covert to
3550 			 * TNC again.
3551 			 */
3552 			ASSERT(PP_ISTNC(pp1));
3553 		}
3554 	}
3555 #endif	/* VAC */
3556 
3557 	return (0);
3558 }
3559 
3560 #ifdef VAC
3561 /*
3562  * Routine that detects vac consistency for a large page. It also
3563  * sets virtual color for all pp's for this big mapping.
3564  */
3565 static int
3566 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3567 {
3568 	int vcolor, ocolor;
3569 
3570 	ASSERT(sfmmu_mlist_held(pp));
3571 
3572 	if (PP_ISNC(pp)) {
3573 		return (HAT_TMPNC);
3574 	}
3575 
3576 	vcolor = addr_to_vcolor(addr);
3577 	if (PP_NEWPAGE(pp)) {
3578 		PP_SET_VCOLOR(pp, vcolor);
3579 		return (0);
3580 	}
3581 
3582 	ocolor = PP_GET_VCOLOR(pp);
3583 	if (ocolor == vcolor) {
3584 		return (0);
3585 	}
3586 
3587 	if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3588 		/*
3589 		 * Previous user of page had a differnet color
3590 		 * but since there are no current users
3591 		 * we just flush the cache and change the color.
3592 		 * As an optimization for large pages we flush the
3593 		 * entire cache of that color and set a flag.
3594 		 */
3595 		SFMMU_STAT(sf_pgcolor_conflict);
3596 		if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3597 			CacheColor_SetFlushed(*cflags, ocolor);
3598 			sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3599 		}
3600 		PP_SET_VCOLOR(pp, vcolor);
3601 		return (0);
3602 	}
3603 
3604 	/*
3605 	 * We got a real conflict with a current mapping.
3606 	 * set flags to start unencaching all mappings
3607 	 * and return failure so we restart looping
3608 	 * the pp array from the beginning.
3609 	 */
3610 	return (HAT_TMPNC);
3611 }
3612 #endif	/* VAC */
3613 
3614 /*
3615  * creates a large page shadow hmeblk for a tte.
3616  * The purpose of this routine is to allow us to do quick unloads because
3617  * the vm layer can easily pass a very large but sparsely populated range.
3618  */
3619 static struct hme_blk *
3620 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3621 {
3622 	struct hmehash_bucket *hmebp;
3623 	hmeblk_tag hblktag;
3624 	int hmeshift, size, vshift;
3625 	uint_t shw_mask, newshw_mask;
3626 	struct hme_blk *hmeblkp;
3627 
3628 	ASSERT(sfmmup != KHATID);
3629 	if (mmu_page_sizes == max_mmu_page_sizes) {
3630 		ASSERT(ttesz < TTE256M);
3631 	} else {
3632 		ASSERT(ttesz < TTE4M);
3633 		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3634 		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3635 	}
3636 
3637 	if (ttesz == TTE8K) {
3638 		size = TTE512K;
3639 	} else {
3640 		size = ++ttesz;
3641 	}
3642 
3643 	hblktag.htag_id = sfmmup;
3644 	hmeshift = HME_HASH_SHIFT(size);
3645 	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3646 	hblktag.htag_rehash = HME_HASH_REHASH(size);
3647 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3648 	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3649 
3650 	SFMMU_HASH_LOCK(hmebp);
3651 
3652 	HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3653 	ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3654 	if (hmeblkp == NULL) {
3655 		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3656 		    hblktag, flags, SFMMU_INVALID_SHMERID);
3657 	}
3658 	ASSERT(hmeblkp);
3659 	if (!hmeblkp->hblk_shw_mask) {
3660 		/*
3661 		 * if this is a unused hblk it was just allocated or could
3662 		 * potentially be a previous large page hblk so we need to
3663 		 * set the shadow bit.
3664 		 */
3665 		ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3666 		hmeblkp->hblk_shw_bit = 1;
3667 	} else if (hmeblkp->hblk_shw_bit == 0) {
3668 		panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3669 		    (void *)hmeblkp);
3670 	}
3671 	ASSERT(hmeblkp->hblk_shw_bit == 1);
3672 	ASSERT(!hmeblkp->hblk_shared);
3673 	vshift = vaddr_to_vshift(hblktag, vaddr, size);
3674 	ASSERT(vshift < 8);
3675 	/*
3676 	 * Atomically set shw mask bit
3677 	 */
3678 	do {
3679 		shw_mask = hmeblkp->hblk_shw_mask;
3680 		newshw_mask = shw_mask | (1 << vshift);
3681 		newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask,
3682 		    newshw_mask);
3683 	} while (newshw_mask != shw_mask);
3684 
3685 	SFMMU_HASH_UNLOCK(hmebp);
3686 
3687 	return (hmeblkp);
3688 }
3689 
3690 /*
3691  * This routine cleanup a previous shadow hmeblk and changes it to
3692  * a regular hblk.  This happens rarely but it is possible
3693  * when a process wants to use large pages and there are hblks still
3694  * lying around from the previous as that used these hmeblks.
3695  * The alternative was to cleanup the shadow hblks at unload time
3696  * but since so few user processes actually use large pages, it is
3697  * better to be lazy and cleanup at this time.
3698  */
3699 static void
3700 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3701 	struct hmehash_bucket *hmebp)
3702 {
3703 	caddr_t addr, endaddr;
3704 	int hashno, size;
3705 
3706 	ASSERT(hmeblkp->hblk_shw_bit);
3707 	ASSERT(!hmeblkp->hblk_shared);
3708 
3709 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3710 
3711 	if (!hmeblkp->hblk_shw_mask) {
3712 		hmeblkp->hblk_shw_bit = 0;
3713 		return;
3714 	}
3715 	addr = (caddr_t)get_hblk_base(hmeblkp);
3716 	endaddr = get_hblk_endaddr(hmeblkp);
3717 	size = get_hblk_ttesz(hmeblkp);
3718 	hashno = size - 1;
3719 	ASSERT(hashno > 0);
3720 	SFMMU_HASH_UNLOCK(hmebp);
3721 
3722 	sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3723 
3724 	SFMMU_HASH_LOCK(hmebp);
3725 }
3726 
3727 static void
3728 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3729 	int hashno)
3730 {
3731 	int hmeshift, shadow = 0;
3732 	hmeblk_tag hblktag;
3733 	struct hmehash_bucket *hmebp;
3734 	struct hme_blk *hmeblkp;
3735 	struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3736 
3737 	ASSERT(hashno > 0);
3738 	hblktag.htag_id = sfmmup;
3739 	hblktag.htag_rehash = hashno;
3740 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3741 
3742 	hmeshift = HME_HASH_SHIFT(hashno);
3743 
3744 	while (addr < endaddr) {
3745 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3746 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3747 		SFMMU_HASH_LOCK(hmebp);
3748 		/* inline HME_HASH_SEARCH */
3749 		hmeblkp = hmebp->hmeblkp;
3750 		pr_hblk = NULL;
3751 		while (hmeblkp) {
3752 			if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3753 				/* found hme_blk */
3754 				ASSERT(!hmeblkp->hblk_shared);
3755 				if (hmeblkp->hblk_shw_bit) {
3756 					if (hmeblkp->hblk_shw_mask) {
3757 						shadow = 1;
3758 						sfmmu_shadow_hcleanup(sfmmup,
3759 						    hmeblkp, hmebp);
3760 						break;
3761 					} else {
3762 						hmeblkp->hblk_shw_bit = 0;
3763 					}
3764 				}
3765 
3766 				/*
3767 				 * Hblk_hmecnt and hblk_vcnt could be non zero
3768 				 * since hblk_unload() does not gurantee that.
3769 				 *
3770 				 * XXX - this could cause tteload() to spin
3771 				 * where sfmmu_shadow_hcleanup() is called.
3772 				 */
3773 			}
3774 
3775 			nx_hblk = hmeblkp->hblk_next;
3776 			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3777 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3778 				    &list, 0);
3779 			} else {
3780 				pr_hblk = hmeblkp;
3781 			}
3782 			hmeblkp = nx_hblk;
3783 		}
3784 
3785 		SFMMU_HASH_UNLOCK(hmebp);
3786 
3787 		if (shadow) {
3788 			/*
3789 			 * We found another shadow hblk so cleaned its
3790 			 * children.  We need to go back and cleanup
3791 			 * the original hblk so we don't change the
3792 			 * addr.
3793 			 */
3794 			shadow = 0;
3795 		} else {
3796 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
3797 			    (1 << hmeshift));
3798 		}
3799 	}
3800 	sfmmu_hblks_list_purge(&list, 0);
3801 }
3802 
3803 /*
3804  * This routine's job is to delete stale invalid shared hmeregions hmeblks that
3805  * may still linger on after pageunload.
3806  */
3807 static void
3808 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3809 {
3810 	int hmeshift;
3811 	hmeblk_tag hblktag;
3812 	struct hmehash_bucket *hmebp;
3813 	struct hme_blk *hmeblkp;
3814 	struct hme_blk *pr_hblk;
3815 	struct hme_blk *list = NULL;
3816 
3817 	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3818 	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3819 
3820 	hmeshift = HME_HASH_SHIFT(ttesz);
3821 	hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3822 	hblktag.htag_rehash = ttesz;
3823 	hblktag.htag_rid = rid;
3824 	hblktag.htag_id = srdp;
3825 	hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3826 
3827 	SFMMU_HASH_LOCK(hmebp);
3828 	HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3829 	if (hmeblkp != NULL) {
3830 		ASSERT(hmeblkp->hblk_shared);
3831 		ASSERT(!hmeblkp->hblk_shw_bit);
3832 		if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3833 			panic("sfmmu_cleanup_rhblk: valid hmeblk");
3834 		}
3835 		ASSERT(!hmeblkp->hblk_lckcnt);
3836 		sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3837 		    &list, 0);
3838 	}
3839 	SFMMU_HASH_UNLOCK(hmebp);
3840 	sfmmu_hblks_list_purge(&list, 0);
3841 }
3842 
3843 /* ARGSUSED */
3844 static void
3845 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3846     size_t r_size, void *r_obj, u_offset_t r_objoff)
3847 {
3848 }
3849 
3850 /*
3851  * Searches for an hmeblk which maps addr, then unloads this mapping
3852  * and updates *eaddrp, if the hmeblk is found.
3853  */
3854 static void
3855 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3856     caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3857 {
3858 	int hmeshift;
3859 	hmeblk_tag hblktag;
3860 	struct hmehash_bucket *hmebp;
3861 	struct hme_blk *hmeblkp;
3862 	struct hme_blk *pr_hblk;
3863 	struct hme_blk *list = NULL;
3864 
3865 	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3866 	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3867 	ASSERT(ttesz >= HBLK_MIN_TTESZ);
3868 
3869 	hmeshift = HME_HASH_SHIFT(ttesz);
3870 	hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3871 	hblktag.htag_rehash = ttesz;
3872 	hblktag.htag_rid = rid;
3873 	hblktag.htag_id = srdp;
3874 	hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3875 
3876 	SFMMU_HASH_LOCK(hmebp);
3877 	HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3878 	if (hmeblkp != NULL) {
3879 		ASSERT(hmeblkp->hblk_shared);
3880 		ASSERT(!hmeblkp->hblk_lckcnt);
3881 		if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3882 			*eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3883 			    eaddr, NULL, HAT_UNLOAD);
3884 			ASSERT(*eaddrp > addr);
3885 		}
3886 		ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3887 		sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3888 		    &list, 0);
3889 	}
3890 	SFMMU_HASH_UNLOCK(hmebp);
3891 	sfmmu_hblks_list_purge(&list, 0);
3892 }
3893 
3894 static void
3895 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3896 {
3897 	int ttesz = rgnp->rgn_pgszc;
3898 	size_t rsz = rgnp->rgn_size;
3899 	caddr_t rsaddr = rgnp->rgn_saddr;
3900 	caddr_t readdr = rsaddr + rsz;
3901 	caddr_t rhsaddr;
3902 	caddr_t va;
3903 	uint_t rid = rgnp->rgn_id;
3904 	caddr_t cbsaddr;
3905 	caddr_t cbeaddr;
3906 	hat_rgn_cb_func_t rcbfunc;
3907 	ulong_t cnt;
3908 
3909 	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3910 	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3911 
3912 	ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3913 	ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3914 	if (ttesz < HBLK_MIN_TTESZ) {
3915 		ttesz = HBLK_MIN_TTESZ;
3916 		rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3917 	} else {
3918 		rhsaddr = rsaddr;
3919 	}
3920 
3921 	if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3922 		rcbfunc = sfmmu_rgn_cb_noop;
3923 	}
3924 
3925 	while (ttesz >= HBLK_MIN_TTESZ) {
3926 		cbsaddr = rsaddr;
3927 		cbeaddr = rsaddr;
3928 		if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3929 			ttesz--;
3930 			continue;
3931 		}
3932 		cnt = 0;
3933 		va = rsaddr;
3934 		while (va < readdr) {
3935 			ASSERT(va >= rhsaddr);
3936 			if (va != cbeaddr) {
3937 				if (cbeaddr != cbsaddr) {
3938 					ASSERT(cbeaddr > cbsaddr);
3939 					(*rcbfunc)(cbsaddr, cbeaddr,
3940 					    rsaddr, rsz, rgnp->rgn_obj,
3941 					    rgnp->rgn_objoff);
3942 				}
3943 				cbsaddr = va;
3944 				cbeaddr = va;
3945 			}
3946 			sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3947 			    ttesz, &cbeaddr);
3948 			cnt++;
3949 			va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3950 		}
3951 		if (cbeaddr != cbsaddr) {
3952 			ASSERT(cbeaddr > cbsaddr);
3953 			(*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3954 			    rsz, rgnp->rgn_obj,
3955 			    rgnp->rgn_objoff);
3956 		}
3957 		ttesz--;
3958 	}
3959 }
3960 
3961 /*
3962  * Release one hardware address translation lock on the given address range.
3963  */
3964 void
3965 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3966 {
3967 	struct hmehash_bucket *hmebp;
3968 	hmeblk_tag hblktag;
3969 	int hmeshift, hashno = 1;
3970 	struct hme_blk *hmeblkp, *list = NULL;
3971 	caddr_t endaddr;
3972 
3973 	ASSERT(sfmmup != NULL);
3974 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3975 
3976 	ASSERT((sfmmup == ksfmmup) ||
3977 	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3978 	ASSERT((len & MMU_PAGEOFFSET) == 0);
3979 	endaddr = addr + len;
3980 	hblktag.htag_id = sfmmup;
3981 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3982 
3983 	/*
3984 	 * Spitfire supports 4 page sizes.
3985 	 * Most pages are expected to be of the smallest page size (8K) and
3986 	 * these will not need to be rehashed. 64K pages also don't need to be
3987 	 * rehashed because an hmeblk spans 64K of address space. 512K pages
3988 	 * might need 1 rehash and and 4M pages might need 2 rehashes.
3989 	 */
3990 	while (addr < endaddr) {
3991 		hmeshift = HME_HASH_SHIFT(hashno);
3992 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3993 		hblktag.htag_rehash = hashno;
3994 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3995 
3996 		SFMMU_HASH_LOCK(hmebp);
3997 
3998 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3999 		if (hmeblkp != NULL) {
4000 			ASSERT(!hmeblkp->hblk_shared);
4001 			/*
4002 			 * If we encounter a shadow hmeblk then
4003 			 * we know there are no valid hmeblks mapping
4004 			 * this address at this size or larger.
4005 			 * Just increment address by the smallest
4006 			 * page size.
4007 			 */
4008 			if (hmeblkp->hblk_shw_bit) {
4009 				addr += MMU_PAGESIZE;
4010 			} else {
4011 				addr = sfmmu_hblk_unlock(hmeblkp, addr,
4012 				    endaddr);
4013 			}
4014 			SFMMU_HASH_UNLOCK(hmebp);
4015 			hashno = 1;
4016 			continue;
4017 		}
4018 		SFMMU_HASH_UNLOCK(hmebp);
4019 
4020 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4021 			/*
4022 			 * We have traversed the whole list and rehashed
4023 			 * if necessary without finding the address to unlock
4024 			 * which should never happen.
4025 			 */
4026 			panic("sfmmu_unlock: addr not found. "
4027 			    "addr %p hat %p", (void *)addr, (void *)sfmmup);
4028 		} else {
4029 			hashno++;
4030 		}
4031 	}
4032 
4033 	sfmmu_hblks_list_purge(&list, 0);
4034 }
4035 
4036 void
4037 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4038     hat_region_cookie_t rcookie)
4039 {
4040 	sf_srd_t *srdp;
4041 	sf_region_t *rgnp;
4042 	int ttesz;
4043 	uint_t rid;
4044 	caddr_t eaddr;
4045 	caddr_t va;
4046 	int hmeshift;
4047 	hmeblk_tag hblktag;
4048 	struct hmehash_bucket *hmebp;
4049 	struct hme_blk *hmeblkp;
4050 	struct hme_blk *pr_hblk;
4051 	struct hme_blk *list;
4052 
4053 	if (rcookie == HAT_INVALID_REGION_COOKIE) {
4054 		hat_unlock(sfmmup, addr, len);
4055 		return;
4056 	}
4057 
4058 	ASSERT(sfmmup != NULL);
4059 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4060 	ASSERT(sfmmup != ksfmmup);
4061 
4062 	srdp = sfmmup->sfmmu_srdp;
4063 	rid = (uint_t)((uint64_t)rcookie);
4064 	VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4065 	eaddr = addr + len;
4066 	va = addr;
4067 	list = NULL;
4068 	rgnp = srdp->srd_hmergnp[rid];
4069 	SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
4070 
4071 	ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
4072 	ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
4073 	if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
4074 		ttesz = HBLK_MIN_TTESZ;
4075 	} else {
4076 		ttesz = rgnp->rgn_pgszc;
4077 	}
4078 	while (va < eaddr) {
4079 		while (ttesz < rgnp->rgn_pgszc &&
4080 		    IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
4081 			ttesz++;
4082 		}
4083 		while (ttesz >= HBLK_MIN_TTESZ) {
4084 			if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
4085 				ttesz--;
4086 				continue;
4087 			}
4088 			hmeshift = HME_HASH_SHIFT(ttesz);
4089 			hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
4090 			hblktag.htag_rehash = ttesz;
4091 			hblktag.htag_rid = rid;
4092 			hblktag.htag_id = srdp;
4093 			hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
4094 			SFMMU_HASH_LOCK(hmebp);
4095 			HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4096 			    &list);
4097 			if (hmeblkp == NULL) {
4098 				SFMMU_HASH_UNLOCK(hmebp);
4099 				ttesz--;
4100 				continue;
4101 			}
4102 			ASSERT(hmeblkp->hblk_shared);
4103 			va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4104 			ASSERT(va >= eaddr ||
4105 			    IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
4106 			SFMMU_HASH_UNLOCK(hmebp);
4107 			break;
4108 		}
4109 		if (ttesz < HBLK_MIN_TTESZ) {
4110 			panic("hat_unlock_region: addr not found "
4111 			    "addr %p hat %p", (void *)va, (void *)sfmmup);
4112 		}
4113 	}
4114 	sfmmu_hblks_list_purge(&list, 0);
4115 }
4116 
4117 /*
4118  * Function to unlock a range of addresses in an hmeblk.  It returns the
4119  * next address that needs to be unlocked.
4120  * Should be called with the hash lock held.
4121  */
4122 static caddr_t
4123 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4124 {
4125 	struct sf_hment *sfhme;
4126 	tte_t tteold, ttemod;
4127 	int ttesz, ret;
4128 
4129 	ASSERT(in_hblk_range(hmeblkp, addr));
4130 	ASSERT(hmeblkp->hblk_shw_bit == 0);
4131 
4132 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4133 	ttesz = get_hblk_ttesz(hmeblkp);
4134 
4135 	HBLKTOHME(sfhme, hmeblkp, addr);
4136 	while (addr < endaddr) {
4137 readtte:
4138 		sfmmu_copytte(&sfhme->hme_tte, &tteold);
4139 		if (TTE_IS_VALID(&tteold)) {
4140 
4141 			ttemod = tteold;
4142 
4143 			ret = sfmmu_modifytte_try(&tteold, &ttemod,
4144 			    &sfhme->hme_tte);
4145 
4146 			if (ret < 0)
4147 				goto readtte;
4148 
4149 			if (hmeblkp->hblk_lckcnt == 0)
4150 				panic("zero hblk lckcnt");
4151 
4152 			if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4153 			    (uintptr_t)endaddr)
4154 				panic("can't unlock large tte");
4155 
4156 			ASSERT(hmeblkp->hblk_lckcnt > 0);
4157 			atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
4158 			HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4159 		} else {
4160 			panic("sfmmu_hblk_unlock: invalid tte");
4161 		}
4162 		addr += TTEBYTES(ttesz);
4163 		sfhme++;
4164 	}
4165 	return (addr);
4166 }
4167 
4168 /*
4169  * Physical Address Mapping Framework
4170  *
4171  * General rules:
4172  *
4173  * (1) Applies only to seg_kmem memory pages. To make things easier,
4174  *     seg_kpm addresses are also accepted by the routines, but nothing
4175  *     is done with them since by definition their PA mappings are static.
4176  * (2) hat_add_callback() may only be called while holding the page lock
4177  *     SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
4178  *     or passing HAC_PAGELOCK flag.
4179  * (3) prehandler() and posthandler() may not call hat_add_callback() or
4180  *     hat_delete_callback(), nor should they allocate memory. Post quiesce
4181  *     callbacks may not sleep or acquire adaptive mutex locks.
4182  * (4) Either prehandler() or posthandler() (but not both) may be specified
4183  *     as being NULL.  Specifying an errhandler() is optional.
4184  *
4185  * Details of using the framework:
4186  *
4187  * registering a callback (hat_register_callback())
4188  *
4189  *	Pass prehandler, posthandler, errhandler addresses
4190  *	as described below. If capture_cpus argument is nonzero,
4191  *	suspend callback to the prehandler will occur with CPUs
4192  *	captured and executing xc_loop() and CPUs will remain
4193  *	captured until after the posthandler suspend callback
4194  *	occurs.
4195  *
4196  * adding a callback (hat_add_callback())
4197  *
4198  *      as_pagelock();
4199  *	hat_add_callback();
4200  *      save returned pfn in private data structures or program registers;
4201  *      as_pageunlock();
4202  *
4203  * prehandler()
4204  *
4205  *	Stop all accesses by physical address to this memory page.
4206  *	Called twice: the first, PRESUSPEND, is a context safe to acquire
4207  *	adaptive locks. The second, SUSPEND, is called at high PIL with
4208  *	CPUs captured so adaptive locks may NOT be acquired (and all spin
4209  *	locks must be XCALL_PIL or higher locks).
4210  *
4211  *	May return the following errors:
4212  *		EIO:	A fatal error has occurred. This will result in panic.
4213  *		EAGAIN:	The page cannot be suspended. This will fail the
4214  *			relocation.
4215  *		0:	Success.
4216  *
4217  * posthandler()
4218  *
4219  *      Save new pfn in private data structures or program registers;
4220  *	not allowed to fail (non-zero return values will result in panic).
4221  *
4222  * errhandler()
4223  *
4224  *	called when an error occurs related to the callback.  Currently
4225  *	the only such error is HAT_CB_ERR_LEAKED which indicates that
4226  *	a page is being freed, but there are still outstanding callback(s)
4227  *	registered on the page.
4228  *
4229  * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4230  *
4231  *	stop using physical address
4232  *	hat_delete_callback();
4233  *
4234  */
4235 
4236 /*
4237  * Register a callback class.  Each subsystem should do this once and
4238  * cache the id_t returned for use in setting up and tearing down callbacks.
4239  *
4240  * There is no facility for removing callback IDs once they are created;
4241  * the "key" should be unique for each module, so in case a module is unloaded
4242  * and subsequently re-loaded, we can recycle the module's previous entry.
4243  */
4244 id_t
4245 hat_register_callback(int key,
4246 	int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4247 	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4248 	int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4249 	int capture_cpus)
4250 {
4251 	id_t id;
4252 
4253 	/*
4254 	 * Search the table for a pre-existing callback associated with
4255 	 * the identifier "key".  If one exists, we re-use that entry in
4256 	 * the table for this instance, otherwise we assign the next
4257 	 * available table slot.
4258 	 */
4259 	for (id = 0; id < sfmmu_max_cb_id; id++) {
4260 		if (sfmmu_cb_table[id].key == key)
4261 			break;
4262 	}
4263 
4264 	if (id == sfmmu_max_cb_id) {
4265 		id = sfmmu_cb_nextid++;
4266 		if (id >= sfmmu_max_cb_id)
4267 			panic("hat_register_callback: out of callback IDs");
4268 	}
4269 
4270 	ASSERT(prehandler != NULL || posthandler != NULL);
4271 
4272 	sfmmu_cb_table[id].key = key;
4273 	sfmmu_cb_table[id].prehandler = prehandler;
4274 	sfmmu_cb_table[id].posthandler = posthandler;
4275 	sfmmu_cb_table[id].errhandler = errhandler;
4276 	sfmmu_cb_table[id].capture_cpus = capture_cpus;
4277 
4278 	return (id);
4279 }
4280 
4281 #define	HAC_COOKIE_NONE	(void *)-1
4282 
4283 /*
4284  * Add relocation callbacks to the specified addr/len which will be called
4285  * when relocating the associated page. See the description of pre and
4286  * posthandler above for more details.
4287  *
4288  * If HAC_PAGELOCK is included in flags, the underlying memory page is
4289  * locked internally so the caller must be able to deal with the callback
4290  * running even before this function has returned.  If HAC_PAGELOCK is not
4291  * set, it is assumed that the underlying memory pages are locked.
4292  *
4293  * Since the caller must track the individual page boundaries anyway,
4294  * we only allow a callback to be added to a single page (large
4295  * or small).  Thus [addr, addr + len) MUST be contained within a single
4296  * page.
4297  *
4298  * Registering multiple callbacks on the same [addr, addr+len) is supported,
4299  * _provided_that_ a unique parameter is specified for each callback.
4300  * If multiple callbacks are registered on the same range the callback will
4301  * be invoked with each unique parameter. Registering the same callback with
4302  * the same argument more than once will result in corrupted kernel state.
4303  *
4304  * Returns the pfn of the underlying kernel page in *rpfn
4305  * on success, or PFN_INVALID on failure.
4306  *
4307  * cookiep (if passed) provides storage space for an opaque cookie
4308  * to return later to hat_delete_callback(). This cookie makes the callback
4309  * deletion significantly quicker by avoiding a potentially lengthy hash
4310  * search.
4311  *
4312  * Returns values:
4313  *    0:      success
4314  *    ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4315  *    EINVAL: callback ID is not valid
4316  *    ENXIO:  ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4317  *            space
4318  *    ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4319  */
4320 int
4321 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4322 	void *pvt, pfn_t *rpfn, void **cookiep)
4323 {
4324 	struct 		hmehash_bucket *hmebp;
4325 	hmeblk_tag 	hblktag;
4326 	struct hme_blk	*hmeblkp;
4327 	int 		hmeshift, hashno;
4328 	caddr_t 	saddr, eaddr, baseaddr;
4329 	struct pa_hment *pahmep;
4330 	struct sf_hment *sfhmep, *osfhmep;
4331 	kmutex_t	*pml;
4332 	tte_t   	tte;
4333 	page_t		*pp;
4334 	vnode_t		*vp;
4335 	u_offset_t	off;
4336 	pfn_t		pfn;
4337 	int		kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4338 	int		locked = 0;
4339 
4340 	/*
4341 	 * For KPM mappings, just return the physical address since we
4342 	 * don't need to register any callbacks.
4343 	 */
4344 	if (IS_KPM_ADDR(vaddr)) {
4345 		uint64_t paddr;
4346 		SFMMU_KPM_VTOP(vaddr, paddr);
4347 		*rpfn = btop(paddr);
4348 		if (cookiep != NULL)
4349 			*cookiep = HAC_COOKIE_NONE;
4350 		return (0);
4351 	}
4352 
4353 	if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4354 		*rpfn = PFN_INVALID;
4355 		return (EINVAL);
4356 	}
4357 
4358 	if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4359 		*rpfn = PFN_INVALID;
4360 		return (ENOMEM);
4361 	}
4362 
4363 	sfhmep = &pahmep->sfment;
4364 
4365 	saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4366 	eaddr = saddr + len;
4367 
4368 rehash:
4369 	/* Find the mapping(s) for this page */
4370 	for (hashno = TTE64K, hmeblkp = NULL;
4371 	    hmeblkp == NULL && hashno <= mmu_hashcnt;
4372 	    hashno++) {
4373 		hmeshift = HME_HASH_SHIFT(hashno);
4374 		hblktag.htag_id = ksfmmup;
4375 		hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4376 		hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4377 		hblktag.htag_rehash = hashno;
4378 		hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4379 
4380 		SFMMU_HASH_LOCK(hmebp);
4381 
4382 		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4383 
4384 		if (hmeblkp == NULL)
4385 			SFMMU_HASH_UNLOCK(hmebp);
4386 	}
4387 
4388 	if (hmeblkp == NULL) {
4389 		kmem_cache_free(pa_hment_cache, pahmep);
4390 		*rpfn = PFN_INVALID;
4391 		return (ENXIO);
4392 	}
4393 
4394 	ASSERT(!hmeblkp->hblk_shared);
4395 
4396 	HBLKTOHME(osfhmep, hmeblkp, saddr);
4397 	sfmmu_copytte(&osfhmep->hme_tte, &tte);
4398 
4399 	if (!TTE_IS_VALID(&tte)) {
4400 		SFMMU_HASH_UNLOCK(hmebp);
4401 		kmem_cache_free(pa_hment_cache, pahmep);
4402 		*rpfn = PFN_INVALID;
4403 		return (ENXIO);
4404 	}
4405 
4406 	/*
4407 	 * Make sure the boundaries for the callback fall within this
4408 	 * single mapping.
4409 	 */
4410 	baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4411 	ASSERT(saddr >= baseaddr);
4412 	if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4413 		SFMMU_HASH_UNLOCK(hmebp);
4414 		kmem_cache_free(pa_hment_cache, pahmep);
4415 		*rpfn = PFN_INVALID;
4416 		return (ERANGE);
4417 	}
4418 
4419 	pfn = sfmmu_ttetopfn(&tte, vaddr);
4420 
4421 	/*
4422 	 * The pfn may not have a page_t underneath in which case we
4423 	 * just return it. This can happen if we are doing I/O to a
4424 	 * static portion of the kernel's address space, for instance.
4425 	 */
4426 	pp = osfhmep->hme_page;
4427 	if (pp == NULL) {
4428 		SFMMU_HASH_UNLOCK(hmebp);
4429 		kmem_cache_free(pa_hment_cache, pahmep);
4430 		*rpfn = pfn;
4431 		if (cookiep)
4432 			*cookiep = HAC_COOKIE_NONE;
4433 		return (0);
4434 	}
4435 	ASSERT(pp == PP_PAGEROOT(pp));
4436 
4437 	vp = pp->p_vnode;
4438 	off = pp->p_offset;
4439 
4440 	pml = sfmmu_mlist_enter(pp);
4441 
4442 	if (flags & HAC_PAGELOCK) {
4443 		if (!page_trylock(pp, SE_SHARED)) {
4444 			/*
4445 			 * Somebody is holding SE_EXCL lock. Might
4446 			 * even be hat_page_relocate(). Drop all
4447 			 * our locks, lookup the page in &kvp, and
4448 			 * retry. If it doesn't exist in &kvp and &zvp,
4449 			 * then we must be dealing with a kernel mapped
4450 			 * page which doesn't actually belong to
4451 			 * segkmem so we punt.
4452 			 */
4453 			sfmmu_mlist_exit(pml);
4454 			SFMMU_HASH_UNLOCK(hmebp);
4455 			pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4456 
4457 			/* check zvp before giving up */
4458 			if (pp == NULL)
4459 				pp = page_lookup(&zvp, (u_offset_t)saddr,
4460 				    SE_SHARED);
4461 
4462 			/* Okay, we didn't find it, give up */
4463 			if (pp == NULL) {
4464 				kmem_cache_free(pa_hment_cache, pahmep);
4465 				*rpfn = pfn;
4466 				if (cookiep)
4467 					*cookiep = HAC_COOKIE_NONE;
4468 				return (0);
4469 			}
4470 			page_unlock(pp);
4471 			goto rehash;
4472 		}
4473 		locked = 1;
4474 	}
4475 
4476 	if (!PAGE_LOCKED(pp) && !panicstr)
4477 		panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4478 
4479 	if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4480 	    pp->p_offset != off) {
4481 		/*
4482 		 * The page moved before we got our hands on it.  Drop
4483 		 * all the locks and try again.
4484 		 */
4485 		ASSERT((flags & HAC_PAGELOCK) != 0);
4486 		sfmmu_mlist_exit(pml);
4487 		SFMMU_HASH_UNLOCK(hmebp);
4488 		page_unlock(pp);
4489 		locked = 0;
4490 		goto rehash;
4491 	}
4492 
4493 	if (!VN_ISKAS(vp)) {
4494 		/*
4495 		 * This is not a segkmem page but another page which
4496 		 * has been kernel mapped. It had better have at least
4497 		 * a share lock on it. Return the pfn.
4498 		 */
4499 		sfmmu_mlist_exit(pml);
4500 		SFMMU_HASH_UNLOCK(hmebp);
4501 		if (locked)
4502 			page_unlock(pp);
4503 		kmem_cache_free(pa_hment_cache, pahmep);
4504 		ASSERT(PAGE_LOCKED(pp));
4505 		*rpfn = pfn;
4506 		if (cookiep)
4507 			*cookiep = HAC_COOKIE_NONE;
4508 		return (0);
4509 	}
4510 
4511 	/*
4512 	 * Setup this pa_hment and link its embedded dummy sf_hment into
4513 	 * the mapping list.
4514 	 */
4515 	pp->p_share++;
4516 	pahmep->cb_id = callback_id;
4517 	pahmep->addr = vaddr;
4518 	pahmep->len = len;
4519 	pahmep->refcnt = 1;
4520 	pahmep->flags = 0;
4521 	pahmep->pvt = pvt;
4522 
4523 	sfhmep->hme_tte.ll = 0;
4524 	sfhmep->hme_data = pahmep;
4525 	sfhmep->hme_prev = osfhmep;
4526 	sfhmep->hme_next = osfhmep->hme_next;
4527 
4528 	if (osfhmep->hme_next)
4529 		osfhmep->hme_next->hme_prev = sfhmep;
4530 
4531 	osfhmep->hme_next = sfhmep;
4532 
4533 	sfmmu_mlist_exit(pml);
4534 	SFMMU_HASH_UNLOCK(hmebp);
4535 
4536 	if (locked)
4537 		page_unlock(pp);
4538 
4539 	*rpfn = pfn;
4540 	if (cookiep)
4541 		*cookiep = (void *)pahmep;
4542 
4543 	return (0);
4544 }
4545 
4546 /*
4547  * Remove the relocation callbacks from the specified addr/len.
4548  */
4549 void
4550 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4551 	void *cookie)
4552 {
4553 	struct		hmehash_bucket *hmebp;
4554 	hmeblk_tag	hblktag;
4555 	struct hme_blk	*hmeblkp;
4556 	int		hmeshift, hashno;
4557 	caddr_t		saddr;
4558 	struct pa_hment	*pahmep;
4559 	struct sf_hment	*sfhmep, *osfhmep;
4560 	kmutex_t	*pml;
4561 	tte_t		tte;
4562 	page_t		*pp;
4563 	vnode_t		*vp;
4564 	u_offset_t	off;
4565 	int		locked = 0;
4566 
4567 	/*
4568 	 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4569 	 * remove so just return.
4570 	 */
4571 	if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4572 		return;
4573 
4574 	saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4575 
4576 rehash:
4577 	/* Find the mapping(s) for this page */
4578 	for (hashno = TTE64K, hmeblkp = NULL;
4579 	    hmeblkp == NULL && hashno <= mmu_hashcnt;
4580 	    hashno++) {
4581 		hmeshift = HME_HASH_SHIFT(hashno);
4582 		hblktag.htag_id = ksfmmup;
4583 		hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4584 		hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4585 		hblktag.htag_rehash = hashno;
4586 		hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4587 
4588 		SFMMU_HASH_LOCK(hmebp);
4589 
4590 		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4591 
4592 		if (hmeblkp == NULL)
4593 			SFMMU_HASH_UNLOCK(hmebp);
4594 	}
4595 
4596 	if (hmeblkp == NULL)
4597 		return;
4598 
4599 	ASSERT(!hmeblkp->hblk_shared);
4600 
4601 	HBLKTOHME(osfhmep, hmeblkp, saddr);
4602 
4603 	sfmmu_copytte(&osfhmep->hme_tte, &tte);
4604 	if (!TTE_IS_VALID(&tte)) {
4605 		SFMMU_HASH_UNLOCK(hmebp);
4606 		return;
4607 	}
4608 
4609 	pp = osfhmep->hme_page;
4610 	if (pp == NULL) {
4611 		SFMMU_HASH_UNLOCK(hmebp);
4612 		ASSERT(cookie == NULL);
4613 		return;
4614 	}
4615 
4616 	vp = pp->p_vnode;
4617 	off = pp->p_offset;
4618 
4619 	pml = sfmmu_mlist_enter(pp);
4620 
4621 	if (flags & HAC_PAGELOCK) {
4622 		if (!page_trylock(pp, SE_SHARED)) {
4623 			/*
4624 			 * Somebody is holding SE_EXCL lock. Might
4625 			 * even be hat_page_relocate(). Drop all
4626 			 * our locks, lookup the page in &kvp, and
4627 			 * retry. If it doesn't exist in &kvp and &zvp,
4628 			 * then we must be dealing with a kernel mapped
4629 			 * page which doesn't actually belong to
4630 			 * segkmem so we punt.
4631 			 */
4632 			sfmmu_mlist_exit(pml);
4633 			SFMMU_HASH_UNLOCK(hmebp);
4634 			pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4635 			/* check zvp before giving up */
4636 			if (pp == NULL)
4637 				pp = page_lookup(&zvp, (u_offset_t)saddr,
4638 				    SE_SHARED);
4639 
4640 			if (pp == NULL) {
4641 				ASSERT(cookie == NULL);
4642 				return;
4643 			}
4644 			page_unlock(pp);
4645 			goto rehash;
4646 		}
4647 		locked = 1;
4648 	}
4649 
4650 	ASSERT(PAGE_LOCKED(pp));
4651 
4652 	if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4653 	    pp->p_offset != off) {
4654 		/*
4655 		 * The page moved before we got our hands on it.  Drop
4656 		 * all the locks and try again.
4657 		 */
4658 		ASSERT((flags & HAC_PAGELOCK) != 0);
4659 		sfmmu_mlist_exit(pml);
4660 		SFMMU_HASH_UNLOCK(hmebp);
4661 		page_unlock(pp);
4662 		locked = 0;
4663 		goto rehash;
4664 	}
4665 
4666 	if (!VN_ISKAS(vp)) {
4667 		/*
4668 		 * This is not a segkmem page but another page which
4669 		 * has been kernel mapped.
4670 		 */
4671 		sfmmu_mlist_exit(pml);
4672 		SFMMU_HASH_UNLOCK(hmebp);
4673 		if (locked)
4674 			page_unlock(pp);
4675 		ASSERT(cookie == NULL);
4676 		return;
4677 	}
4678 
4679 	if (cookie != NULL) {
4680 		pahmep = (struct pa_hment *)cookie;
4681 		sfhmep = &pahmep->sfment;
4682 	} else {
4683 		for (sfhmep = pp->p_mapping; sfhmep != NULL;
4684 		    sfhmep = sfhmep->hme_next) {
4685 
4686 			/*
4687 			 * skip va<->pa mappings
4688 			 */
4689 			if (!IS_PAHME(sfhmep))
4690 				continue;
4691 
4692 			pahmep = sfhmep->hme_data;
4693 			ASSERT(pahmep != NULL);
4694 
4695 			/*
4696 			 * if pa_hment matches, remove it
4697 			 */
4698 			if ((pahmep->pvt == pvt) &&
4699 			    (pahmep->addr == vaddr) &&
4700 			    (pahmep->len == len)) {
4701 				break;
4702 			}
4703 		}
4704 	}
4705 
4706 	if (sfhmep == NULL) {
4707 		if (!panicstr) {
4708 			panic("hat_delete_callback: pa_hment not found, pp %p",
4709 			    (void *)pp);
4710 		}
4711 		return;
4712 	}
4713 
4714 	/*
4715 	 * Note: at this point a valid kernel mapping must still be
4716 	 * present on this page.
4717 	 */
4718 	pp->p_share--;
4719 	if (pp->p_share <= 0)
4720 		panic("hat_delete_callback: zero p_share");
4721 
4722 	if (--pahmep->refcnt == 0) {
4723 		if (pahmep->flags != 0)
4724 			panic("hat_delete_callback: pa_hment is busy");
4725 
4726 		/*
4727 		 * Remove sfhmep from the mapping list for the page.
4728 		 */
4729 		if (sfhmep->hme_prev) {
4730 			sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4731 		} else {
4732 			pp->p_mapping = sfhmep->hme_next;
4733 		}
4734 
4735 		if (sfhmep->hme_next)
4736 			sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4737 
4738 		sfmmu_mlist_exit(pml);
4739 		SFMMU_HASH_UNLOCK(hmebp);
4740 
4741 		if (locked)
4742 			page_unlock(pp);
4743 
4744 		kmem_cache_free(pa_hment_cache, pahmep);
4745 		return;
4746 	}
4747 
4748 	sfmmu_mlist_exit(pml);
4749 	SFMMU_HASH_UNLOCK(hmebp);
4750 	if (locked)
4751 		page_unlock(pp);
4752 }
4753 
4754 /*
4755  * hat_probe returns 1 if the translation for the address 'addr' is
4756  * loaded, zero otherwise.
4757  *
4758  * hat_probe should be used only for advisorary purposes because it may
4759  * occasionally return the wrong value. The implementation must guarantee that
4760  * returning the wrong value is a very rare event. hat_probe is used
4761  * to implement optimizations in the segment drivers.
4762  *
4763  */
4764 int
4765 hat_probe(struct hat *sfmmup, caddr_t addr)
4766 {
4767 	pfn_t pfn;
4768 	tte_t tte;
4769 
4770 	ASSERT(sfmmup != NULL);
4771 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4772 
4773 	ASSERT((sfmmup == ksfmmup) ||
4774 	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4775 
4776 	if (sfmmup == ksfmmup) {
4777 		while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4778 		    == PFN_SUSPENDED) {
4779 			sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4780 		}
4781 	} else {
4782 		pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4783 	}
4784 
4785 	if (pfn != PFN_INVALID)
4786 		return (1);
4787 	else
4788 		return (0);
4789 }
4790 
4791 ssize_t
4792 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4793 {
4794 	tte_t tte;
4795 
4796 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4797 
4798 	if (sfmmup == ksfmmup) {
4799 		if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4800 			return (-1);
4801 		}
4802 	} else {
4803 		if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4804 			return (-1);
4805 		}
4806 	}
4807 
4808 	ASSERT(TTE_IS_VALID(&tte));
4809 	return (TTEBYTES(TTE_CSZ(&tte)));
4810 }
4811 
4812 uint_t
4813 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4814 {
4815 	tte_t tte;
4816 
4817 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4818 
4819 	if (sfmmup == ksfmmup) {
4820 		if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4821 			tte.ll = 0;
4822 		}
4823 	} else {
4824 		if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4825 			tte.ll = 0;
4826 		}
4827 	}
4828 	if (TTE_IS_VALID(&tte)) {
4829 		*attr = sfmmu_ptov_attr(&tte);
4830 		return (0);
4831 	}
4832 	*attr = 0;
4833 	return ((uint_t)0xffffffff);
4834 }
4835 
4836 /*
4837  * Enables more attributes on specified address range (ie. logical OR)
4838  */
4839 void
4840 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4841 {
4842 	if (hat->sfmmu_xhat_provider) {
4843 		XHAT_SETATTR(hat, addr, len, attr);
4844 		return;
4845 	} else {
4846 		/*
4847 		 * This must be a CPU HAT. If the address space has
4848 		 * XHATs attached, change attributes for all of them,
4849 		 * just in case
4850 		 */
4851 		ASSERT(hat->sfmmu_as != NULL);
4852 		if (hat->sfmmu_as->a_xhat != NULL)
4853 			xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
4854 	}
4855 
4856 	sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4857 }
4858 
4859 /*
4860  * Assigns attributes to the specified address range.  All the attributes
4861  * are specified.
4862  */
4863 void
4864 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4865 {
4866 	if (hat->sfmmu_xhat_provider) {
4867 		XHAT_CHGATTR(hat, addr, len, attr);
4868 		return;
4869 	} else {
4870 		/*
4871 		 * This must be a CPU HAT. If the address space has
4872 		 * XHATs attached, change attributes for all of them,
4873 		 * just in case
4874 		 */
4875 		ASSERT(hat->sfmmu_as != NULL);
4876 		if (hat->sfmmu_as->a_xhat != NULL)
4877 			xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
4878 	}
4879 
4880 	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4881 }
4882 
4883 /*
4884  * Remove attributes on the specified address range (ie. loginal NAND)
4885  */
4886 void
4887 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4888 {
4889 	if (hat->sfmmu_xhat_provider) {
4890 		XHAT_CLRATTR(hat, addr, len, attr);
4891 		return;
4892 	} else {
4893 		/*
4894 		 * This must be a CPU HAT. If the address space has
4895 		 * XHATs attached, change attributes for all of them,
4896 		 * just in case
4897 		 */
4898 		ASSERT(hat->sfmmu_as != NULL);
4899 		if (hat->sfmmu_as->a_xhat != NULL)
4900 			xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
4901 	}
4902 
4903 	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4904 }
4905 
4906 /*
4907  * Change attributes on an address range to that specified by attr and mode.
4908  */
4909 static void
4910 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4911 	int mode)
4912 {
4913 	struct hmehash_bucket *hmebp;
4914 	hmeblk_tag hblktag;
4915 	int hmeshift, hashno = 1;
4916 	struct hme_blk *hmeblkp, *list = NULL;
4917 	caddr_t endaddr;
4918 	cpuset_t cpuset;
4919 	demap_range_t dmr;
4920 
4921 	CPUSET_ZERO(cpuset);
4922 
4923 	ASSERT((sfmmup == ksfmmup) ||
4924 	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4925 	ASSERT((len & MMU_PAGEOFFSET) == 0);
4926 	ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4927 
4928 	if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4929 	    ((addr + len) > (caddr_t)USERLIMIT)) {
4930 		panic("user addr %p in kernel space",
4931 		    (void *)addr);
4932 	}
4933 
4934 	endaddr = addr + len;
4935 	hblktag.htag_id = sfmmup;
4936 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4937 	DEMAP_RANGE_INIT(sfmmup, &dmr);
4938 
4939 	while (addr < endaddr) {
4940 		hmeshift = HME_HASH_SHIFT(hashno);
4941 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4942 		hblktag.htag_rehash = hashno;
4943 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4944 
4945 		SFMMU_HASH_LOCK(hmebp);
4946 
4947 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4948 		if (hmeblkp != NULL) {
4949 			ASSERT(!hmeblkp->hblk_shared);
4950 			/*
4951 			 * We've encountered a shadow hmeblk so skip the range
4952 			 * of the next smaller mapping size.
4953 			 */
4954 			if (hmeblkp->hblk_shw_bit) {
4955 				ASSERT(sfmmup != ksfmmup);
4956 				ASSERT(hashno > 1);
4957 				addr = (caddr_t)P2END((uintptr_t)addr,
4958 				    TTEBYTES(hashno - 1));
4959 			} else {
4960 				addr = sfmmu_hblk_chgattr(sfmmup,
4961 				    hmeblkp, addr, endaddr, &dmr, attr, mode);
4962 			}
4963 			SFMMU_HASH_UNLOCK(hmebp);
4964 			hashno = 1;
4965 			continue;
4966 		}
4967 		SFMMU_HASH_UNLOCK(hmebp);
4968 
4969 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4970 			/*
4971 			 * We have traversed the whole list and rehashed
4972 			 * if necessary without finding the address to chgattr.
4973 			 * This is ok, so we increment the address by the
4974 			 * smallest hmeblk range for kernel mappings or for
4975 			 * user mappings with no large pages, and the largest
4976 			 * hmeblk range, to account for shadow hmeblks, for
4977 			 * user mappings with large pages and continue.
4978 			 */
4979 			if (sfmmup == ksfmmup)
4980 				addr = (caddr_t)P2END((uintptr_t)addr,
4981 				    TTEBYTES(1));
4982 			else
4983 				addr = (caddr_t)P2END((uintptr_t)addr,
4984 				    TTEBYTES(hashno));
4985 			hashno = 1;
4986 		} else {
4987 			hashno++;
4988 		}
4989 	}
4990 
4991 	sfmmu_hblks_list_purge(&list, 0);
4992 	DEMAP_RANGE_FLUSH(&dmr);
4993 	cpuset = sfmmup->sfmmu_cpusran;
4994 	xt_sync(cpuset);
4995 }
4996 
4997 /*
4998  * This function chgattr on a range of addresses in an hmeblk.  It returns the
4999  * next addres that needs to be chgattr.
5000  * It should be called with the hash lock held.
5001  * XXX It should be possible to optimize chgattr by not flushing every time but
5002  * on the other hand:
5003  * 1. do one flush crosscall.
5004  * 2. only flush if we are increasing permissions (make sure this will work)
5005  */
5006 static caddr_t
5007 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5008 	caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
5009 {
5010 	tte_t tte, tteattr, tteflags, ttemod;
5011 	struct sf_hment *sfhmep;
5012 	int ttesz;
5013 	struct page *pp = NULL;
5014 	kmutex_t *pml, *pmtx;
5015 	int ret;
5016 	int use_demap_range;
5017 #if defined(SF_ERRATA_57)
5018 	int check_exec;
5019 #endif
5020 
5021 	ASSERT(in_hblk_range(hmeblkp, addr));
5022 	ASSERT(hmeblkp->hblk_shw_bit == 0);
5023 	ASSERT(!hmeblkp->hblk_shared);
5024 
5025 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5026 	ttesz = get_hblk_ttesz(hmeblkp);
5027 
5028 	/*
5029 	 * Flush the current demap region if addresses have been
5030 	 * skipped or the page size doesn't match.
5031 	 */
5032 	use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
5033 	if (use_demap_range) {
5034 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5035 	} else {
5036 		DEMAP_RANGE_FLUSH(dmrp);
5037 	}
5038 
5039 	tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
5040 #if defined(SF_ERRATA_57)
5041 	check_exec = (sfmmup != ksfmmup) &&
5042 	    AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5043 	    TTE_IS_EXECUTABLE(&tteattr);
5044 #endif
5045 	HBLKTOHME(sfhmep, hmeblkp, addr);
5046 	while (addr < endaddr) {
5047 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
5048 		if (TTE_IS_VALID(&tte)) {
5049 			if ((tte.ll & tteflags.ll) == tteattr.ll) {
5050 				/*
5051 				 * if the new attr is the same as old
5052 				 * continue
5053 				 */
5054 				goto next_addr;
5055 			}
5056 			if (!TTE_IS_WRITABLE(&tteattr)) {
5057 				/*
5058 				 * make sure we clear hw modify bit if we
5059 				 * removing write protections
5060 				 */
5061 				tteflags.tte_intlo |= TTE_HWWR_INT;
5062 			}
5063 
5064 			pml = NULL;
5065 			pp = sfhmep->hme_page;
5066 			if (pp) {
5067 				pml = sfmmu_mlist_enter(pp);
5068 			}
5069 
5070 			if (pp != sfhmep->hme_page) {
5071 				/*
5072 				 * tte must have been unloaded.
5073 				 */
5074 				ASSERT(pml);
5075 				sfmmu_mlist_exit(pml);
5076 				continue;
5077 			}
5078 
5079 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5080 
5081 			ttemod = tte;
5082 			ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
5083 			ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
5084 
5085 #if defined(SF_ERRATA_57)
5086 			if (check_exec && addr < errata57_limit)
5087 				ttemod.tte_exec_perm = 0;
5088 #endif
5089 			ret = sfmmu_modifytte_try(&tte, &ttemod,
5090 			    &sfhmep->hme_tte);
5091 
5092 			if (ret < 0) {
5093 				/* tte changed underneath us */
5094 				if (pml) {
5095 					sfmmu_mlist_exit(pml);
5096 				}
5097 				continue;
5098 			}
5099 
5100 			if (tteflags.tte_intlo & TTE_HWWR_INT) {
5101 				/*
5102 				 * need to sync if we are clearing modify bit.
5103 				 */
5104 				sfmmu_ttesync(sfmmup, addr, &tte, pp);
5105 			}
5106 
5107 			if (pp && PP_ISRO(pp)) {
5108 				if (tteattr.tte_intlo & TTE_WRPRM_INT) {
5109 					pmtx = sfmmu_page_enter(pp);
5110 					PP_CLRRO(pp);
5111 					sfmmu_page_exit(pmtx);
5112 				}
5113 			}
5114 
5115 			if (ret > 0 && use_demap_range) {
5116 				DEMAP_RANGE_MARKPG(dmrp, addr);
5117 			} else if (ret > 0) {
5118 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5119 			}
5120 
5121 			if (pml) {
5122 				sfmmu_mlist_exit(pml);
5123 			}
5124 		}
5125 next_addr:
5126 		addr += TTEBYTES(ttesz);
5127 		sfhmep++;
5128 		DEMAP_RANGE_NEXTPG(dmrp);
5129 	}
5130 	return (addr);
5131 }
5132 
5133 /*
5134  * This routine converts virtual attributes to physical ones.  It will
5135  * update the tteflags field with the tte mask corresponding to the attributes
5136  * affected and it returns the new attributes.  It will also clear the modify
5137  * bit if we are taking away write permission.  This is necessary since the
5138  * modify bit is the hardware permission bit and we need to clear it in order
5139  * to detect write faults.
5140  */
5141 static uint64_t
5142 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
5143 {
5144 	tte_t ttevalue;
5145 
5146 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
5147 
5148 	switch (mode) {
5149 	case SFMMU_CHGATTR:
5150 		/* all attributes specified */
5151 		ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
5152 		ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
5153 		ttemaskp->tte_inthi = TTEINTHI_ATTR;
5154 		ttemaskp->tte_intlo = TTEINTLO_ATTR;
5155 		break;
5156 	case SFMMU_SETATTR:
5157 		ASSERT(!(attr & ~HAT_PROT_MASK));
5158 		ttemaskp->ll = 0;
5159 		ttevalue.ll = 0;
5160 		/*
5161 		 * a valid tte implies exec and read for sfmmu
5162 		 * so no need to do anything about them.
5163 		 * since priviledged access implies user access
5164 		 * PROT_USER doesn't make sense either.
5165 		 */
5166 		if (attr & PROT_WRITE) {
5167 			ttemaskp->tte_intlo |= TTE_WRPRM_INT;
5168 			ttevalue.tte_intlo |= TTE_WRPRM_INT;
5169 		}
5170 		break;
5171 	case SFMMU_CLRATTR:
5172 		/* attributes will be nand with current ones */
5173 		if (attr & ~(PROT_WRITE | PROT_USER)) {
5174 			panic("sfmmu: attr %x not supported", attr);
5175 		}
5176 		ttemaskp->ll = 0;
5177 		ttevalue.ll = 0;
5178 		if (attr & PROT_WRITE) {
5179 			/* clear both writable and modify bit */
5180 			ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
5181 		}
5182 		if (attr & PROT_USER) {
5183 			ttemaskp->tte_intlo |= TTE_PRIV_INT;
5184 			ttevalue.tte_intlo |= TTE_PRIV_INT;
5185 		}
5186 		break;
5187 	default:
5188 		panic("sfmmu_vtop_attr: bad mode %x", mode);
5189 	}
5190 	ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
5191 	return (ttevalue.ll);
5192 }
5193 
5194 static uint_t
5195 sfmmu_ptov_attr(tte_t *ttep)
5196 {
5197 	uint_t attr;
5198 
5199 	ASSERT(TTE_IS_VALID(ttep));
5200 
5201 	attr = PROT_READ;
5202 
5203 	if (TTE_IS_WRITABLE(ttep)) {
5204 		attr |= PROT_WRITE;
5205 	}
5206 	if (TTE_IS_EXECUTABLE(ttep)) {
5207 		attr |= PROT_EXEC;
5208 	}
5209 	if (!TTE_IS_PRIVILEGED(ttep)) {
5210 		attr |= PROT_USER;
5211 	}
5212 	if (TTE_IS_NFO(ttep)) {
5213 		attr |= HAT_NOFAULT;
5214 	}
5215 	if (TTE_IS_NOSYNC(ttep)) {
5216 		attr |= HAT_NOSYNC;
5217 	}
5218 	if (TTE_IS_SIDEFFECT(ttep)) {
5219 		attr |= SFMMU_SIDEFFECT;
5220 	}
5221 	if (!TTE_IS_VCACHEABLE(ttep)) {
5222 		attr |= SFMMU_UNCACHEVTTE;
5223 	}
5224 	if (!TTE_IS_PCACHEABLE(ttep)) {
5225 		attr |= SFMMU_UNCACHEPTTE;
5226 	}
5227 	return (attr);
5228 }
5229 
5230 /*
5231  * hat_chgprot is a deprecated hat call.  New segment drivers
5232  * should store all attributes and use hat_*attr calls.
5233  *
5234  * Change the protections in the virtual address range
5235  * given to the specified virtual protection.  If vprot is ~PROT_WRITE,
5236  * then remove write permission, leaving the other
5237  * permissions unchanged.  If vprot is ~PROT_USER, remove user permissions.
5238  *
5239  */
5240 void
5241 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5242 {
5243 	struct hmehash_bucket *hmebp;
5244 	hmeblk_tag hblktag;
5245 	int hmeshift, hashno = 1;
5246 	struct hme_blk *hmeblkp, *list = NULL;
5247 	caddr_t endaddr;
5248 	cpuset_t cpuset;
5249 	demap_range_t dmr;
5250 
5251 	ASSERT((len & MMU_PAGEOFFSET) == 0);
5252 	ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5253 
5254 	if (sfmmup->sfmmu_xhat_provider) {
5255 		XHAT_CHGPROT(sfmmup, addr, len, vprot);
5256 		return;
5257 	} else {
5258 		/*
5259 		 * This must be a CPU HAT. If the address space has
5260 		 * XHATs attached, change attributes for all of them,
5261 		 * just in case
5262 		 */
5263 		ASSERT(sfmmup->sfmmu_as != NULL);
5264 		if (sfmmup->sfmmu_as->a_xhat != NULL)
5265 			xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
5266 	}
5267 
5268 	CPUSET_ZERO(cpuset);
5269 
5270 	if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5271 	    ((addr + len) > (caddr_t)USERLIMIT)) {
5272 		panic("user addr %p vprot %x in kernel space",
5273 		    (void *)addr, vprot);
5274 	}
5275 	endaddr = addr + len;
5276 	hblktag.htag_id = sfmmup;
5277 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5278 	DEMAP_RANGE_INIT(sfmmup, &dmr);
5279 
5280 	while (addr < endaddr) {
5281 		hmeshift = HME_HASH_SHIFT(hashno);
5282 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5283 		hblktag.htag_rehash = hashno;
5284 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5285 
5286 		SFMMU_HASH_LOCK(hmebp);
5287 
5288 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5289 		if (hmeblkp != NULL) {
5290 			ASSERT(!hmeblkp->hblk_shared);
5291 			/*
5292 			 * We've encountered a shadow hmeblk so skip the range
5293 			 * of the next smaller mapping size.
5294 			 */
5295 			if (hmeblkp->hblk_shw_bit) {
5296 				ASSERT(sfmmup != ksfmmup);
5297 				ASSERT(hashno > 1);
5298 				addr = (caddr_t)P2END((uintptr_t)addr,
5299 				    TTEBYTES(hashno - 1));
5300 			} else {
5301 				addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5302 				    addr, endaddr, &dmr, vprot);
5303 			}
5304 			SFMMU_HASH_UNLOCK(hmebp);
5305 			hashno = 1;
5306 			continue;
5307 		}
5308 		SFMMU_HASH_UNLOCK(hmebp);
5309 
5310 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5311 			/*
5312 			 * We have traversed the whole list and rehashed
5313 			 * if necessary without finding the address to chgprot.
5314 			 * This is ok so we increment the address by the
5315 			 * smallest hmeblk range for kernel mappings and the
5316 			 * largest hmeblk range, to account for shadow hmeblks,
5317 			 * for user mappings and continue.
5318 			 */
5319 			if (sfmmup == ksfmmup)
5320 				addr = (caddr_t)P2END((uintptr_t)addr,
5321 				    TTEBYTES(1));
5322 			else
5323 				addr = (caddr_t)P2END((uintptr_t)addr,
5324 				    TTEBYTES(hashno));
5325 			hashno = 1;
5326 		} else {
5327 			hashno++;
5328 		}
5329 	}
5330 
5331 	sfmmu_hblks_list_purge(&list, 0);
5332 	DEMAP_RANGE_FLUSH(&dmr);
5333 	cpuset = sfmmup->sfmmu_cpusran;
5334 	xt_sync(cpuset);
5335 }
5336 
5337 /*
5338  * This function chgprots a range of addresses in an hmeblk.  It returns the
5339  * next addres that needs to be chgprot.
5340  * It should be called with the hash lock held.
5341  * XXX It shold be possible to optimize chgprot by not flushing every time but
5342  * on the other hand:
5343  * 1. do one flush crosscall.
5344  * 2. only flush if we are increasing permissions (make sure this will work)
5345  */
5346 static caddr_t
5347 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5348 	caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5349 {
5350 	uint_t pprot;
5351 	tte_t tte, ttemod;
5352 	struct sf_hment *sfhmep;
5353 	uint_t tteflags;
5354 	int ttesz;
5355 	struct page *pp = NULL;
5356 	kmutex_t *pml, *pmtx;
5357 	int ret;
5358 	int use_demap_range;
5359 #if defined(SF_ERRATA_57)
5360 	int check_exec;
5361 #endif
5362 
5363 	ASSERT(in_hblk_range(hmeblkp, addr));
5364 	ASSERT(hmeblkp->hblk_shw_bit == 0);
5365 	ASSERT(!hmeblkp->hblk_shared);
5366 
5367 #ifdef DEBUG
5368 	if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5369 	    (endaddr < get_hblk_endaddr(hmeblkp))) {
5370 		panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5371 	}
5372 #endif /* DEBUG */
5373 
5374 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5375 	ttesz = get_hblk_ttesz(hmeblkp);
5376 
5377 	pprot = sfmmu_vtop_prot(vprot, &tteflags);
5378 #if defined(SF_ERRATA_57)
5379 	check_exec = (sfmmup != ksfmmup) &&
5380 	    AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5381 	    ((vprot & PROT_EXEC) == PROT_EXEC);
5382 #endif
5383 	HBLKTOHME(sfhmep, hmeblkp, addr);
5384 
5385 	/*
5386 	 * Flush the current demap region if addresses have been
5387 	 * skipped or the page size doesn't match.
5388 	 */
5389 	use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5390 	if (use_demap_range) {
5391 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5392 	} else {
5393 		DEMAP_RANGE_FLUSH(dmrp);
5394 	}
5395 
5396 	while (addr < endaddr) {
5397 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
5398 		if (TTE_IS_VALID(&tte)) {
5399 			if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5400 				/*
5401 				 * if the new protection is the same as old
5402 				 * continue
5403 				 */
5404 				goto next_addr;
5405 			}
5406 			pml = NULL;
5407 			pp = sfhmep->hme_page;
5408 			if (pp) {
5409 				pml = sfmmu_mlist_enter(pp);
5410 			}
5411 			if (pp != sfhmep->hme_page) {
5412 				/*
5413 				 * tte most have been unloaded
5414 				 * underneath us.  Recheck
5415 				 */
5416 				ASSERT(pml);
5417 				sfmmu_mlist_exit(pml);
5418 				continue;
5419 			}
5420 
5421 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5422 
5423 			ttemod = tte;
5424 			TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5425 #if defined(SF_ERRATA_57)
5426 			if (check_exec && addr < errata57_limit)
5427 				ttemod.tte_exec_perm = 0;
5428 #endif
5429 			ret = sfmmu_modifytte_try(&tte, &ttemod,
5430 			    &sfhmep->hme_tte);
5431 
5432 			if (ret < 0) {
5433 				/* tte changed underneath us */
5434 				if (pml) {
5435 					sfmmu_mlist_exit(pml);
5436 				}
5437 				continue;
5438 			}
5439 
5440 			if (tteflags & TTE_HWWR_INT) {
5441 				/*
5442 				 * need to sync if we are clearing modify bit.
5443 				 */
5444 				sfmmu_ttesync(sfmmup, addr, &tte, pp);
5445 			}
5446 
5447 			if (pp && PP_ISRO(pp)) {
5448 				if (pprot & TTE_WRPRM_INT) {
5449 					pmtx = sfmmu_page_enter(pp);
5450 					PP_CLRRO(pp);
5451 					sfmmu_page_exit(pmtx);
5452 				}
5453 			}
5454 
5455 			if (ret > 0 && use_demap_range) {
5456 				DEMAP_RANGE_MARKPG(dmrp, addr);
5457 			} else if (ret > 0) {
5458 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5459 			}
5460 
5461 			if (pml) {
5462 				sfmmu_mlist_exit(pml);
5463 			}
5464 		}
5465 next_addr:
5466 		addr += TTEBYTES(ttesz);
5467 		sfhmep++;
5468 		DEMAP_RANGE_NEXTPG(dmrp);
5469 	}
5470 	return (addr);
5471 }
5472 
5473 /*
5474  * This routine is deprecated and should only be used by hat_chgprot.
5475  * The correct routine is sfmmu_vtop_attr.
5476  * This routine converts virtual page protections to physical ones.  It will
5477  * update the tteflags field with the tte mask corresponding to the protections
5478  * affected and it returns the new protections.  It will also clear the modify
5479  * bit if we are taking away write permission.  This is necessary since the
5480  * modify bit is the hardware permission bit and we need to clear it in order
5481  * to detect write faults.
5482  * It accepts the following special protections:
5483  * ~PROT_WRITE = remove write permissions.
5484  * ~PROT_USER = remove user permissions.
5485  */
5486 static uint_t
5487 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5488 {
5489 	if (vprot == (uint_t)~PROT_WRITE) {
5490 		*tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5491 		return (0);		/* will cause wrprm to be cleared */
5492 	}
5493 	if (vprot == (uint_t)~PROT_USER) {
5494 		*tteflagsp = TTE_PRIV_INT;
5495 		return (0);		/* will cause privprm to be cleared */
5496 	}
5497 	if ((vprot == 0) || (vprot == PROT_USER) ||
5498 	    ((vprot & PROT_ALL) != vprot)) {
5499 		panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5500 	}
5501 
5502 	switch (vprot) {
5503 	case (PROT_READ):
5504 	case (PROT_EXEC):
5505 	case (PROT_EXEC | PROT_READ):
5506 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5507 		return (TTE_PRIV_INT); 		/* set prv and clr wrt */
5508 	case (PROT_WRITE):
5509 	case (PROT_WRITE | PROT_READ):
5510 	case (PROT_EXEC | PROT_WRITE):
5511 	case (PROT_EXEC | PROT_WRITE | PROT_READ):
5512 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5513 		return (TTE_PRIV_INT | TTE_WRPRM_INT); 	/* set prv and wrt */
5514 	case (PROT_USER | PROT_READ):
5515 	case (PROT_USER | PROT_EXEC):
5516 	case (PROT_USER | PROT_EXEC | PROT_READ):
5517 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5518 		return (0); 			/* clr prv and wrt */
5519 	case (PROT_USER | PROT_WRITE):
5520 	case (PROT_USER | PROT_WRITE | PROT_READ):
5521 	case (PROT_USER | PROT_EXEC | PROT_WRITE):
5522 	case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5523 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5524 		return (TTE_WRPRM_INT); 	/* clr prv and set wrt */
5525 	default:
5526 		panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5527 	}
5528 	return (0);
5529 }
5530 
5531 /*
5532  * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5533  * the normal algorithm would take too long for a very large VA range with
5534  * few real mappings. This routine just walks thru all HMEs in the global
5535  * hash table to find and remove mappings.
5536  */
5537 static void
5538 hat_unload_large_virtual(
5539 	struct hat		*sfmmup,
5540 	caddr_t			startaddr,
5541 	size_t			len,
5542 	uint_t			flags,
5543 	hat_callback_t		*callback)
5544 {
5545 	struct hmehash_bucket *hmebp;
5546 	struct hme_blk *hmeblkp;
5547 	struct hme_blk *pr_hblk = NULL;
5548 	struct hme_blk *nx_hblk;
5549 	struct hme_blk *list = NULL;
5550 	int i;
5551 	demap_range_t dmr, *dmrp;
5552 	cpuset_t cpuset;
5553 	caddr_t	endaddr = startaddr + len;
5554 	caddr_t	sa;
5555 	caddr_t	ea;
5556 	caddr_t	cb_sa[MAX_CB_ADDR];
5557 	caddr_t	cb_ea[MAX_CB_ADDR];
5558 	int	addr_cnt = 0;
5559 	int	a = 0;
5560 
5561 	if (sfmmup->sfmmu_free) {
5562 		dmrp = NULL;
5563 	} else {
5564 		dmrp = &dmr;
5565 		DEMAP_RANGE_INIT(sfmmup, dmrp);
5566 	}
5567 
5568 	/*
5569 	 * Loop through all the hash buckets of HME blocks looking for matches.
5570 	 */
5571 	for (i = 0; i <= UHMEHASH_SZ; i++) {
5572 		hmebp = &uhme_hash[i];
5573 		SFMMU_HASH_LOCK(hmebp);
5574 		hmeblkp = hmebp->hmeblkp;
5575 		pr_hblk = NULL;
5576 		while (hmeblkp) {
5577 			nx_hblk = hmeblkp->hblk_next;
5578 
5579 			/*
5580 			 * skip if not this context, if a shadow block or
5581 			 * if the mapping is not in the requested range
5582 			 */
5583 			if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5584 			    hmeblkp->hblk_shw_bit ||
5585 			    (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5586 			    (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5587 				pr_hblk = hmeblkp;
5588 				goto next_block;
5589 			}
5590 
5591 			ASSERT(!hmeblkp->hblk_shared);
5592 			/*
5593 			 * unload if there are any current valid mappings
5594 			 */
5595 			if (hmeblkp->hblk_vcnt != 0 ||
5596 			    hmeblkp->hblk_hmecnt != 0)
5597 				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5598 				    sa, ea, dmrp, flags);
5599 
5600 			/*
5601 			 * on unmap we also release the HME block itself, once
5602 			 * all mappings are gone.
5603 			 */
5604 			if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5605 			    !hmeblkp->hblk_vcnt &&
5606 			    !hmeblkp->hblk_hmecnt) {
5607 				ASSERT(!hmeblkp->hblk_lckcnt);
5608 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5609 				    &list, 0);
5610 			} else {
5611 				pr_hblk = hmeblkp;
5612 			}
5613 
5614 			if (callback == NULL)
5615 				goto next_block;
5616 
5617 			/*
5618 			 * HME blocks may span more than one page, but we may be
5619 			 * unmapping only one page, so check for a smaller range
5620 			 * for the callback
5621 			 */
5622 			if (sa < startaddr)
5623 				sa = startaddr;
5624 			if (--ea > endaddr)
5625 				ea = endaddr - 1;
5626 
5627 			cb_sa[addr_cnt] = sa;
5628 			cb_ea[addr_cnt] = ea;
5629 			if (++addr_cnt == MAX_CB_ADDR) {
5630 				if (dmrp != NULL) {
5631 					DEMAP_RANGE_FLUSH(dmrp);
5632 					cpuset = sfmmup->sfmmu_cpusran;
5633 					xt_sync(cpuset);
5634 				}
5635 
5636 				for (a = 0; a < MAX_CB_ADDR; ++a) {
5637 					callback->hcb_start_addr = cb_sa[a];
5638 					callback->hcb_end_addr = cb_ea[a];
5639 					callback->hcb_function(callback);
5640 				}
5641 				addr_cnt = 0;
5642 			}
5643 
5644 next_block:
5645 			hmeblkp = nx_hblk;
5646 		}
5647 		SFMMU_HASH_UNLOCK(hmebp);
5648 	}
5649 
5650 	sfmmu_hblks_list_purge(&list, 0);
5651 	if (dmrp != NULL) {
5652 		DEMAP_RANGE_FLUSH(dmrp);
5653 		cpuset = sfmmup->sfmmu_cpusran;
5654 		xt_sync(cpuset);
5655 	}
5656 
5657 	for (a = 0; a < addr_cnt; ++a) {
5658 		callback->hcb_start_addr = cb_sa[a];
5659 		callback->hcb_end_addr = cb_ea[a];
5660 		callback->hcb_function(callback);
5661 	}
5662 
5663 	/*
5664 	 * Check TSB and TLB page sizes if the process isn't exiting.
5665 	 */
5666 	if (!sfmmup->sfmmu_free)
5667 		sfmmu_check_page_sizes(sfmmup, 0);
5668 }
5669 
5670 /*
5671  * Unload all the mappings in the range [addr..addr+len). addr and len must
5672  * be MMU_PAGESIZE aligned.
5673  */
5674 
5675 extern struct seg *segkmap;
5676 #define	ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5677 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5678 
5679 
5680 void
5681 hat_unload_callback(
5682 	struct hat *sfmmup,
5683 	caddr_t addr,
5684 	size_t len,
5685 	uint_t flags,
5686 	hat_callback_t *callback)
5687 {
5688 	struct hmehash_bucket *hmebp;
5689 	hmeblk_tag hblktag;
5690 	int hmeshift, hashno, iskernel;
5691 	struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5692 	caddr_t endaddr;
5693 	cpuset_t cpuset;
5694 	int addr_count = 0;
5695 	int a;
5696 	caddr_t cb_start_addr[MAX_CB_ADDR];
5697 	caddr_t cb_end_addr[MAX_CB_ADDR];
5698 	int issegkmap = ISSEGKMAP(sfmmup, addr);
5699 	demap_range_t dmr, *dmrp;
5700 
5701 	if (sfmmup->sfmmu_xhat_provider) {
5702 		XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
5703 		return;
5704 	} else {
5705 		/*
5706 		 * This must be a CPU HAT. If the address space has
5707 		 * XHATs attached, unload the mappings for all of them,
5708 		 * just in case
5709 		 */
5710 		ASSERT(sfmmup->sfmmu_as != NULL);
5711 		if (sfmmup->sfmmu_as->a_xhat != NULL)
5712 			xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
5713 			    len, flags, callback);
5714 	}
5715 
5716 	ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5717 	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5718 
5719 	ASSERT(sfmmup != NULL);
5720 	ASSERT((len & MMU_PAGEOFFSET) == 0);
5721 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5722 
5723 	/*
5724 	 * Probing through a large VA range (say 63 bits) will be slow, even
5725 	 * at 4 Meg steps between the probes. So, when the virtual address range
5726 	 * is very large, search the HME entries for what to unload.
5727 	 *
5728 	 *	len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5729 	 *
5730 	 *	UHMEHASH_SZ is number of hash buckets to examine
5731 	 *
5732 	 */
5733 	if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5734 		hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5735 		return;
5736 	}
5737 
5738 	CPUSET_ZERO(cpuset);
5739 
5740 	/*
5741 	 * If the process is exiting, we can save a lot of fuss since
5742 	 * we'll flush the TLB when we free the ctx anyway.
5743 	 */
5744 	if (sfmmup->sfmmu_free)
5745 		dmrp = NULL;
5746 	else
5747 		dmrp = &dmr;
5748 
5749 	DEMAP_RANGE_INIT(sfmmup, dmrp);
5750 	endaddr = addr + len;
5751 	hblktag.htag_id = sfmmup;
5752 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5753 
5754 	/*
5755 	 * It is likely for the vm to call unload over a wide range of
5756 	 * addresses that are actually very sparsely populated by
5757 	 * translations.  In order to speed this up the sfmmu hat supports
5758 	 * the concept of shadow hmeblks. Dummy large page hmeblks that
5759 	 * correspond to actual small translations are allocated at tteload
5760 	 * time and are referred to as shadow hmeblks.  Now, during unload
5761 	 * time, we first check if we have a shadow hmeblk for that
5762 	 * translation.  The absence of one means the corresponding address
5763 	 * range is empty and can be skipped.
5764 	 *
5765 	 * The kernel is an exception to above statement and that is why
5766 	 * we don't use shadow hmeblks and hash starting from the smallest
5767 	 * page size.
5768 	 */
5769 	if (sfmmup == KHATID) {
5770 		iskernel = 1;
5771 		hashno = TTE64K;
5772 	} else {
5773 		iskernel = 0;
5774 		if (mmu_page_sizes == max_mmu_page_sizes) {
5775 			hashno = TTE256M;
5776 		} else {
5777 			hashno = TTE4M;
5778 		}
5779 	}
5780 	while (addr < endaddr) {
5781 		hmeshift = HME_HASH_SHIFT(hashno);
5782 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5783 		hblktag.htag_rehash = hashno;
5784 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5785 
5786 		SFMMU_HASH_LOCK(hmebp);
5787 
5788 		HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5789 		if (hmeblkp == NULL) {
5790 			/*
5791 			 * didn't find an hmeblk. skip the appropiate
5792 			 * address range.
5793 			 */
5794 			SFMMU_HASH_UNLOCK(hmebp);
5795 			if (iskernel) {
5796 				if (hashno < mmu_hashcnt) {
5797 					hashno++;
5798 					continue;
5799 				} else {
5800 					hashno = TTE64K;
5801 					addr = (caddr_t)roundup((uintptr_t)addr
5802 					    + 1, MMU_PAGESIZE64K);
5803 					continue;
5804 				}
5805 			}
5806 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
5807 			    (1 << hmeshift));
5808 			if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5809 				ASSERT(hashno == TTE64K);
5810 				continue;
5811 			}
5812 			if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5813 				hashno = TTE512K;
5814 				continue;
5815 			}
5816 			if (mmu_page_sizes == max_mmu_page_sizes) {
5817 				if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5818 					hashno = TTE4M;
5819 					continue;
5820 				}
5821 				if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5822 					hashno = TTE32M;
5823 					continue;
5824 				}
5825 				hashno = TTE256M;
5826 				continue;
5827 			} else {
5828 				hashno = TTE4M;
5829 				continue;
5830 			}
5831 		}
5832 		ASSERT(hmeblkp);
5833 		ASSERT(!hmeblkp->hblk_shared);
5834 		if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5835 			/*
5836 			 * If the valid count is zero we can skip the range
5837 			 * mapped by this hmeblk.
5838 			 * We free hblks in the case of HAT_UNMAP.  HAT_UNMAP
5839 			 * is used by segment drivers as a hint
5840 			 * that the mapping resource won't be used any longer.
5841 			 * The best example of this is during exit().
5842 			 */
5843 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
5844 			    get_hblk_span(hmeblkp));
5845 			if ((flags & HAT_UNLOAD_UNMAP) ||
5846 			    (iskernel && !issegkmap)) {
5847 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5848 				    &list, 0);
5849 			}
5850 			SFMMU_HASH_UNLOCK(hmebp);
5851 
5852 			if (iskernel) {
5853 				hashno = TTE64K;
5854 				continue;
5855 			}
5856 			if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5857 				ASSERT(hashno == TTE64K);
5858 				continue;
5859 			}
5860 			if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5861 				hashno = TTE512K;
5862 				continue;
5863 			}
5864 			if (mmu_page_sizes == max_mmu_page_sizes) {
5865 				if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5866 					hashno = TTE4M;
5867 					continue;
5868 				}
5869 				if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5870 					hashno = TTE32M;
5871 					continue;
5872 				}
5873 				hashno = TTE256M;
5874 				continue;
5875 			} else {
5876 				hashno = TTE4M;
5877 				continue;
5878 			}
5879 		}
5880 		if (hmeblkp->hblk_shw_bit) {
5881 			/*
5882 			 * If we encounter a shadow hmeblk we know there is
5883 			 * smaller sized hmeblks mapping the same address space.
5884 			 * Decrement the hash size and rehash.
5885 			 */
5886 			ASSERT(sfmmup != KHATID);
5887 			hashno--;
5888 			SFMMU_HASH_UNLOCK(hmebp);
5889 			continue;
5890 		}
5891 
5892 		/*
5893 		 * track callback address ranges.
5894 		 * only start a new range when it's not contiguous
5895 		 */
5896 		if (callback != NULL) {
5897 			if (addr_count > 0 &&
5898 			    addr == cb_end_addr[addr_count - 1])
5899 				--addr_count;
5900 			else
5901 				cb_start_addr[addr_count] = addr;
5902 		}
5903 
5904 		addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5905 		    dmrp, flags);
5906 
5907 		if (callback != NULL)
5908 			cb_end_addr[addr_count++] = addr;
5909 
5910 		if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5911 		    !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5912 			sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5913 		}
5914 		SFMMU_HASH_UNLOCK(hmebp);
5915 
5916 		/*
5917 		 * Notify our caller as to exactly which pages
5918 		 * have been unloaded. We do these in clumps,
5919 		 * to minimize the number of xt_sync()s that need to occur.
5920 		 */
5921 		if (callback != NULL && addr_count == MAX_CB_ADDR) {
5922 			DEMAP_RANGE_FLUSH(dmrp);
5923 			if (dmrp != NULL) {
5924 				cpuset = sfmmup->sfmmu_cpusran;
5925 				xt_sync(cpuset);
5926 			}
5927 
5928 			for (a = 0; a < MAX_CB_ADDR; ++a) {
5929 				callback->hcb_start_addr = cb_start_addr[a];
5930 				callback->hcb_end_addr = cb_end_addr[a];
5931 				callback->hcb_function(callback);
5932 			}
5933 			addr_count = 0;
5934 		}
5935 		if (iskernel) {
5936 			hashno = TTE64K;
5937 			continue;
5938 		}
5939 		if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5940 			ASSERT(hashno == TTE64K);
5941 			continue;
5942 		}
5943 		if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5944 			hashno = TTE512K;
5945 			continue;
5946 		}
5947 		if (mmu_page_sizes == max_mmu_page_sizes) {
5948 			if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5949 				hashno = TTE4M;
5950 				continue;
5951 			}
5952 			if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5953 				hashno = TTE32M;
5954 				continue;
5955 			}
5956 			hashno = TTE256M;
5957 		} else {
5958 			hashno = TTE4M;
5959 		}
5960 	}
5961 
5962 	sfmmu_hblks_list_purge(&list, 0);
5963 	DEMAP_RANGE_FLUSH(dmrp);
5964 	if (dmrp != NULL) {
5965 		cpuset = sfmmup->sfmmu_cpusran;
5966 		xt_sync(cpuset);
5967 	}
5968 	if (callback && addr_count != 0) {
5969 		for (a = 0; a < addr_count; ++a) {
5970 			callback->hcb_start_addr = cb_start_addr[a];
5971 			callback->hcb_end_addr = cb_end_addr[a];
5972 			callback->hcb_function(callback);
5973 		}
5974 	}
5975 
5976 	/*
5977 	 * Check TSB and TLB page sizes if the process isn't exiting.
5978 	 */
5979 	if (!sfmmup->sfmmu_free)
5980 		sfmmu_check_page_sizes(sfmmup, 0);
5981 }
5982 
5983 /*
5984  * Unload all the mappings in the range [addr..addr+len). addr and len must
5985  * be MMU_PAGESIZE aligned.
5986  */
5987 void
5988 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5989 {
5990 	if (sfmmup->sfmmu_xhat_provider) {
5991 		XHAT_UNLOAD(sfmmup, addr, len, flags);
5992 		return;
5993 	}
5994 	hat_unload_callback(sfmmup, addr, len, flags, NULL);
5995 }
5996 
5997 
5998 /*
5999  * Find the largest mapping size for this page.
6000  */
6001 int
6002 fnd_mapping_sz(page_t *pp)
6003 {
6004 	int sz;
6005 	int p_index;
6006 
6007 	p_index = PP_MAPINDEX(pp);
6008 
6009 	sz = 0;
6010 	p_index >>= 1;	/* don't care about 8K bit */
6011 	for (; p_index; p_index >>= 1) {
6012 		sz++;
6013 	}
6014 
6015 	return (sz);
6016 }
6017 
6018 /*
6019  * This function unloads a range of addresses for an hmeblk.
6020  * It returns the next address to be unloaded.
6021  * It should be called with the hash lock held.
6022  */
6023 static caddr_t
6024 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6025 	caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
6026 {
6027 	tte_t	tte, ttemod;
6028 	struct	sf_hment *sfhmep;
6029 	int	ttesz;
6030 	long	ttecnt;
6031 	page_t *pp;
6032 	kmutex_t *pml;
6033 	int ret;
6034 	int use_demap_range;
6035 
6036 	ASSERT(in_hblk_range(hmeblkp, addr));
6037 	ASSERT(!hmeblkp->hblk_shw_bit);
6038 	ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
6039 	ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
6040 	ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
6041 
6042 #ifdef DEBUG
6043 	if (get_hblk_ttesz(hmeblkp) != TTE8K &&
6044 	    (endaddr < get_hblk_endaddr(hmeblkp))) {
6045 		panic("sfmmu_hblk_unload: partial unload of large page");
6046 	}
6047 #endif /* DEBUG */
6048 
6049 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6050 	ttesz = get_hblk_ttesz(hmeblkp);
6051 
6052 	use_demap_range = ((dmrp == NULL) ||
6053 	    (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
6054 
6055 	if (use_demap_range) {
6056 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
6057 	} else {
6058 		DEMAP_RANGE_FLUSH(dmrp);
6059 	}
6060 	ttecnt = 0;
6061 	HBLKTOHME(sfhmep, hmeblkp, addr);
6062 
6063 	while (addr < endaddr) {
6064 		pml = NULL;
6065 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
6066 		if (TTE_IS_VALID(&tte)) {
6067 			pp = sfhmep->hme_page;
6068 			if (pp != NULL) {
6069 				pml = sfmmu_mlist_enter(pp);
6070 			}
6071 
6072 			/*
6073 			 * Verify if hme still points to 'pp' now that
6074 			 * we have p_mapping lock.
6075 			 */
6076 			if (sfhmep->hme_page != pp) {
6077 				if (pp != NULL && sfhmep->hme_page != NULL) {
6078 					ASSERT(pml != NULL);
6079 					sfmmu_mlist_exit(pml);
6080 					/* Re-start this iteration. */
6081 					continue;
6082 				}
6083 				ASSERT((pp != NULL) &&
6084 				    (sfhmep->hme_page == NULL));
6085 				goto tte_unloaded;
6086 			}
6087 
6088 			/*
6089 			 * This point on we have both HASH and p_mapping
6090 			 * lock.
6091 			 */
6092 			ASSERT(pp == sfhmep->hme_page);
6093 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6094 
6095 			/*
6096 			 * We need to loop on modify tte because it is
6097 			 * possible for pagesync to come along and
6098 			 * change the software bits beneath us.
6099 			 *
6100 			 * Page_unload can also invalidate the tte after
6101 			 * we read tte outside of p_mapping lock.
6102 			 */
6103 again:
6104 			ttemod = tte;
6105 
6106 			TTE_SET_INVALID(&ttemod);
6107 			ret = sfmmu_modifytte_try(&tte, &ttemod,
6108 			    &sfhmep->hme_tte);
6109 
6110 			if (ret <= 0) {
6111 				if (TTE_IS_VALID(&tte)) {
6112 					ASSERT(ret < 0);
6113 					goto again;
6114 				}
6115 				if (pp != NULL) {
6116 					panic("sfmmu_hblk_unload: pp = 0x%p "
6117 					    "tte became invalid under mlist"
6118 					    " lock = 0x%p", (void *)pp,
6119 					    (void *)pml);
6120 				}
6121 				continue;
6122 			}
6123 
6124 			if (!(flags & HAT_UNLOAD_NOSYNC)) {
6125 				sfmmu_ttesync(sfmmup, addr, &tte, pp);
6126 			}
6127 
6128 			/*
6129 			 * Ok- we invalidated the tte. Do the rest of the job.
6130 			 */
6131 			ttecnt++;
6132 
6133 			if (flags & HAT_UNLOAD_UNLOCK) {
6134 				ASSERT(hmeblkp->hblk_lckcnt > 0);
6135 				atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
6136 				HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6137 			}
6138 
6139 			/*
6140 			 * Normally we would need to flush the page
6141 			 * from the virtual cache at this point in
6142 			 * order to prevent a potential cache alias
6143 			 * inconsistency.
6144 			 * The particular scenario we need to worry
6145 			 * about is:
6146 			 * Given:  va1 and va2 are two virtual address
6147 			 * that alias and map the same physical
6148 			 * address.
6149 			 * 1.   mapping exists from va1 to pa and data
6150 			 * has been read into the cache.
6151 			 * 2.   unload va1.
6152 			 * 3.   load va2 and modify data using va2.
6153 			 * 4    unload va2.
6154 			 * 5.   load va1 and reference data.  Unless we
6155 			 * flush the data cache when we unload we will
6156 			 * get stale data.
6157 			 * Fortunately, page coloring eliminates the
6158 			 * above scenario by remembering the color a
6159 			 * physical page was last or is currently
6160 			 * mapped to.  Now, we delay the flush until
6161 			 * the loading of translations.  Only when the
6162 			 * new translation is of a different color
6163 			 * are we forced to flush.
6164 			 */
6165 			if (use_demap_range) {
6166 				/*
6167 				 * Mark this page as needing a demap.
6168 				 */
6169 				DEMAP_RANGE_MARKPG(dmrp, addr);
6170 			} else {
6171 				ASSERT(sfmmup != NULL);
6172 				ASSERT(!hmeblkp->hblk_shared);
6173 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6174 				    sfmmup->sfmmu_free, 0);
6175 			}
6176 
6177 			if (pp) {
6178 				/*
6179 				 * Remove the hment from the mapping list
6180 				 */
6181 				ASSERT(hmeblkp->hblk_hmecnt > 0);
6182 
6183 				/*
6184 				 * Again, we cannot
6185 				 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6186 				 */
6187 				HME_SUB(sfhmep, pp);
6188 				membar_stst();
6189 				atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
6190 			}
6191 
6192 			ASSERT(hmeblkp->hblk_vcnt > 0);
6193 			atomic_add_16(&hmeblkp->hblk_vcnt, -1);
6194 
6195 			ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6196 			    !hmeblkp->hblk_lckcnt);
6197 
6198 #ifdef VAC
6199 			if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6200 				if (PP_ISTNC(pp)) {
6201 					/*
6202 					 * If page was temporary
6203 					 * uncached, try to recache
6204 					 * it. Note that HME_SUB() was
6205 					 * called above so p_index and
6206 					 * mlist had been updated.
6207 					 */
6208 					conv_tnc(pp, ttesz);
6209 				} else if (pp->p_mapping == NULL) {
6210 					ASSERT(kpm_enable);
6211 					/*
6212 					 * Page is marked to be in VAC conflict
6213 					 * to an existing kpm mapping and/or is
6214 					 * kpm mapped using only the regular
6215 					 * pagesize.
6216 					 */
6217 					sfmmu_kpm_hme_unload(pp);
6218 				}
6219 			}
6220 #endif	/* VAC */
6221 		} else if ((pp = sfhmep->hme_page) != NULL) {
6222 				/*
6223 				 * TTE is invalid but the hme
6224 				 * still exists. let pageunload
6225 				 * complete its job.
6226 				 */
6227 				ASSERT(pml == NULL);
6228 				pml = sfmmu_mlist_enter(pp);
6229 				if (sfhmep->hme_page != NULL) {
6230 					sfmmu_mlist_exit(pml);
6231 					continue;
6232 				}
6233 				ASSERT(sfhmep->hme_page == NULL);
6234 		} else if (hmeblkp->hblk_hmecnt != 0) {
6235 			/*
6236 			 * pageunload may have not finished decrementing
6237 			 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
6238 			 * wait for pageunload to finish. Rely on pageunload
6239 			 * to decrement hblk_hmecnt after hblk_vcnt.
6240 			 */
6241 			pfn_t pfn = TTE_TO_TTEPFN(&tte);
6242 			ASSERT(pml == NULL);
6243 			if (pf_is_memory(pfn)) {
6244 				pp = page_numtopp_nolock(pfn);
6245 				if (pp != NULL) {
6246 					pml = sfmmu_mlist_enter(pp);
6247 					sfmmu_mlist_exit(pml);
6248 					pml = NULL;
6249 				}
6250 			}
6251 		}
6252 
6253 tte_unloaded:
6254 		/*
6255 		 * At this point, the tte we are looking at
6256 		 * should be unloaded, and hme has been unlinked
6257 		 * from page too. This is important because in
6258 		 * pageunload, it does ttesync() then HME_SUB.
6259 		 * We need to make sure HME_SUB has been completed
6260 		 * so we know ttesync() has been completed. Otherwise,
6261 		 * at exit time, after return from hat layer, VM will
6262 		 * release as structure which hat_setstat() (called
6263 		 * by ttesync()) needs.
6264 		 */
6265 #ifdef DEBUG
6266 		{
6267 			tte_t	dtte;
6268 
6269 			ASSERT(sfhmep->hme_page == NULL);
6270 
6271 			sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6272 			ASSERT(!TTE_IS_VALID(&dtte));
6273 		}
6274 #endif
6275 
6276 		if (pml) {
6277 			sfmmu_mlist_exit(pml);
6278 		}
6279 
6280 		addr += TTEBYTES(ttesz);
6281 		sfhmep++;
6282 		DEMAP_RANGE_NEXTPG(dmrp);
6283 	}
6284 	/*
6285 	 * For shared hmeblks this routine is only called when region is freed
6286 	 * and no longer referenced.  So no need to decrement ttecnt
6287 	 * in the region structure here.
6288 	 */
6289 	if (ttecnt > 0 && sfmmup != NULL) {
6290 		atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6291 	}
6292 	return (addr);
6293 }
6294 
6295 /*
6296  * Invalidate a virtual address range for the local CPU.
6297  * For best performance ensure that the va range is completely
6298  * mapped, otherwise the entire TLB will be flushed.
6299  */
6300 void
6301 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6302 {
6303 	ssize_t sz;
6304 	caddr_t endva = va + size;
6305 
6306 	while (va < endva) {
6307 		sz = hat_getpagesize(sfmmup, va);
6308 		if (sz < 0) {
6309 			vtag_flushall();
6310 			break;
6311 		}
6312 		vtag_flushpage(va, (uint64_t)sfmmup);
6313 		va += sz;
6314 	}
6315 }
6316 
6317 /*
6318  * Synchronize all the mappings in the range [addr..addr+len).
6319  * Can be called with clearflag having two states:
6320  * HAT_SYNC_DONTZERO means just return the rm stats
6321  * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6322  */
6323 void
6324 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6325 {
6326 	struct hmehash_bucket *hmebp;
6327 	hmeblk_tag hblktag;
6328 	int hmeshift, hashno = 1;
6329 	struct hme_blk *hmeblkp, *list = NULL;
6330 	caddr_t endaddr;
6331 	cpuset_t cpuset;
6332 
6333 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
6334 	ASSERT((sfmmup == ksfmmup) ||
6335 	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
6336 	ASSERT((len & MMU_PAGEOFFSET) == 0);
6337 	ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6338 	    (clearflag == HAT_SYNC_ZERORM));
6339 
6340 	CPUSET_ZERO(cpuset);
6341 
6342 	endaddr = addr + len;
6343 	hblktag.htag_id = sfmmup;
6344 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6345 
6346 	/*
6347 	 * Spitfire supports 4 page sizes.
6348 	 * Most pages are expected to be of the smallest page
6349 	 * size (8K) and these will not need to be rehashed. 64K
6350 	 * pages also don't need to be rehashed because the an hmeblk
6351 	 * spans 64K of address space. 512K pages might need 1 rehash and
6352 	 * and 4M pages 2 rehashes.
6353 	 */
6354 	while (addr < endaddr) {
6355 		hmeshift = HME_HASH_SHIFT(hashno);
6356 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6357 		hblktag.htag_rehash = hashno;
6358 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6359 
6360 		SFMMU_HASH_LOCK(hmebp);
6361 
6362 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6363 		if (hmeblkp != NULL) {
6364 			ASSERT(!hmeblkp->hblk_shared);
6365 			/*
6366 			 * We've encountered a shadow hmeblk so skip the range
6367 			 * of the next smaller mapping size.
6368 			 */
6369 			if (hmeblkp->hblk_shw_bit) {
6370 				ASSERT(sfmmup != ksfmmup);
6371 				ASSERT(hashno > 1);
6372 				addr = (caddr_t)P2END((uintptr_t)addr,
6373 				    TTEBYTES(hashno - 1));
6374 			} else {
6375 				addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6376 				    addr, endaddr, clearflag);
6377 			}
6378 			SFMMU_HASH_UNLOCK(hmebp);
6379 			hashno = 1;
6380 			continue;
6381 		}
6382 		SFMMU_HASH_UNLOCK(hmebp);
6383 
6384 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6385 			/*
6386 			 * We have traversed the whole list and rehashed
6387 			 * if necessary without finding the address to sync.
6388 			 * This is ok so we increment the address by the
6389 			 * smallest hmeblk range for kernel mappings and the
6390 			 * largest hmeblk range, to account for shadow hmeblks,
6391 			 * for user mappings and continue.
6392 			 */
6393 			if (sfmmup == ksfmmup)
6394 				addr = (caddr_t)P2END((uintptr_t)addr,
6395 				    TTEBYTES(1));
6396 			else
6397 				addr = (caddr_t)P2END((uintptr_t)addr,
6398 				    TTEBYTES(hashno));
6399 			hashno = 1;
6400 		} else {
6401 			hashno++;
6402 		}
6403 	}
6404 	sfmmu_hblks_list_purge(&list, 0);
6405 	cpuset = sfmmup->sfmmu_cpusran;
6406 	xt_sync(cpuset);
6407 }
6408 
6409 static caddr_t
6410 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6411 	caddr_t endaddr, int clearflag)
6412 {
6413 	tte_t	tte, ttemod;
6414 	struct sf_hment *sfhmep;
6415 	int ttesz;
6416 	struct page *pp;
6417 	kmutex_t *pml;
6418 	int ret;
6419 
6420 	ASSERT(hmeblkp->hblk_shw_bit == 0);
6421 	ASSERT(!hmeblkp->hblk_shared);
6422 
6423 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6424 
6425 	ttesz = get_hblk_ttesz(hmeblkp);
6426 	HBLKTOHME(sfhmep, hmeblkp, addr);
6427 
6428 	while (addr < endaddr) {
6429 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
6430 		if (TTE_IS_VALID(&tte)) {
6431 			pml = NULL;
6432 			pp = sfhmep->hme_page;
6433 			if (pp) {
6434 				pml = sfmmu_mlist_enter(pp);
6435 			}
6436 			if (pp != sfhmep->hme_page) {
6437 				/*
6438 				 * tte most have been unloaded
6439 				 * underneath us.  Recheck
6440 				 */
6441 				ASSERT(pml);
6442 				sfmmu_mlist_exit(pml);
6443 				continue;
6444 			}
6445 
6446 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6447 
6448 			if (clearflag == HAT_SYNC_ZERORM) {
6449 				ttemod = tte;
6450 				TTE_CLR_RM(&ttemod);
6451 				ret = sfmmu_modifytte_try(&tte, &ttemod,
6452 				    &sfhmep->hme_tte);
6453 				if (ret < 0) {
6454 					if (pml) {
6455 						sfmmu_mlist_exit(pml);
6456 					}
6457 					continue;
6458 				}
6459 
6460 				if (ret > 0) {
6461 					sfmmu_tlb_demap(addr, sfmmup,
6462 					    hmeblkp, 0, 0);
6463 				}
6464 			}
6465 			sfmmu_ttesync(sfmmup, addr, &tte, pp);
6466 			if (pml) {
6467 				sfmmu_mlist_exit(pml);
6468 			}
6469 		}
6470 		addr += TTEBYTES(ttesz);
6471 		sfhmep++;
6472 	}
6473 	return (addr);
6474 }
6475 
6476 /*
6477  * This function will sync a tte to the page struct and it will
6478  * update the hat stats. Currently it allows us to pass a NULL pp
6479  * and we will simply update the stats.  We may want to change this
6480  * so we only keep stats for pages backed by pp's.
6481  */
6482 static void
6483 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6484 {
6485 	uint_t rm = 0;
6486 	int   	sz;
6487 	pgcnt_t	npgs;
6488 
6489 	ASSERT(TTE_IS_VALID(ttep));
6490 
6491 	if (TTE_IS_NOSYNC(ttep)) {
6492 		return;
6493 	}
6494 
6495 	if (TTE_IS_REF(ttep))  {
6496 		rm = P_REF;
6497 	}
6498 	if (TTE_IS_MOD(ttep))  {
6499 		rm |= P_MOD;
6500 	}
6501 
6502 	if (rm == 0) {
6503 		return;
6504 	}
6505 
6506 	sz = TTE_CSZ(ttep);
6507 	if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6508 		int i;
6509 		caddr_t	vaddr = addr;
6510 
6511 		for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6512 			hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6513 		}
6514 
6515 	}
6516 
6517 	/*
6518 	 * XXX I want to use cas to update nrm bits but they
6519 	 * currently belong in common/vm and not in hat where
6520 	 * they should be.
6521 	 * The nrm bits are protected by the same mutex as
6522 	 * the one that protects the page's mapping list.
6523 	 */
6524 	if (!pp)
6525 		return;
6526 	ASSERT(sfmmu_mlist_held(pp));
6527 	/*
6528 	 * If the tte is for a large page, we need to sync all the
6529 	 * pages covered by the tte.
6530 	 */
6531 	if (sz != TTE8K) {
6532 		ASSERT(pp->p_szc != 0);
6533 		pp = PP_GROUPLEADER(pp, sz);
6534 		ASSERT(sfmmu_mlist_held(pp));
6535 	}
6536 
6537 	/* Get number of pages from tte size. */
6538 	npgs = TTEPAGES(sz);
6539 
6540 	do {
6541 		ASSERT(pp);
6542 		ASSERT(sfmmu_mlist_held(pp));
6543 		if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6544 		    ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6545 			hat_page_setattr(pp, rm);
6546 
6547 		/*
6548 		 * Are we done? If not, we must have a large mapping.
6549 		 * For large mappings we need to sync the rest of the pages
6550 		 * covered by this tte; goto the next page.
6551 		 */
6552 	} while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6553 }
6554 
6555 /*
6556  * Execute pre-callback handler of each pa_hment linked to pp
6557  *
6558  * Inputs:
6559  *   flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6560  *   capture_cpus: pointer to return value (below)
6561  *
6562  * Returns:
6563  *   Propagates the subsystem callback return values back to the caller;
6564  *   returns 0 on success.  If capture_cpus is non-NULL, the value returned
6565  *   is zero if all of the pa_hments are of a type that do not require
6566  *   capturing CPUs prior to suspending the mapping, else it is 1.
6567  */
6568 static int
6569 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6570 {
6571 	struct sf_hment	*sfhmep;
6572 	struct pa_hment *pahmep;
6573 	int (*f)(caddr_t, uint_t, uint_t, void *);
6574 	int		ret;
6575 	id_t		id;
6576 	int		locked = 0;
6577 	kmutex_t	*pml;
6578 
6579 	ASSERT(PAGE_EXCL(pp));
6580 	if (!sfmmu_mlist_held(pp)) {
6581 		pml = sfmmu_mlist_enter(pp);
6582 		locked = 1;
6583 	}
6584 
6585 	if (capture_cpus)
6586 		*capture_cpus = 0;
6587 
6588 top:
6589 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6590 		/*
6591 		 * skip sf_hments corresponding to VA<->PA mappings;
6592 		 * for pa_hment's, hme_tte.ll is zero
6593 		 */
6594 		if (!IS_PAHME(sfhmep))
6595 			continue;
6596 
6597 		pahmep = sfhmep->hme_data;
6598 		ASSERT(pahmep != NULL);
6599 
6600 		/*
6601 		 * skip if pre-handler has been called earlier in this loop
6602 		 */
6603 		if (pahmep->flags & flag)
6604 			continue;
6605 
6606 		id = pahmep->cb_id;
6607 		ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6608 		if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6609 			*capture_cpus = 1;
6610 		if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6611 			pahmep->flags |= flag;
6612 			continue;
6613 		}
6614 
6615 		/*
6616 		 * Drop the mapping list lock to avoid locking order issues.
6617 		 */
6618 		if (locked)
6619 			sfmmu_mlist_exit(pml);
6620 
6621 		ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6622 		if (ret != 0)
6623 			return (ret);	/* caller must do the cleanup */
6624 
6625 		if (locked) {
6626 			pml = sfmmu_mlist_enter(pp);
6627 			pahmep->flags |= flag;
6628 			goto top;
6629 		}
6630 
6631 		pahmep->flags |= flag;
6632 	}
6633 
6634 	if (locked)
6635 		sfmmu_mlist_exit(pml);
6636 
6637 	return (0);
6638 }
6639 
6640 /*
6641  * Execute post-callback handler of each pa_hment linked to pp
6642  *
6643  * Same overall assumptions and restrictions apply as for
6644  * hat_pageprocess_precallbacks().
6645  */
6646 static void
6647 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6648 {
6649 	pfn_t pgpfn = pp->p_pagenum;
6650 	pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6651 	pfn_t newpfn;
6652 	struct sf_hment *sfhmep;
6653 	struct pa_hment *pahmep;
6654 	int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6655 	id_t	id;
6656 	int	locked = 0;
6657 	kmutex_t *pml;
6658 
6659 	ASSERT(PAGE_EXCL(pp));
6660 	if (!sfmmu_mlist_held(pp)) {
6661 		pml = sfmmu_mlist_enter(pp);
6662 		locked = 1;
6663 	}
6664 
6665 top:
6666 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6667 		/*
6668 		 * skip sf_hments corresponding to VA<->PA mappings;
6669 		 * for pa_hment's, hme_tte.ll is zero
6670 		 */
6671 		if (!IS_PAHME(sfhmep))
6672 			continue;
6673 
6674 		pahmep = sfhmep->hme_data;
6675 		ASSERT(pahmep != NULL);
6676 
6677 		if ((pahmep->flags & flag) == 0)
6678 			continue;
6679 
6680 		pahmep->flags &= ~flag;
6681 
6682 		id = pahmep->cb_id;
6683 		ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6684 		if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6685 			continue;
6686 
6687 		/*
6688 		 * Convert the base page PFN into the constituent PFN
6689 		 * which is needed by the callback handler.
6690 		 */
6691 		newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6692 
6693 		/*
6694 		 * Drop the mapping list lock to avoid locking order issues.
6695 		 */
6696 		if (locked)
6697 			sfmmu_mlist_exit(pml);
6698 
6699 		if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6700 		    != 0)
6701 			panic("sfmmu: posthandler failed");
6702 
6703 		if (locked) {
6704 			pml = sfmmu_mlist_enter(pp);
6705 			goto top;
6706 		}
6707 	}
6708 
6709 	if (locked)
6710 		sfmmu_mlist_exit(pml);
6711 }
6712 
6713 /*
6714  * Suspend locked kernel mapping
6715  */
6716 void
6717 hat_pagesuspend(struct page *pp)
6718 {
6719 	struct sf_hment *sfhmep;
6720 	sfmmu_t *sfmmup;
6721 	tte_t tte, ttemod;
6722 	struct hme_blk *hmeblkp;
6723 	caddr_t addr;
6724 	int index, cons;
6725 	cpuset_t cpuset;
6726 
6727 	ASSERT(PAGE_EXCL(pp));
6728 	ASSERT(sfmmu_mlist_held(pp));
6729 
6730 	mutex_enter(&kpr_suspendlock);
6731 
6732 	/*
6733 	 * We're about to suspend a kernel mapping so mark this thread as
6734 	 * non-traceable by DTrace. This prevents us from running into issues
6735 	 * with probe context trying to touch a suspended page
6736 	 * in the relocation codepath itself.
6737 	 */
6738 	curthread->t_flag |= T_DONTDTRACE;
6739 
6740 	index = PP_MAPINDEX(pp);
6741 	cons = TTE8K;
6742 
6743 retry:
6744 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6745 
6746 		if (IS_PAHME(sfhmep))
6747 			continue;
6748 
6749 		if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6750 			continue;
6751 
6752 		/*
6753 		 * Loop until we successfully set the suspend bit in
6754 		 * the TTE.
6755 		 */
6756 again:
6757 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
6758 		ASSERT(TTE_IS_VALID(&tte));
6759 
6760 		ttemod = tte;
6761 		TTE_SET_SUSPEND(&ttemod);
6762 		if (sfmmu_modifytte_try(&tte, &ttemod,
6763 		    &sfhmep->hme_tte) < 0)
6764 			goto again;
6765 
6766 		/*
6767 		 * Invalidate TSB entry
6768 		 */
6769 		hmeblkp = sfmmu_hmetohblk(sfhmep);
6770 
6771 		sfmmup = hblktosfmmu(hmeblkp);
6772 		ASSERT(sfmmup == ksfmmup);
6773 		ASSERT(!hmeblkp->hblk_shared);
6774 
6775 		addr = tte_to_vaddr(hmeblkp, tte);
6776 
6777 		/*
6778 		 * No need to make sure that the TSB for this sfmmu is
6779 		 * not being relocated since it is ksfmmup and thus it
6780 		 * will never be relocated.
6781 		 */
6782 		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6783 
6784 		/*
6785 		 * Update xcall stats
6786 		 */
6787 		cpuset = cpu_ready_set;
6788 		CPUSET_DEL(cpuset, CPU->cpu_id);
6789 
6790 		/* LINTED: constant in conditional context */
6791 		SFMMU_XCALL_STATS(ksfmmup);
6792 
6793 		/*
6794 		 * Flush TLB entry on remote CPU's
6795 		 */
6796 		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6797 		    (uint64_t)ksfmmup);
6798 		xt_sync(cpuset);
6799 
6800 		/*
6801 		 * Flush TLB entry on local CPU
6802 		 */
6803 		vtag_flushpage(addr, (uint64_t)ksfmmup);
6804 	}
6805 
6806 	while (index != 0) {
6807 		index = index >> 1;
6808 		if (index != 0)
6809 			cons++;
6810 		if (index & 0x1) {
6811 			pp = PP_GROUPLEADER(pp, cons);
6812 			goto retry;
6813 		}
6814 	}
6815 }
6816 
6817 #ifdef	DEBUG
6818 
6819 #define	N_PRLE	1024
6820 struct prle {
6821 	page_t *targ;
6822 	page_t *repl;
6823 	int status;
6824 	int pausecpus;
6825 	hrtime_t whence;
6826 };
6827 
6828 static struct prle page_relocate_log[N_PRLE];
6829 static int prl_entry;
6830 static kmutex_t prl_mutex;
6831 
6832 #define	PAGE_RELOCATE_LOG(t, r, s, p)					\
6833 	mutex_enter(&prl_mutex);					\
6834 	page_relocate_log[prl_entry].targ = *(t);			\
6835 	page_relocate_log[prl_entry].repl = *(r);			\
6836 	page_relocate_log[prl_entry].status = (s);			\
6837 	page_relocate_log[prl_entry].pausecpus = (p);			\
6838 	page_relocate_log[prl_entry].whence = gethrtime();		\
6839 	prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1;	\
6840 	mutex_exit(&prl_mutex);
6841 
6842 #else	/* !DEBUG */
6843 #define	PAGE_RELOCATE_LOG(t, r, s, p)
6844 #endif
6845 
6846 /*
6847  * Core Kernel Page Relocation Algorithm
6848  *
6849  * Input:
6850  *
6851  * target : 	constituent pages are SE_EXCL locked.
6852  * replacement:	constituent pages are SE_EXCL locked.
6853  *
6854  * Output:
6855  *
6856  * nrelocp:	number of pages relocated
6857  */
6858 int
6859 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6860 {
6861 	page_t		*targ, *repl;
6862 	page_t		*tpp, *rpp;
6863 	kmutex_t	*low, *high;
6864 	spgcnt_t	npages, i;
6865 	page_t		*pl = NULL;
6866 	int		old_pil;
6867 	cpuset_t	cpuset;
6868 	int		cap_cpus;
6869 	int		ret;
6870 #ifdef VAC
6871 	int		cflags = 0;
6872 #endif
6873 
6874 	if (!kcage_on || PP_ISNORELOC(*target)) {
6875 		PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6876 		return (EAGAIN);
6877 	}
6878 
6879 	mutex_enter(&kpr_mutex);
6880 	kreloc_thread = curthread;
6881 
6882 	targ = *target;
6883 	repl = *replacement;
6884 	ASSERT(repl != NULL);
6885 	ASSERT(targ->p_szc == repl->p_szc);
6886 
6887 	npages = page_get_pagecnt(targ->p_szc);
6888 
6889 	/*
6890 	 * unload VA<->PA mappings that are not locked
6891 	 */
6892 	tpp = targ;
6893 	for (i = 0; i < npages; i++) {
6894 		(void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6895 		tpp++;
6896 	}
6897 
6898 	/*
6899 	 * Do "presuspend" callbacks, in a context from which we can still
6900 	 * block as needed. Note that we don't hold the mapping list lock
6901 	 * of "targ" at this point due to potential locking order issues;
6902 	 * we assume that between the hat_pageunload() above and holding
6903 	 * the SE_EXCL lock that the mapping list *cannot* change at this
6904 	 * point.
6905 	 */
6906 	ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6907 	if (ret != 0) {
6908 		/*
6909 		 * EIO translates to fatal error, for all others cleanup
6910 		 * and return EAGAIN.
6911 		 */
6912 		ASSERT(ret != EIO);
6913 		hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6914 		PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6915 		kreloc_thread = NULL;
6916 		mutex_exit(&kpr_mutex);
6917 		return (EAGAIN);
6918 	}
6919 
6920 	/*
6921 	 * acquire p_mapping list lock for both the target and replacement
6922 	 * root pages.
6923 	 *
6924 	 * low and high refer to the need to grab the mlist locks in a
6925 	 * specific order in order to prevent race conditions.  Thus the
6926 	 * lower lock must be grabbed before the higher lock.
6927 	 *
6928 	 * This will block hat_unload's accessing p_mapping list.  Since
6929 	 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6930 	 * blocked.  Thus, no one else will be accessing the p_mapping list
6931 	 * while we suspend and reload the locked mapping below.
6932 	 */
6933 	tpp = targ;
6934 	rpp = repl;
6935 	sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6936 
6937 	kpreempt_disable();
6938 
6939 	/*
6940 	 * We raise our PIL to 13 so that we don't get captured by
6941 	 * another CPU or pinned by an interrupt thread.  We can't go to
6942 	 * PIL 14 since the nexus driver(s) may need to interrupt at
6943 	 * that level in the case of IOMMU pseudo mappings.
6944 	 */
6945 	cpuset = cpu_ready_set;
6946 	CPUSET_DEL(cpuset, CPU->cpu_id);
6947 	if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6948 		old_pil = splr(XCALL_PIL);
6949 	} else {
6950 		old_pil = -1;
6951 		xc_attention(cpuset);
6952 	}
6953 	ASSERT(getpil() == XCALL_PIL);
6954 
6955 	/*
6956 	 * Now do suspend callbacks. In the case of an IOMMU mapping
6957 	 * this will suspend all DMA activity to the page while it is
6958 	 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6959 	 * may be captured at this point we should have acquired any needed
6960 	 * locks in the presuspend callback.
6961 	 */
6962 	ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6963 	if (ret != 0) {
6964 		repl = targ;
6965 		goto suspend_fail;
6966 	}
6967 
6968 	/*
6969 	 * Raise the PIL yet again, this time to block all high-level
6970 	 * interrupts on this CPU. This is necessary to prevent an
6971 	 * interrupt routine from pinning the thread which holds the
6972 	 * mapping suspended and then touching the suspended page.
6973 	 *
6974 	 * Once the page is suspended we also need to be careful to
6975 	 * avoid calling any functions which touch any seg_kmem memory
6976 	 * since that memory may be backed by the very page we are
6977 	 * relocating in here!
6978 	 */
6979 	hat_pagesuspend(targ);
6980 
6981 	/*
6982 	 * Now that we are confident everybody has stopped using this page,
6983 	 * copy the page contents.  Note we use a physical copy to prevent
6984 	 * locking issues and to avoid fpRAS because we can't handle it in
6985 	 * this context.
6986 	 */
6987 	for (i = 0; i < npages; i++, tpp++, rpp++) {
6988 #ifdef VAC
6989 		/*
6990 		 * If the replacement has a different vcolor than
6991 		 * the one being replacd, we need to handle VAC
6992 		 * consistency for it just as we were setting up
6993 		 * a new mapping to it.
6994 		 */
6995 		if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
6996 		    (tpp->p_vcolor != rpp->p_vcolor) &&
6997 		    !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
6998 			CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
6999 			sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
7000 			    rpp->p_pagenum);
7001 		}
7002 #endif
7003 		/*
7004 		 * Copy the contents of the page.
7005 		 */
7006 		ppcopy_kernel(tpp, rpp);
7007 	}
7008 
7009 	tpp = targ;
7010 	rpp = repl;
7011 	for (i = 0; i < npages; i++, tpp++, rpp++) {
7012 		/*
7013 		 * Copy attributes.  VAC consistency was handled above,
7014 		 * if required.
7015 		 */
7016 		rpp->p_nrm = tpp->p_nrm;
7017 		tpp->p_nrm = 0;
7018 		rpp->p_index = tpp->p_index;
7019 		tpp->p_index = 0;
7020 #ifdef VAC
7021 		rpp->p_vcolor = tpp->p_vcolor;
7022 #endif
7023 	}
7024 
7025 	/*
7026 	 * First, unsuspend the page, if we set the suspend bit, and transfer
7027 	 * the mapping list from the target page to the replacement page.
7028 	 * Next process postcallbacks; since pa_hment's are linked only to the
7029 	 * p_mapping list of root page, we don't iterate over the constituent
7030 	 * pages.
7031 	 */
7032 	hat_pagereload(targ, repl);
7033 
7034 suspend_fail:
7035 	hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
7036 
7037 	/*
7038 	 * Now lower our PIL and release any captured CPUs since we
7039 	 * are out of the "danger zone".  After this it will again be
7040 	 * safe to acquire adaptive mutex locks, or to drop them...
7041 	 */
7042 	if (old_pil != -1) {
7043 		splx(old_pil);
7044 	} else {
7045 		xc_dismissed(cpuset);
7046 	}
7047 
7048 	kpreempt_enable();
7049 
7050 	sfmmu_mlist_reloc_exit(low, high);
7051 
7052 	/*
7053 	 * Postsuspend callbacks should drop any locks held across
7054 	 * the suspend callbacks.  As before, we don't hold the mapping
7055 	 * list lock at this point.. our assumption is that the mapping
7056 	 * list still can't change due to our holding SE_EXCL lock and
7057 	 * there being no unlocked mappings left. Hence the restriction
7058 	 * on calling context to hat_delete_callback()
7059 	 */
7060 	hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
7061 	if (ret != 0) {
7062 		/*
7063 		 * The second presuspend call failed: we got here through
7064 		 * the suspend_fail label above.
7065 		 */
7066 		ASSERT(ret != EIO);
7067 		PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
7068 		kreloc_thread = NULL;
7069 		mutex_exit(&kpr_mutex);
7070 		return (EAGAIN);
7071 	}
7072 
7073 	/*
7074 	 * Now that we're out of the performance critical section we can
7075 	 * take care of updating the hash table, since we still
7076 	 * hold all the pages locked SE_EXCL at this point we
7077 	 * needn't worry about things changing out from under us.
7078 	 */
7079 	tpp = targ;
7080 	rpp = repl;
7081 	for (i = 0; i < npages; i++, tpp++, rpp++) {
7082 
7083 		/*
7084 		 * replace targ with replacement in page_hash table
7085 		 */
7086 		targ = tpp;
7087 		page_relocate_hash(rpp, targ);
7088 
7089 		/*
7090 		 * concatenate target; caller of platform_page_relocate()
7091 		 * expects target to be concatenated after returning.
7092 		 */
7093 		ASSERT(targ->p_next == targ);
7094 		ASSERT(targ->p_prev == targ);
7095 		page_list_concat(&pl, &targ);
7096 	}
7097 
7098 	ASSERT(*target == pl);
7099 	*nrelocp = npages;
7100 	PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
7101 	kreloc_thread = NULL;
7102 	mutex_exit(&kpr_mutex);
7103 	return (0);
7104 }
7105 
7106 /*
7107  * Called when stray pa_hments are found attached to a page which is
7108  * being freed.  Notify the subsystem which attached the pa_hment of
7109  * the error if it registered a suitable handler, else panic.
7110  */
7111 static void
7112 sfmmu_pahment_leaked(struct pa_hment *pahmep)
7113 {
7114 	id_t cb_id = pahmep->cb_id;
7115 
7116 	ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
7117 	if (sfmmu_cb_table[cb_id].errhandler != NULL) {
7118 		if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
7119 		    HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
7120 			return;		/* non-fatal */
7121 	}
7122 	panic("pa_hment leaked: 0x%p", (void *)pahmep);
7123 }
7124 
7125 /*
7126  * Remove all mappings to page 'pp'.
7127  */
7128 int
7129 hat_pageunload(struct page *pp, uint_t forceflag)
7130 {
7131 	struct page *origpp = pp;
7132 	struct sf_hment *sfhme, *tmphme;
7133 	struct hme_blk *hmeblkp;
7134 	kmutex_t *pml;
7135 #ifdef VAC
7136 	kmutex_t *pmtx;
7137 #endif
7138 	cpuset_t cpuset, tset;
7139 	int index, cons;
7140 	int xhme_blks;
7141 	int pa_hments;
7142 
7143 	ASSERT(PAGE_EXCL(pp));
7144 
7145 retry_xhat:
7146 	tmphme = NULL;
7147 	xhme_blks = 0;
7148 	pa_hments = 0;
7149 	CPUSET_ZERO(cpuset);
7150 
7151 	pml = sfmmu_mlist_enter(pp);
7152 
7153 #ifdef VAC
7154 	if (pp->p_kpmref)
7155 		sfmmu_kpm_pageunload(pp);
7156 	ASSERT(!PP_ISMAPPED_KPM(pp));
7157 #endif
7158 	/*
7159 	 * Clear vpm reference. Since the page is exclusively locked
7160 	 * vpm cannot be referencing it.
7161 	 */
7162 	if (vpm_enable) {
7163 		pp->p_vpmref = 0;
7164 	}
7165 
7166 	index = PP_MAPINDEX(pp);
7167 	cons = TTE8K;
7168 retry:
7169 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7170 		tmphme = sfhme->hme_next;
7171 
7172 		if (IS_PAHME(sfhme)) {
7173 			ASSERT(sfhme->hme_data != NULL);
7174 			pa_hments++;
7175 			continue;
7176 		}
7177 
7178 		hmeblkp = sfmmu_hmetohblk(sfhme);
7179 		if (hmeblkp->hblk_xhat_bit) {
7180 			struct xhat_hme_blk *xblk =
7181 			    (struct xhat_hme_blk *)hmeblkp;
7182 
7183 			(void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
7184 			    pp, forceflag, XBLK2PROVBLK(xblk));
7185 
7186 			xhme_blks = 1;
7187 			continue;
7188 		}
7189 
7190 		/*
7191 		 * If there are kernel mappings don't unload them, they will
7192 		 * be suspended.
7193 		 */
7194 		if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7195 		    hmeblkp->hblk_tag.htag_id == ksfmmup)
7196 			continue;
7197 
7198 		tset = sfmmu_pageunload(pp, sfhme, cons);
7199 		CPUSET_OR(cpuset, tset);
7200 	}
7201 
7202 	while (index != 0) {
7203 		index = index >> 1;
7204 		if (index != 0)
7205 			cons++;
7206 		if (index & 0x1) {
7207 			/* Go to leading page */
7208 			pp = PP_GROUPLEADER(pp, cons);
7209 			ASSERT(sfmmu_mlist_held(pp));
7210 			goto retry;
7211 		}
7212 	}
7213 
7214 	/*
7215 	 * cpuset may be empty if the page was only mapped by segkpm,
7216 	 * in which case we won't actually cross-trap.
7217 	 */
7218 	xt_sync(cpuset);
7219 
7220 	/*
7221 	 * The page should have no mappings at this point, unless
7222 	 * we were called from hat_page_relocate() in which case we
7223 	 * leave the locked mappings which will be suspended later.
7224 	 */
7225 	ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
7226 	    (forceflag == SFMMU_KERNEL_RELOC));
7227 
7228 #ifdef VAC
7229 	if (PP_ISTNC(pp)) {
7230 		if (cons == TTE8K) {
7231 			pmtx = sfmmu_page_enter(pp);
7232 			PP_CLRTNC(pp);
7233 			sfmmu_page_exit(pmtx);
7234 		} else {
7235 			conv_tnc(pp, cons);
7236 		}
7237 	}
7238 #endif	/* VAC */
7239 
7240 	if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
7241 		/*
7242 		 * Unlink any pa_hments and free them, calling back
7243 		 * the responsible subsystem to notify it of the error.
7244 		 * This can occur in situations such as drivers leaking
7245 		 * DMA handles: naughty, but common enough that we'd like
7246 		 * to keep the system running rather than bringing it
7247 		 * down with an obscure error like "pa_hment leaked"
7248 		 * which doesn't aid the user in debugging their driver.
7249 		 */
7250 		for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7251 			tmphme = sfhme->hme_next;
7252 			if (IS_PAHME(sfhme)) {
7253 				struct pa_hment *pahmep = sfhme->hme_data;
7254 				sfmmu_pahment_leaked(pahmep);
7255 				HME_SUB(sfhme, pp);
7256 				kmem_cache_free(pa_hment_cache, pahmep);
7257 			}
7258 		}
7259 
7260 		ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
7261 	}
7262 
7263 	sfmmu_mlist_exit(pml);
7264 
7265 	/*
7266 	 * XHAT may not have finished unloading pages
7267 	 * because some other thread was waiting for
7268 	 * mlist lock and XHAT_PAGEUNLOAD let it do
7269 	 * the job.
7270 	 */
7271 	if (xhme_blks) {
7272 		pp = origpp;
7273 		goto retry_xhat;
7274 	}
7275 
7276 	return (0);
7277 }
7278 
7279 cpuset_t
7280 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7281 {
7282 	struct hme_blk *hmeblkp;
7283 	sfmmu_t *sfmmup;
7284 	tte_t tte, ttemod;
7285 #ifdef DEBUG
7286 	tte_t orig_old;
7287 #endif /* DEBUG */
7288 	caddr_t addr;
7289 	int ttesz;
7290 	int ret;
7291 	cpuset_t cpuset;
7292 
7293 	ASSERT(pp != NULL);
7294 	ASSERT(sfmmu_mlist_held(pp));
7295 	ASSERT(!PP_ISKAS(pp));
7296 
7297 	CPUSET_ZERO(cpuset);
7298 
7299 	hmeblkp = sfmmu_hmetohblk(sfhme);
7300 
7301 readtte:
7302 	sfmmu_copytte(&sfhme->hme_tte, &tte);
7303 	if (TTE_IS_VALID(&tte)) {
7304 		sfmmup = hblktosfmmu(hmeblkp);
7305 		ttesz = get_hblk_ttesz(hmeblkp);
7306 		/*
7307 		 * Only unload mappings of 'cons' size.
7308 		 */
7309 		if (ttesz != cons)
7310 			return (cpuset);
7311 
7312 		/*
7313 		 * Note that we have p_mapping lock, but no hash lock here.
7314 		 * hblk_unload() has to have both hash lock AND p_mapping
7315 		 * lock before it tries to modify tte. So, the tte could
7316 		 * not become invalid in the sfmmu_modifytte_try() below.
7317 		 */
7318 		ttemod = tte;
7319 #ifdef DEBUG
7320 		orig_old = tte;
7321 #endif /* DEBUG */
7322 
7323 		TTE_SET_INVALID(&ttemod);
7324 		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7325 		if (ret < 0) {
7326 #ifdef DEBUG
7327 			/* only R/M bits can change. */
7328 			chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7329 #endif /* DEBUG */
7330 			goto readtte;
7331 		}
7332 
7333 		if (ret == 0) {
7334 			panic("pageunload: cas failed?");
7335 		}
7336 
7337 		addr = tte_to_vaddr(hmeblkp, tte);
7338 
7339 		if (hmeblkp->hblk_shared) {
7340 			sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7341 			uint_t rid = hmeblkp->hblk_tag.htag_rid;
7342 			sf_region_t *rgnp;
7343 			ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7344 			ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7345 			ASSERT(srdp != NULL);
7346 			rgnp = srdp->srd_hmergnp[rid];
7347 			SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7348 			cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7349 			sfmmu_ttesync(NULL, addr, &tte, pp);
7350 			ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7351 			atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1);
7352 		} else {
7353 			sfmmu_ttesync(sfmmup, addr, &tte, pp);
7354 			atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1);
7355 
7356 			/*
7357 			 * We need to flush the page from the virtual cache
7358 			 * in order to prevent a virtual cache alias
7359 			 * inconsistency. The particular scenario we need
7360 			 * to worry about is:
7361 			 * Given:  va1 and va2 are two virtual address that
7362 			 * alias and will map the same physical address.
7363 			 * 1.   mapping exists from va1 to pa and data has
7364 			 *	been read into the cache.
7365 			 * 2.   unload va1.
7366 			 * 3.   load va2 and modify data using va2.
7367 			 * 4    unload va2.
7368 			 * 5.   load va1 and reference data.  Unless we flush
7369 			 *	the data cache when we unload we will get
7370 			 *	stale data.
7371 			 * This scenario is taken care of by using virtual
7372 			 * page coloring.
7373 			 */
7374 			if (sfmmup->sfmmu_ismhat) {
7375 				/*
7376 				 * Flush TSBs, TLBs and caches
7377 				 * of every process
7378 				 * sharing this ism segment.
7379 				 */
7380 				sfmmu_hat_lock_all();
7381 				mutex_enter(&ism_mlist_lock);
7382 				kpreempt_disable();
7383 				sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7384 				    pp->p_pagenum, CACHE_NO_FLUSH);
7385 				kpreempt_enable();
7386 				mutex_exit(&ism_mlist_lock);
7387 				sfmmu_hat_unlock_all();
7388 				cpuset = cpu_ready_set;
7389 			} else {
7390 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7391 				cpuset = sfmmup->sfmmu_cpusran;
7392 			}
7393 		}
7394 
7395 		/*
7396 		 * Hme_sub has to run after ttesync() and a_rss update.
7397 		 * See hblk_unload().
7398 		 */
7399 		HME_SUB(sfhme, pp);
7400 		membar_stst();
7401 
7402 		/*
7403 		 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7404 		 * since pteload may have done a HME_ADD() right after
7405 		 * we did the HME_SUB() above. Hmecnt is now maintained
7406 		 * by cas only. no lock guranteed its value. The only
7407 		 * gurantee we have is the hmecnt should not be less than
7408 		 * what it should be so the hblk will not be taken away.
7409 		 * It's also important that we decremented the hmecnt after
7410 		 * we are done with hmeblkp so that this hmeblk won't be
7411 		 * stolen.
7412 		 */
7413 		ASSERT(hmeblkp->hblk_hmecnt > 0);
7414 		ASSERT(hmeblkp->hblk_vcnt > 0);
7415 		atomic_add_16(&hmeblkp->hblk_vcnt, -1);
7416 		atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
7417 		/*
7418 		 * This is bug 4063182.
7419 		 * XXX: fixme
7420 		 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7421 		 *	!hmeblkp->hblk_lckcnt);
7422 		 */
7423 	} else {
7424 		panic("invalid tte? pp %p &tte %p",
7425 		    (void *)pp, (void *)&tte);
7426 	}
7427 
7428 	return (cpuset);
7429 }
7430 
7431 /*
7432  * While relocating a kernel page, this function will move the mappings
7433  * from tpp to dpp and modify any associated data with these mappings.
7434  * It also unsuspends the suspended kernel mapping.
7435  */
7436 static void
7437 hat_pagereload(struct page *tpp, struct page *dpp)
7438 {
7439 	struct sf_hment *sfhme;
7440 	tte_t tte, ttemod;
7441 	int index, cons;
7442 
7443 	ASSERT(getpil() == PIL_MAX);
7444 	ASSERT(sfmmu_mlist_held(tpp));
7445 	ASSERT(sfmmu_mlist_held(dpp));
7446 
7447 	index = PP_MAPINDEX(tpp);
7448 	cons = TTE8K;
7449 
7450 	/* Update real mappings to the page */
7451 retry:
7452 	for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7453 		if (IS_PAHME(sfhme))
7454 			continue;
7455 		sfmmu_copytte(&sfhme->hme_tte, &tte);
7456 		ttemod = tte;
7457 
7458 		/*
7459 		 * replace old pfn with new pfn in TTE
7460 		 */
7461 		PFN_TO_TTE(ttemod, dpp->p_pagenum);
7462 
7463 		/*
7464 		 * clear suspend bit
7465 		 */
7466 		ASSERT(TTE_IS_SUSPEND(&ttemod));
7467 		TTE_CLR_SUSPEND(&ttemod);
7468 
7469 		if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7470 			panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7471 
7472 		/*
7473 		 * set hme_page point to new page
7474 		 */
7475 		sfhme->hme_page = dpp;
7476 	}
7477 
7478 	/*
7479 	 * move p_mapping list from old page to new page
7480 	 */
7481 	dpp->p_mapping = tpp->p_mapping;
7482 	tpp->p_mapping = NULL;
7483 	dpp->p_share = tpp->p_share;
7484 	tpp->p_share = 0;
7485 
7486 	while (index != 0) {
7487 		index = index >> 1;
7488 		if (index != 0)
7489 			cons++;
7490 		if (index & 0x1) {
7491 			tpp = PP_GROUPLEADER(tpp, cons);
7492 			dpp = PP_GROUPLEADER(dpp, cons);
7493 			goto retry;
7494 		}
7495 	}
7496 
7497 	curthread->t_flag &= ~T_DONTDTRACE;
7498 	mutex_exit(&kpr_suspendlock);
7499 }
7500 
7501 uint_t
7502 hat_pagesync(struct page *pp, uint_t clearflag)
7503 {
7504 	struct sf_hment *sfhme, *tmphme = NULL;
7505 	struct hme_blk *hmeblkp;
7506 	kmutex_t *pml;
7507 	cpuset_t cpuset, tset;
7508 	int	index, cons;
7509 	extern	ulong_t po_share;
7510 	page_t	*save_pp = pp;
7511 	int	stop_on_sh = 0;
7512 	uint_t	shcnt;
7513 
7514 	CPUSET_ZERO(cpuset);
7515 
7516 	if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7517 		return (PP_GENERIC_ATTR(pp));
7518 	}
7519 
7520 	if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7521 		if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7522 			return (PP_GENERIC_ATTR(pp));
7523 		}
7524 		if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7525 			return (PP_GENERIC_ATTR(pp));
7526 		}
7527 		if (clearflag & HAT_SYNC_STOPON_SHARED) {
7528 			if (pp->p_share > po_share) {
7529 				hat_page_setattr(pp, P_REF);
7530 				return (PP_GENERIC_ATTR(pp));
7531 			}
7532 			stop_on_sh = 1;
7533 			shcnt = 0;
7534 		}
7535 	}
7536 
7537 	clearflag &= ~HAT_SYNC_STOPON_SHARED;
7538 	pml = sfmmu_mlist_enter(pp);
7539 	index = PP_MAPINDEX(pp);
7540 	cons = TTE8K;
7541 retry:
7542 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7543 		/*
7544 		 * We need to save the next hment on the list since
7545 		 * it is possible for pagesync to remove an invalid hment
7546 		 * from the list.
7547 		 */
7548 		tmphme = sfhme->hme_next;
7549 		if (IS_PAHME(sfhme))
7550 			continue;
7551 		/*
7552 		 * If we are looking for large mappings and this hme doesn't
7553 		 * reach the range we are seeking, just ignore it.
7554 		 */
7555 		hmeblkp = sfmmu_hmetohblk(sfhme);
7556 		if (hmeblkp->hblk_xhat_bit)
7557 			continue;
7558 
7559 		if (hme_size(sfhme) < cons)
7560 			continue;
7561 
7562 		if (stop_on_sh) {
7563 			if (hmeblkp->hblk_shared) {
7564 				sf_srd_t *srdp = hblktosrd(hmeblkp);
7565 				uint_t rid = hmeblkp->hblk_tag.htag_rid;
7566 				sf_region_t *rgnp;
7567 				ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7568 				ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7569 				ASSERT(srdp != NULL);
7570 				rgnp = srdp->srd_hmergnp[rid];
7571 				SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7572 				    rgnp, rid);
7573 				shcnt += rgnp->rgn_refcnt;
7574 			} else {
7575 				shcnt++;
7576 			}
7577 			if (shcnt > po_share) {
7578 				/*
7579 				 * tell the pager to spare the page this time
7580 				 * around.
7581 				 */
7582 				hat_page_setattr(save_pp, P_REF);
7583 				index = 0;
7584 				break;
7585 			}
7586 		}
7587 		tset = sfmmu_pagesync(pp, sfhme,
7588 		    clearflag & ~HAT_SYNC_STOPON_RM);
7589 		CPUSET_OR(cpuset, tset);
7590 
7591 		/*
7592 		 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7593 		 * as the "ref" or "mod" is set or share cnt exceeds po_share.
7594 		 */
7595 		if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7596 		    (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7597 		    ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7598 			index = 0;
7599 			break;
7600 		}
7601 	}
7602 
7603 	while (index) {
7604 		index = index >> 1;
7605 		cons++;
7606 		if (index & 0x1) {
7607 			/* Go to leading page */
7608 			pp = PP_GROUPLEADER(pp, cons);
7609 			goto retry;
7610 		}
7611 	}
7612 
7613 	xt_sync(cpuset);
7614 	sfmmu_mlist_exit(pml);
7615 	return (PP_GENERIC_ATTR(save_pp));
7616 }
7617 
7618 /*
7619  * Get all the hardware dependent attributes for a page struct
7620  */
7621 static cpuset_t
7622 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7623 	uint_t clearflag)
7624 {
7625 	caddr_t addr;
7626 	tte_t tte, ttemod;
7627 	struct hme_blk *hmeblkp;
7628 	int ret;
7629 	sfmmu_t *sfmmup;
7630 	cpuset_t cpuset;
7631 
7632 	ASSERT(pp != NULL);
7633 	ASSERT(sfmmu_mlist_held(pp));
7634 	ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7635 	    (clearflag == HAT_SYNC_ZERORM));
7636 
7637 	SFMMU_STAT(sf_pagesync);
7638 
7639 	CPUSET_ZERO(cpuset);
7640 
7641 sfmmu_pagesync_retry:
7642 
7643 	sfmmu_copytte(&sfhme->hme_tte, &tte);
7644 	if (TTE_IS_VALID(&tte)) {
7645 		hmeblkp = sfmmu_hmetohblk(sfhme);
7646 		sfmmup = hblktosfmmu(hmeblkp);
7647 		addr = tte_to_vaddr(hmeblkp, tte);
7648 		if (clearflag == HAT_SYNC_ZERORM) {
7649 			ttemod = tte;
7650 			TTE_CLR_RM(&ttemod);
7651 			ret = sfmmu_modifytte_try(&tte, &ttemod,
7652 			    &sfhme->hme_tte);
7653 			if (ret < 0) {
7654 				/*
7655 				 * cas failed and the new value is not what
7656 				 * we want.
7657 				 */
7658 				goto sfmmu_pagesync_retry;
7659 			}
7660 
7661 			if (ret > 0) {
7662 				/* we win the cas */
7663 				if (hmeblkp->hblk_shared) {
7664 					sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7665 					uint_t rid =
7666 					    hmeblkp->hblk_tag.htag_rid;
7667 					sf_region_t *rgnp;
7668 					ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7669 					ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7670 					ASSERT(srdp != NULL);
7671 					rgnp = srdp->srd_hmergnp[rid];
7672 					SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7673 					    srdp, rgnp, rid);
7674 					cpuset = sfmmu_rgntlb_demap(addr,
7675 					    rgnp, hmeblkp, 1);
7676 				} else {
7677 					sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7678 					    0, 0);
7679 					cpuset = sfmmup->sfmmu_cpusran;
7680 				}
7681 			}
7682 		}
7683 		sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7684 		    &tte, pp);
7685 	}
7686 	return (cpuset);
7687 }
7688 
7689 /*
7690  * Remove write permission from a mappings to a page, so that
7691  * we can detect the next modification of it. This requires modifying
7692  * the TTE then invalidating (demap) any TLB entry using that TTE.
7693  * This code is similar to sfmmu_pagesync().
7694  */
7695 static cpuset_t
7696 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7697 {
7698 	caddr_t addr;
7699 	tte_t tte;
7700 	tte_t ttemod;
7701 	struct hme_blk *hmeblkp;
7702 	int ret;
7703 	sfmmu_t *sfmmup;
7704 	cpuset_t cpuset;
7705 
7706 	ASSERT(pp != NULL);
7707 	ASSERT(sfmmu_mlist_held(pp));
7708 
7709 	CPUSET_ZERO(cpuset);
7710 	SFMMU_STAT(sf_clrwrt);
7711 
7712 retry:
7713 
7714 	sfmmu_copytte(&sfhme->hme_tte, &tte);
7715 	if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7716 		hmeblkp = sfmmu_hmetohblk(sfhme);
7717 
7718 		/*
7719 		 * xhat mappings should never be to a VMODSORT page.
7720 		 */
7721 		ASSERT(hmeblkp->hblk_xhat_bit == 0);
7722 
7723 		sfmmup = hblktosfmmu(hmeblkp);
7724 		addr = tte_to_vaddr(hmeblkp, tte);
7725 
7726 		ttemod = tte;
7727 		TTE_CLR_WRT(&ttemod);
7728 		TTE_CLR_MOD(&ttemod);
7729 		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7730 
7731 		/*
7732 		 * if cas failed and the new value is not what
7733 		 * we want retry
7734 		 */
7735 		if (ret < 0)
7736 			goto retry;
7737 
7738 		/* we win the cas */
7739 		if (ret > 0) {
7740 			if (hmeblkp->hblk_shared) {
7741 				sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7742 				uint_t rid = hmeblkp->hblk_tag.htag_rid;
7743 				sf_region_t *rgnp;
7744 				ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7745 				ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7746 				ASSERT(srdp != NULL);
7747 				rgnp = srdp->srd_hmergnp[rid];
7748 				SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7749 				    srdp, rgnp, rid);
7750 				cpuset = sfmmu_rgntlb_demap(addr,
7751 				    rgnp, hmeblkp, 1);
7752 			} else {
7753 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7754 				cpuset = sfmmup->sfmmu_cpusran;
7755 			}
7756 		}
7757 	}
7758 
7759 	return (cpuset);
7760 }
7761 
7762 /*
7763  * Walk all mappings of a page, removing write permission and clearing the
7764  * ref/mod bits. This code is similar to hat_pagesync()
7765  */
7766 static void
7767 hat_page_clrwrt(page_t *pp)
7768 {
7769 	struct sf_hment *sfhme;
7770 	struct sf_hment *tmphme = NULL;
7771 	kmutex_t *pml;
7772 	cpuset_t cpuset;
7773 	cpuset_t tset;
7774 	int	index;
7775 	int	 cons;
7776 
7777 	CPUSET_ZERO(cpuset);
7778 
7779 	pml = sfmmu_mlist_enter(pp);
7780 	index = PP_MAPINDEX(pp);
7781 	cons = TTE8K;
7782 retry:
7783 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7784 		tmphme = sfhme->hme_next;
7785 
7786 		/*
7787 		 * If we are looking for large mappings and this hme doesn't
7788 		 * reach the range we are seeking, just ignore its.
7789 		 */
7790 
7791 		if (hme_size(sfhme) < cons)
7792 			continue;
7793 
7794 		tset = sfmmu_pageclrwrt(pp, sfhme);
7795 		CPUSET_OR(cpuset, tset);
7796 	}
7797 
7798 	while (index) {
7799 		index = index >> 1;
7800 		cons++;
7801 		if (index & 0x1) {
7802 			/* Go to leading page */
7803 			pp = PP_GROUPLEADER(pp, cons);
7804 			goto retry;
7805 		}
7806 	}
7807 
7808 	xt_sync(cpuset);
7809 	sfmmu_mlist_exit(pml);
7810 }
7811 
7812 /*
7813  * Set the given REF/MOD/RO bits for the given page.
7814  * For a vnode with a sorted v_pages list, we need to change
7815  * the attributes and the v_pages list together under page_vnode_mutex.
7816  */
7817 void
7818 hat_page_setattr(page_t *pp, uint_t flag)
7819 {
7820 	vnode_t		*vp = pp->p_vnode;
7821 	page_t		**listp;
7822 	kmutex_t	*pmtx;
7823 	kmutex_t	*vphm = NULL;
7824 	int		noshuffle;
7825 
7826 	noshuffle = flag & P_NSH;
7827 	flag &= ~P_NSH;
7828 
7829 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7830 
7831 	/*
7832 	 * nothing to do if attribute already set
7833 	 */
7834 	if ((pp->p_nrm & flag) == flag)
7835 		return;
7836 
7837 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7838 	    !noshuffle) {
7839 		vphm = page_vnode_mutex(vp);
7840 		mutex_enter(vphm);
7841 	}
7842 
7843 	pmtx = sfmmu_page_enter(pp);
7844 	pp->p_nrm |= flag;
7845 	sfmmu_page_exit(pmtx);
7846 
7847 	if (vphm != NULL) {
7848 		/*
7849 		 * Some File Systems examine v_pages for NULL w/o
7850 		 * grabbing the vphm mutex. Must not let it become NULL when
7851 		 * pp is the only page on the list.
7852 		 */
7853 		if (pp->p_vpnext != pp) {
7854 			page_vpsub(&vp->v_pages, pp);
7855 			if (vp->v_pages != NULL)
7856 				listp = &vp->v_pages->p_vpprev->p_vpnext;
7857 			else
7858 				listp = &vp->v_pages;
7859 			page_vpadd(listp, pp);
7860 		}
7861 		mutex_exit(vphm);
7862 	}
7863 }
7864 
7865 void
7866 hat_page_clrattr(page_t *pp, uint_t flag)
7867 {
7868 	vnode_t		*vp = pp->p_vnode;
7869 	kmutex_t	*pmtx;
7870 
7871 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7872 
7873 	pmtx = sfmmu_page_enter(pp);
7874 
7875 	/*
7876 	 * Caller is expected to hold page's io lock for VMODSORT to work
7877 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7878 	 * bit is cleared.
7879 	 * We don't have assert to avoid tripping some existing third party
7880 	 * code. The dirty page is moved back to top of the v_page list
7881 	 * after IO is done in pvn_write_done().
7882 	 */
7883 	pp->p_nrm &= ~flag;
7884 	sfmmu_page_exit(pmtx);
7885 
7886 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7887 
7888 		/*
7889 		 * VMODSORT works by removing write permissions and getting
7890 		 * a fault when a page is made dirty. At this point
7891 		 * we need to remove write permission from all mappings
7892 		 * to this page.
7893 		 */
7894 		hat_page_clrwrt(pp);
7895 	}
7896 }
7897 
7898 uint_t
7899 hat_page_getattr(page_t *pp, uint_t flag)
7900 {
7901 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7902 	return ((uint_t)(pp->p_nrm & flag));
7903 }
7904 
7905 /*
7906  * DEBUG kernels: verify that a kernel va<->pa translation
7907  * is safe by checking the underlying page_t is in a page
7908  * relocation-safe state.
7909  */
7910 #ifdef	DEBUG
7911 void
7912 sfmmu_check_kpfn(pfn_t pfn)
7913 {
7914 	page_t *pp;
7915 	int index, cons;
7916 
7917 	if (hat_check_vtop == 0)
7918 		return;
7919 
7920 	if (kvseg.s_base == NULL || panicstr)
7921 		return;
7922 
7923 	pp = page_numtopp_nolock(pfn);
7924 	if (!pp)
7925 		return;
7926 
7927 	if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7928 		return;
7929 
7930 	/*
7931 	 * Handed a large kernel page, we dig up the root page since we
7932 	 * know the root page might have the lock also.
7933 	 */
7934 	if (pp->p_szc != 0) {
7935 		index = PP_MAPINDEX(pp);
7936 		cons = TTE8K;
7937 again:
7938 		while (index != 0) {
7939 			index >>= 1;
7940 			if (index != 0)
7941 				cons++;
7942 			if (index & 0x1) {
7943 				pp = PP_GROUPLEADER(pp, cons);
7944 				goto again;
7945 			}
7946 		}
7947 	}
7948 
7949 	if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7950 		return;
7951 
7952 	/*
7953 	 * Pages need to be locked or allocated "permanent" (either from
7954 	 * static_arena arena or explicitly setting PG_NORELOC when calling
7955 	 * page_create_va()) for VA->PA translations to be valid.
7956 	 */
7957 	if (!PP_ISNORELOC(pp))
7958 		panic("Illegal VA->PA translation, pp 0x%p not permanent",
7959 		    (void *)pp);
7960 	else
7961 		panic("Illegal VA->PA translation, pp 0x%p not locked",
7962 		    (void *)pp);
7963 }
7964 #endif	/* DEBUG */
7965 
7966 /*
7967  * Returns a page frame number for a given virtual address.
7968  * Returns PFN_INVALID to indicate an invalid mapping
7969  */
7970 pfn_t
7971 hat_getpfnum(struct hat *hat, caddr_t addr)
7972 {
7973 	pfn_t pfn;
7974 	tte_t tte;
7975 
7976 	/*
7977 	 * We would like to
7978 	 * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
7979 	 * but we can't because the iommu driver will call this
7980 	 * routine at interrupt time and it can't grab the as lock
7981 	 * or it will deadlock: A thread could have the as lock
7982 	 * and be waiting for io.  The io can't complete
7983 	 * because the interrupt thread is blocked trying to grab
7984 	 * the as lock.
7985 	 */
7986 
7987 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7988 
7989 	if (hat == ksfmmup) {
7990 		if (IS_KMEM_VA_LARGEPAGE(addr)) {
7991 			ASSERT(segkmem_lpszc > 0);
7992 			pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7993 			if (pfn != PFN_INVALID) {
7994 				sfmmu_check_kpfn(pfn);
7995 				return (pfn);
7996 			}
7997 		} else if (segkpm && IS_KPM_ADDR(addr)) {
7998 			return (sfmmu_kpm_vatopfn(addr));
7999 		}
8000 		while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
8001 		    == PFN_SUSPENDED) {
8002 			sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
8003 		}
8004 		sfmmu_check_kpfn(pfn);
8005 		return (pfn);
8006 	} else {
8007 		return (sfmmu_uvatopfn(addr, hat, NULL));
8008 	}
8009 }
8010 
8011 /*
8012  * This routine will return both pfn and tte for the vaddr.
8013  */
8014 static pfn_t
8015 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
8016 {
8017 	struct hmehash_bucket *hmebp;
8018 	hmeblk_tag hblktag;
8019 	int hmeshift, hashno = 1;
8020 	struct hme_blk *hmeblkp = NULL;
8021 	tte_t tte;
8022 
8023 	struct sf_hment *sfhmep;
8024 	pfn_t pfn;
8025 
8026 	/* support for ISM */
8027 	ism_map_t	*ism_map;
8028 	ism_blk_t	*ism_blkp;
8029 	int		i;
8030 	sfmmu_t *ism_hatid = NULL;
8031 	sfmmu_t *locked_hatid = NULL;
8032 	sfmmu_t	*sv_sfmmup = sfmmup;
8033 	caddr_t	sv_vaddr = vaddr;
8034 	sf_srd_t *srdp;
8035 
8036 	if (ttep == NULL) {
8037 		ttep = &tte;
8038 	} else {
8039 		ttep->ll = 0;
8040 	}
8041 
8042 	ASSERT(sfmmup != ksfmmup);
8043 	SFMMU_STAT(sf_user_vtop);
8044 	/*
8045 	 * Set ism_hatid if vaddr falls in a ISM segment.
8046 	 */
8047 	ism_blkp = sfmmup->sfmmu_iblk;
8048 	if (ism_blkp != NULL) {
8049 		sfmmu_ismhat_enter(sfmmup, 0);
8050 		locked_hatid = sfmmup;
8051 	}
8052 	while (ism_blkp != NULL && ism_hatid == NULL) {
8053 		ism_map = ism_blkp->iblk_maps;
8054 		for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
8055 			if (vaddr >= ism_start(ism_map[i]) &&
8056 			    vaddr < ism_end(ism_map[i])) {
8057 				sfmmup = ism_hatid = ism_map[i].imap_ismhat;
8058 				vaddr = (caddr_t)(vaddr -
8059 				    ism_start(ism_map[i]));
8060 				break;
8061 			}
8062 		}
8063 		ism_blkp = ism_blkp->iblk_next;
8064 	}
8065 	if (locked_hatid) {
8066 		sfmmu_ismhat_exit(locked_hatid, 0);
8067 	}
8068 
8069 	hblktag.htag_id = sfmmup;
8070 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
8071 	do {
8072 		hmeshift = HME_HASH_SHIFT(hashno);
8073 		hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
8074 		hblktag.htag_rehash = hashno;
8075 		hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
8076 
8077 		SFMMU_HASH_LOCK(hmebp);
8078 
8079 		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
8080 		if (hmeblkp != NULL) {
8081 			ASSERT(!hmeblkp->hblk_shared);
8082 			HBLKTOHME(sfhmep, hmeblkp, vaddr);
8083 			sfmmu_copytte(&sfhmep->hme_tte, ttep);
8084 			SFMMU_HASH_UNLOCK(hmebp);
8085 			if (TTE_IS_VALID(ttep)) {
8086 				pfn = TTE_TO_PFN(vaddr, ttep);
8087 				return (pfn);
8088 			}
8089 			break;
8090 		}
8091 		SFMMU_HASH_UNLOCK(hmebp);
8092 		hashno++;
8093 	} while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
8094 
8095 	if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
8096 		return (PFN_INVALID);
8097 	}
8098 	srdp = sv_sfmmup->sfmmu_srdp;
8099 	ASSERT(srdp != NULL);
8100 	ASSERT(srdp->srd_refcnt != 0);
8101 	hblktag.htag_id = srdp;
8102 	hashno = 1;
8103 	do {
8104 		hmeshift = HME_HASH_SHIFT(hashno);
8105 		hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
8106 		hblktag.htag_rehash = hashno;
8107 		hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
8108 
8109 		SFMMU_HASH_LOCK(hmebp);
8110 		for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
8111 		    hmeblkp = hmeblkp->hblk_next) {
8112 			uint_t rid;
8113 			sf_region_t *rgnp;
8114 			caddr_t rsaddr;
8115 			caddr_t readdr;
8116 
8117 			if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
8118 			    sv_sfmmup->sfmmu_hmeregion_map)) {
8119 				continue;
8120 			}
8121 			ASSERT(hmeblkp->hblk_shared);
8122 			rid = hmeblkp->hblk_tag.htag_rid;
8123 			ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8124 			ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8125 			rgnp = srdp->srd_hmergnp[rid];
8126 			SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
8127 			HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
8128 			sfmmu_copytte(&sfhmep->hme_tte, ttep);
8129 			rsaddr = rgnp->rgn_saddr;
8130 			readdr = rsaddr + rgnp->rgn_size;
8131 #ifdef DEBUG
8132 			if (TTE_IS_VALID(ttep) ||
8133 			    get_hblk_ttesz(hmeblkp) > TTE8K) {
8134 				caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
8135 				ASSERT(eva > sv_vaddr);
8136 				ASSERT(sv_vaddr >= rsaddr);
8137 				ASSERT(sv_vaddr < readdr);
8138 				ASSERT(eva <= readdr);
8139 			}
8140 #endif /* DEBUG */
8141 			/*
8142 			 * Continue the search if we
8143 			 * found an invalid 8K tte outside of the area
8144 			 * covered by this hmeblk's region.
8145 			 */
8146 			if (TTE_IS_VALID(ttep)) {
8147 				SFMMU_HASH_UNLOCK(hmebp);
8148 				pfn = TTE_TO_PFN(sv_vaddr, ttep);
8149 				return (pfn);
8150 			} else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8151 			    (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
8152 				SFMMU_HASH_UNLOCK(hmebp);
8153 				pfn = PFN_INVALID;
8154 				return (pfn);
8155 			}
8156 		}
8157 		SFMMU_HASH_UNLOCK(hmebp);
8158 		hashno++;
8159 	} while (hashno <= mmu_hashcnt);
8160 	return (PFN_INVALID);
8161 }
8162 
8163 
8164 /*
8165  * For compatability with AT&T and later optimizations
8166  */
8167 /* ARGSUSED */
8168 void
8169 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8170 {
8171 	ASSERT(hat != NULL);
8172 	ASSERT(hat->sfmmu_xhat_provider == NULL);
8173 }
8174 
8175 /*
8176  * Return the number of mappings to a particular page.  This number is an
8177  * approximation of the number of people sharing the page.
8178  *
8179  * shared hmeblks or ism hmeblks are counted as 1 mapping here.
8180  * hat_page_checkshare() can be used to compare threshold to share
8181  * count that reflects the number of region sharers albeit at higher cost.
8182  */
8183 ulong_t
8184 hat_page_getshare(page_t *pp)
8185 {
8186 	page_t *spp = pp;	/* start page */
8187 	kmutex_t *pml;
8188 	ulong_t	cnt;
8189 	int index, sz = TTE64K;
8190 
8191 	/*
8192 	 * We need to grab the mlist lock to make sure any outstanding
8193 	 * load/unloads complete.  Otherwise we could return zero
8194 	 * even though the unload(s) hasn't finished yet.
8195 	 */
8196 	pml = sfmmu_mlist_enter(spp);
8197 	cnt = spp->p_share;
8198 
8199 #ifdef VAC
8200 	if (kpm_enable)
8201 		cnt += spp->p_kpmref;
8202 #endif
8203 	if (vpm_enable && pp->p_vpmref) {
8204 		cnt += 1;
8205 	}
8206 
8207 	/*
8208 	 * If we have any large mappings, we count the number of
8209 	 * mappings that this large page is part of.
8210 	 */
8211 	index = PP_MAPINDEX(spp);
8212 	index >>= 1;
8213 	while (index) {
8214 		pp = PP_GROUPLEADER(spp, sz);
8215 		if ((index & 0x1) && pp != spp) {
8216 			cnt += pp->p_share;
8217 			spp = pp;
8218 		}
8219 		index >>= 1;
8220 		sz++;
8221 	}
8222 	sfmmu_mlist_exit(pml);
8223 	return (cnt);
8224 }
8225 
8226 /*
8227  * Return 1 if the number of mappings exceeds sh_thresh. Return 0
8228  * otherwise. Count shared hmeblks by region's refcnt.
8229  */
8230 int
8231 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
8232 {
8233 	kmutex_t *pml;
8234 	ulong_t	cnt = 0;
8235 	int index, sz = TTE8K;
8236 	struct sf_hment *sfhme, *tmphme = NULL;
8237 	struct hme_blk *hmeblkp;
8238 
8239 	pml = sfmmu_mlist_enter(pp);
8240 
8241 #ifdef VAC
8242 	if (kpm_enable)
8243 		cnt = pp->p_kpmref;
8244 #endif
8245 
8246 	if (vpm_enable && pp->p_vpmref) {
8247 		cnt += 1;
8248 	}
8249 
8250 	if (pp->p_share + cnt > sh_thresh) {
8251 		sfmmu_mlist_exit(pml);
8252 		return (1);
8253 	}
8254 
8255 	index = PP_MAPINDEX(pp);
8256 
8257 again:
8258 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8259 		tmphme = sfhme->hme_next;
8260 		if (IS_PAHME(sfhme)) {
8261 			continue;
8262 		}
8263 
8264 		hmeblkp = sfmmu_hmetohblk(sfhme);
8265 		if (hmeblkp->hblk_xhat_bit) {
8266 			cnt++;
8267 			if (cnt > sh_thresh) {
8268 				sfmmu_mlist_exit(pml);
8269 				return (1);
8270 			}
8271 			continue;
8272 		}
8273 		if (hme_size(sfhme) != sz) {
8274 			continue;
8275 		}
8276 
8277 		if (hmeblkp->hblk_shared) {
8278 			sf_srd_t *srdp = hblktosrd(hmeblkp);
8279 			uint_t rid = hmeblkp->hblk_tag.htag_rid;
8280 			sf_region_t *rgnp;
8281 			ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8282 			ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8283 			ASSERT(srdp != NULL);
8284 			rgnp = srdp->srd_hmergnp[rid];
8285 			SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8286 			    rgnp, rid);
8287 			cnt += rgnp->rgn_refcnt;
8288 		} else {
8289 			cnt++;
8290 		}
8291 		if (cnt > sh_thresh) {
8292 			sfmmu_mlist_exit(pml);
8293 			return (1);
8294 		}
8295 	}
8296 
8297 	index >>= 1;
8298 	sz++;
8299 	while (index) {
8300 		pp = PP_GROUPLEADER(pp, sz);
8301 		ASSERT(sfmmu_mlist_held(pp));
8302 		if (index & 0x1) {
8303 			goto again;
8304 		}
8305 		index >>= 1;
8306 		sz++;
8307 	}
8308 	sfmmu_mlist_exit(pml);
8309 	return (0);
8310 }
8311 
8312 /*
8313  * Unload all large mappings to the pp and reset the p_szc field of every
8314  * constituent page according to the remaining mappings.
8315  *
8316  * pp must be locked SE_EXCL. Even though no other constituent pages are
8317  * locked it's legal to unload the large mappings to the pp because all
8318  * constituent pages of large locked mappings have to be locked SE_SHARED.
8319  * This means if we have SE_EXCL lock on one of constituent pages none of the
8320  * large mappings to pp are locked.
8321  *
8322  * Decrease p_szc field starting from the last constituent page and ending
8323  * with the root page. This method is used because other threads rely on the
8324  * root's p_szc to find the lock to syncronize on. After a root page_t's p_szc
8325  * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This
8326  * ensures that p_szc changes of the constituent pages appears atomic for all
8327  * threads that use sfmmu_mlspl_enter() to examine p_szc field.
8328  *
8329  * This mechanism is only used for file system pages where it's not always
8330  * possible to get SE_EXCL locks on all constituent pages to demote the size
8331  * code (as is done for anonymous or kernel large pages).
8332  *
8333  * See more comments in front of sfmmu_mlspl_enter().
8334  */
8335 void
8336 hat_page_demote(page_t *pp)
8337 {
8338 	int index;
8339 	int sz;
8340 	cpuset_t cpuset;
8341 	int sync = 0;
8342 	page_t *rootpp;
8343 	struct sf_hment *sfhme;
8344 	struct sf_hment *tmphme = NULL;
8345 	struct hme_blk *hmeblkp;
8346 	uint_t pszc;
8347 	page_t *lastpp;
8348 	cpuset_t tset;
8349 	pgcnt_t npgs;
8350 	kmutex_t *pml;
8351 	kmutex_t *pmtx = NULL;
8352 
8353 	ASSERT(PAGE_EXCL(pp));
8354 	ASSERT(!PP_ISFREE(pp));
8355 	ASSERT(!PP_ISKAS(pp));
8356 	ASSERT(page_szc_lock_assert(pp));
8357 	pml = sfmmu_mlist_enter(pp);
8358 
8359 	pszc = pp->p_szc;
8360 	if (pszc == 0) {
8361 		goto out;
8362 	}
8363 
8364 	index = PP_MAPINDEX(pp) >> 1;
8365 
8366 	if (index) {
8367 		CPUSET_ZERO(cpuset);
8368 		sz = TTE64K;
8369 		sync = 1;
8370 	}
8371 
8372 	while (index) {
8373 		if (!(index & 0x1)) {
8374 			index >>= 1;
8375 			sz++;
8376 			continue;
8377 		}
8378 		ASSERT(sz <= pszc);
8379 		rootpp = PP_GROUPLEADER(pp, sz);
8380 		for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8381 			tmphme = sfhme->hme_next;
8382 			ASSERT(!IS_PAHME(sfhme));
8383 			hmeblkp = sfmmu_hmetohblk(sfhme);
8384 			if (hme_size(sfhme) != sz) {
8385 				continue;
8386 			}
8387 			if (hmeblkp->hblk_xhat_bit) {
8388 				cmn_err(CE_PANIC,
8389 				    "hat_page_demote: xhat hmeblk");
8390 			}
8391 			tset = sfmmu_pageunload(rootpp, sfhme, sz);
8392 			CPUSET_OR(cpuset, tset);
8393 		}
8394 		if (index >>= 1) {
8395 			sz++;
8396 		}
8397 	}
8398 
8399 	ASSERT(!PP_ISMAPPED_LARGE(pp));
8400 
8401 	if (sync) {
8402 		xt_sync(cpuset);
8403 #ifdef VAC
8404 		if (PP_ISTNC(pp)) {
8405 			conv_tnc(rootpp, sz);
8406 		}
8407 #endif	/* VAC */
8408 	}
8409 
8410 	pmtx = sfmmu_page_enter(pp);
8411 
8412 	ASSERT(pp->p_szc == pszc);
8413 	rootpp = PP_PAGEROOT(pp);
8414 	ASSERT(rootpp->p_szc == pszc);
8415 	lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8416 
8417 	while (lastpp != rootpp) {
8418 		sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8419 		ASSERT(sz < pszc);
8420 		npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8421 		ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8422 		while (--npgs > 0) {
8423 			lastpp->p_szc = (uchar_t)sz;
8424 			lastpp = PP_PAGEPREV(lastpp);
8425 		}
8426 		if (sz) {
8427 			/*
8428 			 * make sure before current root's pszc
8429 			 * is updated all updates to constituent pages pszc
8430 			 * fields are globally visible.
8431 			 */
8432 			membar_producer();
8433 		}
8434 		lastpp->p_szc = sz;
8435 		ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8436 		if (lastpp != rootpp) {
8437 			lastpp = PP_PAGEPREV(lastpp);
8438 		}
8439 	}
8440 	if (sz == 0) {
8441 		/* the loop above doesn't cover this case */
8442 		rootpp->p_szc = 0;
8443 	}
8444 out:
8445 	ASSERT(pp->p_szc == 0);
8446 	if (pmtx != NULL) {
8447 		sfmmu_page_exit(pmtx);
8448 	}
8449 	sfmmu_mlist_exit(pml);
8450 }
8451 
8452 /*
8453  * Refresh the HAT ismttecnt[] element for size szc.
8454  * Caller must have set ISM busy flag to prevent mapping
8455  * lists from changing while we're traversing them.
8456  */
8457 pgcnt_t
8458 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8459 {
8460 	ism_blk_t	*ism_blkp = sfmmup->sfmmu_iblk;
8461 	ism_map_t	*ism_map;
8462 	pgcnt_t		npgs = 0;
8463 	pgcnt_t		npgs_scd = 0;
8464 	int		j;
8465 	sf_scd_t	*scdp;
8466 	uchar_t		rid;
8467 
8468 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8469 	scdp = sfmmup->sfmmu_scdp;
8470 
8471 	for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8472 		ism_map = ism_blkp->iblk_maps;
8473 		for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) {
8474 			rid = ism_map[j].imap_rid;
8475 			ASSERT(rid == SFMMU_INVALID_ISMRID ||
8476 			    rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8477 
8478 			if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8479 			    SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8480 				/* ISM is in sfmmup's SCD */
8481 				npgs_scd +=
8482 				    ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8483 			} else {
8484 				/* ISMs is not in SCD */
8485 				npgs +=
8486 				    ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8487 			}
8488 		}
8489 	}
8490 	sfmmup->sfmmu_ismttecnt[szc] = npgs;
8491 	sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8492 	return (npgs);
8493 }
8494 
8495 /*
8496  * Yield the memory claim requirement for an address space.
8497  *
8498  * This is currently implemented as the number of bytes that have active
8499  * hardware translations that have page structures.  Therefore, it can
8500  * underestimate the traditional resident set size, eg, if the
8501  * physical page is present and the hardware translation is missing;
8502  * and it can overestimate the rss, eg, if there are active
8503  * translations to a frame buffer with page structs.
8504  * Also, it does not take sharing into account.
8505  *
8506  * Note that we don't acquire locks here since this function is most often
8507  * called from the clock thread.
8508  */
8509 size_t
8510 hat_get_mapped_size(struct hat *hat)
8511 {
8512 	size_t		assize = 0;
8513 	int 		i;
8514 
8515 	if (hat == NULL)
8516 		return (0);
8517 
8518 	ASSERT(hat->sfmmu_xhat_provider == NULL);
8519 
8520 	for (i = 0; i < mmu_page_sizes; i++)
8521 		assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8522 		    (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8523 
8524 	if (hat->sfmmu_iblk == NULL)
8525 		return (assize);
8526 
8527 	for (i = 0; i < mmu_page_sizes; i++)
8528 		assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8529 		    (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8530 
8531 	return (assize);
8532 }
8533 
8534 int
8535 hat_stats_enable(struct hat *hat)
8536 {
8537 	hatlock_t	*hatlockp;
8538 
8539 	ASSERT(hat->sfmmu_xhat_provider == NULL);
8540 
8541 	hatlockp = sfmmu_hat_enter(hat);
8542 	hat->sfmmu_rmstat++;
8543 	sfmmu_hat_exit(hatlockp);
8544 	return (1);
8545 }
8546 
8547 void
8548 hat_stats_disable(struct hat *hat)
8549 {
8550 	hatlock_t	*hatlockp;
8551 
8552 	ASSERT(hat->sfmmu_xhat_provider == NULL);
8553 
8554 	hatlockp = sfmmu_hat_enter(hat);
8555 	hat->sfmmu_rmstat--;
8556 	sfmmu_hat_exit(hatlockp);
8557 }
8558 
8559 /*
8560  * Routines for entering or removing  ourselves from the
8561  * ism_hat's mapping list. This is used for both private and
8562  * SCD hats.
8563  */
8564 static void
8565 iment_add(struct ism_ment *iment,  struct hat *ism_hat)
8566 {
8567 	ASSERT(MUTEX_HELD(&ism_mlist_lock));
8568 
8569 	iment->iment_prev = NULL;
8570 	iment->iment_next = ism_hat->sfmmu_iment;
8571 	if (ism_hat->sfmmu_iment) {
8572 		ism_hat->sfmmu_iment->iment_prev = iment;
8573 	}
8574 	ism_hat->sfmmu_iment = iment;
8575 }
8576 
8577 static void
8578 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8579 {
8580 	ASSERT(MUTEX_HELD(&ism_mlist_lock));
8581 
8582 	if (ism_hat->sfmmu_iment == NULL) {
8583 		panic("ism map entry remove - no entries");
8584 	}
8585 
8586 	if (iment->iment_prev) {
8587 		ASSERT(ism_hat->sfmmu_iment != iment);
8588 		iment->iment_prev->iment_next = iment->iment_next;
8589 	} else {
8590 		ASSERT(ism_hat->sfmmu_iment == iment);
8591 		ism_hat->sfmmu_iment = iment->iment_next;
8592 	}
8593 
8594 	if (iment->iment_next) {
8595 		iment->iment_next->iment_prev = iment->iment_prev;
8596 	}
8597 
8598 	/*
8599 	 * zero out the entry
8600 	 */
8601 	iment->iment_next = NULL;
8602 	iment->iment_prev = NULL;
8603 	iment->iment_hat =  NULL;
8604 	iment->iment_base_va = 0;
8605 }
8606 
8607 /*
8608  * Hat_share()/unshare() return an (non-zero) error
8609  * when saddr and daddr are not properly aligned.
8610  *
8611  * The top level mapping element determines the alignment
8612  * requirement for saddr and daddr, depending on different
8613  * architectures.
8614  *
8615  * When hat_share()/unshare() are not supported,
8616  * HATOP_SHARE()/UNSHARE() return 0
8617  */
8618 int
8619 hat_share(struct hat *sfmmup, caddr_t addr,
8620 	struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8621 {
8622 	ism_blk_t	*ism_blkp;
8623 	ism_blk_t	*new_iblk;
8624 	ism_map_t 	*ism_map;
8625 	ism_ment_t	*ism_ment;
8626 	int		i, added;
8627 	hatlock_t	*hatlockp;
8628 	int		reload_mmu = 0;
8629 	uint_t		ismshift = page_get_shift(ismszc);
8630 	size_t		ismpgsz = page_get_pagesize(ismszc);
8631 	uint_t		ismmask = (uint_t)ismpgsz - 1;
8632 	size_t		sh_size = ISM_SHIFT(ismshift, len);
8633 	ushort_t	ismhatflag;
8634 	hat_region_cookie_t rcookie;
8635 	sf_scd_t	*old_scdp;
8636 
8637 #ifdef DEBUG
8638 	caddr_t		eaddr = addr + len;
8639 #endif /* DEBUG */
8640 
8641 	ASSERT(ism_hatid != NULL && sfmmup != NULL);
8642 	ASSERT(sptaddr == ISMID_STARTADDR);
8643 	/*
8644 	 * Check the alignment.
8645 	 */
8646 	if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8647 		return (EINVAL);
8648 
8649 	/*
8650 	 * Check size alignment.
8651 	 */
8652 	if (!ISM_ALIGNED(ismshift, len))
8653 		return (EINVAL);
8654 
8655 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
8656 
8657 	/*
8658 	 * Allocate ism_ment for the ism_hat's mapping list, and an
8659 	 * ism map blk in case we need one.  We must do our
8660 	 * allocations before acquiring locks to prevent a deadlock
8661 	 * in the kmem allocator on the mapping list lock.
8662 	 */
8663 	new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8664 	ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8665 
8666 	/*
8667 	 * Serialize ISM mappings with the ISM busy flag, and also the
8668 	 * trap handlers.
8669 	 */
8670 	sfmmu_ismhat_enter(sfmmup, 0);
8671 
8672 	/*
8673 	 * Allocate an ism map blk if necessary.
8674 	 */
8675 	if (sfmmup->sfmmu_iblk == NULL) {
8676 		sfmmup->sfmmu_iblk = new_iblk;
8677 		bzero(new_iblk, sizeof (*new_iblk));
8678 		new_iblk->iblk_nextpa = (uint64_t)-1;
8679 		membar_stst();	/* make sure next ptr visible to all CPUs */
8680 		sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8681 		reload_mmu = 1;
8682 		new_iblk = NULL;
8683 	}
8684 
8685 #ifdef DEBUG
8686 	/*
8687 	 * Make sure mapping does not already exist.
8688 	 */
8689 	ism_blkp = sfmmup->sfmmu_iblk;
8690 	while (ism_blkp != NULL) {
8691 		ism_map = ism_blkp->iblk_maps;
8692 		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8693 			if ((addr >= ism_start(ism_map[i]) &&
8694 			    addr < ism_end(ism_map[i])) ||
8695 			    eaddr > ism_start(ism_map[i]) &&
8696 			    eaddr <= ism_end(ism_map[i])) {
8697 				panic("sfmmu_share: Already mapped!");
8698 			}
8699 		}
8700 		ism_blkp = ism_blkp->iblk_next;
8701 	}
8702 #endif /* DEBUG */
8703 
8704 	ASSERT(ismszc >= TTE4M);
8705 	if (ismszc == TTE4M) {
8706 		ismhatflag = HAT_4M_FLAG;
8707 	} else if (ismszc == TTE32M) {
8708 		ismhatflag = HAT_32M_FLAG;
8709 	} else if (ismszc == TTE256M) {
8710 		ismhatflag = HAT_256M_FLAG;
8711 	}
8712 	/*
8713 	 * Add mapping to first available mapping slot.
8714 	 */
8715 	ism_blkp = sfmmup->sfmmu_iblk;
8716 	added = 0;
8717 	while (!added) {
8718 		ism_map = ism_blkp->iblk_maps;
8719 		for (i = 0; i < ISM_MAP_SLOTS; i++)  {
8720 			if (ism_map[i].imap_ismhat == NULL) {
8721 
8722 				ism_map[i].imap_ismhat = ism_hatid;
8723 				ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8724 				ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8725 				ism_map[i].imap_hatflags = ismhatflag;
8726 				ism_map[i].imap_sz_mask = ismmask;
8727 				/*
8728 				 * imap_seg is checked in ISM_CHECK to see if
8729 				 * non-NULL, then other info assumed valid.
8730 				 */
8731 				membar_stst();
8732 				ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8733 				ism_map[i].imap_ment = ism_ment;
8734 
8735 				/*
8736 				 * Now add ourselves to the ism_hat's
8737 				 * mapping list.
8738 				 */
8739 				ism_ment->iment_hat = sfmmup;
8740 				ism_ment->iment_base_va = addr;
8741 				ism_hatid->sfmmu_ismhat = 1;
8742 				mutex_enter(&ism_mlist_lock);
8743 				iment_add(ism_ment, ism_hatid);
8744 				mutex_exit(&ism_mlist_lock);
8745 				added = 1;
8746 				break;
8747 			}
8748 		}
8749 		if (!added && ism_blkp->iblk_next == NULL) {
8750 			ism_blkp->iblk_next = new_iblk;
8751 			new_iblk = NULL;
8752 			bzero(ism_blkp->iblk_next,
8753 			    sizeof (*ism_blkp->iblk_next));
8754 			ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8755 			membar_stst();
8756 			ism_blkp->iblk_nextpa =
8757 			    va_to_pa((caddr_t)ism_blkp->iblk_next);
8758 		}
8759 		ism_blkp = ism_blkp->iblk_next;
8760 	}
8761 
8762 	/*
8763 	 * After calling hat_join_region, sfmmup may join a new SCD or
8764 	 * move from the old scd to a new scd, in which case, we want to
8765 	 * shrink the sfmmup's private tsb size, i.e., pass shrink to
8766 	 * sfmmu_check_page_sizes at the end of this routine.
8767 	 */
8768 	old_scdp = sfmmup->sfmmu_scdp;
8769 
8770 	rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8771 	    PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8772 	if (rcookie != HAT_INVALID_REGION_COOKIE) {
8773 		ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8774 	}
8775 	/*
8776 	 * Update our counters for this sfmmup's ism mappings.
8777 	 */
8778 	for (i = 0; i <= ismszc; i++) {
8779 		if (!(disable_ism_large_pages & (1 << i)))
8780 			(void) ism_tsb_entries(sfmmup, i);
8781 	}
8782 
8783 	/*
8784 	 * For ISM and DISM we do not support 512K pages, so we only only
8785 	 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8786 	 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8787 	 *
8788 	 * Need to set 32M/256M ISM flags to make sure
8789 	 * sfmmu_check_page_sizes() enables them on Panther.
8790 	 */
8791 	ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8792 
8793 	switch (ismszc) {
8794 	case TTE256M:
8795 		if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8796 			hatlockp = sfmmu_hat_enter(sfmmup);
8797 			SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8798 			sfmmu_hat_exit(hatlockp);
8799 		}
8800 		break;
8801 	case TTE32M:
8802 		if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8803 			hatlockp = sfmmu_hat_enter(sfmmup);
8804 			SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8805 			sfmmu_hat_exit(hatlockp);
8806 		}
8807 		break;
8808 	default:
8809 		break;
8810 	}
8811 
8812 	/*
8813 	 * If we updated the ismblkpa for this HAT we must make
8814 	 * sure all CPUs running this process reload their tsbmiss area.
8815 	 * Otherwise they will fail to load the mappings in the tsbmiss
8816 	 * handler and will loop calling pagefault().
8817 	 */
8818 	if (reload_mmu) {
8819 		hatlockp = sfmmu_hat_enter(sfmmup);
8820 		sfmmu_sync_mmustate(sfmmup);
8821 		sfmmu_hat_exit(hatlockp);
8822 	}
8823 
8824 	sfmmu_ismhat_exit(sfmmup, 0);
8825 
8826 	/*
8827 	 * Free up ismblk if we didn't use it.
8828 	 */
8829 	if (new_iblk != NULL)
8830 		kmem_cache_free(ism_blk_cache, new_iblk);
8831 
8832 	/*
8833 	 * Check TSB and TLB page sizes.
8834 	 */
8835 	if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8836 		sfmmu_check_page_sizes(sfmmup, 0);
8837 	} else {
8838 		sfmmu_check_page_sizes(sfmmup, 1);
8839 	}
8840 	return (0);
8841 }
8842 
8843 /*
8844  * hat_unshare removes exactly one ism_map from
8845  * this process's as.  It expects multiple calls
8846  * to hat_unshare for multiple shm segments.
8847  */
8848 void
8849 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8850 {
8851 	ism_map_t 	*ism_map;
8852 	ism_ment_t	*free_ment = NULL;
8853 	ism_blk_t	*ism_blkp;
8854 	struct hat	*ism_hatid;
8855 	int 		found, i;
8856 	hatlock_t	*hatlockp;
8857 	struct tsb_info	*tsbinfo;
8858 	uint_t		ismshift = page_get_shift(ismszc);
8859 	size_t		sh_size = ISM_SHIFT(ismshift, len);
8860 	uchar_t		ism_rid;
8861 	sf_scd_t	*old_scdp;
8862 
8863 	ASSERT(ISM_ALIGNED(ismshift, addr));
8864 	ASSERT(ISM_ALIGNED(ismshift, len));
8865 	ASSERT(sfmmup != NULL);
8866 	ASSERT(sfmmup != ksfmmup);
8867 
8868 	if (sfmmup->sfmmu_xhat_provider) {
8869 		XHAT_UNSHARE(sfmmup, addr, len);
8870 		return;
8871 	} else {
8872 		/*
8873 		 * This must be a CPU HAT. If the address space has
8874 		 * XHATs attached, inform all XHATs that ISM segment
8875 		 * is going away
8876 		 */
8877 		ASSERT(sfmmup->sfmmu_as != NULL);
8878 		if (sfmmup->sfmmu_as->a_xhat != NULL)
8879 			xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
8880 	}
8881 
8882 	/*
8883 	 * Make sure that during the entire time ISM mappings are removed,
8884 	 * the trap handlers serialize behind us, and that no one else
8885 	 * can be mucking with ISM mappings.  This also lets us get away
8886 	 * with not doing expensive cross calls to flush the TLB -- we
8887 	 * just discard the context, flush the entire TSB, and call it
8888 	 * a day.
8889 	 */
8890 	sfmmu_ismhat_enter(sfmmup, 0);
8891 
8892 	/*
8893 	 * Remove the mapping.
8894 	 *
8895 	 * We can't have any holes in the ism map.
8896 	 * The tsb miss code while searching the ism map will
8897 	 * stop on an empty map slot.  So we must move
8898 	 * everyone past the hole up 1 if any.
8899 	 *
8900 	 * Also empty ism map blks are not freed until the
8901 	 * process exits. This is to prevent a MT race condition
8902 	 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8903 	 */
8904 	found = 0;
8905 	ism_blkp = sfmmup->sfmmu_iblk;
8906 	while (!found && ism_blkp != NULL) {
8907 		ism_map = ism_blkp->iblk_maps;
8908 		for (i = 0; i < ISM_MAP_SLOTS; i++) {
8909 			if (addr == ism_start(ism_map[i]) &&
8910 			    sh_size == (size_t)(ism_size(ism_map[i]))) {
8911 				found = 1;
8912 				break;
8913 			}
8914 		}
8915 		if (!found)
8916 			ism_blkp = ism_blkp->iblk_next;
8917 	}
8918 
8919 	if (found) {
8920 		ism_hatid = ism_map[i].imap_ismhat;
8921 		ism_rid = ism_map[i].imap_rid;
8922 		ASSERT(ism_hatid != NULL);
8923 		ASSERT(ism_hatid->sfmmu_ismhat == 1);
8924 
8925 		/*
8926 		 * After hat_leave_region, the sfmmup may leave SCD,
8927 		 * in which case, we want to grow the private tsb size when
8928 		 * calling sfmmu_check_page_sizes at the end of the routine.
8929 		 */
8930 		old_scdp = sfmmup->sfmmu_scdp;
8931 		/*
8932 		 * Then remove ourselves from the region.
8933 		 */
8934 		if (ism_rid != SFMMU_INVALID_ISMRID) {
8935 			hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8936 			    HAT_REGION_ISM);
8937 		}
8938 
8939 		/*
8940 		 * And now guarantee that any other cpu
8941 		 * that tries to process an ISM miss
8942 		 * will go to tl=0.
8943 		 */
8944 		hatlockp = sfmmu_hat_enter(sfmmup);
8945 		sfmmu_invalidate_ctx(sfmmup);
8946 		sfmmu_hat_exit(hatlockp);
8947 
8948 		/*
8949 		 * Remove ourselves from the ism mapping list.
8950 		 */
8951 		mutex_enter(&ism_mlist_lock);
8952 		iment_sub(ism_map[i].imap_ment, ism_hatid);
8953 		mutex_exit(&ism_mlist_lock);
8954 		free_ment = ism_map[i].imap_ment;
8955 
8956 		/*
8957 		 * We delete the ism map by copying
8958 		 * the next map over the current one.
8959 		 * We will take the next one in the maps
8960 		 * array or from the next ism_blk.
8961 		 */
8962 		while (ism_blkp != NULL) {
8963 			ism_map = ism_blkp->iblk_maps;
8964 			while (i < (ISM_MAP_SLOTS - 1)) {
8965 				ism_map[i] = ism_map[i + 1];
8966 				i++;
8967 			}
8968 			/* i == (ISM_MAP_SLOTS - 1) */
8969 			ism_blkp = ism_blkp->iblk_next;
8970 			if (ism_blkp != NULL) {
8971 				ism_map[i] = ism_blkp->iblk_maps[0];
8972 				i = 0;
8973 			} else {
8974 				ism_map[i].imap_seg = 0;
8975 				ism_map[i].imap_vb_shift = 0;
8976 				ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8977 				ism_map[i].imap_hatflags = 0;
8978 				ism_map[i].imap_sz_mask = 0;
8979 				ism_map[i].imap_ismhat = NULL;
8980 				ism_map[i].imap_ment = NULL;
8981 			}
8982 		}
8983 
8984 		/*
8985 		 * Now flush entire TSB for the process, since
8986 		 * demapping page by page can be too expensive.
8987 		 * We don't have to flush the TLB here anymore
8988 		 * since we switch to a new TLB ctx instead.
8989 		 * Also, there is no need to flush if the process
8990 		 * is exiting since the TSB will be freed later.
8991 		 */
8992 		if (!sfmmup->sfmmu_free) {
8993 			hatlockp = sfmmu_hat_enter(sfmmup);
8994 			for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
8995 			    tsbinfo = tsbinfo->tsb_next) {
8996 				if (tsbinfo->tsb_flags & TSB_SWAPPED)
8997 					continue;
8998 				if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
8999 					tsbinfo->tsb_flags |=
9000 					    TSB_FLUSH_NEEDED;
9001 					continue;
9002 				}
9003 
9004 				sfmmu_inv_tsb(tsbinfo->tsb_va,
9005 				    TSB_BYTES(tsbinfo->tsb_szc));
9006 			}
9007 			sfmmu_hat_exit(hatlockp);
9008 		}
9009 	}
9010 
9011 	/*
9012 	 * Update our counters for this sfmmup's ism mappings.
9013 	 */
9014 	for (i = 0; i <= ismszc; i++) {
9015 		if (!(disable_ism_large_pages & (1 << i)))
9016 			(void) ism_tsb_entries(sfmmup, i);
9017 	}
9018 
9019 	sfmmu_ismhat_exit(sfmmup, 0);
9020 
9021 	/*
9022 	 * We must do our freeing here after dropping locks
9023 	 * to prevent a deadlock in the kmem allocator on the
9024 	 * mapping list lock.
9025 	 */
9026 	if (free_ment != NULL)
9027 		kmem_cache_free(ism_ment_cache, free_ment);
9028 
9029 	/*
9030 	 * Check TSB and TLB page sizes if the process isn't exiting.
9031 	 */
9032 	if (!sfmmup->sfmmu_free) {
9033 		if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
9034 			sfmmu_check_page_sizes(sfmmup, 1);
9035 		} else {
9036 			sfmmu_check_page_sizes(sfmmup, 0);
9037 		}
9038 	}
9039 }
9040 
9041 /* ARGSUSED */
9042 static int
9043 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
9044 {
9045 	/* void *buf is sfmmu_t pointer */
9046 	bzero(buf, sizeof (sfmmu_t));
9047 
9048 	return (0);
9049 }
9050 
9051 /* ARGSUSED */
9052 static void
9053 sfmmu_idcache_destructor(void *buf, void *cdrarg)
9054 {
9055 	/* void *buf is sfmmu_t pointer */
9056 }
9057 
9058 /*
9059  * setup kmem hmeblks by bzeroing all members and initializing the nextpa
9060  * field to be the pa of this hmeblk
9061  */
9062 /* ARGSUSED */
9063 static int
9064 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
9065 {
9066 	struct hme_blk *hmeblkp;
9067 
9068 	bzero(buf, (size_t)cdrarg);
9069 	hmeblkp = (struct hme_blk *)buf;
9070 	hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
9071 
9072 #ifdef	HBLK_TRACE
9073 	mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
9074 #endif	/* HBLK_TRACE */
9075 
9076 	return (0);
9077 }
9078 
9079 /* ARGSUSED */
9080 static void
9081 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
9082 {
9083 
9084 #ifdef	HBLK_TRACE
9085 
9086 	struct hme_blk *hmeblkp;
9087 
9088 	hmeblkp = (struct hme_blk *)buf;
9089 	mutex_destroy(&hmeblkp->hblk_audit_lock);
9090 
9091 #endif	/* HBLK_TRACE */
9092 }
9093 
9094 #define	SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
9095 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
9096 /*
9097  * The kmem allocator will callback into our reclaim routine when the system
9098  * is running low in memory.  We traverse the hash and free up all unused but
9099  * still cached hme_blks.  We also traverse the free list and free them up
9100  * as well.
9101  */
9102 /*ARGSUSED*/
9103 static void
9104 sfmmu_hblkcache_reclaim(void *cdrarg)
9105 {
9106 	int i;
9107 	struct hmehash_bucket *hmebp;
9108 	struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
9109 	static struct hmehash_bucket *uhmehash_reclaim_hand;
9110 	static struct hmehash_bucket *khmehash_reclaim_hand;
9111 	struct hme_blk *list = NULL, *last_hmeblkp;
9112 	cpuset_t cpuset = cpu_ready_set;
9113 	cpu_hme_pend_t *cpuhp;
9114 
9115 	/* Free up hmeblks on the cpu pending lists */
9116 	for (i = 0; i < NCPU; i++) {
9117 		cpuhp = &cpu_hme_pend[i];
9118 		if (cpuhp->chp_listp != NULL)  {
9119 			mutex_enter(&cpuhp->chp_mutex);
9120 			if (cpuhp->chp_listp == NULL) {
9121 				mutex_exit(&cpuhp->chp_mutex);
9122 				continue;
9123 			}
9124 			for (last_hmeblkp = cpuhp->chp_listp;
9125 			    last_hmeblkp->hblk_next != NULL;
9126 			    last_hmeblkp = last_hmeblkp->hblk_next)
9127 				;
9128 			last_hmeblkp->hblk_next = list;
9129 			list = cpuhp->chp_listp;
9130 			cpuhp->chp_listp = NULL;
9131 			cpuhp->chp_count = 0;
9132 			mutex_exit(&cpuhp->chp_mutex);
9133 		}
9134 
9135 	}
9136 
9137 	if (list != NULL) {
9138 		kpreempt_disable();
9139 		CPUSET_DEL(cpuset, CPU->cpu_id);
9140 		xt_sync(cpuset);
9141 		xt_sync(cpuset);
9142 		kpreempt_enable();
9143 		sfmmu_hblk_free(&list);
9144 		list = NULL;
9145 	}
9146 
9147 	hmebp = uhmehash_reclaim_hand;
9148 	if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
9149 		uhmehash_reclaim_hand = hmebp = uhme_hash;
9150 	uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9151 
9152 	for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9153 		if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9154 			hmeblkp = hmebp->hmeblkp;
9155 			pr_hblk = NULL;
9156 			while (hmeblkp) {
9157 				nx_hblk = hmeblkp->hblk_next;
9158 				if (!hmeblkp->hblk_vcnt &&
9159 				    !hmeblkp->hblk_hmecnt) {
9160 					sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9161 					    pr_hblk, &list, 0);
9162 				} else {
9163 					pr_hblk = hmeblkp;
9164 				}
9165 				hmeblkp = nx_hblk;
9166 			}
9167 			SFMMU_HASH_UNLOCK(hmebp);
9168 		}
9169 		if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
9170 			hmebp = uhme_hash;
9171 	}
9172 
9173 	hmebp = khmehash_reclaim_hand;
9174 	if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
9175 		khmehash_reclaim_hand = hmebp = khme_hash;
9176 	khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9177 
9178 	for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9179 		if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9180 			hmeblkp = hmebp->hmeblkp;
9181 			pr_hblk = NULL;
9182 			while (hmeblkp) {
9183 				nx_hblk = hmeblkp->hblk_next;
9184 				if (!hmeblkp->hblk_vcnt &&
9185 				    !hmeblkp->hblk_hmecnt) {
9186 					sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9187 					    pr_hblk, &list, 0);
9188 				} else {
9189 					pr_hblk = hmeblkp;
9190 				}
9191 				hmeblkp = nx_hblk;
9192 			}
9193 			SFMMU_HASH_UNLOCK(hmebp);
9194 		}
9195 		if (hmebp++ == &khme_hash[KHMEHASH_SZ])
9196 			hmebp = khme_hash;
9197 	}
9198 	sfmmu_hblks_list_purge(&list, 0);
9199 }
9200 
9201 /*
9202  * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
9203  * same goes for sfmmu_get_addrvcolor().
9204  *
9205  * This function will return the virtual color for the specified page. The
9206  * virtual color corresponds to this page current mapping or its last mapping.
9207  * It is used by memory allocators to choose addresses with the correct
9208  * alignment so vac consistency is automatically maintained.  If the page
9209  * has no color it returns -1.
9210  */
9211 /*ARGSUSED*/
9212 int
9213 sfmmu_get_ppvcolor(struct page *pp)
9214 {
9215 #ifdef VAC
9216 	int color;
9217 
9218 	if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
9219 		return (-1);
9220 	}
9221 	color = PP_GET_VCOLOR(pp);
9222 	ASSERT(color < mmu_btop(shm_alignment));
9223 	return (color);
9224 #else
9225 	return (-1);
9226 #endif	/* VAC */
9227 }
9228 
9229 /*
9230  * This function will return the desired alignment for vac consistency
9231  * (vac color) given a virtual address.  If no vac is present it returns -1.
9232  */
9233 /*ARGSUSED*/
9234 int
9235 sfmmu_get_addrvcolor(caddr_t vaddr)
9236 {
9237 #ifdef VAC
9238 	if (cache & CACHE_VAC) {
9239 		return (addr_to_vcolor(vaddr));
9240 	} else {
9241 		return (-1);
9242 	}
9243 #else
9244 	return (-1);
9245 #endif	/* VAC */
9246 }
9247 
9248 #ifdef VAC
9249 /*
9250  * Check for conflicts.
9251  * A conflict exists if the new and existent mappings do not match in
9252  * their "shm_alignment fields. If conflicts exist, the existant mappings
9253  * are flushed unless one of them is locked. If one of them is locked, then
9254  * the mappings are flushed and converted to non-cacheable mappings.
9255  */
9256 static void
9257 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9258 {
9259 	struct hat *tmphat;
9260 	struct sf_hment *sfhmep, *tmphme = NULL;
9261 	struct hme_blk *hmeblkp;
9262 	int vcolor;
9263 	tte_t tte;
9264 
9265 	ASSERT(sfmmu_mlist_held(pp));
9266 	ASSERT(!PP_ISNC(pp));		/* page better be cacheable */
9267 
9268 	vcolor = addr_to_vcolor(addr);
9269 	if (PP_NEWPAGE(pp)) {
9270 		PP_SET_VCOLOR(pp, vcolor);
9271 		return;
9272 	}
9273 
9274 	if (PP_GET_VCOLOR(pp) == vcolor) {
9275 		return;
9276 	}
9277 
9278 	if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9279 		/*
9280 		 * Previous user of page had a different color
9281 		 * but since there are no current users
9282 		 * we just flush the cache and change the color.
9283 		 */
9284 		SFMMU_STAT(sf_pgcolor_conflict);
9285 		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9286 		PP_SET_VCOLOR(pp, vcolor);
9287 		return;
9288 	}
9289 
9290 	/*
9291 	 * If we get here we have a vac conflict with a current
9292 	 * mapping.  VAC conflict policy is as follows.
9293 	 * - The default is to unload the other mappings unless:
9294 	 * - If we have a large mapping we uncache the page.
9295 	 * We need to uncache the rest of the large page too.
9296 	 * - If any of the mappings are locked we uncache the page.
9297 	 * - If the requested mapping is inconsistent
9298 	 * with another mapping and that mapping
9299 	 * is in the same address space we have to
9300 	 * make it non-cached.  The default thing
9301 	 * to do is unload the inconsistent mapping
9302 	 * but if they are in the same address space
9303 	 * we run the risk of unmapping the pc or the
9304 	 * stack which we will use as we return to the user,
9305 	 * in which case we can then fault on the thing
9306 	 * we just unloaded and get into an infinite loop.
9307 	 */
9308 	if (PP_ISMAPPED_LARGE(pp)) {
9309 		int sz;
9310 
9311 		/*
9312 		 * Existing mapping is for big pages. We don't unload
9313 		 * existing big mappings to satisfy new mappings.
9314 		 * Always convert all mappings to TNC.
9315 		 */
9316 		sz = fnd_mapping_sz(pp);
9317 		pp = PP_GROUPLEADER(pp, sz);
9318 		SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9319 		sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9320 		    TTEPAGES(sz));
9321 
9322 		return;
9323 	}
9324 
9325 	/*
9326 	 * check if any mapping is in same as or if it is locked
9327 	 * since in that case we need to uncache.
9328 	 */
9329 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9330 		tmphme = sfhmep->hme_next;
9331 		if (IS_PAHME(sfhmep))
9332 			continue;
9333 		hmeblkp = sfmmu_hmetohblk(sfhmep);
9334 		if (hmeblkp->hblk_xhat_bit)
9335 			continue;
9336 		tmphat = hblktosfmmu(hmeblkp);
9337 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
9338 		ASSERT(TTE_IS_VALID(&tte));
9339 		if (hmeblkp->hblk_shared || tmphat == hat ||
9340 		    hmeblkp->hblk_lckcnt) {
9341 			/*
9342 			 * We have an uncache conflict
9343 			 */
9344 			SFMMU_STAT(sf_uncache_conflict);
9345 			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9346 			return;
9347 		}
9348 	}
9349 
9350 	/*
9351 	 * We have an unload conflict
9352 	 * We have already checked for LARGE mappings, therefore
9353 	 * the remaining mapping(s) must be TTE8K.
9354 	 */
9355 	SFMMU_STAT(sf_unload_conflict);
9356 
9357 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9358 		tmphme = sfhmep->hme_next;
9359 		if (IS_PAHME(sfhmep))
9360 			continue;
9361 		hmeblkp = sfmmu_hmetohblk(sfhmep);
9362 		if (hmeblkp->hblk_xhat_bit)
9363 			continue;
9364 		ASSERT(!hmeblkp->hblk_shared);
9365 		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9366 	}
9367 
9368 	if (PP_ISMAPPED_KPM(pp))
9369 		sfmmu_kpm_vac_unload(pp, addr);
9370 
9371 	/*
9372 	 * Unloads only do TLB flushes so we need to flush the
9373 	 * cache here.
9374 	 */
9375 	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9376 	PP_SET_VCOLOR(pp, vcolor);
9377 }
9378 
9379 /*
9380  * Whenever a mapping is unloaded and the page is in TNC state,
9381  * we see if the page can be made cacheable again. 'pp' is
9382  * the page that we just unloaded a mapping from, the size
9383  * of mapping that was unloaded is 'ottesz'.
9384  * Remark:
9385  * The recache policy for mpss pages can leave a performance problem
9386  * under the following circumstances:
9387  * . A large page in uncached mode has just been unmapped.
9388  * . All constituent pages are TNC due to a conflicting small mapping.
9389  * . There are many other, non conflicting, small mappings around for
9390  *   a lot of the constituent pages.
9391  * . We're called w/ the "old" groupleader page and the old ottesz,
9392  *   but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so
9393  *   we end up w/ TTE8K or npages == 1.
9394  * . We call tst_tnc w/ the old groupleader only, and if there is no
9395  *   conflict, we re-cache only this page.
9396  * . All other small mappings are not checked and will be left in TNC mode.
9397  * The problem is not very serious because:
9398  * . mpss is actually only defined for heap and stack, so the probability
9399  *   is not very high that a large page mapping exists in parallel to a small
9400  *   one (this is possible, but seems to be bad programming style in the
9401  *   appl).
9402  * . The problem gets a little bit more serious, when those TNC pages
9403  *   have to be mapped into kernel space, e.g. for networking.
9404  * . When VAC alias conflicts occur in applications, this is regarded
9405  *   as an application bug. So if kstat's show them, the appl should
9406  *   be changed anyway.
9407  */
9408 void
9409 conv_tnc(page_t *pp, int ottesz)
9410 {
9411 	int cursz, dosz;
9412 	pgcnt_t curnpgs, dopgs;
9413 	pgcnt_t pg64k;
9414 	page_t *pp2;
9415 
9416 	/*
9417 	 * Determine how big a range we check for TNC and find
9418 	 * leader page. cursz is the size of the biggest
9419 	 * mapping that still exist on 'pp'.
9420 	 */
9421 	if (PP_ISMAPPED_LARGE(pp)) {
9422 		cursz = fnd_mapping_sz(pp);
9423 	} else {
9424 		cursz = TTE8K;
9425 	}
9426 
9427 	if (ottesz >= cursz) {
9428 		dosz = ottesz;
9429 		pp2 = pp;
9430 	} else {
9431 		dosz = cursz;
9432 		pp2 = PP_GROUPLEADER(pp, dosz);
9433 	}
9434 
9435 	pg64k = TTEPAGES(TTE64K);
9436 	dopgs = TTEPAGES(dosz);
9437 
9438 	ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9439 
9440 	while (dopgs != 0) {
9441 		curnpgs = TTEPAGES(cursz);
9442 		if (tst_tnc(pp2, curnpgs)) {
9443 			SFMMU_STAT_ADD(sf_recache, curnpgs);
9444 			sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9445 			    curnpgs);
9446 		}
9447 
9448 		ASSERT(dopgs >= curnpgs);
9449 		dopgs -= curnpgs;
9450 
9451 		if (dopgs == 0) {
9452 			break;
9453 		}
9454 
9455 		pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9456 		if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9457 			cursz = fnd_mapping_sz(pp2);
9458 		} else {
9459 			cursz = TTE8K;
9460 		}
9461 	}
9462 }
9463 
9464 /*
9465  * Returns 1 if page(s) can be converted from TNC to cacheable setting,
9466  * returns 0 otherwise. Note that oaddr argument is valid for only
9467  * 8k pages.
9468  */
9469 int
9470 tst_tnc(page_t *pp, pgcnt_t npages)
9471 {
9472 	struct	sf_hment *sfhme;
9473 	struct	hme_blk *hmeblkp;
9474 	tte_t	tte;
9475 	caddr_t	vaddr;
9476 	int	clr_valid = 0;
9477 	int 	color, color1, bcolor;
9478 	int	i, ncolors;
9479 
9480 	ASSERT(pp != NULL);
9481 	ASSERT(!(cache & CACHE_WRITEBACK));
9482 
9483 	if (npages > 1) {
9484 		ncolors = CACHE_NUM_COLOR;
9485 	}
9486 
9487 	for (i = 0; i < npages; i++) {
9488 		ASSERT(sfmmu_mlist_held(pp));
9489 		ASSERT(PP_ISTNC(pp));
9490 		ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9491 
9492 		if (PP_ISPNC(pp)) {
9493 			return (0);
9494 		}
9495 
9496 		clr_valid = 0;
9497 		if (PP_ISMAPPED_KPM(pp)) {
9498 			caddr_t kpmvaddr;
9499 
9500 			ASSERT(kpm_enable);
9501 			kpmvaddr = hat_kpm_page2va(pp, 1);
9502 			ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9503 			color1 = addr_to_vcolor(kpmvaddr);
9504 			clr_valid = 1;
9505 		}
9506 
9507 		for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9508 			if (IS_PAHME(sfhme))
9509 				continue;
9510 			hmeblkp = sfmmu_hmetohblk(sfhme);
9511 			if (hmeblkp->hblk_xhat_bit)
9512 				continue;
9513 
9514 			sfmmu_copytte(&sfhme->hme_tte, &tte);
9515 			ASSERT(TTE_IS_VALID(&tte));
9516 
9517 			vaddr = tte_to_vaddr(hmeblkp, tte);
9518 			color = addr_to_vcolor(vaddr);
9519 
9520 			if (npages > 1) {
9521 				/*
9522 				 * If there is a big mapping, make sure
9523 				 * 8K mapping is consistent with the big
9524 				 * mapping.
9525 				 */
9526 				bcolor = i % ncolors;
9527 				if (color != bcolor) {
9528 					return (0);
9529 				}
9530 			}
9531 			if (!clr_valid) {
9532 				clr_valid = 1;
9533 				color1 = color;
9534 			}
9535 
9536 			if (color1 != color) {
9537 				return (0);
9538 			}
9539 		}
9540 
9541 		pp = PP_PAGENEXT(pp);
9542 	}
9543 
9544 	return (1);
9545 }
9546 
9547 void
9548 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9549 	pgcnt_t npages)
9550 {
9551 	kmutex_t *pmtx;
9552 	int i, ncolors, bcolor;
9553 	kpm_hlk_t *kpmp;
9554 	cpuset_t cpuset;
9555 
9556 	ASSERT(pp != NULL);
9557 	ASSERT(!(cache & CACHE_WRITEBACK));
9558 
9559 	kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9560 	pmtx = sfmmu_page_enter(pp);
9561 
9562 	/*
9563 	 * Fast path caching single unmapped page
9564 	 */
9565 	if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9566 	    flags == HAT_CACHE) {
9567 		PP_CLRTNC(pp);
9568 		PP_CLRPNC(pp);
9569 		sfmmu_page_exit(pmtx);
9570 		sfmmu_kpm_kpmp_exit(kpmp);
9571 		return;
9572 	}
9573 
9574 	/*
9575 	 * We need to capture all cpus in order to change cacheability
9576 	 * because we can't allow one cpu to access the same physical
9577 	 * page using a cacheable and a non-cachebale mapping at the same
9578 	 * time. Since we may end up walking the ism mapping list
9579 	 * have to grab it's lock now since we can't after all the
9580 	 * cpus have been captured.
9581 	 */
9582 	sfmmu_hat_lock_all();
9583 	mutex_enter(&ism_mlist_lock);
9584 	kpreempt_disable();
9585 	cpuset = cpu_ready_set;
9586 	xc_attention(cpuset);
9587 
9588 	if (npages > 1) {
9589 		/*
9590 		 * Make sure all colors are flushed since the
9591 		 * sfmmu_page_cache() only flushes one color-
9592 		 * it does not know big pages.
9593 		 */
9594 		ncolors = CACHE_NUM_COLOR;
9595 		if (flags & HAT_TMPNC) {
9596 			for (i = 0; i < ncolors; i++) {
9597 				sfmmu_cache_flushcolor(i, pp->p_pagenum);
9598 			}
9599 			cache_flush_flag = CACHE_NO_FLUSH;
9600 		}
9601 	}
9602 
9603 	for (i = 0; i < npages; i++) {
9604 
9605 		ASSERT(sfmmu_mlist_held(pp));
9606 
9607 		if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9608 
9609 			if (npages > 1) {
9610 				bcolor = i % ncolors;
9611 			} else {
9612 				bcolor = NO_VCOLOR;
9613 			}
9614 
9615 			sfmmu_page_cache(pp, flags, cache_flush_flag,
9616 			    bcolor);
9617 		}
9618 
9619 		pp = PP_PAGENEXT(pp);
9620 	}
9621 
9622 	xt_sync(cpuset);
9623 	xc_dismissed(cpuset);
9624 	mutex_exit(&ism_mlist_lock);
9625 	sfmmu_hat_unlock_all();
9626 	sfmmu_page_exit(pmtx);
9627 	sfmmu_kpm_kpmp_exit(kpmp);
9628 	kpreempt_enable();
9629 }
9630 
9631 /*
9632  * This function changes the virtual cacheability of all mappings to a
9633  * particular page.  When changing from uncache to cacheable the mappings will
9634  * only be changed if all of them have the same virtual color.
9635  * We need to flush the cache in all cpus.  It is possible that
9636  * a process referenced a page as cacheable but has sinced exited
9637  * and cleared the mapping list.  We still to flush it but have no
9638  * state so all cpus is the only alternative.
9639  */
9640 static void
9641 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9642 {
9643 	struct	sf_hment *sfhme;
9644 	struct	hme_blk *hmeblkp;
9645 	sfmmu_t *sfmmup;
9646 	tte_t	tte, ttemod;
9647 	caddr_t	vaddr;
9648 	int	ret, color;
9649 	pfn_t	pfn;
9650 
9651 	color = bcolor;
9652 	pfn = pp->p_pagenum;
9653 
9654 	for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9655 
9656 		if (IS_PAHME(sfhme))
9657 			continue;
9658 		hmeblkp = sfmmu_hmetohblk(sfhme);
9659 
9660 		if (hmeblkp->hblk_xhat_bit)
9661 			continue;
9662 
9663 		sfmmu_copytte(&sfhme->hme_tte, &tte);
9664 		ASSERT(TTE_IS_VALID(&tte));
9665 		vaddr = tte_to_vaddr(hmeblkp, tte);
9666 		color = addr_to_vcolor(vaddr);
9667 
9668 #ifdef DEBUG
9669 		if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9670 			ASSERT(color == bcolor);
9671 		}
9672 #endif
9673 
9674 		ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9675 
9676 		ttemod = tte;
9677 		if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9678 			TTE_CLR_VCACHEABLE(&ttemod);
9679 		} else {	/* flags & HAT_CACHE */
9680 			TTE_SET_VCACHEABLE(&ttemod);
9681 		}
9682 		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9683 		if (ret < 0) {
9684 			/*
9685 			 * Since all cpus are captured modifytte should not
9686 			 * fail.
9687 			 */
9688 			panic("sfmmu_page_cache: write to tte failed");
9689 		}
9690 
9691 		sfmmup = hblktosfmmu(hmeblkp);
9692 		if (cache_flush_flag == CACHE_FLUSH) {
9693 			/*
9694 			 * Flush TSBs, TLBs and caches
9695 			 */
9696 			if (hmeblkp->hblk_shared) {
9697 				sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9698 				uint_t rid = hmeblkp->hblk_tag.htag_rid;
9699 				sf_region_t *rgnp;
9700 				ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9701 				ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9702 				ASSERT(srdp != NULL);
9703 				rgnp = srdp->srd_hmergnp[rid];
9704 				SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9705 				    srdp, rgnp, rid);
9706 				(void) sfmmu_rgntlb_demap(vaddr, rgnp,
9707 				    hmeblkp, 0);
9708 				sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9709 			} else if (sfmmup->sfmmu_ismhat) {
9710 				if (flags & HAT_CACHE) {
9711 					SFMMU_STAT(sf_ism_recache);
9712 				} else {
9713 					SFMMU_STAT(sf_ism_uncache);
9714 				}
9715 				sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9716 				    pfn, CACHE_FLUSH);
9717 			} else {
9718 				sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9719 				    pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9720 			}
9721 
9722 			/*
9723 			 * all cache entries belonging to this pfn are
9724 			 * now flushed.
9725 			 */
9726 			cache_flush_flag = CACHE_NO_FLUSH;
9727 		} else {
9728 			/*
9729 			 * Flush only TSBs and TLBs.
9730 			 */
9731 			if (hmeblkp->hblk_shared) {
9732 				sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9733 				uint_t rid = hmeblkp->hblk_tag.htag_rid;
9734 				sf_region_t *rgnp;
9735 				ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9736 				ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9737 				ASSERT(srdp != NULL);
9738 				rgnp = srdp->srd_hmergnp[rid];
9739 				SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9740 				    srdp, rgnp, rid);
9741 				(void) sfmmu_rgntlb_demap(vaddr, rgnp,
9742 				    hmeblkp, 0);
9743 			} else if (sfmmup->sfmmu_ismhat) {
9744 				if (flags & HAT_CACHE) {
9745 					SFMMU_STAT(sf_ism_recache);
9746 				} else {
9747 					SFMMU_STAT(sf_ism_uncache);
9748 				}
9749 				sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9750 				    pfn, CACHE_NO_FLUSH);
9751 			} else {
9752 				sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9753 			}
9754 		}
9755 	}
9756 
9757 	if (PP_ISMAPPED_KPM(pp))
9758 		sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9759 
9760 	switch (flags) {
9761 
9762 		default:
9763 			panic("sfmmu_pagecache: unknown flags");
9764 			break;
9765 
9766 		case HAT_CACHE:
9767 			PP_CLRTNC(pp);
9768 			PP_CLRPNC(pp);
9769 			PP_SET_VCOLOR(pp, color);
9770 			break;
9771 
9772 		case HAT_TMPNC:
9773 			PP_SETTNC(pp);
9774 			PP_SET_VCOLOR(pp, NO_VCOLOR);
9775 			break;
9776 
9777 		case HAT_UNCACHE:
9778 			PP_SETPNC(pp);
9779 			PP_CLRTNC(pp);
9780 			PP_SET_VCOLOR(pp, NO_VCOLOR);
9781 			break;
9782 	}
9783 }
9784 #endif	/* VAC */
9785 
9786 
9787 /*
9788  * Wrapper routine used to return a context.
9789  *
9790  * It's the responsibility of the caller to guarantee that the
9791  * process serializes on calls here by taking the HAT lock for
9792  * the hat.
9793  *
9794  */
9795 static void
9796 sfmmu_get_ctx(sfmmu_t *sfmmup)
9797 {
9798 	mmu_ctx_t *mmu_ctxp;
9799 	uint_t pstate_save;
9800 	int ret;
9801 
9802 	ASSERT(sfmmu_hat_lock_held(sfmmup));
9803 	ASSERT(sfmmup != ksfmmup);
9804 
9805 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9806 		sfmmu_setup_tsbinfo(sfmmup);
9807 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9808 	}
9809 
9810 	kpreempt_disable();
9811 
9812 	mmu_ctxp = CPU_MMU_CTXP(CPU);
9813 	ASSERT(mmu_ctxp);
9814 	ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9815 	ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9816 
9817 	/*
9818 	 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU.
9819 	 */
9820 	if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9821 		sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9822 
9823 	/*
9824 	 * Let the MMU set up the page sizes to use for
9825 	 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9826 	 */
9827 	if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9828 		mmu_set_ctx_page_sizes(sfmmup);
9829 	}
9830 
9831 	/*
9832 	 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
9833 	 * interrupts disabled to prevent race condition with wrap-around
9834 	 * ctx invalidatation. In sun4v, ctx invalidation also involves
9835 	 * a HV call to set the number of TSBs to 0. If interrupts are not
9836 	 * disabled until after sfmmu_load_mmustate is complete TSBs may
9837 	 * become assigned to INVALID_CONTEXT. This is not allowed.
9838 	 */
9839 	pstate_save = sfmmu_disable_intrs();
9840 
9841 	if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9842 	    sfmmup->sfmmu_scdp != NULL) {
9843 		sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9844 		sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9845 		ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9846 		/* debug purpose only */
9847 		ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9848 		    != INVALID_CONTEXT);
9849 	}
9850 	sfmmu_load_mmustate(sfmmup);
9851 
9852 	sfmmu_enable_intrs(pstate_save);
9853 
9854 	kpreempt_enable();
9855 }
9856 
9857 /*
9858  * When all cnums are used up in a MMU, cnum will wrap around to the
9859  * next generation and start from 2.
9860  */
9861 static void
9862 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9863 {
9864 
9865 	/* caller must have disabled the preemption */
9866 	ASSERT(curthread->t_preempt >= 1);
9867 	ASSERT(mmu_ctxp != NULL);
9868 
9869 	/* acquire Per-MMU (PM) spin lock */
9870 	mutex_enter(&mmu_ctxp->mmu_lock);
9871 
9872 	/* re-check to see if wrap-around is needed */
9873 	if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9874 		goto done;
9875 
9876 	SFMMU_MMU_STAT(mmu_wrap_around);
9877 
9878 	/* update gnum */
9879 	ASSERT(mmu_ctxp->mmu_gnum != 0);
9880 	mmu_ctxp->mmu_gnum++;
9881 	if (mmu_ctxp->mmu_gnum == 0 ||
9882 	    mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
9883 		cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.",
9884 		    (void *)mmu_ctxp);
9885 	}
9886 
9887 	if (mmu_ctxp->mmu_ncpus > 1) {
9888 		cpuset_t cpuset;
9889 
9890 		membar_enter(); /* make sure updated gnum visible */
9891 
9892 		SFMMU_XCALL_STATS(NULL);
9893 
9894 		/* xcall to others on the same MMU to invalidate ctx */
9895 		cpuset = mmu_ctxp->mmu_cpuset;
9896 		ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9897 		CPUSET_DEL(cpuset, CPU->cpu_id);
9898 		CPUSET_AND(cpuset, cpu_ready_set);
9899 
9900 		/*
9901 		 * Pass in INVALID_CONTEXT as the first parameter to
9902 		 * sfmmu_raise_tsb_exception, which invalidates the context
9903 		 * of any process running on the CPUs in the MMU.
9904 		 */
9905 		xt_some(cpuset, sfmmu_raise_tsb_exception,
9906 		    INVALID_CONTEXT, INVALID_CONTEXT);
9907 		xt_sync(cpuset);
9908 
9909 		SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9910 	}
9911 
9912 	if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9913 		sfmmu_setctx_sec(INVALID_CONTEXT);
9914 		sfmmu_clear_utsbinfo();
9915 	}
9916 
9917 	/*
9918 	 * No xcall is needed here. For sun4u systems all CPUs in context
9919 	 * domain share a single physical MMU therefore it's enough to flush
9920 	 * TLB on local CPU. On sun4v systems we use 1 global context
9921 	 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception
9922 	 * handler. Note that vtag_flushall_uctxs() is called
9923 	 * for Ultra II machine, where the equivalent flushall functionality
9924 	 * is implemented in SW, and only user ctx TLB entries are flushed.
9925 	 */
9926 	if (&vtag_flushall_uctxs != NULL) {
9927 		vtag_flushall_uctxs();
9928 	} else {
9929 		vtag_flushall();
9930 	}
9931 
9932 	/* reset mmu cnum, skips cnum 0 and 1 */
9933 	if (reset_cnum == B_TRUE)
9934 		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9935 
9936 done:
9937 	mutex_exit(&mmu_ctxp->mmu_lock);
9938 }
9939 
9940 
9941 /*
9942  * For multi-threaded process, set the process context to INVALID_CONTEXT
9943  * so that it faults and reloads the MMU state from TL=0. For single-threaded
9944  * process, we can just load the MMU state directly without having to
9945  * set context invalid. Caller must hold the hat lock since we don't
9946  * acquire it here.
9947  */
9948 static void
9949 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9950 {
9951 	uint_t cnum;
9952 	uint_t pstate_save;
9953 
9954 	ASSERT(sfmmup != ksfmmup);
9955 	ASSERT(sfmmu_hat_lock_held(sfmmup));
9956 
9957 	kpreempt_disable();
9958 
9959 	/*
9960 	 * We check whether the pass'ed-in sfmmup is the same as the
9961 	 * current running proc. This is to makes sure the current proc
9962 	 * stays single-threaded if it already is.
9963 	 */
9964 	if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9965 	    (curthread->t_procp->p_lwpcnt == 1)) {
9966 		/* single-thread */
9967 		cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9968 		if (cnum != INVALID_CONTEXT) {
9969 			uint_t curcnum;
9970 			/*
9971 			 * Disable interrupts to prevent race condition
9972 			 * with sfmmu_ctx_wrap_around ctx invalidation.
9973 			 * In sun4v, ctx invalidation involves setting
9974 			 * TSB to NULL, hence, interrupts should be disabled
9975 			 * untill after sfmmu_load_mmustate is completed.
9976 			 */
9977 			pstate_save = sfmmu_disable_intrs();
9978 			curcnum = sfmmu_getctx_sec();
9979 			if (curcnum == cnum)
9980 				sfmmu_load_mmustate(sfmmup);
9981 			sfmmu_enable_intrs(pstate_save);
9982 			ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9983 		}
9984 	} else {
9985 		/*
9986 		 * multi-thread
9987 		 * or when sfmmup is not the same as the curproc.
9988 		 */
9989 		sfmmu_invalidate_ctx(sfmmup);
9990 	}
9991 
9992 	kpreempt_enable();
9993 }
9994 
9995 
9996 /*
9997  * Replace the specified TSB with a new TSB.  This function gets called when
9998  * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
9999  * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
10000  * (8K).
10001  *
10002  * Caller must hold the HAT lock, but should assume any tsb_info
10003  * pointers it has are no longer valid after calling this function.
10004  *
10005  * Return values:
10006  *	TSB_ALLOCFAIL	Failed to allocate a TSB, due to memory constraints
10007  *	TSB_LOSTRACE	HAT is busy, i.e. another thread is already doing
10008  *			something to this tsbinfo/TSB
10009  *	TSB_SUCCESS	Operation succeeded
10010  */
10011 static tsb_replace_rc_t
10012 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
10013     hatlock_t *hatlockp, uint_t flags)
10014 {
10015 	struct tsb_info *new_tsbinfo = NULL;
10016 	struct tsb_info *curtsb, *prevtsb;
10017 	uint_t tte_sz_mask;
10018 	int i;
10019 
10020 	ASSERT(sfmmup != ksfmmup);
10021 	ASSERT(sfmmup->sfmmu_ismhat == 0);
10022 	ASSERT(sfmmu_hat_lock_held(sfmmup));
10023 	ASSERT(szc <= tsb_max_growsize);
10024 
10025 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
10026 		return (TSB_LOSTRACE);
10027 
10028 	/*
10029 	 * Find the tsb_info ahead of this one in the list, and
10030 	 * also make sure that the tsb_info passed in really
10031 	 * exists!
10032 	 */
10033 	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
10034 	    curtsb != old_tsbinfo && curtsb != NULL;
10035 	    prevtsb = curtsb, curtsb = curtsb->tsb_next)
10036 		;
10037 	ASSERT(curtsb != NULL);
10038 
10039 	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10040 		/*
10041 		 * The process is swapped out, so just set the new size
10042 		 * code.  When it swaps back in, we'll allocate a new one
10043 		 * of the new chosen size.
10044 		 */
10045 		curtsb->tsb_szc = szc;
10046 		return (TSB_SUCCESS);
10047 	}
10048 	SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
10049 
10050 	tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
10051 
10052 	/*
10053 	 * All initialization is done inside of sfmmu_tsbinfo_alloc().
10054 	 * If we fail to allocate a TSB, exit.
10055 	 *
10056 	 * If tsb grows with new tsb size > 4M and old tsb size < 4M,
10057 	 * then try 4M slab after the initial alloc fails.
10058 	 *
10059 	 * If tsb swapin with tsb size > 4M, then try 4M after the
10060 	 * initial alloc fails.
10061 	 */
10062 	sfmmu_hat_exit(hatlockp);
10063 	if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
10064 	    tte_sz_mask, flags, sfmmup) &&
10065 	    (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
10066 	    (!(flags & TSB_SWAPIN) &&
10067 	    (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
10068 	    sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
10069 	    tte_sz_mask, flags, sfmmup))) {
10070 		(void) sfmmu_hat_enter(sfmmup);
10071 		if (!(flags & TSB_SWAPIN))
10072 			SFMMU_STAT(sf_tsb_resize_failures);
10073 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10074 		return (TSB_ALLOCFAIL);
10075 	}
10076 	(void) sfmmu_hat_enter(sfmmup);
10077 
10078 	/*
10079 	 * Re-check to make sure somebody else didn't muck with us while we
10080 	 * didn't hold the HAT lock.  If the process swapped out, fine, just
10081 	 * exit; this can happen if we try to shrink the TSB from the context
10082 	 * of another process (such as on an ISM unmap), though it is rare.
10083 	 */
10084 	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10085 		SFMMU_STAT(sf_tsb_resize_failures);
10086 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10087 		sfmmu_hat_exit(hatlockp);
10088 		sfmmu_tsbinfo_free(new_tsbinfo);
10089 		(void) sfmmu_hat_enter(sfmmup);
10090 		return (TSB_LOSTRACE);
10091 	}
10092 
10093 #ifdef	DEBUG
10094 	/* Reverify that the tsb_info still exists.. for debugging only */
10095 	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
10096 	    curtsb != old_tsbinfo && curtsb != NULL;
10097 	    prevtsb = curtsb, curtsb = curtsb->tsb_next)
10098 		;
10099 	ASSERT(curtsb != NULL);
10100 #endif	/* DEBUG */
10101 
10102 	/*
10103 	 * Quiesce any CPUs running this process on their next TLB miss
10104 	 * so they atomically see the new tsb_info.  We temporarily set the
10105 	 * context to invalid context so new threads that come on processor
10106 	 * after we do the xcall to cpusran will also serialize behind the
10107 	 * HAT lock on TLB miss and will see the new TSB.  Since this short
10108 	 * race with a new thread coming on processor is relatively rare,
10109 	 * this synchronization mechanism should be cheaper than always
10110 	 * pausing all CPUs for the duration of the setup, which is what
10111 	 * the old implementation did.  This is particuarly true if we are
10112 	 * copying a huge chunk of memory around during that window.
10113 	 *
10114 	 * The memory barriers are to make sure things stay consistent
10115 	 * with resume() since it does not hold the HAT lock while
10116 	 * walking the list of tsb_info structures.
10117 	 */
10118 	if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
10119 		/* The TSB is either growing or shrinking. */
10120 		sfmmu_invalidate_ctx(sfmmup);
10121 	} else {
10122 		/*
10123 		 * It is illegal to swap in TSBs from a process other
10124 		 * than a process being swapped in.  This in turn
10125 		 * implies we do not have a valid MMU context here
10126 		 * since a process needs one to resolve translation
10127 		 * misses.
10128 		 */
10129 		ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
10130 	}
10131 
10132 #ifdef DEBUG
10133 	ASSERT(max_mmu_ctxdoms > 0);
10134 
10135 	/*
10136 	 * Process should have INVALID_CONTEXT on all MMUs
10137 	 */
10138 	for (i = 0; i < max_mmu_ctxdoms; i++) {
10139 
10140 		ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
10141 	}
10142 #endif
10143 
10144 	new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
10145 	membar_stst();	/* strict ordering required */
10146 	if (prevtsb)
10147 		prevtsb->tsb_next = new_tsbinfo;
10148 	else
10149 		sfmmup->sfmmu_tsb = new_tsbinfo;
10150 	membar_enter();	/* make sure new TSB globally visible */
10151 
10152 	/*
10153 	 * We need to migrate TSB entries from the old TSB to the new TSB
10154 	 * if tsb_remap_ttes is set and the TSB is growing.
10155 	 */
10156 	if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
10157 		sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
10158 
10159 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10160 
10161 	/*
10162 	 * Drop the HAT lock to free our old tsb_info.
10163 	 */
10164 	sfmmu_hat_exit(hatlockp);
10165 
10166 	if ((flags & TSB_GROW) == TSB_GROW) {
10167 		SFMMU_STAT(sf_tsb_grow);
10168 	} else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
10169 		SFMMU_STAT(sf_tsb_shrink);
10170 	}
10171 
10172 	sfmmu_tsbinfo_free(old_tsbinfo);
10173 
10174 	(void) sfmmu_hat_enter(sfmmup);
10175 	return (TSB_SUCCESS);
10176 }
10177 
10178 /*
10179  * This function will re-program hat pgsz array, and invalidate the
10180  * process' context, forcing the process to switch to another
10181  * context on the next TLB miss, and therefore start using the
10182  * TLB that is reprogrammed for the new page sizes.
10183  */
10184 void
10185 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
10186 {
10187 	int i;
10188 	hatlock_t *hatlockp = NULL;
10189 
10190 	hatlockp = sfmmu_hat_enter(sfmmup);
10191 	/* USIII+-IV+ optimization, requires hat lock */
10192 	if (tmp_pgsz) {
10193 		for (i = 0; i < mmu_page_sizes; i++)
10194 			sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
10195 	}
10196 	SFMMU_STAT(sf_tlb_reprog_pgsz);
10197 
10198 	sfmmu_invalidate_ctx(sfmmup);
10199 
10200 	sfmmu_hat_exit(hatlockp);
10201 }
10202 
10203 /*
10204  * The scd_rttecnt field in the SCD must be updated to take account of the
10205  * regions which it contains.
10206  */
10207 static void
10208 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
10209 {
10210 	uint_t rid;
10211 	uint_t i, j;
10212 	ulong_t w;
10213 	sf_region_t *rgnp;
10214 
10215 	ASSERT(srdp != NULL);
10216 
10217 	for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
10218 		if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
10219 			continue;
10220 		}
10221 
10222 		j = 0;
10223 		while (w) {
10224 			if (!(w & 0x1)) {
10225 				j++;
10226 				w >>= 1;
10227 				continue;
10228 			}
10229 			rid = (i << BT_ULSHIFT) | j;
10230 			j++;
10231 			w >>= 1;
10232 
10233 			ASSERT(SFMMU_IS_SHMERID_VALID(rid));
10234 			ASSERT(rid < SFMMU_MAX_HME_REGIONS);
10235 			rgnp = srdp->srd_hmergnp[rid];
10236 			ASSERT(rgnp->rgn_refcnt > 0);
10237 			ASSERT(rgnp->rgn_id == rid);
10238 
10239 			scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
10240 			    rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
10241 
10242 			/*
10243 			 * Maintain the tsb0 inflation cnt for the regions
10244 			 * in the SCD.
10245 			 */
10246 			if (rgnp->rgn_pgszc >= TTE4M) {
10247 				scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
10248 				    rgnp->rgn_size >>
10249 				    (TTE_PAGE_SHIFT(TTE8K) + 2);
10250 			}
10251 		}
10252 	}
10253 }
10254 
10255 /*
10256  * This function assumes that there are either four or six supported page
10257  * sizes and at most two programmable TLBs, so we need to decide which
10258  * page sizes are most important and then tell the MMU layer so it
10259  * can adjust the TLB page sizes accordingly (if supported).
10260  *
10261  * If these assumptions change, this function will need to be
10262  * updated to support whatever the new limits are.
10263  *
10264  * The growing flag is nonzero if we are growing the address space,
10265  * and zero if it is shrinking.  This allows us to decide whether
10266  * to grow or shrink our TSB, depending upon available memory
10267  * conditions.
10268  */
10269 static void
10270 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
10271 {
10272 	uint64_t ttecnt[MMU_PAGE_SIZES];
10273 	uint64_t tte8k_cnt, tte4m_cnt;
10274 	uint8_t i;
10275 	int sectsb_thresh;
10276 
10277 	/*
10278 	 * Kernel threads, processes with small address spaces not using
10279 	 * large pages, and dummy ISM HATs need not apply.
10280 	 */
10281 	if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
10282 		return;
10283 
10284 	if (!SFMMU_LGPGS_INUSE(sfmmup) &&
10285 	    sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
10286 		return;
10287 
10288 	for (i = 0; i < mmu_page_sizes; i++) {
10289 		ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
10290 		    sfmmup->sfmmu_ismttecnt[i];
10291 	}
10292 
10293 	/* Check pagesizes in use, and possibly reprogram DTLB. */
10294 	if (&mmu_check_page_sizes)
10295 		mmu_check_page_sizes(sfmmup, ttecnt);
10296 
10297 	/*
10298 	 * Calculate the number of 8k ttes to represent the span of these
10299 	 * pages.
10300 	 */
10301 	tte8k_cnt = ttecnt[TTE8K] +
10302 	    (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
10303 	    (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
10304 	if (mmu_page_sizes == max_mmu_page_sizes) {
10305 		tte4m_cnt = ttecnt[TTE4M] +
10306 		    (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
10307 		    (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
10308 	} else {
10309 		tte4m_cnt = ttecnt[TTE4M];
10310 	}
10311 
10312 	/*
10313 	 * Inflate tte8k_cnt to allow for region large page allocation failure.
10314 	 */
10315 	tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
10316 
10317 	/*
10318 	 * Inflate TSB sizes by a factor of 2 if this process
10319 	 * uses 4M text pages to minimize extra conflict misses
10320 	 * in the first TSB since without counting text pages
10321 	 * 8K TSB may become too small.
10322 	 *
10323 	 * Also double the size of the second TSB to minimize
10324 	 * extra conflict misses due to competition between 4M text pages
10325 	 * and data pages.
10326 	 *
10327 	 * We need to adjust the second TSB allocation threshold by the
10328 	 * inflation factor, since there is no point in creating a second
10329 	 * TSB when we know all the mappings can fit in the I/D TLBs.
10330 	 */
10331 	sectsb_thresh = tsb_sectsb_threshold;
10332 	if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10333 		tte8k_cnt <<= 1;
10334 		tte4m_cnt <<= 1;
10335 		sectsb_thresh <<= 1;
10336 	}
10337 
10338 	/*
10339 	 * Check to see if our TSB is the right size; we may need to
10340 	 * grow or shrink it.  If the process is small, our work is
10341 	 * finished at this point.
10342 	 */
10343 	if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10344 		return;
10345 	}
10346 	sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10347 }
10348 
10349 static void
10350 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10351 	uint64_t tte4m_cnt, int sectsb_thresh)
10352 {
10353 	int tsb_bits;
10354 	uint_t tsb_szc;
10355 	struct tsb_info *tsbinfop;
10356 	hatlock_t *hatlockp = NULL;
10357 
10358 	hatlockp = sfmmu_hat_enter(sfmmup);
10359 	ASSERT(hatlockp != NULL);
10360 	tsbinfop = sfmmup->sfmmu_tsb;
10361 	ASSERT(tsbinfop != NULL);
10362 
10363 	/*
10364 	 * If we're growing, select the size based on RSS.  If we're
10365 	 * shrinking, leave some room so we don't have to turn around and
10366 	 * grow again immediately.
10367 	 */
10368 	if (growing)
10369 		tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10370 	else
10371 		tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10372 
10373 	if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10374 	    (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10375 		(void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10376 		    hatlockp, TSB_SHRINK);
10377 	} else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10378 		(void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10379 		    hatlockp, TSB_GROW);
10380 	}
10381 	tsbinfop = sfmmup->sfmmu_tsb;
10382 
10383 	/*
10384 	 * With the TLB and first TSB out of the way, we need to see if
10385 	 * we need a second TSB for 4M pages.  If we managed to reprogram
10386 	 * the TLB page sizes above, the process will start using this new
10387 	 * TSB right away; otherwise, it will start using it on the next
10388 	 * context switch.  Either way, it's no big deal so there's no
10389 	 * synchronization with the trap handlers here unless we grow the
10390 	 * TSB (in which case it's required to prevent using the old one
10391 	 * after it's freed). Note: second tsb is required for 32M/256M
10392 	 * page sizes.
10393 	 */
10394 	if (tte4m_cnt > sectsb_thresh) {
10395 		/*
10396 		 * If we're growing, select the size based on RSS.  If we're
10397 		 * shrinking, leave some room so we don't have to turn
10398 		 * around and grow again immediately.
10399 		 */
10400 		if (growing)
10401 			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10402 		else
10403 			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10404 		if (tsbinfop->tsb_next == NULL) {
10405 			struct tsb_info *newtsb;
10406 			int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10407 			    0 : TSB_ALLOC;
10408 
10409 			sfmmu_hat_exit(hatlockp);
10410 
10411 			/*
10412 			 * Try to allocate a TSB for 4[32|256]M pages.  If we
10413 			 * can't get the size we want, retry w/a minimum sized
10414 			 * TSB.  If that still didn't work, give up; we can
10415 			 * still run without one.
10416 			 */
10417 			tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10418 			    TSB4M|TSB32M|TSB256M:TSB4M;
10419 			if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10420 			    allocflags, sfmmup)) &&
10421 			    (tsb_szc <= TSB_4M_SZCODE ||
10422 			    sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10423 			    tsb_bits, allocflags, sfmmup)) &&
10424 			    sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10425 			    tsb_bits, allocflags, sfmmup)) {
10426 				return;
10427 			}
10428 
10429 			hatlockp = sfmmu_hat_enter(sfmmup);
10430 
10431 			sfmmu_invalidate_ctx(sfmmup);
10432 
10433 			if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10434 				sfmmup->sfmmu_tsb->tsb_next = newtsb;
10435 				SFMMU_STAT(sf_tsb_sectsb_create);
10436 				sfmmu_hat_exit(hatlockp);
10437 				return;
10438 			} else {
10439 				/*
10440 				 * It's annoying, but possible for us
10441 				 * to get here.. we dropped the HAT lock
10442 				 * because of locking order in the kmem
10443 				 * allocator, and while we were off getting
10444 				 * our memory, some other thread decided to
10445 				 * do us a favor and won the race to get a
10446 				 * second TSB for this process.  Sigh.
10447 				 */
10448 				sfmmu_hat_exit(hatlockp);
10449 				sfmmu_tsbinfo_free(newtsb);
10450 				return;
10451 			}
10452 		}
10453 
10454 		/*
10455 		 * We have a second TSB, see if it's big enough.
10456 		 */
10457 		tsbinfop = tsbinfop->tsb_next;
10458 
10459 		/*
10460 		 * Check to see if our second TSB is the right size;
10461 		 * we may need to grow or shrink it.
10462 		 * To prevent thrashing (e.g. growing the TSB on a
10463 		 * subsequent map operation), only try to shrink if
10464 		 * the TSB reach exceeds twice the virtual address
10465 		 * space size.
10466 		 */
10467 		if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10468 		    (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10469 			(void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10470 			    tsb_szc, hatlockp, TSB_SHRINK);
10471 		} else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10472 		    TSB_OK_GROW()) {
10473 			(void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10474 			    tsb_szc, hatlockp, TSB_GROW);
10475 		}
10476 	}
10477 
10478 	sfmmu_hat_exit(hatlockp);
10479 }
10480 
10481 /*
10482  * Free up a sfmmu
10483  * Since the sfmmu is currently embedded in the hat struct we simply zero
10484  * out our fields and free up the ism map blk list if any.
10485  */
10486 static void
10487 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10488 {
10489 	ism_blk_t	*blkp, *nx_blkp;
10490 #ifdef	DEBUG
10491 	ism_map_t	*map;
10492 	int 		i;
10493 #endif
10494 
10495 	ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10496 	ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10497 	ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10498 	ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10499 	ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10500 	ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10501 	ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10502 
10503 	sfmmup->sfmmu_free = 0;
10504 	sfmmup->sfmmu_ismhat = 0;
10505 
10506 	blkp = sfmmup->sfmmu_iblk;
10507 	sfmmup->sfmmu_iblk = NULL;
10508 
10509 	while (blkp) {
10510 #ifdef	DEBUG
10511 		map = blkp->iblk_maps;
10512 		for (i = 0; i < ISM_MAP_SLOTS; i++) {
10513 			ASSERT(map[i].imap_seg == 0);
10514 			ASSERT(map[i].imap_ismhat == NULL);
10515 			ASSERT(map[i].imap_ment == NULL);
10516 		}
10517 #endif
10518 		nx_blkp = blkp->iblk_next;
10519 		blkp->iblk_next = NULL;
10520 		blkp->iblk_nextpa = (uint64_t)-1;
10521 		kmem_cache_free(ism_blk_cache, blkp);
10522 		blkp = nx_blkp;
10523 	}
10524 }
10525 
10526 /*
10527  * Locking primitves accessed by HATLOCK macros
10528  */
10529 
10530 #define	SFMMU_SPL_MTX	(0x0)
10531 #define	SFMMU_ML_MTX	(0x1)
10532 
10533 #define	SFMMU_MLSPL_MTX(type, pg)	(((type) == SFMMU_SPL_MTX) ? \
10534 					    SPL_HASH(pg) : MLIST_HASH(pg))
10535 
10536 kmutex_t *
10537 sfmmu_page_enter(struct page *pp)
10538 {
10539 	return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10540 }
10541 
10542 void
10543 sfmmu_page_exit(kmutex_t *spl)
10544 {
10545 	mutex_exit(spl);
10546 }
10547 
10548 int
10549 sfmmu_page_spl_held(struct page *pp)
10550 {
10551 	return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10552 }
10553 
10554 kmutex_t *
10555 sfmmu_mlist_enter(struct page *pp)
10556 {
10557 	return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10558 }
10559 
10560 void
10561 sfmmu_mlist_exit(kmutex_t *mml)
10562 {
10563 	mutex_exit(mml);
10564 }
10565 
10566 int
10567 sfmmu_mlist_held(struct page *pp)
10568 {
10569 
10570 	return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10571 }
10572 
10573 /*
10574  * Common code for sfmmu_mlist_enter() and sfmmu_page_enter().  For
10575  * sfmmu_mlist_enter() case mml_table lock array is used and for
10576  * sfmmu_page_enter() sfmmu_page_lock lock array is used.
10577  *
10578  * The lock is taken on a root page so that it protects an operation on all
10579  * constituent pages of a large page pp belongs to.
10580  *
10581  * The routine takes a lock from the appropriate array. The lock is determined
10582  * by hashing the root page. After taking the lock this routine checks if the
10583  * root page has the same size code that was used to determine the root (i.e
10584  * that root hasn't changed).  If root page has the expected p_szc field we
10585  * have the right lock and it's returned to the caller. If root's p_szc
10586  * decreased we release the lock and retry from the beginning.  This case can
10587  * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10588  * value and taking the lock. The number of retries due to p_szc decrease is
10589  * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10590  * determined by hashing pp itself.
10591  *
10592  * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10593  * possible that p_szc can increase. To increase p_szc a thread has to lock
10594  * all constituent pages EXCL and do hat_pageunload() on all of them. All the
10595  * callers that don't hold a page locked recheck if hmeblk through which pp
10596  * was found still maps this pp.  If it doesn't map it anymore returned lock
10597  * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
10598  * p_szc increase after taking the lock it returns this lock without further
10599  * retries because in this case the caller doesn't care about which lock was
10600  * taken. The caller will drop it right away.
10601  *
10602  * After the routine returns it's guaranteed that hat_page_demote() can't
10603  * change p_szc field of any of constituent pages of a large page pp belongs
10604  * to as long as pp was either locked at least SHARED prior to this call or
10605  * the caller finds that hment that pointed to this pp still references this
10606  * pp (this also assumes that the caller holds hme hash bucket lock so that
10607  * the same pp can't be remapped into the same hmeblk after it was unmapped by
10608  * hat_pageunload()).
10609  */
10610 static kmutex_t *
10611 sfmmu_mlspl_enter(struct page *pp, int type)
10612 {
10613 	kmutex_t	*mtx;
10614 	uint_t		prev_rszc = UINT_MAX;
10615 	page_t		*rootpp;
10616 	uint_t		szc;
10617 	uint_t		rszc;
10618 	uint_t		pszc = pp->p_szc;
10619 
10620 	ASSERT(pp != NULL);
10621 
10622 again:
10623 	if (pszc == 0) {
10624 		mtx = SFMMU_MLSPL_MTX(type, pp);
10625 		mutex_enter(mtx);
10626 		return (mtx);
10627 	}
10628 
10629 	/* The lock lives in the root page */
10630 	rootpp = PP_GROUPLEADER(pp, pszc);
10631 	mtx = SFMMU_MLSPL_MTX(type, rootpp);
10632 	mutex_enter(mtx);
10633 
10634 	/*
10635 	 * Return mml in the following 3 cases:
10636 	 *
10637 	 * 1) If pp itself is root since if its p_szc decreased before we took
10638 	 * the lock pp is still the root of smaller szc page. And if its p_szc
10639 	 * increased it doesn't matter what lock we return (see comment in
10640 	 * front of this routine).
10641 	 *
10642 	 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10643 	 * large page we have the right lock since any previous potential
10644 	 * hat_page_demote() is done demoting from greater than current root's
10645 	 * p_szc because hat_page_demote() changes root's p_szc last. No
10646 	 * further hat_page_demote() can start or be in progress since it
10647 	 * would need the same lock we currently hold.
10648 	 *
10649 	 * 3) If rootpp's p_szc increased since previous iteration it doesn't
10650 	 * matter what lock we return (see comment in front of this routine).
10651 	 */
10652 	if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10653 	    rszc >= prev_rszc) {
10654 		return (mtx);
10655 	}
10656 
10657 	/*
10658 	 * hat_page_demote() could have decreased root's p_szc.
10659 	 * In this case pp's p_szc must also be smaller than pszc.
10660 	 * Retry.
10661 	 */
10662 	if (rszc < pszc) {
10663 		szc = pp->p_szc;
10664 		if (szc < pszc) {
10665 			mutex_exit(mtx);
10666 			pszc = szc;
10667 			goto again;
10668 		}
10669 		/*
10670 		 * pp's p_szc increased after it was decreased.
10671 		 * page cannot be mapped. Return current lock. The caller
10672 		 * will drop it right away.
10673 		 */
10674 		return (mtx);
10675 	}
10676 
10677 	/*
10678 	 * root's p_szc is greater than pp's p_szc.
10679 	 * hat_page_demote() is not done with all pages
10680 	 * yet. Wait for it to complete.
10681 	 */
10682 	mutex_exit(mtx);
10683 	rootpp = PP_GROUPLEADER(rootpp, rszc);
10684 	mtx = SFMMU_MLSPL_MTX(type, rootpp);
10685 	mutex_enter(mtx);
10686 	mutex_exit(mtx);
10687 	prev_rszc = rszc;
10688 	goto again;
10689 }
10690 
10691 static int
10692 sfmmu_mlspl_held(struct page *pp, int type)
10693 {
10694 	kmutex_t	*mtx;
10695 
10696 	ASSERT(pp != NULL);
10697 	/* The lock lives in the root page */
10698 	pp = PP_PAGEROOT(pp);
10699 	ASSERT(pp != NULL);
10700 
10701 	mtx = SFMMU_MLSPL_MTX(type, pp);
10702 	return (MUTEX_HELD(mtx));
10703 }
10704 
10705 static uint_t
10706 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10707 {
10708 	struct  hme_blk *hblkp;
10709 
10710 
10711 	if (freehblkp != NULL) {
10712 		mutex_enter(&freehblkp_lock);
10713 		if (freehblkp != NULL) {
10714 			/*
10715 			 * If the current thread is owning hblk_reserve OR
10716 			 * critical request from sfmmu_hblk_steal()
10717 			 * let it succeed even if freehblkcnt is really low.
10718 			 */
10719 			if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10720 				SFMMU_STAT(sf_get_free_throttle);
10721 				mutex_exit(&freehblkp_lock);
10722 				return (0);
10723 			}
10724 			freehblkcnt--;
10725 			*hmeblkpp = freehblkp;
10726 			hblkp = *hmeblkpp;
10727 			freehblkp = hblkp->hblk_next;
10728 			mutex_exit(&freehblkp_lock);
10729 			hblkp->hblk_next = NULL;
10730 			SFMMU_STAT(sf_get_free_success);
10731 
10732 			ASSERT(hblkp->hblk_hmecnt == 0);
10733 			ASSERT(hblkp->hblk_vcnt == 0);
10734 			ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10735 
10736 			return (1);
10737 		}
10738 		mutex_exit(&freehblkp_lock);
10739 	}
10740 
10741 	/* Check cpu hblk pending queues */
10742 	if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10743 		hblkp = *hmeblkpp;
10744 		hblkp->hblk_next = NULL;
10745 		hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10746 
10747 		ASSERT(hblkp->hblk_hmecnt == 0);
10748 		ASSERT(hblkp->hblk_vcnt == 0);
10749 
10750 		return (1);
10751 	}
10752 
10753 	SFMMU_STAT(sf_get_free_fail);
10754 	return (0);
10755 }
10756 
10757 static uint_t
10758 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10759 {
10760 	struct  hme_blk *hblkp;
10761 
10762 	ASSERT(hmeblkp->hblk_hmecnt == 0);
10763 	ASSERT(hmeblkp->hblk_vcnt == 0);
10764 	ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10765 
10766 	/*
10767 	 * If the current thread is mapping into kernel space,
10768 	 * let it succede even if freehblkcnt is max
10769 	 * so that it will avoid freeing it to kmem.
10770 	 * This will prevent stack overflow due to
10771 	 * possible recursion since kmem_cache_free()
10772 	 * might require creation of a slab which
10773 	 * in turn needs an hmeblk to map that slab;
10774 	 * let's break this vicious chain at the first
10775 	 * opportunity.
10776 	 */
10777 	if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10778 		mutex_enter(&freehblkp_lock);
10779 		if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10780 			SFMMU_STAT(sf_put_free_success);
10781 			freehblkcnt++;
10782 			hmeblkp->hblk_next = freehblkp;
10783 			freehblkp = hmeblkp;
10784 			mutex_exit(&freehblkp_lock);
10785 			return (1);
10786 		}
10787 		mutex_exit(&freehblkp_lock);
10788 	}
10789 
10790 	/*
10791 	 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10792 	 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10793 	 * we are not in the process of mapping into kernel space.
10794 	 */
10795 	ASSERT(!critical);
10796 	while (freehblkcnt > HBLK_RESERVE_CNT) {
10797 		mutex_enter(&freehblkp_lock);
10798 		if (freehblkcnt > HBLK_RESERVE_CNT) {
10799 			freehblkcnt--;
10800 			hblkp = freehblkp;
10801 			freehblkp = hblkp->hblk_next;
10802 			mutex_exit(&freehblkp_lock);
10803 			ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10804 			kmem_cache_free(sfmmu8_cache, hblkp);
10805 			continue;
10806 		}
10807 		mutex_exit(&freehblkp_lock);
10808 	}
10809 	SFMMU_STAT(sf_put_free_fail);
10810 	return (0);
10811 }
10812 
10813 static void
10814 sfmmu_hblk_swap(struct hme_blk *new)
10815 {
10816 	struct hme_blk *old, *hblkp, *prev;
10817 	uint64_t newpa;
10818 	caddr_t	base, vaddr, endaddr;
10819 	struct hmehash_bucket *hmebp;
10820 	struct sf_hment *osfhme, *nsfhme;
10821 	page_t *pp;
10822 	kmutex_t *pml;
10823 	tte_t tte;
10824 	struct hme_blk *list = NULL;
10825 
10826 #ifdef	DEBUG
10827 	hmeblk_tag		hblktag;
10828 	struct hme_blk		*found;
10829 #endif
10830 	old = HBLK_RESERVE;
10831 	ASSERT(!old->hblk_shared);
10832 
10833 	/*
10834 	 * save pa before bcopy clobbers it
10835 	 */
10836 	newpa = new->hblk_nextpa;
10837 
10838 	base = (caddr_t)get_hblk_base(old);
10839 	endaddr = base + get_hblk_span(old);
10840 
10841 	/*
10842 	 * acquire hash bucket lock.
10843 	 */
10844 	hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10845 	    SFMMU_INVALID_SHMERID);
10846 
10847 	/*
10848 	 * copy contents from old to new
10849 	 */
10850 	bcopy((void *)old, (void *)new, HME8BLK_SZ);
10851 
10852 	/*
10853 	 * add new to hash chain
10854 	 */
10855 	sfmmu_hblk_hash_add(hmebp, new, newpa);
10856 
10857 	/*
10858 	 * search hash chain for hblk_reserve; this needs to be performed
10859 	 * after adding new, otherwise prev won't correspond to the hblk which
10860 	 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10861 	 * remove old later.
10862 	 */
10863 	for (prev = NULL,
10864 	    hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10865 	    prev = hblkp, hblkp = hblkp->hblk_next)
10866 		;
10867 
10868 	if (hblkp != old)
10869 		panic("sfmmu_hblk_swap: hblk_reserve not found");
10870 
10871 	/*
10872 	 * p_mapping list is still pointing to hments in hblk_reserve;
10873 	 * fix up p_mapping list so that they point to hments in new.
10874 	 *
10875 	 * Since all these mappings are created by hblk_reserve_thread
10876 	 * on the way and it's using at least one of the buffers from each of
10877 	 * the newly minted slabs, there is no danger of any of these
10878 	 * mappings getting unloaded by another thread.
10879 	 *
10880 	 * tsbmiss could only modify ref/mod bits of hments in old/new.
10881 	 * Since all of these hments hold mappings established by segkmem
10882 	 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
10883 	 * have no meaning for the mappings in hblk_reserve.  hments in
10884 	 * old and new are identical except for ref/mod bits.
10885 	 */
10886 	for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10887 
10888 		HBLKTOHME(osfhme, old, vaddr);
10889 		sfmmu_copytte(&osfhme->hme_tte, &tte);
10890 
10891 		if (TTE_IS_VALID(&tte)) {
10892 			if ((pp = osfhme->hme_page) == NULL)
10893 				panic("sfmmu_hblk_swap: page not mapped");
10894 
10895 			pml = sfmmu_mlist_enter(pp);
10896 
10897 			if (pp != osfhme->hme_page)
10898 				panic("sfmmu_hblk_swap: mapping changed");
10899 
10900 			HBLKTOHME(nsfhme, new, vaddr);
10901 
10902 			HME_ADD(nsfhme, pp);
10903 			HME_SUB(osfhme, pp);
10904 
10905 			sfmmu_mlist_exit(pml);
10906 		}
10907 	}
10908 
10909 	/*
10910 	 * remove old from hash chain
10911 	 */
10912 	sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10913 
10914 #ifdef	DEBUG
10915 
10916 	hblktag.htag_id = ksfmmup;
10917 	hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10918 	hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10919 	hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10920 	HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10921 
10922 	if (found != new)
10923 		panic("sfmmu_hblk_swap: new hblk not found");
10924 #endif
10925 
10926 	SFMMU_HASH_UNLOCK(hmebp);
10927 
10928 	/*
10929 	 * Reset hblk_reserve
10930 	 */
10931 	bzero((void *)old, HME8BLK_SZ);
10932 	old->hblk_nextpa = va_to_pa((caddr_t)old);
10933 }
10934 
10935 /*
10936  * Grab the mlist mutex for both pages passed in.
10937  *
10938  * low and high will be returned as pointers to the mutexes for these pages.
10939  * low refers to the mutex residing in the lower bin of the mlist hash, while
10940  * high refers to the mutex residing in the higher bin of the mlist hash.  This
10941  * is due to the locking order restrictions on the same thread grabbing
10942  * multiple mlist mutexes.  The low lock must be acquired before the high lock.
10943  *
10944  * If both pages hash to the same mutex, only grab that single mutex, and
10945  * high will be returned as NULL
10946  * If the pages hash to different bins in the hash, grab the lower addressed
10947  * lock first and then the higher addressed lock in order to follow the locking
10948  * rules involved with the same thread grabbing multiple mlist mutexes.
10949  * low and high will both have non-NULL values.
10950  */
10951 static void
10952 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
10953     kmutex_t **low, kmutex_t **high)
10954 {
10955 	kmutex_t	*mml_targ, *mml_repl;
10956 
10957 	/*
10958 	 * no need to do the dance around szc as in sfmmu_mlist_enter()
10959 	 * because this routine is only called by hat_page_relocate() and all
10960 	 * targ and repl pages are already locked EXCL so szc can't change.
10961 	 */
10962 
10963 	mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
10964 	mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
10965 
10966 	if (mml_targ == mml_repl) {
10967 		*low = mml_targ;
10968 		*high = NULL;
10969 	} else {
10970 		if (mml_targ < mml_repl) {
10971 			*low = mml_targ;
10972 			*high = mml_repl;
10973 		} else {
10974 			*low = mml_repl;
10975 			*high = mml_targ;
10976 		}
10977 	}
10978 
10979 	mutex_enter(*low);
10980 	if (*high)
10981 		mutex_enter(*high);
10982 }
10983 
10984 static void
10985 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
10986 {
10987 	if (high)
10988 		mutex_exit(high);
10989 	mutex_exit(low);
10990 }
10991 
10992 static hatlock_t *
10993 sfmmu_hat_enter(sfmmu_t *sfmmup)
10994 {
10995 	hatlock_t	*hatlockp;
10996 
10997 	if (sfmmup != ksfmmup) {
10998 		hatlockp = TSB_HASH(sfmmup);
10999 		mutex_enter(HATLOCK_MUTEXP(hatlockp));
11000 		return (hatlockp);
11001 	}
11002 	return (NULL);
11003 }
11004 
11005 static hatlock_t *
11006 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
11007 {
11008 	hatlock_t	*hatlockp;
11009 
11010 	if (sfmmup != ksfmmup) {
11011 		hatlockp = TSB_HASH(sfmmup);
11012 		if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
11013 			return (NULL);
11014 		return (hatlockp);
11015 	}
11016 	return (NULL);
11017 }
11018 
11019 static void
11020 sfmmu_hat_exit(hatlock_t *hatlockp)
11021 {
11022 	if (hatlockp != NULL)
11023 		mutex_exit(HATLOCK_MUTEXP(hatlockp));
11024 }
11025 
11026 static void
11027 sfmmu_hat_lock_all(void)
11028 {
11029 	int i;
11030 	for (i = 0; i < SFMMU_NUM_LOCK; i++)
11031 		mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
11032 }
11033 
11034 static void
11035 sfmmu_hat_unlock_all(void)
11036 {
11037 	int i;
11038 	for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
11039 		mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
11040 }
11041 
11042 int
11043 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
11044 {
11045 	ASSERT(sfmmup != ksfmmup);
11046 	return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
11047 }
11048 
11049 /*
11050  * Locking primitives to provide consistency between ISM unmap
11051  * and other operations.  Since ISM unmap can take a long time, we
11052  * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
11053  * contention on the hatlock buckets while ISM segments are being
11054  * unmapped.  The tradeoff is that the flags don't prevent priority
11055  * inversion from occurring, so we must request kernel priority in
11056  * case we have to sleep to keep from getting buried while holding
11057  * the HAT_ISMBUSY flag set, which in turn could block other kernel
11058  * threads from running (for example, in sfmmu_uvatopfn()).
11059  */
11060 static void
11061 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
11062 {
11063 	hatlock_t *hatlockp;
11064 
11065 	THREAD_KPRI_REQUEST();
11066 	if (!hatlock_held)
11067 		hatlockp = sfmmu_hat_enter(sfmmup);
11068 	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
11069 		cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11070 	SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
11071 	if (!hatlock_held)
11072 		sfmmu_hat_exit(hatlockp);
11073 }
11074 
11075 static void
11076 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
11077 {
11078 	hatlock_t *hatlockp;
11079 
11080 	if (!hatlock_held)
11081 		hatlockp = sfmmu_hat_enter(sfmmup);
11082 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
11083 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
11084 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11085 	if (!hatlock_held)
11086 		sfmmu_hat_exit(hatlockp);
11087 	THREAD_KPRI_RELEASE();
11088 }
11089 
11090 /*
11091  *
11092  * Algorithm:
11093  *
11094  * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
11095  *	hblks.
11096  *
11097  * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
11098  *
11099  * 		(a) try to return an hblk from reserve pool of free hblks;
11100  *		(b) if the reserve pool is empty, acquire hblk_reserve_lock
11101  *		    and return hblk_reserve.
11102  *
11103  * (3) call kmem_cache_alloc() to allocate hblk;
11104  *
11105  *		(a) if hblk_reserve_lock is held by the current thread,
11106  *		    atomically replace hblk_reserve by the hblk that is
11107  *		    returned by kmem_cache_alloc; release hblk_reserve_lock
11108  *		    and call kmem_cache_alloc() again.
11109  *		(b) if reserve pool is not full, add the hblk that is
11110  *		    returned by kmem_cache_alloc to reserve pool and
11111  *		    call kmem_cache_alloc again.
11112  *
11113  */
11114 static struct hme_blk *
11115 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
11116 	struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
11117 	uint_t flags, uint_t rid)
11118 {
11119 	struct hme_blk *hmeblkp = NULL;
11120 	struct hme_blk *newhblkp;
11121 	struct hme_blk *shw_hblkp = NULL;
11122 	struct kmem_cache *sfmmu_cache = NULL;
11123 	uint64_t hblkpa;
11124 	ulong_t index;
11125 	uint_t owner;		/* set to 1 if using hblk_reserve */
11126 	uint_t forcefree;
11127 	int sleep;
11128 	sf_srd_t *srdp;
11129 	sf_region_t *rgnp;
11130 
11131 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11132 	ASSERT(hblktag.htag_rid == rid);
11133 	SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
11134 	ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11135 	    IS_P2ALIGNED(vaddr, TTEBYTES(size)));
11136 
11137 	/*
11138 	 * If segkmem is not created yet, allocate from static hmeblks
11139 	 * created at the end of startup_modules().  See the block comment
11140 	 * in startup_modules() describing how we estimate the number of
11141 	 * static hmeblks that will be needed during re-map.
11142 	 */
11143 	if (!hblk_alloc_dynamic) {
11144 
11145 		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11146 
11147 		if (size == TTE8K) {
11148 			index = nucleus_hblk8.index;
11149 			if (index >= nucleus_hblk8.len) {
11150 				/*
11151 				 * If we panic here, see startup_modules() to
11152 				 * make sure that we are calculating the
11153 				 * number of hblk8's that we need correctly.
11154 				 */
11155 				prom_panic("no nucleus hblk8 to allocate");
11156 			}
11157 			hmeblkp =
11158 			    (struct hme_blk *)&nucleus_hblk8.list[index];
11159 			nucleus_hblk8.index++;
11160 			SFMMU_STAT(sf_hblk8_nalloc);
11161 		} else {
11162 			index = nucleus_hblk1.index;
11163 			if (nucleus_hblk1.index >= nucleus_hblk1.len) {
11164 				/*
11165 				 * If we panic here, see startup_modules().
11166 				 * Most likely you need to update the
11167 				 * calculation of the number of hblk1 elements
11168 				 * that the kernel needs to boot.
11169 				 */
11170 				prom_panic("no nucleus hblk1 to allocate");
11171 			}
11172 			hmeblkp =
11173 			    (struct hme_blk *)&nucleus_hblk1.list[index];
11174 			nucleus_hblk1.index++;
11175 			SFMMU_STAT(sf_hblk1_nalloc);
11176 		}
11177 
11178 		goto hblk_init;
11179 	}
11180 
11181 	SFMMU_HASH_UNLOCK(hmebp);
11182 
11183 	if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
11184 		if (mmu_page_sizes == max_mmu_page_sizes) {
11185 			if (size < TTE256M)
11186 				shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11187 				    size, flags);
11188 		} else {
11189 			if (size < TTE4M)
11190 				shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11191 				    size, flags);
11192 		}
11193 	} else if (SFMMU_IS_SHMERID_VALID(rid)) {
11194 		/*
11195 		 * Shared hmes use per region bitmaps in rgn_hmeflag
11196 		 * rather than shadow hmeblks to keep track of the
11197 		 * mapping sizes which have been allocated for the region.
11198 		 * Here we cleanup old invalid hmeblks with this rid,
11199 		 * which may be left around by pageunload().
11200 		 */
11201 		int ttesz;
11202 		caddr_t va;
11203 		caddr_t	eva = vaddr + TTEBYTES(size);
11204 
11205 		ASSERT(sfmmup != KHATID);
11206 
11207 		srdp = sfmmup->sfmmu_srdp;
11208 		ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11209 		rgnp = srdp->srd_hmergnp[rid];
11210 		ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
11211 		ASSERT(rgnp->rgn_refcnt != 0);
11212 		ASSERT(size <= rgnp->rgn_pgszc);
11213 
11214 		ttesz = HBLK_MIN_TTESZ;
11215 		do {
11216 			if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
11217 				continue;
11218 			}
11219 
11220 			if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
11221 				sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
11222 			} else if (ttesz < size) {
11223 				for (va = vaddr; va < eva;
11224 				    va += TTEBYTES(ttesz)) {
11225 					sfmmu_cleanup_rhblk(srdp, va, rid,
11226 					    ttesz);
11227 				}
11228 			}
11229 		} while (++ttesz <= rgnp->rgn_pgszc);
11230 	}
11231 
11232 fill_hblk:
11233 	owner = (hblk_reserve_thread == curthread) ? 1 : 0;
11234 
11235 	if (owner && size == TTE8K) {
11236 
11237 		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11238 		/*
11239 		 * We are really in a tight spot. We already own
11240 		 * hblk_reserve and we need another hblk.  In anticipation
11241 		 * of this kind of scenario, we specifically set aside
11242 		 * HBLK_RESERVE_MIN number of hblks to be used exclusively
11243 		 * by owner of hblk_reserve.
11244 		 */
11245 		SFMMU_STAT(sf_hblk_recurse_cnt);
11246 
11247 		if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11248 			panic("sfmmu_hblk_alloc: reserve list is empty");
11249 
11250 		goto hblk_verify;
11251 	}
11252 
11253 	ASSERT(!owner);
11254 
11255 	if ((flags & HAT_NO_KALLOC) == 0) {
11256 
11257 		sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
11258 		sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
11259 
11260 		if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11261 			hmeblkp = sfmmu_hblk_steal(size);
11262 		} else {
11263 			/*
11264 			 * if we are the owner of hblk_reserve,
11265 			 * swap hblk_reserve with hmeblkp and
11266 			 * start a fresh life.  Hope things go
11267 			 * better this time.
11268 			 */
11269 			if (hblk_reserve_thread == curthread) {
11270 				ASSERT(sfmmu_cache == sfmmu8_cache);
11271 				sfmmu_hblk_swap(hmeblkp);
11272 				hblk_reserve_thread = NULL;
11273 				mutex_exit(&hblk_reserve_lock);
11274 				goto fill_hblk;
11275 			}
11276 			/*
11277 			 * let's donate this hblk to our reserve list if
11278 			 * we are not mapping kernel range
11279 			 */
11280 			if (size == TTE8K && sfmmup != KHATID) {
11281 				if (sfmmu_put_free_hblk(hmeblkp, 0))
11282 					goto fill_hblk;
11283 			}
11284 		}
11285 	} else {
11286 		/*
11287 		 * We are here to map the slab in sfmmu8_cache; let's
11288 		 * check if we could tap our reserve list; if successful,
11289 		 * this will avoid the pain of going thru sfmmu_hblk_swap
11290 		 */
11291 		SFMMU_STAT(sf_hblk_slab_cnt);
11292 		if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11293 			/*
11294 			 * let's start hblk_reserve dance
11295 			 */
11296 			SFMMU_STAT(sf_hblk_reserve_cnt);
11297 			owner = 1;
11298 			mutex_enter(&hblk_reserve_lock);
11299 			hmeblkp = HBLK_RESERVE;
11300 			hblk_reserve_thread = curthread;
11301 		}
11302 	}
11303 
11304 hblk_verify:
11305 	ASSERT(hmeblkp != NULL);
11306 	set_hblk_sz(hmeblkp, size);
11307 	ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11308 	SFMMU_HASH_LOCK(hmebp);
11309 	HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11310 	if (newhblkp != NULL) {
11311 		SFMMU_HASH_UNLOCK(hmebp);
11312 		if (hmeblkp != HBLK_RESERVE) {
11313 			/*
11314 			 * This is really tricky!
11315 			 *
11316 			 * vmem_alloc(vmem_seg_arena)
11317 			 *  vmem_alloc(vmem_internal_arena)
11318 			 *   segkmem_alloc(heap_arena)
11319 			 *    vmem_alloc(heap_arena)
11320 			 *    page_create()
11321 			 *    hat_memload()
11322 			 *	kmem_cache_free()
11323 			 *	 kmem_cache_alloc()
11324 			 *	  kmem_slab_create()
11325 			 *	   vmem_alloc(kmem_internal_arena)
11326 			 *	    segkmem_alloc(heap_arena)
11327 			 *		vmem_alloc(heap_arena)
11328 			 *		page_create()
11329 			 *		hat_memload()
11330 			 *		  kmem_cache_free()
11331 			 *		...
11332 			 *
11333 			 * Thus, hat_memload() could call kmem_cache_free
11334 			 * for enough number of times that we could easily
11335 			 * hit the bottom of the stack or run out of reserve
11336 			 * list of vmem_seg structs.  So, we must donate
11337 			 * this hblk to reserve list if it's allocated
11338 			 * from sfmmu8_cache *and* mapping kernel range.
11339 			 * We don't need to worry about freeing hmeblk1's
11340 			 * to kmem since they don't map any kmem slabs.
11341 			 *
11342 			 * Note: When segkmem supports largepages, we must
11343 			 * free hmeblk1's to reserve list as well.
11344 			 */
11345 			forcefree = (sfmmup == KHATID) ? 1 : 0;
11346 			if (size == TTE8K &&
11347 			    sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11348 				goto re_verify;
11349 			}
11350 			ASSERT(sfmmup != KHATID);
11351 			kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11352 		} else {
11353 			/*
11354 			 * Hey! we don't need hblk_reserve any more.
11355 			 */
11356 			ASSERT(owner);
11357 			hblk_reserve_thread = NULL;
11358 			mutex_exit(&hblk_reserve_lock);
11359 			owner = 0;
11360 		}
11361 re_verify:
11362 		/*
11363 		 * let's check if the goodies are still present
11364 		 */
11365 		SFMMU_HASH_LOCK(hmebp);
11366 		HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11367 		if (newhblkp != NULL) {
11368 			/*
11369 			 * return newhblkp if it's not hblk_reserve;
11370 			 * if newhblkp is hblk_reserve, return it
11371 			 * _only if_ we are the owner of hblk_reserve.
11372 			 */
11373 			if (newhblkp != HBLK_RESERVE || owner) {
11374 				ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11375 				    newhblkp->hblk_shared);
11376 				ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11377 				    !newhblkp->hblk_shared);
11378 				return (newhblkp);
11379 			} else {
11380 				/*
11381 				 * we just hit hblk_reserve in the hash and
11382 				 * we are not the owner of that;
11383 				 *
11384 				 * block until hblk_reserve_thread completes
11385 				 * swapping hblk_reserve and try the dance
11386 				 * once again.
11387 				 */
11388 				SFMMU_HASH_UNLOCK(hmebp);
11389 				mutex_enter(&hblk_reserve_lock);
11390 				mutex_exit(&hblk_reserve_lock);
11391 				SFMMU_STAT(sf_hblk_reserve_hit);
11392 				goto fill_hblk;
11393 			}
11394 		} else {
11395 			/*
11396 			 * it's no more! try the dance once again.
11397 			 */
11398 			SFMMU_HASH_UNLOCK(hmebp);
11399 			goto fill_hblk;
11400 		}
11401 	}
11402 
11403 hblk_init:
11404 	if (SFMMU_IS_SHMERID_VALID(rid)) {
11405 		uint16_t tteflag = 0x1 <<
11406 		    ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11407 
11408 		if (!(rgnp->rgn_hmeflags & tteflag)) {
11409 			atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11410 		}
11411 		hmeblkp->hblk_shared = 1;
11412 	} else {
11413 		hmeblkp->hblk_shared = 0;
11414 	}
11415 	set_hblk_sz(hmeblkp, size);
11416 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11417 	hmeblkp->hblk_next = (struct hme_blk *)NULL;
11418 	hmeblkp->hblk_tag = hblktag;
11419 	hmeblkp->hblk_shadow = shw_hblkp;
11420 	hblkpa = hmeblkp->hblk_nextpa;
11421 	hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11422 
11423 	ASSERT(get_hblk_ttesz(hmeblkp) == size);
11424 	ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11425 	ASSERT(hmeblkp->hblk_hmecnt == 0);
11426 	ASSERT(hmeblkp->hblk_vcnt == 0);
11427 	ASSERT(hmeblkp->hblk_lckcnt == 0);
11428 	ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11429 	sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11430 	return (hmeblkp);
11431 }
11432 
11433 /*
11434  * This function cleans up the hme_blk and returns it to the free list.
11435  */
11436 /* ARGSUSED */
11437 static void
11438 sfmmu_hblk_free(struct hme_blk **listp)
11439 {
11440 	struct hme_blk *hmeblkp, *next_hmeblkp;
11441 	int		size;
11442 	uint_t		critical;
11443 	uint64_t	hblkpa;
11444 
11445 	ASSERT(*listp != NULL);
11446 
11447 	hmeblkp = *listp;
11448 	while (hmeblkp != NULL) {
11449 		next_hmeblkp = hmeblkp->hblk_next;
11450 		ASSERT(!hmeblkp->hblk_hmecnt);
11451 		ASSERT(!hmeblkp->hblk_vcnt);
11452 		ASSERT(!hmeblkp->hblk_lckcnt);
11453 		ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11454 		ASSERT(hmeblkp->hblk_shared == 0);
11455 		ASSERT(hmeblkp->hblk_shw_bit == 0);
11456 		ASSERT(hmeblkp->hblk_shadow == NULL);
11457 
11458 		hblkpa = va_to_pa((caddr_t)hmeblkp);
11459 		ASSERT(hblkpa != (uint64_t)-1);
11460 		critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11461 
11462 		size = get_hblk_ttesz(hmeblkp);
11463 		hmeblkp->hblk_next = NULL;
11464 		hmeblkp->hblk_nextpa = hblkpa;
11465 
11466 		if (hmeblkp->hblk_nuc_bit == 0) {
11467 
11468 			if (size != TTE8K ||
11469 			    !sfmmu_put_free_hblk(hmeblkp, critical))
11470 				kmem_cache_free(get_hblk_cache(hmeblkp),
11471 				    hmeblkp);
11472 		}
11473 		hmeblkp = next_hmeblkp;
11474 	}
11475 }
11476 
11477 #define	BUCKETS_TO_SEARCH_BEFORE_UNLOAD	30
11478 #define	SFMMU_HBLK_STEAL_THRESHOLD 5
11479 
11480 static uint_t sfmmu_hblk_steal_twice;
11481 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11482 
11483 /*
11484  * Steal a hmeblk from user or kernel hme hash lists.
11485  * For 8K tte grab one from reserve pool (freehblkp) before proceeding to
11486  * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts
11487  * tap into critical reserve of freehblkp.
11488  * Note: We remain looping in this routine until we find one.
11489  */
11490 static struct hme_blk *
11491 sfmmu_hblk_steal(int size)
11492 {
11493 	static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11494 	struct hmehash_bucket *hmebp;
11495 	struct hme_blk *hmeblkp = NULL, *pr_hblk;
11496 	uint64_t hblkpa;
11497 	int i;
11498 	uint_t loop_cnt = 0, critical;
11499 
11500 	for (;;) {
11501 		/* Check cpu hblk pending queues */
11502 		if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11503 			hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11504 			ASSERT(hmeblkp->hblk_hmecnt == 0);
11505 			ASSERT(hmeblkp->hblk_vcnt == 0);
11506 			return (hmeblkp);
11507 		}
11508 
11509 		if (size == TTE8K) {
11510 			critical =
11511 			    (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11512 			if (sfmmu_get_free_hblk(&hmeblkp, critical))
11513 				return (hmeblkp);
11514 		}
11515 
11516 		hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11517 		    uhmehash_steal_hand;
11518 		ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11519 
11520 		for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11521 		    BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11522 			SFMMU_HASH_LOCK(hmebp);
11523 			hmeblkp = hmebp->hmeblkp;
11524 			hblkpa = hmebp->hmeh_nextpa;
11525 			pr_hblk = NULL;
11526 			while (hmeblkp) {
11527 				/*
11528 				 * check if it is a hmeblk that is not locked
11529 				 * and not shared. skip shadow hmeblks with
11530 				 * shadow_mask set i.e valid count non zero.
11531 				 */
11532 				if ((get_hblk_ttesz(hmeblkp) == size) &&
11533 				    (hmeblkp->hblk_shw_bit == 0 ||
11534 				    hmeblkp->hblk_vcnt == 0) &&
11535 				    (hmeblkp->hblk_lckcnt == 0)) {
11536 					/*
11537 					 * there is a high probability that we
11538 					 * will find a free one. search some
11539 					 * buckets for a free hmeblk initially
11540 					 * before unloading a valid hmeblk.
11541 					 */
11542 					if ((hmeblkp->hblk_vcnt == 0 &&
11543 					    hmeblkp->hblk_hmecnt == 0) || (i >=
11544 					    BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11545 						if (sfmmu_steal_this_hblk(hmebp,
11546 						    hmeblkp, hblkpa, pr_hblk)) {
11547 							/*
11548 							 * Hblk is unloaded
11549 							 * successfully
11550 							 */
11551 							break;
11552 						}
11553 					}
11554 				}
11555 				pr_hblk = hmeblkp;
11556 				hblkpa = hmeblkp->hblk_nextpa;
11557 				hmeblkp = hmeblkp->hblk_next;
11558 			}
11559 
11560 			SFMMU_HASH_UNLOCK(hmebp);
11561 			if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11562 				hmebp = uhme_hash;
11563 		}
11564 		uhmehash_steal_hand = hmebp;
11565 
11566 		if (hmeblkp != NULL)
11567 			break;
11568 
11569 		/*
11570 		 * in the worst case, look for a free one in the kernel
11571 		 * hash table.
11572 		 */
11573 		for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11574 			SFMMU_HASH_LOCK(hmebp);
11575 			hmeblkp = hmebp->hmeblkp;
11576 			hblkpa = hmebp->hmeh_nextpa;
11577 			pr_hblk = NULL;
11578 			while (hmeblkp) {
11579 				/*
11580 				 * check if it is free hmeblk
11581 				 */
11582 				if ((get_hblk_ttesz(hmeblkp) == size) &&
11583 				    (hmeblkp->hblk_lckcnt == 0) &&
11584 				    (hmeblkp->hblk_vcnt == 0) &&
11585 				    (hmeblkp->hblk_hmecnt == 0)) {
11586 					if (sfmmu_steal_this_hblk(hmebp,
11587 					    hmeblkp, hblkpa, pr_hblk)) {
11588 						break;
11589 					} else {
11590 						/*
11591 						 * Cannot fail since we have
11592 						 * hash lock.
11593 						 */
11594 						panic("fail to steal?");
11595 					}
11596 				}
11597 
11598 				pr_hblk = hmeblkp;
11599 				hblkpa = hmeblkp->hblk_nextpa;
11600 				hmeblkp = hmeblkp->hblk_next;
11601 			}
11602 
11603 			SFMMU_HASH_UNLOCK(hmebp);
11604 			if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11605 				hmebp = khme_hash;
11606 		}
11607 
11608 		if (hmeblkp != NULL)
11609 			break;
11610 		sfmmu_hblk_steal_twice++;
11611 	}
11612 	return (hmeblkp);
11613 }
11614 
11615 /*
11616  * This routine does real work to prepare a hblk to be "stolen" by
11617  * unloading the mappings, updating shadow counts ....
11618  * It returns 1 if the block is ready to be reused (stolen), or 0
11619  * means the block cannot be stolen yet- pageunload is still working
11620  * on this hblk.
11621  */
11622 static int
11623 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11624 	uint64_t hblkpa, struct hme_blk *pr_hblk)
11625 {
11626 	int shw_size, vshift;
11627 	struct hme_blk *shw_hblkp;
11628 	caddr_t vaddr;
11629 	uint_t shw_mask, newshw_mask;
11630 	struct hme_blk *list = NULL;
11631 
11632 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11633 
11634 	/*
11635 	 * check if the hmeblk is free, unload if necessary
11636 	 */
11637 	if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11638 		sfmmu_t *sfmmup;
11639 		demap_range_t dmr;
11640 
11641 		sfmmup = hblktosfmmu(hmeblkp);
11642 		if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11643 			return (0);
11644 		}
11645 		DEMAP_RANGE_INIT(sfmmup, &dmr);
11646 		(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11647 		    (caddr_t)get_hblk_base(hmeblkp),
11648 		    get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11649 		DEMAP_RANGE_FLUSH(&dmr);
11650 		if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11651 			/*
11652 			 * Pageunload is working on the same hblk.
11653 			 */
11654 			return (0);
11655 		}
11656 
11657 		sfmmu_hblk_steal_unload_count++;
11658 	}
11659 
11660 	ASSERT(hmeblkp->hblk_lckcnt == 0);
11661 	ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11662 
11663 	sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11664 	hmeblkp->hblk_nextpa = hblkpa;
11665 
11666 	shw_hblkp = hmeblkp->hblk_shadow;
11667 	if (shw_hblkp) {
11668 		ASSERT(!hmeblkp->hblk_shared);
11669 		shw_size = get_hblk_ttesz(shw_hblkp);
11670 		vaddr = (caddr_t)get_hblk_base(hmeblkp);
11671 		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11672 		ASSERT(vshift < 8);
11673 		/*
11674 		 * Atomically clear shadow mask bit
11675 		 */
11676 		do {
11677 			shw_mask = shw_hblkp->hblk_shw_mask;
11678 			ASSERT(shw_mask & (1 << vshift));
11679 			newshw_mask = shw_mask & ~(1 << vshift);
11680 			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
11681 			    shw_mask, newshw_mask);
11682 		} while (newshw_mask != shw_mask);
11683 		hmeblkp->hblk_shadow = NULL;
11684 	}
11685 
11686 	/*
11687 	 * remove shadow bit if we are stealing an unused shadow hmeblk.
11688 	 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if
11689 	 * we are indeed allocating a shadow hmeblk.
11690 	 */
11691 	hmeblkp->hblk_shw_bit = 0;
11692 
11693 	if (hmeblkp->hblk_shared) {
11694 		sf_srd_t	*srdp;
11695 		sf_region_t	*rgnp;
11696 		uint_t		rid;
11697 
11698 		srdp = hblktosrd(hmeblkp);
11699 		ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11700 		rid = hmeblkp->hblk_tag.htag_rid;
11701 		ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11702 		ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11703 		rgnp = srdp->srd_hmergnp[rid];
11704 		ASSERT(rgnp != NULL);
11705 		SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11706 		hmeblkp->hblk_shared = 0;
11707 	}
11708 
11709 	sfmmu_hblk_steal_count++;
11710 	SFMMU_STAT(sf_steal_count);
11711 
11712 	return (1);
11713 }
11714 
11715 struct hme_blk *
11716 sfmmu_hmetohblk(struct sf_hment *sfhme)
11717 {
11718 	struct hme_blk *hmeblkp;
11719 	struct sf_hment *sfhme0;
11720 	struct hme_blk *hblk_dummy = 0;
11721 
11722 	/*
11723 	 * No dummy sf_hments, please.
11724 	 */
11725 	ASSERT(sfhme->hme_tte.ll != 0);
11726 
11727 	sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
11728 	hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11729 	    (uintptr_t)&hblk_dummy->hblk_hme[0]);
11730 
11731 	return (hmeblkp);
11732 }
11733 
11734 /*
11735  * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
11736  * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
11737  * KM_SLEEP allocation.
11738  *
11739  * Return 0 on success, -1 otherwise.
11740  */
11741 static void
11742 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11743 {
11744 	struct tsb_info *tsbinfop, *next;
11745 	tsb_replace_rc_t rc;
11746 	boolean_t gotfirst = B_FALSE;
11747 
11748 	ASSERT(sfmmup != ksfmmup);
11749 	ASSERT(sfmmu_hat_lock_held(sfmmup));
11750 
11751 	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11752 		cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11753 	}
11754 
11755 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11756 		SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11757 	} else {
11758 		return;
11759 	}
11760 
11761 	ASSERT(sfmmup->sfmmu_tsb != NULL);
11762 
11763 	/*
11764 	 * Loop over all tsbinfo's replacing them with ones that actually have
11765 	 * a TSB.  If any of the replacements ever fail, bail out of the loop.
11766 	 */
11767 	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11768 		ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11769 		next = tsbinfop->tsb_next;
11770 		rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11771 		    hatlockp, TSB_SWAPIN);
11772 		if (rc != TSB_SUCCESS) {
11773 			break;
11774 		}
11775 		gotfirst = B_TRUE;
11776 	}
11777 
11778 	switch (rc) {
11779 	case TSB_SUCCESS:
11780 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11781 		cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11782 		return;
11783 	case TSB_LOSTRACE:
11784 		break;
11785 	case TSB_ALLOCFAIL:
11786 		break;
11787 	default:
11788 		panic("sfmmu_replace_tsb returned unrecognized failure code "
11789 		    "%d", rc);
11790 	}
11791 
11792 	/*
11793 	 * In this case, we failed to get one of our TSBs.  If we failed to
11794 	 * get the first TSB, get one of minimum size (8KB).  Walk the list
11795 	 * and throw away the tsbinfos, starting where the allocation failed;
11796 	 * we can get by with just one TSB as long as we don't leave the
11797 	 * SWAPPED tsbinfo structures lying around.
11798 	 */
11799 	tsbinfop = sfmmup->sfmmu_tsb;
11800 	next = tsbinfop->tsb_next;
11801 	tsbinfop->tsb_next = NULL;
11802 
11803 	sfmmu_hat_exit(hatlockp);
11804 	for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11805 		next = tsbinfop->tsb_next;
11806 		sfmmu_tsbinfo_free(tsbinfop);
11807 	}
11808 	hatlockp = sfmmu_hat_enter(sfmmup);
11809 
11810 	/*
11811 	 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11812 	 * pages.
11813 	 */
11814 	if (!gotfirst) {
11815 		tsbinfop = sfmmup->sfmmu_tsb;
11816 		rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11817 		    hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11818 		ASSERT(rc == TSB_SUCCESS);
11819 	}
11820 
11821 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11822 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11823 }
11824 
11825 static int
11826 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11827 {
11828 	ulong_t bix = 0;
11829 	uint_t rid;
11830 	sf_region_t *rgnp;
11831 
11832 	ASSERT(srdp != NULL);
11833 	ASSERT(srdp->srd_refcnt != 0);
11834 
11835 	w <<= BT_ULSHIFT;
11836 	while (bmw) {
11837 		if (!(bmw & 0x1)) {
11838 			bix++;
11839 			bmw >>= 1;
11840 			continue;
11841 		}
11842 		rid = w | bix;
11843 		rgnp = srdp->srd_hmergnp[rid];
11844 		ASSERT(rgnp->rgn_refcnt > 0);
11845 		ASSERT(rgnp->rgn_id == rid);
11846 		if (addr < rgnp->rgn_saddr ||
11847 		    addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11848 			bix++;
11849 			bmw >>= 1;
11850 		} else {
11851 			return (1);
11852 		}
11853 	}
11854 	return (0);
11855 }
11856 
11857 /*
11858  * Handle exceptions for low level tsb_handler.
11859  *
11860  * There are many scenarios that could land us here:
11861  *
11862  * If the context is invalid we land here. The context can be invalid
11863  * for 3 reasons: 1) we couldn't allocate a new context and now need to
11864  * perform a wrap around operation in order to allocate a new context.
11865  * 2) Context was invalidated to change pagesize programming 3) ISMs or
11866  * TSBs configuration is changeing for this process and we are forced into
11867  * here to do a syncronization operation. If the context is valid we can
11868  * be here from window trap hanlder. In this case just call trap to handle
11869  * the fault.
11870  *
11871  * Note that the process will run in INVALID_CONTEXT before
11872  * faulting into here and subsequently loading the MMU registers
11873  * (including the TSB base register) associated with this process.
11874  * For this reason, the trap handlers must all test for
11875  * INVALID_CONTEXT before attempting to access any registers other
11876  * than the context registers.
11877  */
11878 void
11879 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11880 {
11881 	sfmmu_t *sfmmup, *shsfmmup;
11882 	uint_t ctxtype;
11883 	klwp_id_t lwp;
11884 	char lwp_save_state;
11885 	hatlock_t *hatlockp, *shatlockp;
11886 	struct tsb_info *tsbinfop;
11887 	struct tsbmiss *tsbmp;
11888 	sf_scd_t *scdp;
11889 
11890 	SFMMU_STAT(sf_tsb_exceptions);
11891 	SFMMU_MMU_STAT(mmu_tsb_exceptions);
11892 	sfmmup = astosfmmu(curthread->t_procp->p_as);
11893 	/*
11894 	 * note that in sun4u, tagacces register contains ctxnum
11895 	 * while sun4v passes ctxtype in the tagaccess register.
11896 	 */
11897 	ctxtype = tagaccess & TAGACC_CTX_MASK;
11898 
11899 	ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11900 	ASSERT(sfmmup->sfmmu_ismhat == 0);
11901 	ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11902 	    ctxtype == INVALID_CONTEXT);
11903 
11904 	if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11905 		/*
11906 		 * We may land here because shme bitmap and pagesize
11907 		 * flags are updated lazily in tsbmiss area on other cpus.
11908 		 * If we detect here that tsbmiss area is out of sync with
11909 		 * sfmmu update it and retry the trapped instruction.
11910 		 * Otherwise call trap().
11911 		 */
11912 		int ret = 0;
11913 		uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11914 		caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11915 
11916 		/*
11917 		 * Must set lwp state to LWP_SYS before
11918 		 * trying to acquire any adaptive lock
11919 		 */
11920 		lwp = ttolwp(curthread);
11921 		ASSERT(lwp);
11922 		lwp_save_state = lwp->lwp_state;
11923 		lwp->lwp_state = LWP_SYS;
11924 
11925 		hatlockp = sfmmu_hat_enter(sfmmup);
11926 		kpreempt_disable();
11927 		tsbmp = &tsbmiss_area[CPU->cpu_id];
11928 		ASSERT(sfmmup == tsbmp->usfmmup);
11929 		if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11930 		    ~tteflag_mask) ||
11931 		    ((tsbmp->uhat_rtteflags ^  sfmmup->sfmmu_rtteflags) &
11932 		    ~tteflag_mask)) {
11933 			tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11934 			tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11935 			ret = 1;
11936 		}
11937 		if (sfmmup->sfmmu_srdp != NULL) {
11938 			ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11939 			ulong_t *tm = tsbmp->shmermap;
11940 			ulong_t i;
11941 			for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11942 				ulong_t d = tm[i] ^ sm[i];
11943 				if (d) {
11944 					if (d & sm[i]) {
11945 						if (!ret && sfmmu_is_rgnva(
11946 						    sfmmup->sfmmu_srdp,
11947 						    addr, i, d & sm[i])) {
11948 							ret = 1;
11949 						}
11950 					}
11951 					tm[i] = sm[i];
11952 				}
11953 			}
11954 		}
11955 		kpreempt_enable();
11956 		sfmmu_hat_exit(hatlockp);
11957 		lwp->lwp_state = lwp_save_state;
11958 		if (ret) {
11959 			return;
11960 		}
11961 	} else if (ctxtype == INVALID_CONTEXT) {
11962 		/*
11963 		 * First, make sure we come out of here with a valid ctx,
11964 		 * since if we don't get one we'll simply loop on the
11965 		 * faulting instruction.
11966 		 *
11967 		 * If the ISM mappings are changing, the TSB is relocated,
11968 		 * the process is swapped, the process is joining SCD or
11969 		 * leaving SCD or shared regions we serialize behind the
11970 		 * controlling thread with hat lock, sfmmu_flags and
11971 		 * sfmmu_tsb_cv condition variable.
11972 		 */
11973 
11974 		/*
11975 		 * Must set lwp state to LWP_SYS before
11976 		 * trying to acquire any adaptive lock
11977 		 */
11978 		lwp = ttolwp(curthread);
11979 		ASSERT(lwp);
11980 		lwp_save_state = lwp->lwp_state;
11981 		lwp->lwp_state = LWP_SYS;
11982 
11983 		hatlockp = sfmmu_hat_enter(sfmmup);
11984 retry:
11985 		if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
11986 			shsfmmup = scdp->scd_sfmmup;
11987 			ASSERT(shsfmmup != NULL);
11988 
11989 			for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
11990 			    tsbinfop = tsbinfop->tsb_next) {
11991 				if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11992 					/* drop the private hat lock */
11993 					sfmmu_hat_exit(hatlockp);
11994 					/* acquire the shared hat lock */
11995 					shatlockp = sfmmu_hat_enter(shsfmmup);
11996 					/*
11997 					 * recheck to see if anything changed
11998 					 * after we drop the private hat lock.
11999 					 */
12000 					if (sfmmup->sfmmu_scdp == scdp &&
12001 					    shsfmmup == scdp->scd_sfmmup) {
12002 						sfmmu_tsb_chk_reloc(shsfmmup,
12003 						    shatlockp);
12004 					}
12005 					sfmmu_hat_exit(shatlockp);
12006 					hatlockp = sfmmu_hat_enter(sfmmup);
12007 					goto retry;
12008 				}
12009 			}
12010 		}
12011 
12012 		for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
12013 		    tsbinfop = tsbinfop->tsb_next) {
12014 			if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
12015 				cv_wait(&sfmmup->sfmmu_tsb_cv,
12016 				    HATLOCK_MUTEXP(hatlockp));
12017 				goto retry;
12018 			}
12019 		}
12020 
12021 		/*
12022 		 * Wait for ISM maps to be updated.
12023 		 */
12024 		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12025 			cv_wait(&sfmmup->sfmmu_tsb_cv,
12026 			    HATLOCK_MUTEXP(hatlockp));
12027 			goto retry;
12028 		}
12029 
12030 		/* Is this process joining an SCD? */
12031 		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
12032 			/*
12033 			 * Flush private TSB and setup shared TSB.
12034 			 * sfmmu_finish_join_scd() does not drop the
12035 			 * hat lock.
12036 			 */
12037 			sfmmu_finish_join_scd(sfmmup);
12038 			SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
12039 		}
12040 
12041 		/*
12042 		 * If we're swapping in, get TSB(s).  Note that we must do
12043 		 * this before we get a ctx or load the MMU state.  Once
12044 		 * we swap in we have to recheck to make sure the TSB(s) and
12045 		 * ISM mappings didn't change while we slept.
12046 		 */
12047 		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
12048 			sfmmu_tsb_swapin(sfmmup, hatlockp);
12049 			goto retry;
12050 		}
12051 
12052 		sfmmu_get_ctx(sfmmup);
12053 
12054 		sfmmu_hat_exit(hatlockp);
12055 		/*
12056 		 * Must restore lwp_state if not calling
12057 		 * trap() for further processing. Restore
12058 		 * it anyway.
12059 		 */
12060 		lwp->lwp_state = lwp_save_state;
12061 		return;
12062 	}
12063 	trap(rp, (caddr_t)tagaccess, traptype, 0);
12064 }
12065 
12066 static void
12067 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
12068 {
12069 	struct tsb_info *tp;
12070 
12071 	ASSERT(sfmmu_hat_lock_held(sfmmup));
12072 
12073 	for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
12074 		if (tp->tsb_flags & TSB_RELOC_FLAG) {
12075 			cv_wait(&sfmmup->sfmmu_tsb_cv,
12076 			    HATLOCK_MUTEXP(hatlockp));
12077 			break;
12078 		}
12079 	}
12080 }
12081 
12082 /*
12083  * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and
12084  * TTE_SUSPENDED bit set in tte we block on aquiring a page lock
12085  * rather than spinning to avoid send mondo timeouts with
12086  * interrupts enabled. When the lock is acquired it is immediately
12087  * released and we return back to sfmmu_vatopfn just after
12088  * the GET_TTE call.
12089  */
12090 void
12091 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
12092 {
12093 	struct page	**pp;
12094 
12095 	(void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12096 	as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12097 }
12098 
12099 /*
12100  * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and
12101  * TTE_SUSPENDED bit set in tte. We do this so that we can handle
12102  * cross traps which cannot be handled while spinning in the
12103  * trap handlers. Simply enter and exit the kpr_suspendlock spin
12104  * mutex, which is held by the holder of the suspend bit, and then
12105  * retry the trapped instruction after unwinding.
12106  */
12107 /*ARGSUSED*/
12108 void
12109 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
12110 {
12111 	ASSERT(curthread != kreloc_thread);
12112 	mutex_enter(&kpr_suspendlock);
12113 	mutex_exit(&kpr_suspendlock);
12114 }
12115 
12116 /*
12117  * This routine could be optimized to reduce the number of xcalls by flushing
12118  * the entire TLBs if region reference count is above some threshold but the
12119  * tradeoff will depend on the size of the TLB. So for now flush the specific
12120  * page a context at a time.
12121  *
12122  * If uselocks is 0 then it's called after all cpus were captured and all the
12123  * hat locks were taken. In this case don't take the region lock by relying on
12124  * the order of list region update operations in hat_join_region(),
12125  * hat_leave_region() and hat_dup_region(). The ordering in those routines
12126  * guarantees that list is always forward walkable and reaches active sfmmus
12127  * regardless of where xc_attention() captures a cpu.
12128  */
12129 cpuset_t
12130 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
12131     struct hme_blk *hmeblkp, int uselocks)
12132 {
12133 	sfmmu_t	*sfmmup;
12134 	cpuset_t cpuset;
12135 	cpuset_t rcpuset;
12136 	hatlock_t *hatlockp;
12137 	uint_t rid = rgnp->rgn_id;
12138 	sf_rgn_link_t *rlink;
12139 	sf_scd_t *scdp;
12140 
12141 	ASSERT(hmeblkp->hblk_shared);
12142 	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
12143 	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
12144 
12145 	CPUSET_ZERO(rcpuset);
12146 	if (uselocks) {
12147 		mutex_enter(&rgnp->rgn_mutex);
12148 	}
12149 	sfmmup = rgnp->rgn_sfmmu_head;
12150 	while (sfmmup != NULL) {
12151 		if (uselocks) {
12152 			hatlockp = sfmmu_hat_enter(sfmmup);
12153 		}
12154 
12155 		/*
12156 		 * When an SCD is created the SCD hat is linked on the sfmmu
12157 		 * region lists for each hme region which is part of the
12158 		 * SCD. If we find an SCD hat, when walking these lists,
12159 		 * then we flush the shared TSBs, if we find a private hat,
12160 		 * which is part of an SCD, but where the region
12161 		 * is not part of the SCD then we flush the private TSBs.
12162 		 */
12163 		if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12164 		    !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
12165 			scdp = sfmmup->sfmmu_scdp;
12166 			if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
12167 				if (uselocks) {
12168 					sfmmu_hat_exit(hatlockp);
12169 				}
12170 				goto next;
12171 			}
12172 		}
12173 
12174 		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12175 
12176 		kpreempt_disable();
12177 		cpuset = sfmmup->sfmmu_cpusran;
12178 		CPUSET_AND(cpuset, cpu_ready_set);
12179 		CPUSET_DEL(cpuset, CPU->cpu_id);
12180 		SFMMU_XCALL_STATS(sfmmup);
12181 		xt_some(cpuset, vtag_flushpage_tl1,
12182 		    (uint64_t)addr, (uint64_t)sfmmup);
12183 		vtag_flushpage(addr, (uint64_t)sfmmup);
12184 		if (uselocks) {
12185 			sfmmu_hat_exit(hatlockp);
12186 		}
12187 		kpreempt_enable();
12188 		CPUSET_OR(rcpuset, cpuset);
12189 
12190 next:
12191 		/* LINTED: constant in conditional context */
12192 		SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12193 		ASSERT(rlink != NULL);
12194 		sfmmup = rlink->next;
12195 	}
12196 	if (uselocks) {
12197 		mutex_exit(&rgnp->rgn_mutex);
12198 	}
12199 	return (rcpuset);
12200 }
12201 
12202 /*
12203  * This routine takes an sfmmu pointer and the va for an adddress in an
12204  * ISM region as input and returns the corresponding region id in ism_rid.
12205  * The return value of 1 indicates that a region has been found and ism_rid
12206  * is valid, otherwise 0 is returned.
12207  */
12208 static int
12209 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12210 {
12211 	ism_blk_t	*ism_blkp;
12212 	int		i;
12213 	ism_map_t	*ism_map;
12214 #ifdef DEBUG
12215 	struct hat	*ism_hatid;
12216 #endif
12217 	ASSERT(sfmmu_hat_lock_held(sfmmup));
12218 
12219 	ism_blkp = sfmmup->sfmmu_iblk;
12220 	while (ism_blkp != NULL) {
12221 		ism_map = ism_blkp->iblk_maps;
12222 		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12223 			if ((va >= ism_start(ism_map[i])) &&
12224 			    (va < ism_end(ism_map[i]))) {
12225 
12226 				*ism_rid = ism_map[i].imap_rid;
12227 #ifdef DEBUG
12228 				ism_hatid = ism_map[i].imap_ismhat;
12229 				ASSERT(ism_hatid == ism_sfmmup);
12230 				ASSERT(ism_hatid->sfmmu_ismhat);
12231 #endif
12232 				return (1);
12233 			}
12234 		}
12235 		ism_blkp = ism_blkp->iblk_next;
12236 	}
12237 	return (0);
12238 }
12239 
12240 /*
12241  * Special routine to flush out ism mappings- TSBs, TLBs and D-caches.
12242  * This routine may be called with all cpu's captured. Therefore, the
12243  * caller is responsible for holding all locks and disabling kernel
12244  * preemption.
12245  */
12246 /* ARGSUSED */
12247 static void
12248 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12249 	struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12250 {
12251 	cpuset_t 	cpuset;
12252 	caddr_t 	va;
12253 	ism_ment_t	*ment;
12254 	sfmmu_t		*sfmmup;
12255 #ifdef VAC
12256 	int 		vcolor;
12257 #endif
12258 
12259 	sf_scd_t	*scdp;
12260 	uint_t		ism_rid;
12261 
12262 	ASSERT(!hmeblkp->hblk_shared);
12263 	/*
12264 	 * Walk the ism_hat's mapping list and flush the page
12265 	 * from every hat sharing this ism_hat. This routine
12266 	 * may be called while all cpu's have been captured.
12267 	 * Therefore we can't attempt to grab any locks. For now
12268 	 * this means we will protect the ism mapping list under
12269 	 * a single lock which will be grabbed by the caller.
12270 	 * If hat_share/unshare scalibility becomes a performance
12271 	 * problem then we may need to re-think ism mapping list locking.
12272 	 */
12273 	ASSERT(ism_sfmmup->sfmmu_ismhat);
12274 	ASSERT(MUTEX_HELD(&ism_mlist_lock));
12275 	addr = addr - ISMID_STARTADDR;
12276 
12277 	for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12278 
12279 		sfmmup = ment->iment_hat;
12280 
12281 		va = ment->iment_base_va;
12282 		va = (caddr_t)((uintptr_t)va  + (uintptr_t)addr);
12283 
12284 		/*
12285 		 * When an SCD is created the SCD hat is linked on the ism
12286 		 * mapping lists for each ISM segment which is part of the
12287 		 * SCD. If we find an SCD hat, when walking these lists,
12288 		 * then we flush the shared TSBs, if we find a private hat,
12289 		 * which is part of an SCD, but where the region
12290 		 * corresponding to this va is not part of the SCD then we
12291 		 * flush the private TSBs.
12292 		 */
12293 		if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12294 		    !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
12295 		    !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12296 			if (!find_ism_rid(sfmmup, ism_sfmmup, va,
12297 			    &ism_rid)) {
12298 				cmn_err(CE_PANIC,
12299 				    "can't find matching ISM rid!");
12300 			}
12301 
12302 			scdp = sfmmup->sfmmu_scdp;
12303 			if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
12304 			    SF_RGNMAP_TEST(scdp->scd_ismregion_map,
12305 			    ism_rid)) {
12306 				continue;
12307 			}
12308 		}
12309 		SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12310 
12311 		cpuset = sfmmup->sfmmu_cpusran;
12312 		CPUSET_AND(cpuset, cpu_ready_set);
12313 		CPUSET_DEL(cpuset, CPU->cpu_id);
12314 		SFMMU_XCALL_STATS(sfmmup);
12315 		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
12316 		    (uint64_t)sfmmup);
12317 		vtag_flushpage(va, (uint64_t)sfmmup);
12318 
12319 #ifdef VAC
12320 		/*
12321 		 * Flush D$
12322 		 * When flushing D$ we must flush all
12323 		 * cpu's. See sfmmu_cache_flush().
12324 		 */
12325 		if (cache_flush_flag == CACHE_FLUSH) {
12326 			cpuset = cpu_ready_set;
12327 			CPUSET_DEL(cpuset, CPU->cpu_id);
12328 
12329 			SFMMU_XCALL_STATS(sfmmup);
12330 			vcolor = addr_to_vcolor(va);
12331 			xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12332 			vac_flushpage(pfnum, vcolor);
12333 		}
12334 #endif	/* VAC */
12335 	}
12336 }
12337 
12338 /*
12339  * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of
12340  * a particular virtual address and ctx.  If noflush is set we do not
12341  * flush the TLB/TSB.  This function may or may not be called with the
12342  * HAT lock held.
12343  */
12344 static void
12345 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12346 	pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12347 	int hat_lock_held)
12348 {
12349 #ifdef VAC
12350 	int vcolor;
12351 #endif
12352 	cpuset_t cpuset;
12353 	hatlock_t *hatlockp;
12354 
12355 	ASSERT(!hmeblkp->hblk_shared);
12356 
12357 #if defined(lint) && !defined(VAC)
12358 	pfnum = pfnum;
12359 	cpu_flag = cpu_flag;
12360 	cache_flush_flag = cache_flush_flag;
12361 #endif
12362 
12363 	/*
12364 	 * There is no longer a need to protect against ctx being
12365 	 * stolen here since we don't store the ctx in the TSB anymore.
12366 	 */
12367 #ifdef VAC
12368 	vcolor = addr_to_vcolor(addr);
12369 #endif
12370 
12371 	/*
12372 	 * We must hold the hat lock during the flush of TLB,
12373 	 * to avoid a race with sfmmu_invalidate_ctx(), where
12374 	 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12375 	 * causing TLB demap routine to skip flush on that MMU.
12376 	 * If the context on a MMU has already been set to
12377 	 * INVALID_CONTEXT, we just get an extra flush on
12378 	 * that MMU.
12379 	 */
12380 	if (!hat_lock_held && !tlb_noflush)
12381 		hatlockp = sfmmu_hat_enter(sfmmup);
12382 
12383 	kpreempt_disable();
12384 	if (!tlb_noflush) {
12385 		/*
12386 		 * Flush the TSB and TLB.
12387 		 */
12388 		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12389 
12390 		cpuset = sfmmup->sfmmu_cpusran;
12391 		CPUSET_AND(cpuset, cpu_ready_set);
12392 		CPUSET_DEL(cpuset, CPU->cpu_id);
12393 
12394 		SFMMU_XCALL_STATS(sfmmup);
12395 
12396 		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12397 		    (uint64_t)sfmmup);
12398 
12399 		vtag_flushpage(addr, (uint64_t)sfmmup);
12400 	}
12401 
12402 	if (!hat_lock_held && !tlb_noflush)
12403 		sfmmu_hat_exit(hatlockp);
12404 
12405 #ifdef VAC
12406 	/*
12407 	 * Flush the D$
12408 	 *
12409 	 * Even if the ctx is stolen, we need to flush the
12410 	 * cache. Our ctx stealer only flushes the TLBs.
12411 	 */
12412 	if (cache_flush_flag == CACHE_FLUSH) {
12413 		if (cpu_flag & FLUSH_ALL_CPUS) {
12414 			cpuset = cpu_ready_set;
12415 		} else {
12416 			cpuset = sfmmup->sfmmu_cpusran;
12417 			CPUSET_AND(cpuset, cpu_ready_set);
12418 		}
12419 		CPUSET_DEL(cpuset, CPU->cpu_id);
12420 		SFMMU_XCALL_STATS(sfmmup);
12421 		xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12422 		vac_flushpage(pfnum, vcolor);
12423 	}
12424 #endif	/* VAC */
12425 	kpreempt_enable();
12426 }
12427 
12428 /*
12429  * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12430  * address and ctx.  If noflush is set we do not currently do anything.
12431  * This function may or may not be called with the HAT lock held.
12432  */
12433 static void
12434 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12435 	int tlb_noflush, int hat_lock_held)
12436 {
12437 	cpuset_t cpuset;
12438 	hatlock_t *hatlockp;
12439 
12440 	ASSERT(!hmeblkp->hblk_shared);
12441 
12442 	/*
12443 	 * If the process is exiting we have nothing to do.
12444 	 */
12445 	if (tlb_noflush)
12446 		return;
12447 
12448 	/*
12449 	 * Flush TSB.
12450 	 */
12451 	if (!hat_lock_held)
12452 		hatlockp = sfmmu_hat_enter(sfmmup);
12453 	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12454 
12455 	kpreempt_disable();
12456 
12457 	cpuset = sfmmup->sfmmu_cpusran;
12458 	CPUSET_AND(cpuset, cpu_ready_set);
12459 	CPUSET_DEL(cpuset, CPU->cpu_id);
12460 
12461 	SFMMU_XCALL_STATS(sfmmup);
12462 	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12463 
12464 	vtag_flushpage(addr, (uint64_t)sfmmup);
12465 
12466 	if (!hat_lock_held)
12467 		sfmmu_hat_exit(hatlockp);
12468 
12469 	kpreempt_enable();
12470 
12471 }
12472 
12473 /*
12474  * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12475  * call handler that can flush a range of pages to save on xcalls.
12476  */
12477 static int sfmmu_xcall_save;
12478 
12479 /*
12480  * this routine is never used for demaping addresses backed by SRD hmeblks.
12481  */
12482 static void
12483 sfmmu_tlb_range_demap(demap_range_t *dmrp)
12484 {
12485 	sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12486 	hatlock_t *hatlockp;
12487 	cpuset_t cpuset;
12488 	uint64_t sfmmu_pgcnt;
12489 	pgcnt_t pgcnt = 0;
12490 	int pgunload = 0;
12491 	int dirtypg = 0;
12492 	caddr_t addr = dmrp->dmr_addr;
12493 	caddr_t eaddr;
12494 	uint64_t bitvec = dmrp->dmr_bitvec;
12495 
12496 	ASSERT(bitvec & 1);
12497 
12498 	/*
12499 	 * Flush TSB and calculate number of pages to flush.
12500 	 */
12501 	while (bitvec != 0) {
12502 		dirtypg = 0;
12503 		/*
12504 		 * Find the first page to flush and then count how many
12505 		 * pages there are after it that also need to be flushed.
12506 		 * This way the number of TSB flushes is minimized.
12507 		 */
12508 		while ((bitvec & 1) == 0) {
12509 			pgcnt++;
12510 			addr += MMU_PAGESIZE;
12511 			bitvec >>= 1;
12512 		}
12513 		while (bitvec & 1) {
12514 			dirtypg++;
12515 			bitvec >>= 1;
12516 		}
12517 		eaddr = addr + ptob(dirtypg);
12518 		hatlockp = sfmmu_hat_enter(sfmmup);
12519 		sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12520 		sfmmu_hat_exit(hatlockp);
12521 		pgunload += dirtypg;
12522 		addr = eaddr;
12523 		pgcnt += dirtypg;
12524 	}
12525 
12526 	ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12527 	if (sfmmup->sfmmu_free == 0) {
12528 		addr = dmrp->dmr_addr;
12529 		bitvec = dmrp->dmr_bitvec;
12530 
12531 		/*
12532 		 * make sure it has SFMMU_PGCNT_SHIFT bits only,
12533 		 * as it will be used to pack argument for xt_some
12534 		 */
12535 		ASSERT((pgcnt > 0) &&
12536 		    (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12537 
12538 		/*
12539 		 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in
12540 		 * the low 6 bits of sfmmup. This is doable since pgcnt
12541 		 * always >= 1.
12542 		 */
12543 		ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12544 		sfmmu_pgcnt = (uint64_t)sfmmup |
12545 		    ((pgcnt - 1) & SFMMU_PGCNT_MASK);
12546 
12547 		/*
12548 		 * We must hold the hat lock during the flush of TLB,
12549 		 * to avoid a race with sfmmu_invalidate_ctx(), where
12550 		 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12551 		 * causing TLB demap routine to skip flush on that MMU.
12552 		 * If the context on a MMU has already been set to
12553 		 * INVALID_CONTEXT, we just get an extra flush on
12554 		 * that MMU.
12555 		 */
12556 		hatlockp = sfmmu_hat_enter(sfmmup);
12557 		kpreempt_disable();
12558 
12559 		cpuset = sfmmup->sfmmu_cpusran;
12560 		CPUSET_AND(cpuset, cpu_ready_set);
12561 		CPUSET_DEL(cpuset, CPU->cpu_id);
12562 
12563 		SFMMU_XCALL_STATS(sfmmup);
12564 		xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12565 		    sfmmu_pgcnt);
12566 
12567 		for (; bitvec != 0; bitvec >>= 1) {
12568 			if (bitvec & 1)
12569 				vtag_flushpage(addr, (uint64_t)sfmmup);
12570 			addr += MMU_PAGESIZE;
12571 		}
12572 		kpreempt_enable();
12573 		sfmmu_hat_exit(hatlockp);
12574 
12575 		sfmmu_xcall_save += (pgunload-1);
12576 	}
12577 	dmrp->dmr_bitvec = 0;
12578 }
12579 
12580 /*
12581  * In cases where we need to synchronize with TLB/TSB miss trap
12582  * handlers, _and_ need to flush the TLB, it's a lot easier to
12583  * throw away the context from the process than to do a
12584  * special song and dance to keep things consistent for the
12585  * handlers.
12586  *
12587  * Since the process suddenly ends up without a context and our caller
12588  * holds the hat lock, threads that fault after this function is called
12589  * will pile up on the lock.  We can then do whatever we need to
12590  * atomically from the context of the caller.  The first blocked thread
12591  * to resume executing will get the process a new context, and the
12592  * process will resume executing.
12593  *
12594  * One added advantage of this approach is that on MMUs that
12595  * support a "flush all" operation, we will delay the flush until
12596  * cnum wrap-around, and then flush the TLB one time.  This
12597  * is rather rare, so it's a lot less expensive than making 8000
12598  * x-calls to flush the TLB 8000 times.
12599  *
12600  * A per-process (PP) lock is used to synchronize ctx allocations in
12601  * resume() and ctx invalidations here.
12602  */
12603 static void
12604 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12605 {
12606 	cpuset_t cpuset;
12607 	int cnum, currcnum;
12608 	mmu_ctx_t *mmu_ctxp;
12609 	int i;
12610 	uint_t pstate_save;
12611 
12612 	SFMMU_STAT(sf_ctx_inv);
12613 
12614 	ASSERT(sfmmu_hat_lock_held(sfmmup));
12615 	ASSERT(sfmmup != ksfmmup);
12616 
12617 	kpreempt_disable();
12618 
12619 	mmu_ctxp = CPU_MMU_CTXP(CPU);
12620 	ASSERT(mmu_ctxp);
12621 	ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12622 	ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12623 
12624 	currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12625 
12626 	pstate_save = sfmmu_disable_intrs();
12627 
12628 	lock_set(&sfmmup->sfmmu_ctx_lock);	/* acquire PP lock */
12629 	/* set HAT cnum invalid across all context domains. */
12630 	for (i = 0; i < max_mmu_ctxdoms; i++) {
12631 
12632 		cnum = 	sfmmup->sfmmu_ctxs[i].cnum;
12633 		if (cnum == INVALID_CONTEXT) {
12634 			continue;
12635 		}
12636 
12637 		sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12638 	}
12639 	membar_enter();	/* make sure globally visible to all CPUs */
12640 	lock_clear(&sfmmup->sfmmu_ctx_lock);	/* release PP lock */
12641 
12642 	sfmmu_enable_intrs(pstate_save);
12643 
12644 	cpuset = sfmmup->sfmmu_cpusran;
12645 	CPUSET_DEL(cpuset, CPU->cpu_id);
12646 	CPUSET_AND(cpuset, cpu_ready_set);
12647 	if (!CPUSET_ISNULL(cpuset)) {
12648 		SFMMU_XCALL_STATS(sfmmup);
12649 		xt_some(cpuset, sfmmu_raise_tsb_exception,
12650 		    (uint64_t)sfmmup, INVALID_CONTEXT);
12651 		xt_sync(cpuset);
12652 		SFMMU_STAT(sf_tsb_raise_exception);
12653 		SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12654 	}
12655 
12656 	/*
12657 	 * If the hat to-be-invalidated is the same as the current
12658 	 * process on local CPU we need to invalidate
12659 	 * this CPU context as well.
12660 	 */
12661 	if ((sfmmu_getctx_sec() == currcnum) &&
12662 	    (currcnum != INVALID_CONTEXT)) {
12663 		/* sets shared context to INVALID too */
12664 		sfmmu_setctx_sec(INVALID_CONTEXT);
12665 		sfmmu_clear_utsbinfo();
12666 	}
12667 
12668 	SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12669 
12670 	kpreempt_enable();
12671 
12672 	/*
12673 	 * we hold the hat lock, so nobody should allocate a context
12674 	 * for us yet
12675 	 */
12676 	ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12677 }
12678 
12679 #ifdef VAC
12680 /*
12681  * We need to flush the cache in all cpus.  It is possible that
12682  * a process referenced a page as cacheable but has sinced exited
12683  * and cleared the mapping list.  We still to flush it but have no
12684  * state so all cpus is the only alternative.
12685  */
12686 void
12687 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12688 {
12689 	cpuset_t cpuset;
12690 
12691 	kpreempt_disable();
12692 	cpuset = cpu_ready_set;
12693 	CPUSET_DEL(cpuset, CPU->cpu_id);
12694 	SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
12695 	xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12696 	xt_sync(cpuset);
12697 	vac_flushpage(pfnum, vcolor);
12698 	kpreempt_enable();
12699 }
12700 
12701 void
12702 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12703 {
12704 	cpuset_t cpuset;
12705 
12706 	ASSERT(vcolor >= 0);
12707 
12708 	kpreempt_disable();
12709 	cpuset = cpu_ready_set;
12710 	CPUSET_DEL(cpuset, CPU->cpu_id);
12711 	SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
12712 	xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12713 	xt_sync(cpuset);
12714 	vac_flushcolor(vcolor, pfnum);
12715 	kpreempt_enable();
12716 }
12717 #endif	/* VAC */
12718 
12719 /*
12720  * We need to prevent processes from accessing the TSB using a cached physical
12721  * address.  It's alright if they try to access the TSB via virtual address
12722  * since they will just fault on that virtual address once the mapping has
12723  * been suspended.
12724  */
12725 #pragma weak sendmondo_in_recover
12726 
12727 /* ARGSUSED */
12728 static int
12729 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12730 {
12731 	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12732 	sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12733 	hatlock_t *hatlockp;
12734 	sf_scd_t *scdp;
12735 
12736 	if (flags != HAT_PRESUSPEND)
12737 		return (0);
12738 
12739 	/*
12740 	 * If tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
12741 	 * be a shared hat, then set SCD's tsbinfo's flag.
12742 	 * If tsb is not shared, sfmmup is a private hat, then set
12743 	 * its private tsbinfo's flag.
12744 	 */
12745 	hatlockp = sfmmu_hat_enter(sfmmup);
12746 	tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12747 
12748 	if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12749 		sfmmu_tsb_inv_ctx(sfmmup);
12750 		sfmmu_hat_exit(hatlockp);
12751 	} else {
12752 		/* release lock on the shared hat */
12753 		sfmmu_hat_exit(hatlockp);
12754 		/* sfmmup is a shared hat */
12755 		ASSERT(sfmmup->sfmmu_scdhat);
12756 		scdp = sfmmup->sfmmu_scdp;
12757 		ASSERT(scdp != NULL);
12758 		/* get private hat from the scd list */
12759 		mutex_enter(&scdp->scd_mutex);
12760 		sfmmup = scdp->scd_sf_list;
12761 		while (sfmmup != NULL) {
12762 			hatlockp = sfmmu_hat_enter(sfmmup);
12763 			/*
12764 			 * We do not call sfmmu_tsb_inv_ctx here because
12765 			 * sendmondo_in_recover check is only needed for
12766 			 * sun4u.
12767 			 */
12768 			sfmmu_invalidate_ctx(sfmmup);
12769 			sfmmu_hat_exit(hatlockp);
12770 			sfmmup = sfmmup->sfmmu_scd_link.next;
12771 
12772 		}
12773 		mutex_exit(&scdp->scd_mutex);
12774 	}
12775 	return (0);
12776 }
12777 
12778 static void
12779 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12780 {
12781 	extern uint32_t sendmondo_in_recover;
12782 
12783 	ASSERT(sfmmu_hat_lock_held(sfmmup));
12784 
12785 	/*
12786 	 * For Cheetah+ Erratum 25:
12787 	 * Wait for any active recovery to finish.  We can't risk
12788 	 * relocating the TSB of the thread running mondo_recover_proc()
12789 	 * since, if we did that, we would deadlock.  The scenario we are
12790 	 * trying to avoid is as follows:
12791 	 *
12792 	 * THIS CPU			RECOVER CPU
12793 	 * --------			-----------
12794 	 *				Begins recovery, walking through TSB
12795 	 * hat_pagesuspend() TSB TTE
12796 	 *				TLB miss on TSB TTE, spins at TL1
12797 	 * xt_sync()
12798 	 *	send_mondo_timeout()
12799 	 *	mondo_recover_proc()
12800 	 *	((deadlocked))
12801 	 *
12802 	 * The second half of the workaround is that mondo_recover_proc()
12803 	 * checks to see if the tsb_info has the RELOC flag set, and if it
12804 	 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12805 	 * and hence avoiding the TLB miss that could result in a deadlock.
12806 	 */
12807 	if (&sendmondo_in_recover) {
12808 		membar_enter();	/* make sure RELOC flag visible */
12809 		while (sendmondo_in_recover) {
12810 			drv_usecwait(1);
12811 			membar_consumer();
12812 		}
12813 	}
12814 
12815 	sfmmu_invalidate_ctx(sfmmup);
12816 }
12817 
12818 /* ARGSUSED */
12819 static int
12820 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12821 	void *tsbinfo, pfn_t newpfn)
12822 {
12823 	hatlock_t *hatlockp;
12824 	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12825 	sfmmu_t	*sfmmup = tsbinfop->tsb_sfmmu;
12826 
12827 	if (flags != HAT_POSTUNSUSPEND)
12828 		return (0);
12829 
12830 	hatlockp = sfmmu_hat_enter(sfmmup);
12831 
12832 	SFMMU_STAT(sf_tsb_reloc);
12833 
12834 	/*
12835 	 * The process may have swapped out while we were relocating one
12836 	 * of its TSBs.  If so, don't bother doing the setup since the
12837 	 * process can't be using the memory anymore.
12838 	 */
12839 	if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12840 		ASSERT(va == tsbinfop->tsb_va);
12841 		sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12842 
12843 		if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12844 			sfmmu_inv_tsb(tsbinfop->tsb_va,
12845 			    TSB_BYTES(tsbinfop->tsb_szc));
12846 			tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12847 		}
12848 	}
12849 
12850 	membar_exit();
12851 	tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12852 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12853 
12854 	sfmmu_hat_exit(hatlockp);
12855 
12856 	return (0);
12857 }
12858 
12859 /*
12860  * Allocate and initialize a tsb_info structure.  Note that we may or may not
12861  * allocate a TSB here, depending on the flags passed in.
12862  */
12863 static int
12864 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12865 	uint_t flags, sfmmu_t *sfmmup)
12866 {
12867 	int err;
12868 
12869 	*tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12870 	    sfmmu_tsbinfo_cache, KM_SLEEP);
12871 
12872 	if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12873 	    tsb_szc, flags, sfmmup)) != 0) {
12874 		kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12875 		SFMMU_STAT(sf_tsb_allocfail);
12876 		*tsbinfopp = NULL;
12877 		return (err);
12878 	}
12879 	SFMMU_STAT(sf_tsb_alloc);
12880 
12881 	/*
12882 	 * Bump the TSB size counters for this TSB size.
12883 	 */
12884 	(*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
12885 	return (0);
12886 }
12887 
12888 static void
12889 sfmmu_tsb_free(struct tsb_info *tsbinfo)
12890 {
12891 	caddr_t tsbva = tsbinfo->tsb_va;
12892 	uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12893 	struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12894 	vmem_t	*vmp = tsbinfo->tsb_vmp;
12895 
12896 	/*
12897 	 * If we allocated this TSB from relocatable kernel memory, then we
12898 	 * need to uninstall the callback handler.
12899 	 */
12900 	if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12901 		uintptr_t slab_mask;
12902 		caddr_t slab_vaddr;
12903 		page_t **ppl;
12904 		int ret;
12905 
12906 		ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12907 		if (tsb_size > MMU_PAGESIZE4M)
12908 			slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12909 		else
12910 			slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12911 		slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12912 
12913 		ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12914 		ASSERT(ret == 0);
12915 		hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12916 		    0, NULL);
12917 		as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12918 	}
12919 
12920 	if (kmem_cachep != NULL) {
12921 		kmem_cache_free(kmem_cachep, tsbva);
12922 	} else {
12923 		vmem_xfree(vmp, (void *)tsbva, tsb_size);
12924 	}
12925 	tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12926 	atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12927 }
12928 
12929 static void
12930 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12931 {
12932 	if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12933 		sfmmu_tsb_free(tsbinfo);
12934 	}
12935 	kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
12936 
12937 }
12938 
12939 /*
12940  * Setup all the references to physical memory for this tsbinfo.
12941  * The underlying page(s) must be locked.
12942  */
12943 static void
12944 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
12945 {
12946 	ASSERT(pfn != PFN_INVALID);
12947 	ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
12948 
12949 #ifndef sun4v
12950 	if (tsbinfo->tsb_szc == 0) {
12951 		sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
12952 		    PROT_WRITE|PROT_READ, TTE8K);
12953 	} else {
12954 		/*
12955 		 * Round down PA and use a large mapping; the handlers will
12956 		 * compute the TSB pointer at the correct offset into the
12957 		 * big virtual page.  NOTE: this assumes all TSBs larger
12958 		 * than 8K must come from physically contiguous slabs of
12959 		 * size tsb_slab_size.
12960 		 */
12961 		sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
12962 		    PROT_WRITE|PROT_READ, tsb_slab_ttesz);
12963 	}
12964 	tsbinfo->tsb_pa = ptob(pfn);
12965 
12966 	TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
12967 	TTE_SET_MOD(&tsbinfo->tsb_tte);    /* enable writes */
12968 
12969 	ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
12970 	ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
12971 #else /* sun4v */
12972 	tsbinfo->tsb_pa = ptob(pfn);
12973 #endif /* sun4v */
12974 }
12975 
12976 
12977 /*
12978  * Returns zero on success, ENOMEM if over the high water mark,
12979  * or EAGAIN if the caller needs to retry with a smaller TSB
12980  * size (or specify TSB_FORCEALLOC if the allocation can't fail).
12981  *
12982  * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
12983  * is specified and the TSB requested is PAGESIZE, though it
12984  * may sleep waiting for memory if sufficient memory is not
12985  * available.
12986  */
12987 static int
12988 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
12989     int tsbcode, uint_t flags, sfmmu_t *sfmmup)
12990 {
12991 	caddr_t vaddr = NULL;
12992 	caddr_t slab_vaddr;
12993 	uintptr_t slab_mask;
12994 	int tsbbytes = TSB_BYTES(tsbcode);
12995 	int lowmem = 0;
12996 	struct kmem_cache *kmem_cachep = NULL;
12997 	vmem_t *vmp = NULL;
12998 	lgrp_id_t lgrpid = LGRP_NONE;
12999 	pfn_t pfn;
13000 	uint_t cbflags = HAC_SLEEP;
13001 	page_t **pplist;
13002 	int ret;
13003 
13004 	ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
13005 	if (tsbbytes > MMU_PAGESIZE4M)
13006 		slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
13007 	else
13008 		slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
13009 
13010 	if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
13011 		flags |= TSB_ALLOC;
13012 
13013 	ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
13014 
13015 	tsbinfo->tsb_sfmmu = sfmmup;
13016 
13017 	/*
13018 	 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
13019 	 * return.
13020 	 */
13021 	if ((flags & TSB_ALLOC) == 0) {
13022 		tsbinfo->tsb_szc = tsbcode;
13023 		tsbinfo->tsb_ttesz_mask = tteszmask;
13024 		tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
13025 		tsbinfo->tsb_pa = -1;
13026 		tsbinfo->tsb_tte.ll = 0;
13027 		tsbinfo->tsb_next = NULL;
13028 		tsbinfo->tsb_flags = TSB_SWAPPED;
13029 		tsbinfo->tsb_cache = NULL;
13030 		tsbinfo->tsb_vmp = NULL;
13031 		return (0);
13032 	}
13033 
13034 #ifdef DEBUG
13035 	/*
13036 	 * For debugging:
13037 	 * Randomly force allocation failures every tsb_alloc_mtbf
13038 	 * tries if TSB_FORCEALLOC is not specified.  This will
13039 	 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
13040 	 * it is even, to allow testing of both failure paths...
13041 	 */
13042 	if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
13043 	    (tsb_alloc_count++ == tsb_alloc_mtbf)) {
13044 		tsb_alloc_count = 0;
13045 		tsb_alloc_fail_mtbf++;
13046 		return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
13047 	}
13048 #endif	/* DEBUG */
13049 
13050 	/*
13051 	 * Enforce high water mark if we are not doing a forced allocation
13052 	 * and are not shrinking a process' TSB.
13053 	 */
13054 	if ((flags & TSB_SHRINK) == 0 &&
13055 	    (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
13056 		if ((flags & TSB_FORCEALLOC) == 0)
13057 			return (ENOMEM);
13058 		lowmem = 1;
13059 	}
13060 
13061 	/*
13062 	 * Allocate from the correct location based upon the size of the TSB
13063 	 * compared to the base page size, and what memory conditions dictate.
13064 	 * Note we always do nonblocking allocations from the TSB arena since
13065 	 * we don't want memory fragmentation to cause processes to block
13066 	 * indefinitely waiting for memory; until the kernel algorithms that
13067 	 * coalesce large pages are improved this is our best option.
13068 	 *
13069 	 * Algorithm:
13070 	 *	If allocating a "large" TSB (>8K), allocate from the
13071 	 *		appropriate kmem_tsb_default_arena vmem arena
13072 	 *	else if low on memory or the TSB_FORCEALLOC flag is set or
13073 	 *	tsb_forceheap is set
13074 	 *		Allocate from kernel heap via sfmmu_tsb8k_cache with
13075 	 *		KM_SLEEP (never fails)
13076 	 *	else
13077 	 *		Allocate from appropriate sfmmu_tsb_cache with
13078 	 *		KM_NOSLEEP
13079 	 *	endif
13080 	 */
13081 	if (tsb_lgrp_affinity)
13082 		lgrpid = lgrp_home_id(curthread);
13083 	if (lgrpid == LGRP_NONE)
13084 		lgrpid = 0;	/* use lgrp of boot CPU */
13085 
13086 	if (tsbbytes > MMU_PAGESIZE) {
13087 		if (tsbbytes > MMU_PAGESIZE4M) {
13088 			vmp = kmem_bigtsb_default_arena[lgrpid];
13089 			vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
13090 			    0, 0, NULL, NULL, VM_NOSLEEP);
13091 		} else {
13092 			vmp = kmem_tsb_default_arena[lgrpid];
13093 			vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
13094 			    0, 0, NULL, NULL, VM_NOSLEEP);
13095 		}
13096 #ifdef	DEBUG
13097 	} else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
13098 #else	/* !DEBUG */
13099 	} else if (lowmem || (flags & TSB_FORCEALLOC)) {
13100 #endif	/* DEBUG */
13101 		kmem_cachep = sfmmu_tsb8k_cache;
13102 		vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
13103 		ASSERT(vaddr != NULL);
13104 	} else {
13105 		kmem_cachep = sfmmu_tsb_cache[lgrpid];
13106 		vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
13107 	}
13108 
13109 	tsbinfo->tsb_cache = kmem_cachep;
13110 	tsbinfo->tsb_vmp = vmp;
13111 
13112 	if (vaddr == NULL) {
13113 		return (EAGAIN);
13114 	}
13115 
13116 	atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
13117 	kmem_cachep = tsbinfo->tsb_cache;
13118 
13119 	/*
13120 	 * If we are allocating from outside the cage, then we need to
13121 	 * register a relocation callback handler.  Note that for now
13122 	 * since pseudo mappings always hang off of the slab's root page,
13123 	 * we need only lock the first 8K of the TSB slab.  This is a bit
13124 	 * hacky but it is good for performance.
13125 	 */
13126 	if (kmem_cachep != sfmmu_tsb8k_cache) {
13127 		slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
13128 		ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
13129 		ASSERT(ret == 0);
13130 		ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
13131 		    cbflags, (void *)tsbinfo, &pfn, NULL);
13132 
13133 		/*
13134 		 * Need to free up resources if we could not successfully
13135 		 * add the callback function and return an error condition.
13136 		 */
13137 		if (ret != 0) {
13138 			if (kmem_cachep) {
13139 				kmem_cache_free(kmem_cachep, vaddr);
13140 			} else {
13141 				vmem_xfree(vmp, (void *)vaddr, tsbbytes);
13142 			}
13143 			as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
13144 			    S_WRITE);
13145 			return (EAGAIN);
13146 		}
13147 	} else {
13148 		/*
13149 		 * Since allocation of 8K TSBs from heap is rare and occurs
13150 		 * during memory pressure we allocate them from permanent
13151 		 * memory rather than using callbacks to get the PFN.
13152 		 */
13153 		pfn = hat_getpfnum(kas.a_hat, vaddr);
13154 	}
13155 
13156 	tsbinfo->tsb_va = vaddr;
13157 	tsbinfo->tsb_szc = tsbcode;
13158 	tsbinfo->tsb_ttesz_mask = tteszmask;
13159 	tsbinfo->tsb_next = NULL;
13160 	tsbinfo->tsb_flags = 0;
13161 
13162 	sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
13163 
13164 	sfmmu_inv_tsb(vaddr, tsbbytes);
13165 
13166 	if (kmem_cachep != sfmmu_tsb8k_cache) {
13167 		as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
13168 	}
13169 
13170 	return (0);
13171 }
13172 
13173 /*
13174  * Initialize per cpu tsb and per cpu tsbmiss_area
13175  */
13176 void
13177 sfmmu_init_tsbs(void)
13178 {
13179 	int i;
13180 	struct tsbmiss	*tsbmissp;
13181 	struct kpmtsbm	*kpmtsbmp;
13182 #ifndef sun4v
13183 	extern int	dcache_line_mask;
13184 #endif /* sun4v */
13185 	extern uint_t	vac_colors;
13186 
13187 	/*
13188 	 * Init. tsb miss area.
13189 	 */
13190 	tsbmissp = tsbmiss_area;
13191 
13192 	for (i = 0; i < NCPU; tsbmissp++, i++) {
13193 		/*
13194 		 * initialize the tsbmiss area.
13195 		 * Do this for all possible CPUs as some may be added
13196 		 * while the system is running. There is no cost to this.
13197 		 */
13198 		tsbmissp->ksfmmup = ksfmmup;
13199 #ifndef sun4v
13200 		tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
13201 #endif /* sun4v */
13202 		tsbmissp->khashstart =
13203 		    (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
13204 		tsbmissp->uhashstart =
13205 		    (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
13206 		tsbmissp->khashsz = khmehash_num;
13207 		tsbmissp->uhashsz = uhmehash_num;
13208 	}
13209 
13210 	sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
13211 	    sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
13212 
13213 	if (kpm_enable == 0)
13214 		return;
13215 
13216 	/* -- Begin KPM specific init -- */
13217 
13218 	if (kpm_smallpages) {
13219 		/*
13220 		 * If we're using base pagesize pages for seg_kpm
13221 		 * mappings, we use the kernel TSB since we can't afford
13222 		 * to allocate a second huge TSB for these mappings.
13223 		 */
13224 		kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13225 		kpm_tsbsz = ktsb_szcode;
13226 		kpmsm_tsbbase = kpm_tsbbase;
13227 		kpmsm_tsbsz = kpm_tsbsz;
13228 	} else {
13229 		/*
13230 		 * In VAC conflict case, just put the entries in the
13231 		 * kernel 8K indexed TSB for now so we can find them.
13232 		 * This could really be changed in the future if we feel
13233 		 * the need...
13234 		 */
13235 		kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13236 		kpmsm_tsbsz = ktsb_szcode;
13237 		kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
13238 		kpm_tsbsz = ktsb4m_szcode;
13239 	}
13240 
13241 	kpmtsbmp = kpmtsbm_area;
13242 	for (i = 0; i < NCPU; kpmtsbmp++, i++) {
13243 		/*
13244 		 * Initialize the kpmtsbm area.
13245 		 * Do this for all possible CPUs as some may be added
13246 		 * while the system is running. There is no cost to this.
13247 		 */
13248 		kpmtsbmp->vbase = kpm_vbase;
13249 		kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
13250 		kpmtsbmp->sz_shift = kpm_size_shift;
13251 		kpmtsbmp->kpmp_shift = kpmp_shift;
13252 		kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
13253 		if (kpm_smallpages == 0) {
13254 			kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
13255 			kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
13256 		} else {
13257 			kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
13258 			kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
13259 		}
13260 		kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
13261 		kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
13262 #ifdef	DEBUG
13263 		kpmtsbmp->flags |= (kpm_tsbmtl) ?  KPMTSBM_TLTSBM_FLAG : 0;
13264 #endif	/* DEBUG */
13265 		if (ktsb_phys)
13266 			kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
13267 	}
13268 
13269 	/* -- End KPM specific init -- */
13270 }
13271 
13272 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */
13273 struct tsb_info ktsb_info[2];
13274 
13275 /*
13276  * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
13277  */
13278 void
13279 sfmmu_init_ktsbinfo()
13280 {
13281 	ASSERT(ksfmmup != NULL);
13282 	ASSERT(ksfmmup->sfmmu_tsb == NULL);
13283 	/*
13284 	 * Allocate tsbinfos for kernel and copy in data
13285 	 * to make debug easier and sun4v setup easier.
13286 	 */
13287 	ktsb_info[0].tsb_sfmmu = ksfmmup;
13288 	ktsb_info[0].tsb_szc = ktsb_szcode;
13289 	ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
13290 	ktsb_info[0].tsb_va = ktsb_base;
13291 	ktsb_info[0].tsb_pa = ktsb_pbase;
13292 	ktsb_info[0].tsb_flags = 0;
13293 	ktsb_info[0].tsb_tte.ll = 0;
13294 	ktsb_info[0].tsb_cache = NULL;
13295 
13296 	ktsb_info[1].tsb_sfmmu = ksfmmup;
13297 	ktsb_info[1].tsb_szc = ktsb4m_szcode;
13298 	ktsb_info[1].tsb_ttesz_mask = TSB4M;
13299 	ktsb_info[1].tsb_va = ktsb4m_base;
13300 	ktsb_info[1].tsb_pa = ktsb4m_pbase;
13301 	ktsb_info[1].tsb_flags = 0;
13302 	ktsb_info[1].tsb_tte.ll = 0;
13303 	ktsb_info[1].tsb_cache = NULL;
13304 
13305 	/* Link them into ksfmmup. */
13306 	ktsb_info[0].tsb_next = &ktsb_info[1];
13307 	ktsb_info[1].tsb_next = NULL;
13308 	ksfmmup->sfmmu_tsb = &ktsb_info[0];
13309 
13310 	sfmmu_setup_tsbinfo(ksfmmup);
13311 }
13312 
13313 /*
13314  * Cache the last value returned from va_to_pa().  If the VA specified
13315  * in the current call to cached_va_to_pa() maps to the same Page (as the
13316  * previous call to cached_va_to_pa()), then compute the PA using
13317  * cached info, else call va_to_pa().
13318  *
13319  * Note: this function is neither MT-safe nor consistent in the presence
13320  * of multiple, interleaved threads.  This function was created to enable
13321  * an optimization used during boot (at a point when there's only one thread
13322  * executing on the "boot CPU", and before startup_vm() has been called).
13323  */
13324 static uint64_t
13325 cached_va_to_pa(void *vaddr)
13326 {
13327 	static uint64_t prev_vaddr_base = 0;
13328 	static uint64_t prev_pfn = 0;
13329 
13330 	if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13331 		return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13332 	} else {
13333 		uint64_t pa = va_to_pa(vaddr);
13334 
13335 		if (pa != ((uint64_t)-1)) {
13336 			/*
13337 			 * Computed physical address is valid.  Cache its
13338 			 * related info for the next cached_va_to_pa() call.
13339 			 */
13340 			prev_pfn = pa & MMU_PAGEMASK;
13341 			prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13342 		}
13343 
13344 		return (pa);
13345 	}
13346 }
13347 
13348 /*
13349  * Carve up our nucleus hblk region.  We may allocate more hblks than
13350  * asked due to rounding errors but we are guaranteed to have at least
13351  * enough space to allocate the requested number of hblk8's and hblk1's.
13352  */
13353 void
13354 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13355 {
13356 	struct hme_blk *hmeblkp;
13357 	size_t hme8blk_sz, hme1blk_sz;
13358 	size_t i;
13359 	size_t hblk8_bound;
13360 	ulong_t j = 0, k = 0;
13361 
13362 	ASSERT(addr != NULL && size != 0);
13363 
13364 	/* Need to use proper structure alignment */
13365 	hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13366 	hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13367 
13368 	nucleus_hblk8.list = (void *)addr;
13369 	nucleus_hblk8.index = 0;
13370 
13371 	/*
13372 	 * Use as much memory as possible for hblk8's since we
13373 	 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13374 	 * We need to hold back enough space for the hblk1's which
13375 	 * we'll allocate next.
13376 	 */
13377 	hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
13378 	for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13379 		hmeblkp = (struct hme_blk *)addr;
13380 		addr += hme8blk_sz;
13381 		hmeblkp->hblk_nuc_bit = 1;
13382 		hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13383 	}
13384 	nucleus_hblk8.len = j;
13385 	ASSERT(j >= nhblk8);
13386 	SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13387 
13388 	nucleus_hblk1.list = (void *)addr;
13389 	nucleus_hblk1.index = 0;
13390 	for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13391 		hmeblkp = (struct hme_blk *)addr;
13392 		addr += hme1blk_sz;
13393 		hmeblkp->hblk_nuc_bit = 1;
13394 		hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13395 	}
13396 	ASSERT(k >= nhblk1);
13397 	nucleus_hblk1.len = k;
13398 	SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13399 }
13400 
13401 /*
13402  * This function is currently not supported on this platform. For what
13403  * it's supposed to do, see hat.c and hat_srmmu.c
13404  */
13405 /* ARGSUSED */
13406 faultcode_t
13407 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13408     uint_t flags)
13409 {
13410 	ASSERT(hat->sfmmu_xhat_provider == NULL);
13411 	return (FC_NOSUPPORT);
13412 }
13413 
13414 /*
13415  * Searchs the mapping list of the page for a mapping of the same size. If not
13416  * found the corresponding bit is cleared in the p_index field. When large
13417  * pages are more prevalent in the system, we can maintain the mapping list
13418  * in order and we don't have to traverse the list each time. Just check the
13419  * next and prev entries, and if both are of different size, we clear the bit.
13420  */
13421 static void
13422 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13423 {
13424 	struct sf_hment *sfhmep;
13425 	struct hme_blk *hmeblkp;
13426 	int	index;
13427 	pgcnt_t	npgs;
13428 
13429 	ASSERT(ttesz > TTE8K);
13430 
13431 	ASSERT(sfmmu_mlist_held(pp));
13432 
13433 	ASSERT(PP_ISMAPPED_LARGE(pp));
13434 
13435 	/*
13436 	 * Traverse mapping list looking for another mapping of same size.
13437 	 * since we only want to clear index field if all mappings of
13438 	 * that size are gone.
13439 	 */
13440 
13441 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13442 		if (IS_PAHME(sfhmep))
13443 			continue;
13444 		hmeblkp = sfmmu_hmetohblk(sfhmep);
13445 		if (hmeblkp->hblk_xhat_bit)
13446 			continue;
13447 		if (hme_size(sfhmep) == ttesz) {
13448 			/*
13449 			 * another mapping of the same size. don't clear index.
13450 			 */
13451 			return;
13452 		}
13453 	}
13454 
13455 	/*
13456 	 * Clear the p_index bit for large page.
13457 	 */
13458 	index = PAGESZ_TO_INDEX(ttesz);
13459 	npgs = TTEPAGES(ttesz);
13460 	while (npgs-- > 0) {
13461 		ASSERT(pp->p_index & index);
13462 		pp->p_index &= ~index;
13463 		pp = PP_PAGENEXT(pp);
13464 	}
13465 }
13466 
13467 /*
13468  * return supported features
13469  */
13470 /* ARGSUSED */
13471 int
13472 hat_supported(enum hat_features feature, void *arg)
13473 {
13474 	switch (feature) {
13475 	case    HAT_SHARED_PT:
13476 	case	HAT_DYNAMIC_ISM_UNMAP:
13477 	case	HAT_VMODSORT:
13478 		return (1);
13479 	case	HAT_SHARED_REGIONS:
13480 		if (shctx_on)
13481 			return (1);
13482 		else
13483 			return (0);
13484 	default:
13485 		return (0);
13486 	}
13487 }
13488 
13489 void
13490 hat_enter(struct hat *hat)
13491 {
13492 	hatlock_t	*hatlockp;
13493 
13494 	if (hat != ksfmmup) {
13495 		hatlockp = TSB_HASH(hat);
13496 		mutex_enter(HATLOCK_MUTEXP(hatlockp));
13497 	}
13498 }
13499 
13500 void
13501 hat_exit(struct hat *hat)
13502 {
13503 	hatlock_t	*hatlockp;
13504 
13505 	if (hat != ksfmmup) {
13506 		hatlockp = TSB_HASH(hat);
13507 		mutex_exit(HATLOCK_MUTEXP(hatlockp));
13508 	}
13509 }
13510 
13511 /*ARGSUSED*/
13512 void
13513 hat_reserve(struct as *as, caddr_t addr, size_t len)
13514 {
13515 }
13516 
13517 static void
13518 hat_kstat_init(void)
13519 {
13520 	kstat_t *ksp;
13521 
13522 	ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13523 	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13524 	    KSTAT_FLAG_VIRTUAL);
13525 	if (ksp) {
13526 		ksp->ks_data = (void *) &sfmmu_global_stat;
13527 		kstat_install(ksp);
13528 	}
13529 	ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13530 	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13531 	    KSTAT_FLAG_VIRTUAL);
13532 	if (ksp) {
13533 		ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13534 		kstat_install(ksp);
13535 	}
13536 	ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13537 	    KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13538 	    KSTAT_FLAG_WRITABLE);
13539 	if (ksp) {
13540 		ksp->ks_update = sfmmu_kstat_percpu_update;
13541 		kstat_install(ksp);
13542 	}
13543 }
13544 
13545 /* ARGSUSED */
13546 static int
13547 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13548 {
13549 	struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13550 	struct tsbmiss *tsbm = tsbmiss_area;
13551 	struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13552 	int i;
13553 
13554 	ASSERT(cpu_kstat);
13555 	if (rw == KSTAT_READ) {
13556 		for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13557 			cpu_kstat->sf_itlb_misses = 0;
13558 			cpu_kstat->sf_dtlb_misses = 0;
13559 			cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13560 			    tsbm->uprot_traps;
13561 			cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13562 			    kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13563 			cpu_kstat->sf_tsb_hits = 0;
13564 			cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13565 			cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13566 		}
13567 	} else {
13568 		/* KSTAT_WRITE is used to clear stats */
13569 		for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13570 			tsbm->utsb_misses = 0;
13571 			tsbm->ktsb_misses = 0;
13572 			tsbm->uprot_traps = 0;
13573 			tsbm->kprot_traps = 0;
13574 			kpmtsbm->kpm_dtlb_misses = 0;
13575 			kpmtsbm->kpm_tsb_misses = 0;
13576 		}
13577 	}
13578 	return (0);
13579 }
13580 
13581 #ifdef	DEBUG
13582 
13583 tte_t  *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13584 
13585 /*
13586  * A tte checker. *orig_old is the value we read before cas.
13587  *	*cur is the value returned by cas.
13588  *	*new is the desired value when we do the cas.
13589  *
13590  *	*hmeblkp is currently unused.
13591  */
13592 
13593 /* ARGSUSED */
13594 void
13595 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13596 {
13597 	pfn_t i, j, k;
13598 	int cpuid = CPU->cpu_id;
13599 
13600 	gorig[cpuid] = orig_old;
13601 	gcur[cpuid] = cur;
13602 	gnew[cpuid] = new;
13603 
13604 #ifdef lint
13605 	hmeblkp = hmeblkp;
13606 #endif
13607 
13608 	if (TTE_IS_VALID(orig_old)) {
13609 		if (TTE_IS_VALID(cur)) {
13610 			i = TTE_TO_TTEPFN(orig_old);
13611 			j = TTE_TO_TTEPFN(cur);
13612 			k = TTE_TO_TTEPFN(new);
13613 			if (i != j) {
13614 				/* remap error? */
13615 				panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13616 			}
13617 
13618 			if (i != k) {
13619 				/* remap error? */
13620 				panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13621 			}
13622 		} else {
13623 			if (TTE_IS_VALID(new)) {
13624 				panic("chk_tte: invalid cur? ");
13625 			}
13626 
13627 			i = TTE_TO_TTEPFN(orig_old);
13628 			k = TTE_TO_TTEPFN(new);
13629 			if (i != k) {
13630 				panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13631 			}
13632 		}
13633 	} else {
13634 		if (TTE_IS_VALID(cur)) {
13635 			j = TTE_TO_TTEPFN(cur);
13636 			if (TTE_IS_VALID(new)) {
13637 				k = TTE_TO_TTEPFN(new);
13638 				if (j != k) {
13639 					panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13640 					    j, k);
13641 				}
13642 			} else {
13643 				panic("chk_tte: why here?");
13644 			}
13645 		} else {
13646 			if (!TTE_IS_VALID(new)) {
13647 				panic("chk_tte: why here2 ?");
13648 			}
13649 		}
13650 	}
13651 }
13652 
13653 #endif /* DEBUG */
13654 
13655 extern void prefetch_tsbe_read(struct tsbe *);
13656 extern void prefetch_tsbe_write(struct tsbe *);
13657 
13658 
13659 /*
13660  * We want to prefetch 7 cache lines ahead for our read prefetch.  This gives
13661  * us optimal performance on Cheetah+.  You can only have 8 outstanding
13662  * prefetches at any one time, so we opted for 7 read prefetches and 1 write
13663  * prefetch to make the most utilization of the prefetch capability.
13664  */
13665 #define	TSBE_PREFETCH_STRIDE (7)
13666 
13667 void
13668 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13669 {
13670 	int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13671 	int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13672 	int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13673 	int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13674 	struct tsbe *old;
13675 	struct tsbe *new;
13676 	struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13677 	uint64_t va;
13678 	int new_offset;
13679 	int i;
13680 	int vpshift;
13681 	int last_prefetch;
13682 
13683 	if (old_bytes == new_bytes) {
13684 		bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13685 	} else {
13686 
13687 		/*
13688 		 * A TSBE is 16 bytes which means there are four TSBE's per
13689 		 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
13690 		 */
13691 		old = (struct tsbe *)old_tsbinfo->tsb_va;
13692 		last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13693 		for (i = 0; i < old_entries; i++, old++) {
13694 			if (((i & (4-1)) == 0) && (i < last_prefetch))
13695 				prefetch_tsbe_read(old);
13696 			if (!old->tte_tag.tag_invalid) {
13697 				/*
13698 				 * We have a valid TTE to remap.  Check the
13699 				 * size.  We won't remap 64K or 512K TTEs
13700 				 * because they span more than one TSB entry
13701 				 * and are indexed using an 8K virt. page.
13702 				 * Ditto for 32M and 256M TTEs.
13703 				 */
13704 				if (TTE_CSZ(&old->tte_data) == TTE64K ||
13705 				    TTE_CSZ(&old->tte_data) == TTE512K)
13706 					continue;
13707 				if (mmu_page_sizes == max_mmu_page_sizes) {
13708 					if (TTE_CSZ(&old->tte_data) == TTE32M ||
13709 					    TTE_CSZ(&old->tte_data) == TTE256M)
13710 						continue;
13711 				}
13712 
13713 				/* clear the lower 22 bits of the va */
13714 				va = *(uint64_t *)old << 22;
13715 				/* turn va into a virtual pfn */
13716 				va >>= 22 - TSB_START_SIZE;
13717 				/*
13718 				 * or in bits from the offset in the tsb
13719 				 * to get the real virtual pfn. These
13720 				 * correspond to bits [21:13] in the va
13721 				 */
13722 				vpshift =
13723 				    TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13724 				    0x1ff;
13725 				va |= (i << vpshift);
13726 				va >>= vpshift;
13727 				new_offset = va & (new_entries - 1);
13728 				new = new_base + new_offset;
13729 				prefetch_tsbe_write(new);
13730 				*new = *old;
13731 			}
13732 		}
13733 	}
13734 }
13735 
13736 /*
13737  * unused in sfmmu
13738  */
13739 void
13740 hat_dump(void)
13741 {
13742 }
13743 
13744 /*
13745  * Called when a thread is exiting and we have switched to the kernel address
13746  * space.  Perform the same VM initialization resume() uses when switching
13747  * processes.
13748  *
13749  * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13750  * we call it anyway in case the semantics change in the future.
13751  */
13752 /*ARGSUSED*/
13753 void
13754 hat_thread_exit(kthread_t *thd)
13755 {
13756 	uint_t pgsz_cnum;
13757 	uint_t pstate_save;
13758 
13759 	ASSERT(thd->t_procp->p_as == &kas);
13760 
13761 	pgsz_cnum = KCONTEXT;
13762 #ifdef sun4u
13763 	pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13764 #endif
13765 
13766 	/*
13767 	 * Note that sfmmu_load_mmustate() is currently a no-op for
13768 	 * kernel threads. We need to disable interrupts here,
13769 	 * simply because otherwise sfmmu_load_mmustate() would panic
13770 	 * if the caller does not disable interrupts.
13771 	 */
13772 	pstate_save = sfmmu_disable_intrs();
13773 
13774 	/* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13775 	sfmmu_setctx_sec(pgsz_cnum);
13776 	sfmmu_load_mmustate(ksfmmup);
13777 	sfmmu_enable_intrs(pstate_save);
13778 }
13779 
13780 
13781 /*
13782  * SRD support
13783  */
13784 #define	SRD_HASH_FUNCTION(vp)	(((((uintptr_t)(vp)) >> 4) ^ \
13785 				    (((uintptr_t)(vp)) >> 11)) & \
13786 				    srd_hashmask)
13787 
13788 /*
13789  * Attach the process to the srd struct associated with the exec vnode
13790  * from which the process is started.
13791  */
13792 void
13793 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13794 {
13795 	uint_t hash = SRD_HASH_FUNCTION(evp);
13796 	sf_srd_t *srdp;
13797 	sf_srd_t *newsrdp;
13798 
13799 	ASSERT(sfmmup != ksfmmup);
13800 	ASSERT(sfmmup->sfmmu_srdp == NULL);
13801 
13802 	if (!shctx_on) {
13803 		return;
13804 	}
13805 
13806 	VN_HOLD(evp);
13807 
13808 	if (srd_buckets[hash].srdb_srdp != NULL) {
13809 		mutex_enter(&srd_buckets[hash].srdb_lock);
13810 		for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13811 		    srdp = srdp->srd_hash) {
13812 			if (srdp->srd_evp == evp) {
13813 				ASSERT(srdp->srd_refcnt >= 0);
13814 				sfmmup->sfmmu_srdp = srdp;
13815 				atomic_add_32(
13816 				    (volatile uint_t *)&srdp->srd_refcnt, 1);
13817 				mutex_exit(&srd_buckets[hash].srdb_lock);
13818 				return;
13819 			}
13820 		}
13821 		mutex_exit(&srd_buckets[hash].srdb_lock);
13822 	}
13823 	newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13824 	ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13825 
13826 	newsrdp->srd_evp = evp;
13827 	newsrdp->srd_refcnt = 1;
13828 	newsrdp->srd_hmergnfree = NULL;
13829 	newsrdp->srd_ismrgnfree = NULL;
13830 
13831 	mutex_enter(&srd_buckets[hash].srdb_lock);
13832 	for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13833 	    srdp = srdp->srd_hash) {
13834 		if (srdp->srd_evp == evp) {
13835 			ASSERT(srdp->srd_refcnt >= 0);
13836 			sfmmup->sfmmu_srdp = srdp;
13837 			atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
13838 			mutex_exit(&srd_buckets[hash].srdb_lock);
13839 			kmem_cache_free(srd_cache, newsrdp);
13840 			return;
13841 		}
13842 	}
13843 	newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13844 	srd_buckets[hash].srdb_srdp = newsrdp;
13845 	sfmmup->sfmmu_srdp = newsrdp;
13846 
13847 	mutex_exit(&srd_buckets[hash].srdb_lock);
13848 
13849 }
13850 
13851 static void
13852 sfmmu_leave_srd(sfmmu_t *sfmmup)
13853 {
13854 	vnode_t *evp;
13855 	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13856 	uint_t hash;
13857 	sf_srd_t **prev_srdpp;
13858 	sf_region_t *rgnp;
13859 	sf_region_t *nrgnp;
13860 #ifdef DEBUG
13861 	int rgns = 0;
13862 #endif
13863 	int i;
13864 
13865 	ASSERT(sfmmup != ksfmmup);
13866 	ASSERT(srdp != NULL);
13867 	ASSERT(srdp->srd_refcnt > 0);
13868 	ASSERT(sfmmup->sfmmu_scdp == NULL);
13869 	ASSERT(sfmmup->sfmmu_free == 1);
13870 
13871 	sfmmup->sfmmu_srdp = NULL;
13872 	evp = srdp->srd_evp;
13873 	ASSERT(evp != NULL);
13874 	if (atomic_add_32_nv(
13875 	    (volatile uint_t *)&srdp->srd_refcnt, -1)) {
13876 		VN_RELE(evp);
13877 		return;
13878 	}
13879 
13880 	hash = SRD_HASH_FUNCTION(evp);
13881 	mutex_enter(&srd_buckets[hash].srdb_lock);
13882 	for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13883 	    (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13884 		if (srdp->srd_evp == evp) {
13885 			break;
13886 		}
13887 	}
13888 	if (srdp == NULL || srdp->srd_refcnt) {
13889 		mutex_exit(&srd_buckets[hash].srdb_lock);
13890 		VN_RELE(evp);
13891 		return;
13892 	}
13893 	*prev_srdpp = srdp->srd_hash;
13894 	mutex_exit(&srd_buckets[hash].srdb_lock);
13895 
13896 	ASSERT(srdp->srd_refcnt == 0);
13897 	VN_RELE(evp);
13898 
13899 #ifdef DEBUG
13900 	for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13901 		ASSERT(srdp->srd_rgnhash[i] == NULL);
13902 	}
13903 #endif /* DEBUG */
13904 
13905 	/* free each hme regions in the srd */
13906 	for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13907 		nrgnp = rgnp->rgn_next;
13908 		ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13909 		ASSERT(rgnp->rgn_refcnt == 0);
13910 		ASSERT(rgnp->rgn_sfmmu_head == NULL);
13911 		ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13912 		ASSERT(rgnp->rgn_hmeflags == 0);
13913 		ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13914 #ifdef DEBUG
13915 		for (i = 0; i < MMU_PAGE_SIZES; i++) {
13916 			ASSERT(rgnp->rgn_ttecnt[i] == 0);
13917 		}
13918 		rgns++;
13919 #endif /* DEBUG */
13920 		kmem_cache_free(region_cache, rgnp);
13921 	}
13922 	ASSERT(rgns == srdp->srd_next_hmerid);
13923 
13924 #ifdef DEBUG
13925 	rgns = 0;
13926 #endif
13927 	/* free each ism rgns in the srd */
13928 	for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13929 		nrgnp = rgnp->rgn_next;
13930 		ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13931 		ASSERT(rgnp->rgn_refcnt == 0);
13932 		ASSERT(rgnp->rgn_sfmmu_head == NULL);
13933 		ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13934 		ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13935 #ifdef DEBUG
13936 		for (i = 0; i < MMU_PAGE_SIZES; i++) {
13937 			ASSERT(rgnp->rgn_ttecnt[i] == 0);
13938 		}
13939 		rgns++;
13940 #endif /* DEBUG */
13941 		kmem_cache_free(region_cache, rgnp);
13942 	}
13943 	ASSERT(rgns == srdp->srd_next_ismrid);
13944 	ASSERT(srdp->srd_ismbusyrgns == 0);
13945 	ASSERT(srdp->srd_hmebusyrgns == 0);
13946 
13947 	srdp->srd_next_ismrid = 0;
13948 	srdp->srd_next_hmerid = 0;
13949 
13950 	bzero((void *)srdp->srd_ismrgnp,
13951 	    sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
13952 	bzero((void *)srdp->srd_hmergnp,
13953 	    sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
13954 
13955 	ASSERT(srdp->srd_scdp == NULL);
13956 	kmem_cache_free(srd_cache, srdp);
13957 }
13958 
13959 /* ARGSUSED */
13960 static int
13961 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
13962 {
13963 	sf_srd_t *srdp = (sf_srd_t *)buf;
13964 	bzero(buf, sizeof (*srdp));
13965 
13966 	mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
13967 	mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
13968 	return (0);
13969 }
13970 
13971 /* ARGSUSED */
13972 static void
13973 sfmmu_srdcache_destructor(void *buf, void *cdrarg)
13974 {
13975 	sf_srd_t *srdp = (sf_srd_t *)buf;
13976 
13977 	mutex_destroy(&srdp->srd_mutex);
13978 	mutex_destroy(&srdp->srd_scd_mutex);
13979 }
13980 
13981 /*
13982  * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13983  * at the same time for the same process and address range. This is ensured by
13984  * the fact that address space is locked as writer when a process joins the
13985  * regions. Therefore there's no need to hold an srd lock during the entire
13986  * execution of hat_join_region()/hat_leave_region().
13987  */
13988 
13989 #define	RGN_HASH_FUNCTION(obj)	(((((uintptr_t)(obj)) >> 4) ^ \
13990 				    (((uintptr_t)(obj)) >> 11)) & \
13991 					srd_rgn_hashmask)
13992 /*
13993  * This routine implements the shared context functionality required when
13994  * attaching a segment to an address space. It must be called from
13995  * hat_share() for D(ISM) segments and from segvn_create() for segments
13996  * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13997  * which is saved in the private segment data for hme segments and
13998  * the ism_map structure for ism segments.
13999  */
14000 hat_region_cookie_t
14001 hat_join_region(struct hat *sfmmup,
14002 	caddr_t r_saddr,
14003 	size_t r_size,
14004 	void *r_obj,
14005 	u_offset_t r_objoff,
14006 	uchar_t r_perm,
14007 	uchar_t r_pgszc,
14008 	hat_rgn_cb_func_t r_cb_function,
14009 	uint_t flags)
14010 {
14011 	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14012 	uint_t rhash;
14013 	uint_t rid;
14014 	hatlock_t *hatlockp;
14015 	sf_region_t *rgnp;
14016 	sf_region_t *new_rgnp = NULL;
14017 	int i;
14018 	uint16_t *nextidp;
14019 	sf_region_t **freelistp;
14020 	int maxids;
14021 	sf_region_t **rarrp;
14022 	uint16_t *busyrgnsp;
14023 	ulong_t rttecnt;
14024 	uchar_t tteflag;
14025 	uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14026 	int text = (r_type == HAT_REGION_TEXT);
14027 
14028 	if (srdp == NULL || r_size == 0) {
14029 		return (HAT_INVALID_REGION_COOKIE);
14030 	}
14031 
14032 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14033 	ASSERT(sfmmup != ksfmmup);
14034 	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14035 	ASSERT(srdp->srd_refcnt > 0);
14036 	ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14037 	ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14038 	ASSERT(r_pgszc < mmu_page_sizes);
14039 	if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
14040 	    !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
14041 		panic("hat_join_region: region addr or size is not aligned\n");
14042 	}
14043 
14044 
14045 	r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14046 	    SFMMU_REGION_HME;
14047 	/*
14048 	 * Currently only support shared hmes for the read only main text
14049 	 * region.
14050 	 */
14051 	if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
14052 	    (r_perm & PROT_WRITE))) {
14053 		return (HAT_INVALID_REGION_COOKIE);
14054 	}
14055 
14056 	rhash = RGN_HASH_FUNCTION(r_obj);
14057 
14058 	if (r_type == SFMMU_REGION_ISM) {
14059 		nextidp = &srdp->srd_next_ismrid;
14060 		freelistp = &srdp->srd_ismrgnfree;
14061 		maxids = SFMMU_MAX_ISM_REGIONS;
14062 		rarrp = srdp->srd_ismrgnp;
14063 		busyrgnsp = &srdp->srd_ismbusyrgns;
14064 	} else {
14065 		nextidp = &srdp->srd_next_hmerid;
14066 		freelistp = &srdp->srd_hmergnfree;
14067 		maxids = SFMMU_MAX_HME_REGIONS;
14068 		rarrp = srdp->srd_hmergnp;
14069 		busyrgnsp = &srdp->srd_hmebusyrgns;
14070 	}
14071 
14072 	mutex_enter(&srdp->srd_mutex);
14073 
14074 	for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14075 	    rgnp = rgnp->rgn_hash) {
14076 		if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
14077 		    rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
14078 		    rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
14079 			break;
14080 		}
14081 	}
14082 
14083 rfound:
14084 	if (rgnp != NULL) {
14085 		ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14086 		ASSERT(rgnp->rgn_cb_function == r_cb_function);
14087 		ASSERT(rgnp->rgn_refcnt >= 0);
14088 		rid = rgnp->rgn_id;
14089 		ASSERT(rid < maxids);
14090 		ASSERT(rarrp[rid] == rgnp);
14091 		ASSERT(rid < *nextidp);
14092 		atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);
14093 		mutex_exit(&srdp->srd_mutex);
14094 		if (new_rgnp != NULL) {
14095 			kmem_cache_free(region_cache, new_rgnp);
14096 		}
14097 		if (r_type == SFMMU_REGION_HME) {
14098 			int myjoin =
14099 			    (sfmmup == astosfmmu(curthread->t_procp->p_as));
14100 
14101 			sfmmu_link_to_hmeregion(sfmmup, rgnp);
14102 			/*
14103 			 * bitmap should be updated after linking sfmmu on
14104 			 * region list so that pageunload() doesn't skip
14105 			 * TSB/TLB flush. As soon as bitmap is updated another
14106 			 * thread in this process can already start accessing
14107 			 * this region.
14108 			 */
14109 			/*
14110 			 * Normally ttecnt accounting is done as part of
14111 			 * pagefault handling. But a process may not take any
14112 			 * pagefaults on shared hmeblks created by some other
14113 			 * process. To compensate for this assume that the
14114 			 * entire region will end up faulted in using
14115 			 * the region's pagesize.
14116 			 *
14117 			 */
14118 			if (r_pgszc > TTE8K) {
14119 				tteflag = 1 << r_pgszc;
14120 				if (disable_large_pages & tteflag) {
14121 					tteflag = 0;
14122 				}
14123 			} else {
14124 				tteflag = 0;
14125 			}
14126 			if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
14127 				hatlockp = sfmmu_hat_enter(sfmmup);
14128 				sfmmup->sfmmu_rtteflags |= tteflag;
14129 				sfmmu_hat_exit(hatlockp);
14130 			}
14131 			hatlockp = sfmmu_hat_enter(sfmmup);
14132 
14133 			/*
14134 			 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M
14135 			 * region to allow for large page allocation failure.
14136 			 */
14137 			if (r_pgszc >= TTE4M) {
14138 				sfmmup->sfmmu_tsb0_4minflcnt +=
14139 				    r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14140 			}
14141 
14142 			/* update sfmmu_ttecnt with the shme rgn ttecnt */
14143 			rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14144 			atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14145 			    rttecnt);
14146 
14147 			if (text && r_pgszc >= TTE4M &&
14148 			    (tteflag || ((disable_large_pages >> TTE4M) &
14149 			    ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
14150 			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
14151 				SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
14152 			}
14153 
14154 			sfmmu_hat_exit(hatlockp);
14155 			/*
14156 			 * On Panther we need to make sure TLB is programmed
14157 			 * to accept 32M/256M pages.  Call
14158 			 * sfmmu_check_page_sizes() now to make sure TLB is
14159 			 * setup before making hmeregions visible to other
14160 			 * threads.
14161 			 */
14162 			sfmmu_check_page_sizes(sfmmup, 1);
14163 			hatlockp = sfmmu_hat_enter(sfmmup);
14164 			SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14165 
14166 			/*
14167 			 * if context is invalid tsb miss exception code will
14168 			 * call sfmmu_check_page_sizes() and update tsbmiss
14169 			 * area later.
14170 			 */
14171 			kpreempt_disable();
14172 			if (myjoin &&
14173 			    (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
14174 			    != INVALID_CONTEXT)) {
14175 				struct tsbmiss *tsbmp;
14176 
14177 				tsbmp = &tsbmiss_area[CPU->cpu_id];
14178 				ASSERT(sfmmup == tsbmp->usfmmup);
14179 				BT_SET(tsbmp->shmermap, rid);
14180 				if (r_pgszc > TTE64K) {
14181 					tsbmp->uhat_rtteflags |= tteflag;
14182 				}
14183 
14184 			}
14185 			kpreempt_enable();
14186 
14187 			sfmmu_hat_exit(hatlockp);
14188 			ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
14189 			    HAT_INVALID_REGION_COOKIE);
14190 		} else {
14191 			hatlockp = sfmmu_hat_enter(sfmmup);
14192 			SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
14193 			sfmmu_hat_exit(hatlockp);
14194 		}
14195 		ASSERT(rid < maxids);
14196 
14197 		if (r_type == SFMMU_REGION_ISM) {
14198 			sfmmu_find_scd(sfmmup);
14199 		}
14200 		return ((hat_region_cookie_t)((uint64_t)rid));
14201 	}
14202 
14203 	ASSERT(new_rgnp == NULL);
14204 
14205 	if (*busyrgnsp >= maxids) {
14206 		mutex_exit(&srdp->srd_mutex);
14207 		return (HAT_INVALID_REGION_COOKIE);
14208 	}
14209 
14210 	ASSERT(MUTEX_HELD(&srdp->srd_mutex));
14211 	if (*freelistp != NULL) {
14212 		rgnp = *freelistp;
14213 		*freelistp = rgnp->rgn_next;
14214 		ASSERT(rgnp->rgn_id < *nextidp);
14215 		ASSERT(rgnp->rgn_id < maxids);
14216 		ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
14217 		ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
14218 		    == r_type);
14219 		ASSERT(rarrp[rgnp->rgn_id] == rgnp);
14220 		ASSERT(rgnp->rgn_hmeflags == 0);
14221 	} else {
14222 		/*
14223 		 * release local locks before memory allocation.
14224 		 */
14225 		mutex_exit(&srdp->srd_mutex);
14226 
14227 		new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
14228 
14229 		mutex_enter(&srdp->srd_mutex);
14230 		for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14231 		    rgnp = rgnp->rgn_hash) {
14232 			if (rgnp->rgn_saddr == r_saddr &&
14233 			    rgnp->rgn_size == r_size &&
14234 			    rgnp->rgn_obj == r_obj &&
14235 			    rgnp->rgn_objoff == r_objoff &&
14236 			    rgnp->rgn_perm == r_perm &&
14237 			    rgnp->rgn_pgszc == r_pgszc) {
14238 				break;
14239 			}
14240 		}
14241 		if (rgnp != NULL) {
14242 			goto rfound;
14243 		}
14244 
14245 		if (*nextidp >= maxids) {
14246 			mutex_exit(&srdp->srd_mutex);
14247 			goto fail;
14248 		}
14249 		rgnp = new_rgnp;
14250 		new_rgnp = NULL;
14251 		rgnp->rgn_id = (*nextidp)++;
14252 		ASSERT(rgnp->rgn_id < maxids);
14253 		ASSERT(rarrp[rgnp->rgn_id] == NULL);
14254 		rarrp[rgnp->rgn_id] = rgnp;
14255 	}
14256 
14257 	ASSERT(rgnp->rgn_sfmmu_head == NULL);
14258 	ASSERT(rgnp->rgn_hmeflags == 0);
14259 #ifdef DEBUG
14260 	for (i = 0; i < MMU_PAGE_SIZES; i++) {
14261 		ASSERT(rgnp->rgn_ttecnt[i] == 0);
14262 	}
14263 #endif
14264 	rgnp->rgn_saddr = r_saddr;
14265 	rgnp->rgn_size = r_size;
14266 	rgnp->rgn_obj = r_obj;
14267 	rgnp->rgn_objoff = r_objoff;
14268 	rgnp->rgn_perm = r_perm;
14269 	rgnp->rgn_pgszc = r_pgszc;
14270 	rgnp->rgn_flags = r_type;
14271 	rgnp->rgn_refcnt = 0;
14272 	rgnp->rgn_cb_function = r_cb_function;
14273 	rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
14274 	srdp->srd_rgnhash[rhash] = rgnp;
14275 	(*busyrgnsp)++;
14276 	ASSERT(*busyrgnsp <= maxids);
14277 	goto rfound;
14278 
14279 fail:
14280 	ASSERT(new_rgnp != NULL);
14281 	kmem_cache_free(region_cache, new_rgnp);
14282 	return (HAT_INVALID_REGION_COOKIE);
14283 }
14284 
14285 /*
14286  * This function implements the shared context functionality required
14287  * when detaching a segment from an address space. It must be called
14288  * from hat_unshare() for all D(ISM) segments and from segvn_unmap(),
14289  * for segments with a valid region_cookie.
14290  * It will also be called from all seg_vn routines which change a
14291  * segment's attributes such as segvn_setprot(), segvn_setpagesize(),
14292  * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault
14293  * from segvn_fault().
14294  */
14295 void
14296 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14297 {
14298 	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14299 	sf_scd_t *scdp;
14300 	uint_t rhash;
14301 	uint_t rid = (uint_t)((uint64_t)rcookie);
14302 	hatlock_t *hatlockp = NULL;
14303 	sf_region_t *rgnp;
14304 	sf_region_t **prev_rgnpp;
14305 	sf_region_t *cur_rgnp;
14306 	void *r_obj;
14307 	int i;
14308 	caddr_t	r_saddr;
14309 	caddr_t r_eaddr;
14310 	size_t	r_size;
14311 	uchar_t	r_pgszc;
14312 	uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14313 
14314 	ASSERT(sfmmup != ksfmmup);
14315 	ASSERT(srdp != NULL);
14316 	ASSERT(srdp->srd_refcnt > 0);
14317 	ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14318 	ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14319 	ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
14320 
14321 	r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14322 	    SFMMU_REGION_HME;
14323 
14324 	if (r_type == SFMMU_REGION_ISM) {
14325 		ASSERT(SFMMU_IS_ISMRID_VALID(rid));
14326 		ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
14327 		rgnp = srdp->srd_ismrgnp[rid];
14328 	} else {
14329 		ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14330 		ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14331 		rgnp = srdp->srd_hmergnp[rid];
14332 	}
14333 	ASSERT(rgnp != NULL);
14334 	ASSERT(rgnp->rgn_id == rid);
14335 	ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14336 	ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14337 	ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14338 
14339 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14340 	if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
14341 		xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
14342 		    rgnp->rgn_size, 0, NULL);
14343 	}
14344 
14345 	if (sfmmup->sfmmu_free) {
14346 		ulong_t rttecnt;
14347 		r_pgszc = rgnp->rgn_pgszc;
14348 		r_size = rgnp->rgn_size;
14349 
14350 		ASSERT(sfmmup->sfmmu_scdp == NULL);
14351 		if (r_type == SFMMU_REGION_ISM) {
14352 			SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14353 		} else {
14354 			/* update shme rgns ttecnt in sfmmu_ttecnt */
14355 			rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14356 			ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14357 
14358 			atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14359 			    -rttecnt);
14360 
14361 			SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14362 		}
14363 	} else if (r_type == SFMMU_REGION_ISM) {
14364 		hatlockp = sfmmu_hat_enter(sfmmup);
14365 		ASSERT(rid < srdp->srd_next_ismrid);
14366 		SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14367 		scdp = sfmmup->sfmmu_scdp;
14368 		if (scdp != NULL &&
14369 		    SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14370 			sfmmu_leave_scd(sfmmup, r_type);
14371 			ASSERT(sfmmu_hat_lock_held(sfmmup));
14372 		}
14373 		sfmmu_hat_exit(hatlockp);
14374 	} else {
14375 		ulong_t rttecnt;
14376 		r_pgszc = rgnp->rgn_pgszc;
14377 		r_saddr = rgnp->rgn_saddr;
14378 		r_size = rgnp->rgn_size;
14379 		r_eaddr = r_saddr + r_size;
14380 
14381 		ASSERT(r_type == SFMMU_REGION_HME);
14382 		hatlockp = sfmmu_hat_enter(sfmmup);
14383 		ASSERT(rid < srdp->srd_next_hmerid);
14384 		SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14385 
14386 		/*
14387 		 * If region is part of an SCD call sfmmu_leave_scd().
14388 		 * Otherwise if process is not exiting and has valid context
14389 		 * just drop the context on the floor to lose stale TLB
14390 		 * entries and force the update of tsb miss area to reflect
14391 		 * the new region map. After that clean our TSB entries.
14392 		 */
14393 		scdp = sfmmup->sfmmu_scdp;
14394 		if (scdp != NULL &&
14395 		    SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14396 			sfmmu_leave_scd(sfmmup, r_type);
14397 			ASSERT(sfmmu_hat_lock_held(sfmmup));
14398 		}
14399 		sfmmu_invalidate_ctx(sfmmup);
14400 
14401 		i = TTE8K;
14402 		while (i < mmu_page_sizes) {
14403 			if (rgnp->rgn_ttecnt[i] != 0) {
14404 				sfmmu_unload_tsb_range(sfmmup, r_saddr,
14405 				    r_eaddr, i);
14406 				if (i < TTE4M) {
14407 					i = TTE4M;
14408 					continue;
14409 				} else {
14410 					break;
14411 				}
14412 			}
14413 			i++;
14414 		}
14415 		/* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14416 		if (r_pgszc >= TTE4M) {
14417 			rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14418 			ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14419 			    rttecnt);
14420 			sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14421 		}
14422 
14423 		/* update shme rgns ttecnt in sfmmu_ttecnt */
14424 		rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14425 		ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14426 		atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14427 
14428 		sfmmu_hat_exit(hatlockp);
14429 		if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14430 			/* sfmmup left the scd, grow private tsb */
14431 			sfmmu_check_page_sizes(sfmmup, 1);
14432 		} else {
14433 			sfmmu_check_page_sizes(sfmmup, 0);
14434 		}
14435 	}
14436 
14437 	if (r_type == SFMMU_REGION_HME) {
14438 		sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14439 	}
14440 
14441 	r_obj = rgnp->rgn_obj;
14442 	if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) {
14443 		return;
14444 	}
14445 
14446 	/*
14447 	 * looks like nobody uses this region anymore. Free it.
14448 	 */
14449 	rhash = RGN_HASH_FUNCTION(r_obj);
14450 	mutex_enter(&srdp->srd_mutex);
14451 	for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14452 	    (cur_rgnp = *prev_rgnpp) != NULL;
14453 	    prev_rgnpp = &cur_rgnp->rgn_hash) {
14454 		if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14455 			break;
14456 		}
14457 	}
14458 
14459 	if (cur_rgnp == NULL) {
14460 		mutex_exit(&srdp->srd_mutex);
14461 		return;
14462 	}
14463 
14464 	ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14465 	*prev_rgnpp = rgnp->rgn_hash;
14466 	if (r_type == SFMMU_REGION_ISM) {
14467 		rgnp->rgn_flags |= SFMMU_REGION_FREE;
14468 		ASSERT(rid < srdp->srd_next_ismrid);
14469 		rgnp->rgn_next = srdp->srd_ismrgnfree;
14470 		srdp->srd_ismrgnfree = rgnp;
14471 		ASSERT(srdp->srd_ismbusyrgns > 0);
14472 		srdp->srd_ismbusyrgns--;
14473 		mutex_exit(&srdp->srd_mutex);
14474 		return;
14475 	}
14476 	mutex_exit(&srdp->srd_mutex);
14477 
14478 	/*
14479 	 * Destroy region's hmeblks.
14480 	 */
14481 	sfmmu_unload_hmeregion(srdp, rgnp);
14482 
14483 	rgnp->rgn_hmeflags = 0;
14484 
14485 	ASSERT(rgnp->rgn_sfmmu_head == NULL);
14486 	ASSERT(rgnp->rgn_id == rid);
14487 	for (i = 0; i < MMU_PAGE_SIZES; i++) {
14488 		rgnp->rgn_ttecnt[i] = 0;
14489 	}
14490 	rgnp->rgn_flags |= SFMMU_REGION_FREE;
14491 	mutex_enter(&srdp->srd_mutex);
14492 	ASSERT(rid < srdp->srd_next_hmerid);
14493 	rgnp->rgn_next = srdp->srd_hmergnfree;
14494 	srdp->srd_hmergnfree = rgnp;
14495 	ASSERT(srdp->srd_hmebusyrgns > 0);
14496 	srdp->srd_hmebusyrgns--;
14497 	mutex_exit(&srdp->srd_mutex);
14498 }
14499 
14500 /*
14501  * For now only called for hmeblk regions and not for ISM regions.
14502  */
14503 void
14504 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14505 {
14506 	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14507 	uint_t rid = (uint_t)((uint64_t)rcookie);
14508 	sf_region_t *rgnp;
14509 	sf_rgn_link_t *rlink;
14510 	sf_rgn_link_t *hrlink;
14511 	ulong_t	rttecnt;
14512 
14513 	ASSERT(sfmmup != ksfmmup);
14514 	ASSERT(srdp != NULL);
14515 	ASSERT(srdp->srd_refcnt > 0);
14516 
14517 	ASSERT(rid < srdp->srd_next_hmerid);
14518 	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14519 	ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14520 
14521 	rgnp = srdp->srd_hmergnp[rid];
14522 	ASSERT(rgnp->rgn_refcnt > 0);
14523 	ASSERT(rgnp->rgn_id == rid);
14524 	ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14525 	ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14526 
14527 	atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);
14528 
14529 	/* LINTED: constant in conditional context */
14530 	SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14531 	ASSERT(rlink != NULL);
14532 	mutex_enter(&rgnp->rgn_mutex);
14533 	ASSERT(rgnp->rgn_sfmmu_head != NULL);
14534 	/* LINTED: constant in conditional context */
14535 	SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14536 	ASSERT(hrlink != NULL);
14537 	ASSERT(hrlink->prev == NULL);
14538 	rlink->next = rgnp->rgn_sfmmu_head;
14539 	rlink->prev = NULL;
14540 	hrlink->prev = sfmmup;
14541 	/*
14542 	 * make sure rlink's next field is correct
14543 	 * before making this link visible.
14544 	 */
14545 	membar_stst();
14546 	rgnp->rgn_sfmmu_head = sfmmup;
14547 	mutex_exit(&rgnp->rgn_mutex);
14548 
14549 	/* update sfmmu_ttecnt with the shme rgn ttecnt */
14550 	rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14551 	atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14552 	/* update tsb0 inflation count */
14553 	if (rgnp->rgn_pgszc >= TTE4M) {
14554 		sfmmup->sfmmu_tsb0_4minflcnt +=
14555 		    rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14556 	}
14557 	/*
14558 	 * Update regionid bitmask without hat lock since no other thread
14559 	 * can update this region bitmask right now.
14560 	 */
14561 	SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14562 }
14563 
14564 /* ARGSUSED */
14565 static int
14566 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14567 {
14568 	sf_region_t *rgnp = (sf_region_t *)buf;
14569 	bzero(buf, sizeof (*rgnp));
14570 
14571 	mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14572 
14573 	return (0);
14574 }
14575 
14576 /* ARGSUSED */
14577 static void
14578 sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14579 {
14580 	sf_region_t *rgnp = (sf_region_t *)buf;
14581 	mutex_destroy(&rgnp->rgn_mutex);
14582 }
14583 
14584 static int
14585 sfrgnmap_isnull(sf_region_map_t *map)
14586 {
14587 	int i;
14588 
14589 	for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14590 		if (map->bitmap[i] != 0) {
14591 			return (0);
14592 		}
14593 	}
14594 	return (1);
14595 }
14596 
14597 static int
14598 sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14599 {
14600 	int i;
14601 
14602 	for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14603 		if (map->bitmap[i] != 0) {
14604 			return (0);
14605 		}
14606 	}
14607 	return (1);
14608 }
14609 
14610 #ifdef DEBUG
14611 static void
14612 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14613 {
14614 	sfmmu_t *sp;
14615 	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14616 
14617 	for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14618 		ASSERT(srdp == sp->sfmmu_srdp);
14619 		if (sp == sfmmup) {
14620 			if (onlist) {
14621 				return;
14622 			} else {
14623 				panic("shctx: sfmmu 0x%p found on scd"
14624 				    "list 0x%p", (void *)sfmmup,
14625 				    (void *)*headp);
14626 			}
14627 		}
14628 	}
14629 	if (onlist) {
14630 		panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14631 		    (void *)sfmmup, (void *)*headp);
14632 	} else {
14633 		return;
14634 	}
14635 }
14636 #else /* DEBUG */
14637 #define	check_scd_sfmmu_list(headp, sfmmup, onlist)
14638 #endif /* DEBUG */
14639 
14640 /*
14641  * Removes an sfmmu from the SCD sfmmu list.
14642  */
14643 static void
14644 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14645 {
14646 	ASSERT(sfmmup->sfmmu_srdp != NULL);
14647 	check_scd_sfmmu_list(headp, sfmmup, 1);
14648 	if (sfmmup->sfmmu_scd_link.prev != NULL) {
14649 		ASSERT(*headp != sfmmup);
14650 		sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14651 		    sfmmup->sfmmu_scd_link.next;
14652 	} else {
14653 		ASSERT(*headp == sfmmup);
14654 		*headp = sfmmup->sfmmu_scd_link.next;
14655 	}
14656 	if (sfmmup->sfmmu_scd_link.next != NULL) {
14657 		sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14658 		    sfmmup->sfmmu_scd_link.prev;
14659 	}
14660 }
14661 
14662 
14663 /*
14664  * Adds an sfmmu to the start of the queue.
14665  */
14666 static void
14667 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14668 {
14669 	check_scd_sfmmu_list(headp, sfmmup, 0);
14670 	sfmmup->sfmmu_scd_link.prev = NULL;
14671 	sfmmup->sfmmu_scd_link.next = *headp;
14672 	if (*headp != NULL)
14673 		(*headp)->sfmmu_scd_link.prev = sfmmup;
14674 	*headp = sfmmup;
14675 }
14676 
14677 /*
14678  * Remove an scd from the start of the queue.
14679  */
14680 static void
14681 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
14682 {
14683 	if (scdp->scd_prev != NULL) {
14684 		ASSERT(*headp != scdp);
14685 		scdp->scd_prev->scd_next = scdp->scd_next;
14686 	} else {
14687 		ASSERT(*headp == scdp);
14688 		*headp = scdp->scd_next;
14689 	}
14690 
14691 	if (scdp->scd_next != NULL) {
14692 		scdp->scd_next->scd_prev = scdp->scd_prev;
14693 	}
14694 }
14695 
14696 /*
14697  * Add an scd to the start of the queue.
14698  */
14699 static void
14700 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
14701 {
14702 	scdp->scd_prev = NULL;
14703 	scdp->scd_next = *headp;
14704 	if (*headp != NULL) {
14705 		(*headp)->scd_prev = scdp;
14706 	}
14707 	*headp = scdp;
14708 }
14709 
14710 static int
14711 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
14712 {
14713 	uint_t rid;
14714 	uint_t i;
14715 	uint_t j;
14716 	ulong_t w;
14717 	sf_region_t *rgnp;
14718 	ulong_t tte8k_cnt = 0;
14719 	ulong_t tte4m_cnt = 0;
14720 	uint_t tsb_szc;
14721 	sfmmu_t *scsfmmup = scdp->scd_sfmmup;
14722 	sfmmu_t	*ism_hatid;
14723 	struct tsb_info *newtsb;
14724 	int szc;
14725 
14726 	ASSERT(srdp != NULL);
14727 
14728 	for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14729 		if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14730 			continue;
14731 		}
14732 		j = 0;
14733 		while (w) {
14734 			if (!(w & 0x1)) {
14735 				j++;
14736 				w >>= 1;
14737 				continue;
14738 			}
14739 			rid = (i << BT_ULSHIFT) | j;
14740 			j++;
14741 			w >>= 1;
14742 
14743 			if (rid < SFMMU_MAX_HME_REGIONS) {
14744 				rgnp = srdp->srd_hmergnp[rid];
14745 				ASSERT(rgnp->rgn_id == rid);
14746 				ASSERT(rgnp->rgn_refcnt > 0);
14747 
14748 				if (rgnp->rgn_pgszc < TTE4M) {
14749 					tte8k_cnt += rgnp->rgn_size >>
14750 					    TTE_PAGE_SHIFT(TTE8K);
14751 				} else {
14752 					ASSERT(rgnp->rgn_pgszc >= TTE4M);
14753 					tte4m_cnt += rgnp->rgn_size >>
14754 					    TTE_PAGE_SHIFT(TTE4M);
14755 					/*
14756 					 * Inflate SCD tsb0 by preallocating
14757 					 * 1/4 8k ttecnt for 4M regions to
14758 					 * allow for lgpg alloc failure.
14759 					 */
14760 					tte8k_cnt += rgnp->rgn_size >>
14761 					    (TTE_PAGE_SHIFT(TTE8K) + 2);
14762 				}
14763 			} else {
14764 				rid -= SFMMU_MAX_HME_REGIONS;
14765 				rgnp = srdp->srd_ismrgnp[rid];
14766 				ASSERT(rgnp->rgn_id == rid);
14767 				ASSERT(rgnp->rgn_refcnt > 0);
14768 
14769 				ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14770 				ASSERT(ism_hatid->sfmmu_ismhat);
14771 
14772 				for (szc = 0; szc < TTE4M; szc++) {
14773 					tte8k_cnt +=
14774 					    ism_hatid->sfmmu_ttecnt[szc] <<
14775 					    TTE_BSZS_SHIFT(szc);
14776 				}
14777 
14778 				ASSERT(rgnp->rgn_pgszc >= TTE4M);
14779 				if (rgnp->rgn_pgszc >= TTE4M) {
14780 					tte4m_cnt += rgnp->rgn_size >>
14781 					    TTE_PAGE_SHIFT(TTE4M);
14782 				}
14783 			}
14784 		}
14785 	}
14786 
14787 	tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
14788 
14789 	/* Allocate both the SCD TSBs here. */
14790 	if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14791 	    tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
14792 	    (tsb_szc <= TSB_4M_SZCODE ||
14793 	    sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14794 	    TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
14795 	    TSB_ALLOC, scsfmmup))) {
14796 
14797 		SFMMU_STAT(sf_scd_1sttsb_allocfail);
14798 		return (TSB_ALLOCFAIL);
14799 	} else {
14800 		scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;
14801 
14802 		if (tte4m_cnt) {
14803 			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
14804 			if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
14805 			    TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
14806 			    (tsb_szc <= TSB_4M_SZCODE ||
14807 			    sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
14808 			    TSB4M|TSB32M|TSB256M,
14809 			    TSB_ALLOC, scsfmmup))) {
14810 				/*
14811 				 * If we fail to allocate the 2nd shared tsb,
14812 				 * just free the 1st tsb, return failure.
14813 				 */
14814 				sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
14815 				SFMMU_STAT(sf_scd_2ndtsb_allocfail);
14816 				return (TSB_ALLOCFAIL);
14817 			} else {
14818 				ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
14819 				newtsb->tsb_flags |= TSB_SHAREDCTX;
14820 				scsfmmup->sfmmu_tsb->tsb_next = newtsb;
14821 				SFMMU_STAT(sf_scd_2ndtsb_alloc);
14822 			}
14823 		}
14824 		SFMMU_STAT(sf_scd_1sttsb_alloc);
14825 	}
14826 	return (TSB_SUCCESS);
14827 }
14828 
14829 static void
14830 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
14831 {
14832 	while (scd_sfmmu->sfmmu_tsb != NULL) {
14833 		struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
14834 		sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
14835 		scd_sfmmu->sfmmu_tsb = next;
14836 	}
14837 }
14838 
14839 /*
14840  * Link the sfmmu onto the hme region list.
14841  */
14842 void
14843 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14844 {
14845 	uint_t rid;
14846 	sf_rgn_link_t *rlink;
14847 	sfmmu_t *head;
14848 	sf_rgn_link_t *hrlink;
14849 
14850 	rid = rgnp->rgn_id;
14851 	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14852 
14853 	/* LINTED: constant in conditional context */
14854 	SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
14855 	ASSERT(rlink != NULL);
14856 	mutex_enter(&rgnp->rgn_mutex);
14857 	if ((head = rgnp->rgn_sfmmu_head) == NULL) {
14858 		rlink->next = NULL;
14859 		rlink->prev = NULL;
14860 		/*
14861 		 * make sure rlink's next field is NULL
14862 		 * before making this link visible.
14863 		 */
14864 		membar_stst();
14865 		rgnp->rgn_sfmmu_head = sfmmup;
14866 	} else {
14867 		/* LINTED: constant in conditional context */
14868 		SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
14869 		ASSERT(hrlink != NULL);
14870 		ASSERT(hrlink->prev == NULL);
14871 		rlink->next = head;
14872 		rlink->prev = NULL;
14873 		hrlink->prev = sfmmup;
14874 		/*
14875 		 * make sure rlink's next field is correct
14876 		 * before making this link visible.
14877 		 */
14878 		membar_stst();
14879 		rgnp->rgn_sfmmu_head = sfmmup;
14880 	}
14881 	mutex_exit(&rgnp->rgn_mutex);
14882 }
14883 
14884 /*
14885  * Unlink the sfmmu from the hme region list.
14886  */
14887 void
14888 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14889 {
14890 	uint_t rid;
14891 	sf_rgn_link_t *rlink;
14892 
14893 	rid = rgnp->rgn_id;
14894 	ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14895 
14896 	/* LINTED: constant in conditional context */
14897 	SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
14898 	ASSERT(rlink != NULL);
14899 	mutex_enter(&rgnp->rgn_mutex);
14900 	if (rgnp->rgn_sfmmu_head == sfmmup) {
14901 		sfmmu_t *next = rlink->next;
14902 		rgnp->rgn_sfmmu_head = next;
14903 		/*
14904 		 * if we are stopped by xc_attention() after this
14905 		 * point the forward link walking in
14906 		 * sfmmu_rgntlb_demap() will work correctly since the
14907 		 * head correctly points to the next element.
14908 		 */
14909 		membar_stst();
14910 		rlink->next = NULL;
14911 		ASSERT(rlink->prev == NULL);
14912 		if (next != NULL) {
14913 			sf_rgn_link_t *nrlink;
14914 			/* LINTED: constant in conditional context */
14915 			SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14916 			ASSERT(nrlink != NULL);
14917 			ASSERT(nrlink->prev == sfmmup);
14918 			nrlink->prev = NULL;
14919 		}
14920 	} else {
14921 		sfmmu_t *next = rlink->next;
14922 		sfmmu_t *prev = rlink->prev;
14923 		sf_rgn_link_t *prlink;
14924 
14925 		ASSERT(prev != NULL);
14926 		/* LINTED: constant in conditional context */
14927 		SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
14928 		ASSERT(prlink != NULL);
14929 		ASSERT(prlink->next == sfmmup);
14930 		prlink->next = next;
14931 		/*
14932 		 * if we are stopped by xc_attention()
14933 		 * after this point the forward link walking
14934 		 * will work correctly since the prev element
14935 		 * correctly points to the next element.
14936 		 */
14937 		membar_stst();
14938 		rlink->next = NULL;
14939 		rlink->prev = NULL;
14940 		if (next != NULL) {
14941 			sf_rgn_link_t *nrlink;
14942 			/* LINTED: constant in conditional context */
14943 			SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14944 			ASSERT(nrlink != NULL);
14945 			ASSERT(nrlink->prev == sfmmup);
14946 			nrlink->prev = prev;
14947 		}
14948 	}
14949 	mutex_exit(&rgnp->rgn_mutex);
14950 }
14951 
14952 /*
14953  * Link scd sfmmu onto ism or hme region list for each region in the
14954  * scd region map.
14955  */
14956 void
14957 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14958 {
14959 	uint_t rid;
14960 	uint_t i;
14961 	uint_t j;
14962 	ulong_t w;
14963 	sf_region_t *rgnp;
14964 	sfmmu_t *scsfmmup;
14965 
14966 	scsfmmup = scdp->scd_sfmmup;
14967 	ASSERT(scsfmmup->sfmmu_scdhat);
14968 	for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14969 		if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14970 			continue;
14971 		}
14972 		j = 0;
14973 		while (w) {
14974 			if (!(w & 0x1)) {
14975 				j++;
14976 				w >>= 1;
14977 				continue;
14978 			}
14979 			rid = (i << BT_ULSHIFT) | j;
14980 			j++;
14981 			w >>= 1;
14982 
14983 			if (rid < SFMMU_MAX_HME_REGIONS) {
14984 				rgnp = srdp->srd_hmergnp[rid];
14985 				ASSERT(rgnp->rgn_id == rid);
14986 				ASSERT(rgnp->rgn_refcnt > 0);
14987 				sfmmu_link_to_hmeregion(scsfmmup, rgnp);
14988 			} else {
14989 				sfmmu_t *ism_hatid = NULL;
14990 				ism_ment_t *ism_ment;
14991 				rid -= SFMMU_MAX_HME_REGIONS;
14992 				rgnp = srdp->srd_ismrgnp[rid];
14993 				ASSERT(rgnp->rgn_id == rid);
14994 				ASSERT(rgnp->rgn_refcnt > 0);
14995 
14996 				ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14997 				ASSERT(ism_hatid->sfmmu_ismhat);
14998 				ism_ment = &scdp->scd_ism_links[rid];
14999 				ism_ment->iment_hat = scsfmmup;
15000 				ism_ment->iment_base_va = rgnp->rgn_saddr;
15001 				mutex_enter(&ism_mlist_lock);
15002 				iment_add(ism_ment, ism_hatid);
15003 				mutex_exit(&ism_mlist_lock);
15004 
15005 			}
15006 		}
15007 	}
15008 }
15009 /*
15010  * Unlink scd sfmmu from ism or hme region list for each region in the
15011  * scd region map.
15012  */
15013 void
15014 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
15015 {
15016 	uint_t rid;
15017 	uint_t i;
15018 	uint_t j;
15019 	ulong_t w;
15020 	sf_region_t *rgnp;
15021 	sfmmu_t *scsfmmup;
15022 
15023 	scsfmmup = scdp->scd_sfmmup;
15024 	for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
15025 		if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
15026 			continue;
15027 		}
15028 		j = 0;
15029 		while (w) {
15030 			if (!(w & 0x1)) {
15031 				j++;
15032 				w >>= 1;
15033 				continue;
15034 			}
15035 			rid = (i << BT_ULSHIFT) | j;
15036 			j++;
15037 			w >>= 1;
15038 
15039 			if (rid < SFMMU_MAX_HME_REGIONS) {
15040 				rgnp = srdp->srd_hmergnp[rid];
15041 				ASSERT(rgnp->rgn_id == rid);
15042 				ASSERT(rgnp->rgn_refcnt > 0);
15043 				sfmmu_unlink_from_hmeregion(scsfmmup,
15044 				    rgnp);
15045 
15046 			} else {
15047 				sfmmu_t *ism_hatid = NULL;
15048 				ism_ment_t *ism_ment;
15049 				rid -= SFMMU_MAX_HME_REGIONS;
15050 				rgnp = srdp->srd_ismrgnp[rid];
15051 				ASSERT(rgnp->rgn_id == rid);
15052 				ASSERT(rgnp->rgn_refcnt > 0);
15053 
15054 				ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
15055 				ASSERT(ism_hatid->sfmmu_ismhat);
15056 				ism_ment = &scdp->scd_ism_links[rid];
15057 				ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
15058 				ASSERT(ism_ment->iment_base_va ==
15059 				    rgnp->rgn_saddr);
15060 				mutex_enter(&ism_mlist_lock);
15061 				iment_sub(ism_ment, ism_hatid);
15062 				mutex_exit(&ism_mlist_lock);
15063 
15064 			}
15065 		}
15066 	}
15067 }
15068 /*
15069  * Allocates and initialises a new SCD structure, this is called with
15070  * the srd_scd_mutex held and returns with the reference count
15071  * initialised to 1.
15072  */
15073 static sf_scd_t *
15074 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
15075 {
15076 	sf_scd_t *new_scdp;
15077 	sfmmu_t *scsfmmup;
15078 	int i;
15079 
15080 	ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
15081 	new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);
15082 
15083 	scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
15084 	new_scdp->scd_sfmmup = scsfmmup;
15085 	scsfmmup->sfmmu_srdp = srdp;
15086 	scsfmmup->sfmmu_scdp = new_scdp;
15087 	scsfmmup->sfmmu_tsb0_4minflcnt = 0;
15088 	scsfmmup->sfmmu_scdhat = 1;
15089 	CPUSET_ALL(scsfmmup->sfmmu_cpusran);
15090 	bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
15091 
15092 	ASSERT(max_mmu_ctxdoms > 0);
15093 	for (i = 0; i < max_mmu_ctxdoms; i++) {
15094 		scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
15095 		scsfmmup->sfmmu_ctxs[i].gnum = 0;
15096 	}
15097 
15098 	for (i = 0; i < MMU_PAGE_SIZES; i++) {
15099 		new_scdp->scd_rttecnt[i] = 0;
15100 	}
15101 
15102 	new_scdp->scd_region_map = *new_map;
15103 	new_scdp->scd_refcnt = 1;
15104 	if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
15105 		kmem_cache_free(scd_cache, new_scdp);
15106 		kmem_cache_free(sfmmuid_cache, scsfmmup);
15107 		return (NULL);
15108 	}
15109 	if (&mmu_init_scd) {
15110 		mmu_init_scd(new_scdp);
15111 	}
15112 	return (new_scdp);
15113 }
15114 
15115 /*
15116  * The first phase of a process joining an SCD. The hat structure is
15117  * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
15118  * and a cross-call with context invalidation is used to cause the
15119  * remaining work to be carried out in the sfmmu_tsbmiss_exception()
15120  * routine.
15121  */
15122 static void
15123 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
15124 {
15125 	hatlock_t *hatlockp;
15126 	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15127 	int i;
15128 	sf_scd_t *old_scdp;
15129 
15130 	ASSERT(srdp != NULL);
15131 	ASSERT(scdp != NULL);
15132 	ASSERT(scdp->scd_refcnt > 0);
15133 	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15134 
15135 	if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
15136 		ASSERT(old_scdp != scdp);
15137 
15138 		mutex_enter(&old_scdp->scd_mutex);
15139 		sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
15140 		mutex_exit(&old_scdp->scd_mutex);
15141 		/*
15142 		 * sfmmup leaves the old scd. Update sfmmu_ttecnt to
15143 		 * include the shme rgn ttecnt for rgns that
15144 		 * were in the old SCD
15145 		 */
15146 		for (i = 0; i < mmu_page_sizes; i++) {
15147 			ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15148 			    old_scdp->scd_rttecnt[i]);
15149 			atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15150 			    sfmmup->sfmmu_scdrttecnt[i]);
15151 		}
15152 	}
15153 
15154 	/*
15155 	 * Move sfmmu to the scd lists.
15156 	 */
15157 	mutex_enter(&scdp->scd_mutex);
15158 	sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
15159 	mutex_exit(&scdp->scd_mutex);
15160 	SF_SCD_INCR_REF(scdp);
15161 
15162 	hatlockp = sfmmu_hat_enter(sfmmup);
15163 	/*
15164 	 * For a multi-thread process, we must stop
15165 	 * all the other threads before joining the scd.
15166 	 */
15167 
15168 	SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);
15169 
15170 	sfmmu_invalidate_ctx(sfmmup);
15171 	sfmmup->sfmmu_scdp = scdp;
15172 
15173 	/*
15174 	 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
15175 	 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD.
15176 	 */
15177 	for (i = 0; i < mmu_page_sizes; i++) {
15178 		sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
15179 		ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
15180 		atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15181 		    -sfmmup->sfmmu_scdrttecnt[i]);
15182 	}
15183 	/* update tsb0 inflation count */
15184 	if (old_scdp != NULL) {
15185 		sfmmup->sfmmu_tsb0_4minflcnt +=
15186 		    old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15187 	}
15188 	ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
15189 	    scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
15190 	sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15191 
15192 	sfmmu_hat_exit(hatlockp);
15193 
15194 	if (old_scdp != NULL) {
15195 		SF_SCD_DECR_REF(srdp, old_scdp);
15196 	}
15197 
15198 }
15199 
15200 /*
15201  * This routine is called by a process to become part of an SCD. It is called
15202  * from sfmmu_tsbmiss_exception() once most of the initial work has been
15203  * done by sfmmu_join_scd(). This routine must not drop the hat lock.
15204  */
15205 static void
15206 sfmmu_finish_join_scd(sfmmu_t *sfmmup)
15207 {
15208 	struct tsb_info	*tsbinfop;
15209 
15210 	ASSERT(sfmmu_hat_lock_held(sfmmup));
15211 	ASSERT(sfmmup->sfmmu_scdp != NULL);
15212 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
15213 	ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15214 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
15215 
15216 	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
15217 	    tsbinfop = tsbinfop->tsb_next) {
15218 		if (tsbinfop->tsb_flags & TSB_SWAPPED) {
15219 			continue;
15220 		}
15221 		ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
15222 
15223 		sfmmu_inv_tsb(tsbinfop->tsb_va,
15224 		    TSB_BYTES(tsbinfop->tsb_szc));
15225 	}
15226 
15227 	/* Set HAT_CTX1_FLAG for all SCD ISMs */
15228 	sfmmu_ism_hatflags(sfmmup, 1);
15229 
15230 	SFMMU_STAT(sf_join_scd);
15231 }
15232 
15233 /*
15234  * This routine is called in order to check if there is an SCD which matches
15235  * the process's region map if not then a new SCD may be created.
15236  */
15237 static void
15238 sfmmu_find_scd(sfmmu_t *sfmmup)
15239 {
15240 	sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15241 	sf_scd_t *scdp, *new_scdp;
15242 	int ret;
15243 
15244 	ASSERT(srdp != NULL);
15245 	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15246 
15247 	mutex_enter(&srdp->srd_scd_mutex);
15248 	for (scdp = srdp->srd_scdp; scdp != NULL;
15249 	    scdp = scdp->scd_next) {
15250 		SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15251 		    &sfmmup->sfmmu_region_map, ret);
15252 		if (ret == 1) {
15253 			SF_SCD_INCR_REF(scdp);
15254 			mutex_exit(&srdp->srd_scd_mutex);
15255 			sfmmu_join_scd(scdp, sfmmup);
15256 			ASSERT(scdp->scd_refcnt >= 2);
15257 			atomic_add_32((volatile uint32_t *)
15258 			    &scdp->scd_refcnt, -1);
15259 			return;
15260 		} else {
15261 			/*
15262 			 * If the sfmmu region map is a subset of the scd
15263 			 * region map, then the assumption is that this process
15264 			 * will continue attaching to ISM segments until the
15265 			 * region maps are equal.
15266 			 */
15267 			SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15268 			    &sfmmup->sfmmu_region_map, ret);
15269 			if (ret == 1) {
15270 				mutex_exit(&srdp->srd_scd_mutex);
15271 				return;
15272 			}
15273 		}
15274 	}
15275 
15276 	ASSERT(scdp == NULL);
15277 	/*
15278 	 * No matching SCD has been found, create a new one.
15279 	 */
15280 	if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
15281 	    NULL) {
15282 		mutex_exit(&srdp->srd_scd_mutex);
15283 		return;
15284 	}
15285 
15286 	/*
15287 	 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
15288 	 */
15289 
15290 	/* Set scd_rttecnt for shme rgns in SCD */
15291 	sfmmu_set_scd_rttecnt(srdp, new_scdp);
15292 
15293 	/*
15294 	 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15295 	 */
15296 	sfmmu_link_scd_to_regions(srdp, new_scdp);
15297 	sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15298 	SFMMU_STAT_ADD(sf_create_scd, 1);
15299 
15300 	mutex_exit(&srdp->srd_scd_mutex);
15301 	sfmmu_join_scd(new_scdp, sfmmup);
15302 	ASSERT(new_scdp->scd_refcnt >= 2);
15303 	atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1);
15304 }
15305 
15306 /*
15307  * This routine is called by a process to remove itself from an SCD. It is
15308  * either called when the processes has detached from a segment or from
15309  * hat_free_start() as a result of calling exit.
15310  */
15311 static void
15312 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
15313 {
15314 	sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15315 	sf_srd_t *srdp =  sfmmup->sfmmu_srdp;
15316 	hatlock_t *hatlockp = TSB_HASH(sfmmup);
15317 	int i;
15318 
15319 	ASSERT(scdp != NULL);
15320 	ASSERT(srdp != NULL);
15321 
15322 	if (sfmmup->sfmmu_free) {
15323 		/*
15324 		 * If the process is part of an SCD the sfmmu is unlinked
15325 		 * from scd_sf_list.
15326 		 */
15327 		mutex_enter(&scdp->scd_mutex);
15328 		sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15329 		mutex_exit(&scdp->scd_mutex);
15330 		/*
15331 		 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15332 		 * are about to leave the SCD
15333 		 */
15334 		for (i = 0; i < mmu_page_sizes; i++) {
15335 			ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15336 			    scdp->scd_rttecnt[i]);
15337 			atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15338 			    sfmmup->sfmmu_scdrttecnt[i]);
15339 			sfmmup->sfmmu_scdrttecnt[i] = 0;
15340 		}
15341 		sfmmup->sfmmu_scdp = NULL;
15342 
15343 		SF_SCD_DECR_REF(srdp, scdp);
15344 		return;
15345 	}
15346 
15347 	ASSERT(r_type != SFMMU_REGION_ISM ||
15348 	    SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15349 	ASSERT(scdp->scd_refcnt);
15350 	ASSERT(!sfmmup->sfmmu_free);
15351 	ASSERT(sfmmu_hat_lock_held(sfmmup));
15352 	ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15353 
15354 	/*
15355 	 * Wait for ISM maps to be updated.
15356 	 */
15357 	if (r_type != SFMMU_REGION_ISM) {
15358 		while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15359 		    sfmmup->sfmmu_scdp != NULL) {
15360 			cv_wait(&sfmmup->sfmmu_tsb_cv,
15361 			    HATLOCK_MUTEXP(hatlockp));
15362 		}
15363 
15364 		if (sfmmup->sfmmu_scdp == NULL) {
15365 			sfmmu_hat_exit(hatlockp);
15366 			return;
15367 		}
15368 		SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
15369 	}
15370 
15371 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
15372 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
15373 		/*
15374 		 * Since HAT_JOIN_SCD was set our context
15375 		 * is still invalid.
15376 		 */
15377 	} else {
15378 		/*
15379 		 * For a multi-thread process, we must stop
15380 		 * all the other threads before leaving the scd.
15381 		 */
15382 
15383 		sfmmu_invalidate_ctx(sfmmup);
15384 	}
15385 
15386 	/* Clear all the rid's for ISM, delete flags, etc */
15387 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15388 	sfmmu_ism_hatflags(sfmmup, 0);
15389 
15390 	/*
15391 	 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15392 	 * are in SCD before this sfmmup leaves the SCD.
15393 	 */
15394 	for (i = 0; i < mmu_page_sizes; i++) {
15395 		ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15396 		    scdp->scd_rttecnt[i]);
15397 		atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15398 		    sfmmup->sfmmu_scdrttecnt[i]);
15399 		sfmmup->sfmmu_scdrttecnt[i] = 0;
15400 		/* update ismttecnt to include SCD ism before hat leaves SCD */
15401 		sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
15402 		sfmmup->sfmmu_scdismttecnt[i] = 0;
15403 	}
15404 	/* update tsb0 inflation count */
15405 	sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15406 
15407 	if (r_type != SFMMU_REGION_ISM) {
15408 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
15409 	}
15410 	sfmmup->sfmmu_scdp = NULL;
15411 
15412 	sfmmu_hat_exit(hatlockp);
15413 
15414 	/*
15415 	 * Unlink sfmmu from scd_sf_list this can be done without holding
15416 	 * the hat lock as we hold the sfmmu_as lock which prevents
15417 	 * hat_join_region from adding this thread to the scd again. Other
15418 	 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL
15419 	 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
15420 	 * while holding the hat lock.
15421 	 */
15422 	mutex_enter(&scdp->scd_mutex);
15423 	sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15424 	mutex_exit(&scdp->scd_mutex);
15425 	SFMMU_STAT(sf_leave_scd);
15426 
15427 	SF_SCD_DECR_REF(srdp, scdp);
15428 	hatlockp = sfmmu_hat_enter(sfmmup);
15429 
15430 }
15431 
15432 /*
15433  * Unlink and free up an SCD structure with a reference count of 0.
15434  */
15435 static void
15436 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
15437 {
15438 	sfmmu_t *scsfmmup;
15439 	sf_scd_t *sp;
15440 	hatlock_t *shatlockp;
15441 	int i, ret;
15442 
15443 	mutex_enter(&srdp->srd_scd_mutex);
15444 	for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
15445 		if (sp == scdp)
15446 			break;
15447 	}
15448 	if (sp == NULL || sp->scd_refcnt) {
15449 		mutex_exit(&srdp->srd_scd_mutex);
15450 		return;
15451 	}
15452 
15453 	/*
15454 	 * It is possible that the scd has been freed and reallocated with a
15455 	 * different region map while we've been waiting for the srd_scd_mutex.
15456 	 */
15457 	SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
15458 	if (ret != 1) {
15459 		mutex_exit(&srdp->srd_scd_mutex);
15460 		return;
15461 	}
15462 
15463 	ASSERT(scdp->scd_sf_list == NULL);
15464 	/*
15465 	 * Unlink scd from srd_scdp list.
15466 	 */
15467 	sfmmu_remove_scd(&srdp->srd_scdp, scdp);
15468 	mutex_exit(&srdp->srd_scd_mutex);
15469 
15470 	sfmmu_unlink_scd_from_regions(srdp, scdp);
15471 
15472 	/* Clear shared context tsb and release ctx */
15473 	scsfmmup = scdp->scd_sfmmup;
15474 
15475 	/*
15476 	 * create a barrier so that scd will not be destroyed
15477 	 * if other thread still holds the same shared hat lock.
15478 	 * E.g., sfmmu_tsbmiss_exception() needs to acquire the
15479 	 * shared hat lock before checking the shared tsb reloc flag.
15480 	 */
15481 	shatlockp = sfmmu_hat_enter(scsfmmup);
15482 	sfmmu_hat_exit(shatlockp);
15483 
15484 	sfmmu_free_scd_tsbs(scsfmmup);
15485 
15486 	for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
15487 		if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
15488 			kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
15489 			    SFMMU_L2_HMERLINKS_SIZE);
15490 			scsfmmup->sfmmu_hmeregion_links[i] = NULL;
15491 		}
15492 	}
15493 	kmem_cache_free(sfmmuid_cache, scsfmmup);
15494 	kmem_cache_free(scd_cache, scdp);
15495 	SFMMU_STAT(sf_destroy_scd);
15496 }
15497 
15498 /*
15499  * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to
15500  * bits which are set in the ism_region_map parameter. This flag indicates to
15501  * the tsbmiss handler that mapping for these segments should be loaded using
15502  * the shared context.
15503  */
15504 static void
15505 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
15506 {
15507 	sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15508 	ism_blk_t *ism_blkp;
15509 	ism_map_t *ism_map;
15510 	int i, rid;
15511 
15512 	ASSERT(sfmmup->sfmmu_iblk != NULL);
15513 	ASSERT(scdp != NULL);
15514 	/*
15515 	 * Note that the caller either set HAT_ISMBUSY flag or checked
15516 	 * under hat lock that HAT_ISMBUSY was not set by another thread.
15517 	 */
15518 	ASSERT(sfmmu_hat_lock_held(sfmmup));
15519 
15520 	ism_blkp = sfmmup->sfmmu_iblk;
15521 	while (ism_blkp != NULL) {
15522 		ism_map = ism_blkp->iblk_maps;
15523 		for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
15524 			rid = ism_map[i].imap_rid;
15525 			if (rid == SFMMU_INVALID_ISMRID) {
15526 				continue;
15527 			}
15528 			ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15529 			if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15530 			    addflag) {
15531 				ism_map[i].imap_hatflags |=
15532 				    HAT_CTX1_FLAG;
15533 			} else {
15534 				ism_map[i].imap_hatflags &=
15535 				    ~HAT_CTX1_FLAG;
15536 			}
15537 		}
15538 		ism_blkp = ism_blkp->iblk_next;
15539 	}
15540 }
15541 
15542 static int
15543 sfmmu_srd_lock_held(sf_srd_t *srdp)
15544 {
15545 	return (MUTEX_HELD(&srdp->srd_mutex));
15546 }
15547 
15548 /* ARGSUSED */
15549 static int
15550 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15551 {
15552 	sf_scd_t *scdp = (sf_scd_t *)buf;
15553 
15554 	bzero(buf, sizeof (sf_scd_t));
15555 	mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15556 	return (0);
15557 }
15558 
15559 /* ARGSUSED */
15560 static void
15561 sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15562 {
15563 	sf_scd_t *scdp = (sf_scd_t *)buf;
15564 
15565 	mutex_destroy(&scdp->scd_mutex);
15566 }
15567 
15568 /*
15569  * The listp parameter is a pointer to a list of hmeblks which are partially
15570  * freed as result of calling sfmmu_hblk_hash_rm(), the last phase of the
15571  * freeing process is to cross-call all cpus to ensure that there are no
15572  * remaining cached references.
15573  *
15574  * If the local generation number is less than the global then we can free
15575  * hmeblks which are already on the pending queue as another cpu has completed
15576  * the cross-call.
15577  *
15578  * We cross-call to make sure that there are no threads on other cpus accessing
15579  * these hmblks and then complete the process of freeing them under the
15580  * following conditions:
15581  * 	The total number of pending hmeblks is greater than the threshold
15582  *	The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15583  *	It is at least 1 second since the last time we cross-called
15584  *
15585  * Otherwise, we add the hmeblks to the per-cpu pending queue.
15586  */
15587 static void
15588 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15589 {
15590 	struct hme_blk *hblkp, *pr_hblkp = NULL;
15591 	int		count = 0;
15592 	cpuset_t	cpuset = cpu_ready_set;
15593 	cpu_hme_pend_t	*cpuhp;
15594 	timestruc_t	now;
15595 	int		one_second_expired = 0;
15596 
15597 	gethrestime_lasttick(&now);
15598 
15599 	for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15600 		ASSERT(hblkp->hblk_shw_bit == 0);
15601 		ASSERT(hblkp->hblk_shared == 0);
15602 		count++;
15603 		pr_hblkp = hblkp;
15604 	}
15605 
15606 	cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15607 	mutex_enter(&cpuhp->chp_mutex);
15608 
15609 	if ((cpuhp->chp_count + count) == 0) {
15610 		mutex_exit(&cpuhp->chp_mutex);
15611 		return;
15612 	}
15613 
15614 	if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15615 		one_second_expired  = 1;
15616 	}
15617 
15618 	if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15619 	    (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15620 	    one_second_expired)) {
15621 		/* Append global list to local */
15622 		if (pr_hblkp == NULL) {
15623 			*listp = cpuhp->chp_listp;
15624 		} else {
15625 			pr_hblkp->hblk_next = cpuhp->chp_listp;
15626 		}
15627 		cpuhp->chp_listp = NULL;
15628 		cpuhp->chp_count = 0;
15629 		cpuhp->chp_timestamp = now.tv_sec;
15630 		mutex_exit(&cpuhp->chp_mutex);
15631 
15632 		kpreempt_disable();
15633 		CPUSET_DEL(cpuset, CPU->cpu_id);
15634 		xt_sync(cpuset);
15635 		xt_sync(cpuset);
15636 		kpreempt_enable();
15637 
15638 		/*
15639 		 * At this stage we know that no trap handlers on other
15640 		 * cpus can have references to hmeblks on the list.
15641 		 */
15642 		sfmmu_hblk_free(listp);
15643 	} else if (*listp != NULL) {
15644 		pr_hblkp->hblk_next = cpuhp->chp_listp;
15645 		cpuhp->chp_listp = *listp;
15646 		cpuhp->chp_count += count;
15647 		*listp = NULL;
15648 		mutex_exit(&cpuhp->chp_mutex);
15649 	} else {
15650 		mutex_exit(&cpuhp->chp_mutex);
15651 	}
15652 }
15653 
15654 /*
15655  * Add an hmeblk to the the hash list.
15656  */
15657 void
15658 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15659 	uint64_t hblkpa)
15660 {
15661 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15662 #ifdef	DEBUG
15663 	if (hmebp->hmeblkp == NULL) {
15664 		ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15665 	}
15666 #endif /* DEBUG */
15667 
15668 	hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15669 	/*
15670 	 * Since the TSB miss handler now does not lock the hash chain before
15671 	 * walking it, make sure that the hmeblks nextpa is globally visible
15672 	 * before we make the hmeblk globally visible by updating the chain root
15673 	 * pointer in the hash bucket.
15674 	 */
15675 	membar_producer();
15676 	hmebp->hmeh_nextpa = hblkpa;
15677 	hmeblkp->hblk_next = hmebp->hmeblkp;
15678 	hmebp->hmeblkp = hmeblkp;
15679 
15680 }
15681 
15682 /*
15683  * This function is the first part of a 2 part process to remove an hmeblk
15684  * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15685  * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15686  * a per-cpu pending list using the virtual address pointer.
15687  *
15688  * TSB miss trap handlers that start after this phase will no longer see
15689  * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
15690  * can still use it for further chain traversal because we haven't yet modifed
15691  * the next physical pointer or freed it.
15692  *
15693  * In the second phase of hmeblk removal we'll issue a barrier xcall before
15694  * we reuse or free this hmeblk. This will make sure all lingering references to
15695  * the hmeblk after first phase disappear before we finally reclaim it.
15696  * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
15697  * during their traversal.
15698  *
15699  * The hmehash_mutex must be held when calling this function.
15700  *
15701  * Input:
15702  *	 hmebp - hme hash bucket pointer
15703  *	 hmeblkp - address of hmeblk to be removed
15704  *	 pr_hblk - virtual address of previous hmeblkp
15705  *	 listp - pointer to list of hmeblks linked by virtual address
15706  *	 free_now flag - indicates that a complete removal from the hash chains
15707  *			 is necessary.
15708  *
15709  * It is inefficient to use the free_now flag as a cross-call is required to
15710  * remove a single hmeblk from the hash chain but is necessary when hmeblks are
15711  * in short supply.
15712  */
15713 void
15714 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15715     struct hme_blk *pr_hblk, struct hme_blk **listp,
15716     int free_now)
15717 {
15718 	int shw_size, vshift;
15719 	struct hme_blk *shw_hblkp;
15720 	uint_t		shw_mask, newshw_mask;
15721 	caddr_t		vaddr;
15722 	int		size;
15723 	cpuset_t cpuset = cpu_ready_set;
15724 
15725 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15726 
15727 	if (hmebp->hmeblkp == hmeblkp) {
15728 		hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15729 		hmebp->hmeblkp = hmeblkp->hblk_next;
15730 	} else {
15731 		pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15732 		pr_hblk->hblk_next = hmeblkp->hblk_next;
15733 	}
15734 
15735 	size = get_hblk_ttesz(hmeblkp);
15736 	shw_hblkp = hmeblkp->hblk_shadow;
15737 	if (shw_hblkp) {
15738 		ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15739 		ASSERT(!hmeblkp->hblk_shared);
15740 #ifdef	DEBUG
15741 		if (mmu_page_sizes == max_mmu_page_sizes) {
15742 			ASSERT(size < TTE256M);
15743 		} else {
15744 			ASSERT(size < TTE4M);
15745 		}
15746 #endif /* DEBUG */
15747 
15748 		shw_size = get_hblk_ttesz(shw_hblkp);
15749 		vaddr = (caddr_t)get_hblk_base(hmeblkp);
15750 		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15751 		ASSERT(vshift < 8);
15752 		/*
15753 		 * Atomically clear shadow mask bit
15754 		 */
15755 		do {
15756 			shw_mask = shw_hblkp->hblk_shw_mask;
15757 			ASSERT(shw_mask & (1 << vshift));
15758 			newshw_mask = shw_mask & ~(1 << vshift);
15759 			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
15760 			    shw_mask, newshw_mask);
15761 		} while (newshw_mask != shw_mask);
15762 		hmeblkp->hblk_shadow = NULL;
15763 	}
15764 	hmeblkp->hblk_shw_bit = 0;
15765 
15766 	if (hmeblkp->hblk_shared) {
15767 #ifdef	DEBUG
15768 		sf_srd_t	*srdp;
15769 		sf_region_t	*rgnp;
15770 		uint_t		rid;
15771 
15772 		srdp = hblktosrd(hmeblkp);
15773 		ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15774 		rid = hmeblkp->hblk_tag.htag_rid;
15775 		ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15776 		ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15777 		rgnp = srdp->srd_hmergnp[rid];
15778 		ASSERT(rgnp != NULL);
15779 		SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15780 #endif /* DEBUG */
15781 		hmeblkp->hblk_shared = 0;
15782 	}
15783 	if (free_now) {
15784 		kpreempt_disable();
15785 		CPUSET_DEL(cpuset, CPU->cpu_id);
15786 		xt_sync(cpuset);
15787 		xt_sync(cpuset);
15788 		kpreempt_enable();
15789 
15790 		hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15791 		hmeblkp->hblk_next = NULL;
15792 	} else {
15793 		/* Append hmeblkp to listp for processing later. */
15794 		hmeblkp->hblk_next = *listp;
15795 		*listp = hmeblkp;
15796 	}
15797 }
15798 
15799 /*
15800  * This routine is called when memory is in short supply and returns a free
15801  * hmeblk of the requested size from the cpu pending lists.
15802  */
15803 static struct hme_blk *
15804 sfmmu_check_pending_hblks(int size)
15805 {
15806 	int i;
15807 	struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15808 	int found_hmeblk;
15809 	cpuset_t cpuset = cpu_ready_set;
15810 	cpu_hme_pend_t *cpuhp;
15811 
15812 	/* Flush cpu hblk pending queues */
15813 	for (i = 0; i < NCPU; i++) {
15814 		cpuhp = &cpu_hme_pend[i];
15815 		if (cpuhp->chp_listp != NULL)  {
15816 			mutex_enter(&cpuhp->chp_mutex);
15817 			if (cpuhp->chp_listp == NULL)  {
15818 				mutex_exit(&cpuhp->chp_mutex);
15819 				continue;
15820 			}
15821 			found_hmeblk = 0;
15822 			last_hmeblkp = NULL;
15823 			for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15824 			    hmeblkp = hmeblkp->hblk_next) {
15825 				if (get_hblk_ttesz(hmeblkp) == size) {
15826 					if (last_hmeblkp == NULL) {
15827 						cpuhp->chp_listp =
15828 						    hmeblkp->hblk_next;
15829 					} else {
15830 						last_hmeblkp->hblk_next =
15831 						    hmeblkp->hblk_next;
15832 					}
15833 					ASSERT(cpuhp->chp_count > 0);
15834 					cpuhp->chp_count--;
15835 					found_hmeblk = 1;
15836 					break;
15837 				} else {
15838 					last_hmeblkp = hmeblkp;
15839 				}
15840 			}
15841 			mutex_exit(&cpuhp->chp_mutex);
15842 
15843 			if (found_hmeblk) {
15844 				kpreempt_disable();
15845 				CPUSET_DEL(cpuset, CPU->cpu_id);
15846 				xt_sync(cpuset);
15847 				xt_sync(cpuset);
15848 				kpreempt_enable();
15849 				return (hmeblkp);
15850 			}
15851 		}
15852 	}
15853 	return (NULL);
15854 }
15855