xref: /titanic_41/usr/src/uts/sfmmu/vm/hat_sfmmu.c (revision fd9cb95cbb2f626355a60efb9d02c5f0a33c10e6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * VM - Hardware Address Translation management for Spitfire MMU.
31  *
32  * This file implements the machine specific hardware translation
33  * needed by the VM system.  The machine independent interface is
34  * described in <vm/hat.h> while the machine dependent interface
35  * and data structures are described in <vm/hat_sfmmu.h>.
36  *
37  * The hat layer manages the address translation hardware as a cache
38  * driven by calls from the higher levels in the VM system.
39  */
40 
41 #include <sys/types.h>
42 #include <vm/hat.h>
43 #include <vm/hat_sfmmu.h>
44 #include <vm/page.h>
45 #include <sys/pte.h>
46 #include <sys/systm.h>
47 #include <sys/mman.h>
48 #include <sys/sysmacros.h>
49 #include <sys/machparam.h>
50 #include <sys/vtrace.h>
51 #include <sys/kmem.h>
52 #include <sys/mmu.h>
53 #include <sys/cmn_err.h>
54 #include <sys/cpu.h>
55 #include <sys/cpuvar.h>
56 #include <sys/debug.h>
57 #include <sys/lgrp.h>
58 #include <sys/archsystm.h>
59 #include <sys/machsystm.h>
60 #include <sys/vmsystm.h>
61 #include <vm/as.h>
62 #include <vm/seg.h>
63 #include <vm/seg_kp.h>
64 #include <vm/seg_kmem.h>
65 #include <vm/seg_kpm.h>
66 #include <vm/rm.h>
67 #include <sys/t_lock.h>
68 #include <sys/obpdefs.h>
69 #include <sys/vm_machparam.h>
70 #include <sys/var.h>
71 #include <sys/trap.h>
72 #include <sys/machtrap.h>
73 #include <sys/scb.h>
74 #include <sys/bitmap.h>
75 #include <sys/machlock.h>
76 #include <sys/membar.h>
77 #include <sys/atomic.h>
78 #include <sys/cpu_module.h>
79 #include <sys/prom_debug.h>
80 #include <sys/ksynch.h>
81 #include <sys/mem_config.h>
82 #include <sys/mem_cage.h>
83 #include <sys/dtrace.h>
84 #include <vm/vm_dep.h>
85 #include <vm/xhat_sfmmu.h>
86 #include <sys/fpu/fpusystm.h>
87 
88 #if defined(SF_ERRATA_57)
89 extern caddr_t errata57_limit;
90 #endif
91 
92 #define	HME8BLK_SZ_RND		((roundup(HME8BLK_SZ, sizeof (int64_t))) /  \
93 				(sizeof (int64_t)))
94 #define	HBLK_RESERVE		((struct hme_blk *)hblk_reserve)
95 
96 #define	HBLK_RESERVE_CNT	128
97 #define	HBLK_RESERVE_MIN	20
98 
99 static struct hme_blk		*freehblkp;
100 static kmutex_t			freehblkp_lock;
101 static int			freehblkcnt;
102 
103 static int64_t			hblk_reserve[HME8BLK_SZ_RND];
104 static kmutex_t			hblk_reserve_lock;
105 static kthread_t		*hblk_reserve_thread;
106 
107 static nucleus_hblk8_info_t	nucleus_hblk8;
108 static nucleus_hblk1_info_t	nucleus_hblk1;
109 
110 /*
111  * SFMMU specific hat functions
112  */
113 void	hat_pagecachectl(struct page *, int);
114 
115 /* flags for hat_pagecachectl */
116 #define	HAT_CACHE	0x1
117 #define	HAT_UNCACHE	0x2
118 #define	HAT_TMPNC	0x4
119 
120 /*
121  * Flag to allow the creation of non-cacheable translations
122  * to system memory. It is off by default. At the moment this
123  * flag is used by the ecache error injector. The error injector
124  * will turn it on when creating such a translation, then shut it
125  * off when it is finished.
126  */
127 
128 int	sfmmu_allow_nc_trans = 0;
129 
130 /*
131  * Flag to disable large page support.
132  * 	a value of 1 disables all large pages;
133  *	bits 1, 2, and 3 disable 64K, 512K, and 4M pages respectively.
134  *
135  * For example, use the value 0x4 to disable 512K pages.
136  *
137  */
138 #define	LARGE_PAGES_OFF		0x1
139 
140 /*
141  * WARNING: 512K pages MUST be disabled for ISM/DISM. If not,
142  * a process would page fault indefinitely if it tried to
143  * access a 512K page.
144  */
145 int	disable_ism_large_pages = (1 << TTE512K);
146 int	disable_large_pages = 0;
147 int	disable_auto_large_pages = 0;
148 
149 /*
150  * Private sfmmu data structures for hat management
151  */
152 static struct kmem_cache *sfmmuid_cache;
153 
154 /*
155  * Private sfmmu data structures for ctx management
156  */
157 static struct ctx	*ctxhand;	/* hand used while stealing ctxs */
158 static struct ctx	*ctxfree;	/* head of free ctx list */
159 static struct ctx	*ctxdirty;	/* head of dirty ctx list */
160 
161 /*
162  * Private sfmmu data structures for tsb management
163  */
164 static struct kmem_cache *sfmmu_tsbinfo_cache;
165 static struct kmem_cache *sfmmu_tsb8k_cache;
166 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
167 static vmem_t *kmem_tsb_arena;
168 
169 /*
170  * sfmmu static variables for hmeblk resource management.
171  */
172 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
173 static struct kmem_cache *sfmmu8_cache;
174 static struct kmem_cache *sfmmu1_cache;
175 static struct kmem_cache *pa_hment_cache;
176 
177 static kmutex_t 	ctx_list_lock;	/* mutex for ctx free/dirty lists */
178 static kmutex_t 	ism_mlist_lock;	/* mutex for ism mapping list */
179 /*
180  * private data for ism
181  */
182 static struct kmem_cache *ism_blk_cache;
183 static struct kmem_cache *ism_ment_cache;
184 #define	ISMID_STARTADDR	NULL
185 
186 /*
187  * Whether to delay TLB flushes and use Cheetah's flush-all support
188  * when removing contexts from the dirty list.
189  */
190 int delay_tlb_flush;
191 int disable_delay_tlb_flush;
192 
193 /*
194  * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
195  * HAT flags, synchronizing TLB/TSB coherency, and context management.
196  * The lock is hashed on the sfmmup since the case where we need to lock
197  * all processes is rare but does occur (e.g. we need to unload a shared
198  * mapping from all processes using the mapping).  We have a lot of buckets,
199  * and each slab of sfmmu_t's can use about a quarter of them, giving us
200  * a fairly good distribution without wasting too much space and overhead
201  * when we have to grab them all.
202  */
203 #define	SFMMU_NUM_LOCK	128		/* must be power of two */
204 hatlock_t	hat_lock[SFMMU_NUM_LOCK];
205 
206 /*
207  * Hash algorithm optimized for a small number of slabs.
208  *  7 is (highbit((sizeof sfmmu_t)) - 1)
209  * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
210  * kmem_cache, and thus they will be sequential within that cache.  In
211  * addition, each new slab will have a different "color" up to cache_maxcolor
212  * which will skew the hashing for each successive slab which is allocated.
213  * If sfmmu_t ever grows to a larger size, this algorithm may need to
214  * be revisited.
215  */
216 #define	TSB_HASH_SHIFT_BITS (7)
217 #define	PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
218 
219 #ifdef DEBUG
220 int tsb_hash_debug = 0;
221 #define	TSB_HASH(sfmmup)	\
222 	(tsb_hash_debug ? &hat_lock[0] : \
223 	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
224 #else	/* DEBUG */
225 #define	TSB_HASH(sfmmup)	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
226 #endif	/* DEBUG */
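/*
 * Illustration (not compiled), assuming sizeof (sfmmu_t) is 128 bytes
 * here (the comment above implies it is in the 128-255 byte range):
 * consecutive sfmmu_t's in a slab then differ by 0x80, so PTR_HASH()
 * yields consecutive values and they select different hat_lock buckets:
 *
 *	PTR_HASH(0x30001a000) = 0x600034 -> bucket 0x34
 *	PTR_HASH(0x30001a080) = 0x600035 -> bucket 0x35
 *
 * The addresses are hypothetical; only the 128-byte stride matters.
 */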
227 
228 
229 /* sfmmu_replace_tsb() return codes. */
230 typedef enum tsb_replace_rc {
231 	TSB_SUCCESS,
232 	TSB_ALLOCFAIL,
233 	TSB_LOSTRACE,
234 	TSB_ALREADY_SWAPPED,
235 	TSB_CANTGROW
236 } tsb_replace_rc_t;
237 
238 /*
239  * Flags for TSB allocation routines.
240  */
241 #define	TSB_ALLOC	0x01
242 #define	TSB_FORCEALLOC	0x02
243 #define	TSB_GROW	0x04
244 #define	TSB_SHRINK	0x08
245 #define	TSB_SWAPIN	0x10
246 
247 /*
248  * Support for HAT callbacks.
249  */
250 #define	SFMMU_MAX_RELOC_CALLBACKS	10
251 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
252 static id_t sfmmu_cb_nextid = 0;
253 static id_t sfmmu_tsb_cb_id;
254 struct sfmmu_callback *sfmmu_cb_table;
255 
256 /*
257  * Kernel page relocation is enabled by default for non-caged
258  * kernel pages.  This has little effect unless segkmem_reloc is
259  * set, since by default kernel memory comes from inside the
260  * kernel cage.
261  */
262 int hat_kpr_enabled = 1;
263 
264 kmutex_t	kpr_mutex;
265 kmutex_t	kpr_suspendlock;
266 kthread_t	*kreloc_thread;
267 
268 /*
269  * Enable VA->PA translation sanity checking on DEBUG kernels.
270  * Disabled by default.  This is incompatible with some
271  * drivers (error injector, RSM) so if it breaks you get
272  * to keep both pieces.
273  */
274 int hat_check_vtop = 0;
275 
276 /*
277  * Private sfmmu routines (prototypes)
278  */
279 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
280 static struct 	hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
281 			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t);
282 static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
283 			caddr_t, demap_range_t *, uint_t);
284 static caddr_t	sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
285 			caddr_t, int);
286 static void	sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *,
287 			uint64_t, struct hme_blk **);
288 static void	sfmmu_hblks_list_purge(struct hme_blk **);
289 static uint_t	sfmmu_get_free_hblk(struct hme_blk **, uint_t);
290 static uint_t	sfmmu_put_free_hblk(struct hme_blk *, uint_t);
291 static struct hme_blk *sfmmu_hblk_steal(int);
292 static int	sfmmu_steal_this_hblk(struct hmehash_bucket *,
293 			struct hme_blk *, uint64_t, uint64_t,
294 			struct hme_blk *);
295 static caddr_t	sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
296 
297 static void	sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
298 		    uint_t, uint_t, pgcnt_t);
299 void		sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
300 			uint_t);
301 static int	sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
302 			uint_t);
303 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
304 					caddr_t, int);
305 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
306 			struct hmehash_bucket *, caddr_t, uint_t, uint_t);
307 static int	sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
308 			caddr_t, page_t **, uint_t);
309 static void	sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
310 
311 static int	sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
312 pfn_t		sfmmu_uvatopfn(caddr_t, sfmmu_t *);
313 void		sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
314 static void	sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
315 static int	sfmmu_vacconflict_array(caddr_t, page_t *, int *);
316 static int	tst_tnc(page_t *pp, pgcnt_t);
317 static void	conv_tnc(page_t *pp, int);
318 
319 static struct ctx *sfmmu_get_ctx(sfmmu_t *);
320 static void	sfmmu_free_ctx(sfmmu_t *, struct ctx *);
321 static void	sfmmu_free_sfmmu(sfmmu_t *);
322 
323 static void	sfmmu_gettte(struct hat *, caddr_t, tte_t *);
324 static void	sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
325 static void	sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
326 
327 static cpuset_t	sfmmu_pageunload(page_t *, struct sf_hment *, int);
328 static void	hat_pagereload(struct page *, struct page *);
329 static cpuset_t	sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
330 static void	sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
331 static void	sfmmu_page_cache(page_t *, int, int, int);
332 
333 static void	sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
334 			pfn_t, int, int, int, int);
335 static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
336 			pfn_t, int);
337 static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
338 static void	sfmmu_tlb_range_demap(demap_range_t *);
339 static void	sfmmu_tlb_ctx_demap(sfmmu_t *);
340 static void	sfmmu_tlb_all_demap(void);
341 static void	sfmmu_tlb_swap_ctx(sfmmu_t *, struct ctx *);
342 static void	sfmmu_sync_mmustate(sfmmu_t *);
343 
344 static void 	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
345 static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
346 			sfmmu_t *);
347 static void	sfmmu_tsb_free(struct tsb_info *);
348 static void	sfmmu_tsbinfo_free(struct tsb_info *);
349 static int	sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
350 			sfmmu_t *);
351 
352 static void	sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
353 static int	sfmmu_select_tsb_szc(pgcnt_t);
354 static void	sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
355 #define		sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
356 	sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
357 #define		sfmmu_unload_tsb(sfmmup, vaddr, szc)    \
358 	sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
359 static void	sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
360 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
361     hatlock_t *, uint_t);
362 static void	sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
363 
364 static void	sfmmu_cache_flush(pfn_t, int);
365 void		sfmmu_cache_flushcolor(int, pfn_t);
366 static caddr_t	sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
367 			caddr_t, demap_range_t *, uint_t, int);
368 
369 static uint64_t	sfmmu_vtop_attr(uint_t, int mode, tte_t *);
370 static uint_t	sfmmu_ptov_attr(tte_t *);
371 static caddr_t	sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
372 			caddr_t, demap_range_t *, uint_t);
373 static uint_t	sfmmu_vtop_prot(uint_t, uint_t *);
374 static int	sfmmu_idcache_constructor(void *, void *, int);
375 static void	sfmmu_idcache_destructor(void *, void *);
376 static int	sfmmu_hblkcache_constructor(void *, void *, int);
377 static void	sfmmu_hblkcache_destructor(void *, void *);
378 static void	sfmmu_hblkcache_reclaim(void *);
379 static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
380 			struct hmehash_bucket *);
381 static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
382 
383 static void	sfmmu_reuse_ctx(struct ctx *, sfmmu_t *);
384 static void	sfmmu_disallow_ctx_steal(sfmmu_t *);
385 static void	sfmmu_allow_ctx_steal(sfmmu_t *);
386 
387 static void	sfmmu_rm_large_mappings(page_t *, int);
388 
389 static void	hat_lock_init(void);
390 static void	hat_kstat_init(void);
391 static int	sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
392 static void	sfmmu_check_page_sizes(sfmmu_t *, int);
393 static int	fnd_mapping_sz(page_t *);
394 static void	iment_add(struct ism_ment *,  struct hat *);
395 static void	iment_sub(struct ism_ment *, struct hat *);
396 static pgcnt_t	ism_tsb_entries(sfmmu_t *, int szc);
397 extern void	sfmmu_setup_tsbinfo(sfmmu_t *);
398 extern void	sfmmu_clear_utsbinfo(void);
399 
400 /* kpm prototypes */
401 static caddr_t	sfmmu_kpm_mapin(page_t *);
402 static void	sfmmu_kpm_mapout(page_t *, caddr_t);
403 static int	sfmmu_kpme_lookup(struct kpme *, page_t *);
404 static void	sfmmu_kpme_add(struct kpme *, page_t *);
405 static void	sfmmu_kpme_sub(struct kpme *, page_t *);
406 static caddr_t	sfmmu_kpm_getvaddr(page_t *, int *);
407 static int	sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
408 static int	sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
409 static void	sfmmu_kpm_vac_conflict(page_t *, caddr_t);
410 static void	sfmmu_kpm_pageunload(page_t *);
411 static void	sfmmu_kpm_vac_unload(page_t *, caddr_t);
412 static void	sfmmu_kpm_demap_large(caddr_t);
413 static void	sfmmu_kpm_demap_small(caddr_t);
414 static void	sfmmu_kpm_demap_tlbs(caddr_t, int);
415 static void	sfmmu_kpm_hme_unload(page_t *);
416 static kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
417 static void	sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp);
418 static void	sfmmu_kpm_page_cache(page_t *, int, int);
419 
420 /* kpm globals */
421 #ifdef	DEBUG
422 /*
423  * Enable trap level tsbmiss handling
424  */
425 int	kpm_tsbmtl = 1;
426 
427 /*
428  * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
429  * required TLB shootdowns in this case, so handle with care. Off by default.
430  */
431 int	kpm_tlb_flush;
432 #endif	/* DEBUG */
433 
434 static void	*sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
435 
436 #ifdef DEBUG
437 static void	sfmmu_check_hblk_flist();
438 #endif
439 
440 /*
441  * Semi-private sfmmu data structures.  Some of them are initialized in
442  * startup or in hat_init. Some of them are private but accessed by
443  * assembly code or mach_sfmmu.c
444  */
445 struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
446 struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
447 uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
448 uint64_t	khme_hash_pa;		/* PA of khme_hash */
449 int 		uhmehash_num;		/* # of buckets in user hash table */
450 int 		khmehash_num;		/* # of buckets in kernel hash table */
451 struct ctx	*ctxs;			/* used by <machine/mmu.c> */
452 uint_t		nctxs;			/* total number of contexts */
453 
454 int		cache;			/* describes system cache */
455 
456 caddr_t		ktsb_base;		/* kernel 8k-indexed tsb base address */
457 uint64_t	ktsb_pbase;		/* kernel 8k-indexed tsb phys address */
458 int		ktsb_szcode;		/* kernel 8k-indexed tsb size code */
459 int		ktsb_sz;		/* kernel 8k-indexed tsb size */
460 
461 caddr_t		ktsb4m_base;		/* kernel 4m-indexed tsb base address */
462 uint64_t	ktsb4m_pbase;		/* kernel 4m-indexed tsb phys address */
463 int		ktsb4m_szcode;		/* kernel 4m-indexed tsb size code */
464 int		ktsb4m_sz;		/* kernel 4m-indexed tsb size */
465 
466 uint64_t	kpm_tsbbase;		/* kernel seg_kpm 4M TSB base address */
467 int		kpm_tsbsz;		/* kernel seg_kpm 4M TSB size code */
468 uint64_t	kpmsm_tsbbase;		/* kernel seg_kpm 8K TSB base address */
469 int		kpmsm_tsbsz;		/* kernel seg_kpm 8K TSB size code */
470 
471 #ifndef sun4v
472 int		utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
473 int		utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
474 int		dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
475 caddr_t		utsb_vabase;		/* reserved kernel virtual memory */
476 caddr_t		utsb4m_vabase;		/* for trap handler TSB accesses */
477 #endif /* sun4v */
478 uint64_t	tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
479 vmem_t		*kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */
480 
481 /*
482  * Size to use for TSB slabs.  Future platforms that support page sizes
483  * larger than 4M may wish to change these values, and provide their own
484  * assembly macros for building and decoding the TSB base register contents.
485  */
486 uint_t	tsb_slab_size = MMU_PAGESIZE4M;
487 uint_t	tsb_slab_shift = MMU_PAGESHIFT4M;
488 uint_t	tsb_slab_ttesz = TTE4M;
489 uint_t	tsb_slab_mask = 0x1ff;	/* 4M page alignment for 8K pfn */
490 
491 /* largest TSB size to grow to, will be smaller on smaller memory systems */
492 int	tsb_max_growsize = UTSB_MAX_SZCODE;
493 
494 /*
495  * Tunable parameters dealing with TSB policies.
496  */
497 
498 /*
499  * This undocumented tunable forces all 8K TSBs to be allocated from
500  * the kernel heap rather than from the kmem_tsb_default_arena arenas.
501  */
502 #ifdef	DEBUG
503 int	tsb_forceheap = 0;
504 #endif	/* DEBUG */
505 
506 /*
507  * Decide whether to use per-lgroup arenas, or one global set of
508  * TSB arenas.  The default is not to break up per-lgroup, since
509  * most platforms don't recognize any tangible benefit from it.
510  */
511 int	tsb_lgrp_affinity = 0;
512 
513 /*
514  * Used for growing the TSB based on the process RSS.
515  * tsb_rss_factor is based on the smallest TSB, and is
516  * shifted by the TSB size to determine if we need to grow.
517  * The default will grow the TSB if the number of TTEs for
518  * this page size exceeds 75% of the number of TSB entries,
519  * which should _almost_ eliminate all conflict misses
520  * (at the expense of using up lots and lots of memory).
521  */
522 #define	TSB_RSS_FACTOR		(TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
523 #define	SFMMU_RSS_TSBSIZE(tsbszc)	(tsb_rss_factor << tsbszc)
524 #define	SELECT_TSB_SIZECODE(pgcnt) ( \
525 	(enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
526 	default_tsb_size)
527 #define	TSB_OK_SHRINK()	\
528 	(tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
529 #define	TSB_OK_GROW()	\
530 	(tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
531 
532 int	enable_tsb_rss_sizing = 1;
533 int	tsb_rss_factor	= (int)TSB_RSS_FACTOR;
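/*
 * Illustration (not compiled), assuming the minimum TSB holds 512
 * entries (an 8K TSB with 16-byte entries): tsb_rss_factor is then
 * 512 * 0.75 = 384, and SFMMU_RSS_TSBSIZE(2) = 384 << 2 = 1536, so a
 * process whose TSB already has size code 2 becomes a grow candidate
 * once it maps more than 1536 TTEs of a given page size.
 */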
534 
535 /* which TSB size code to use for new address spaces or if rss sizing off */
536 int default_tsb_size = TSB_8K_SZCODE;
537 
538 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
539 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
540 #define	TSB_ALLOC_HIWATER_FACTOR_DEFAULT	32
541 
542 #ifdef DEBUG
543 static int tsb_random_size = 0;	/* set to 1 to test random tsb sizes on alloc */
544 static int tsb_grow_stress = 0;	/* if set to 1, keep replacing TSB w/ random */
545 static int tsb_alloc_mtbf = 0;	/* fail allocation every n attempts */
546 static int tsb_alloc_fail_mtbf = 0;
547 static int tsb_alloc_count = 0;
548 #endif /* DEBUG */
549 
550 /* if set to 1, will remap valid TTEs when growing TSB. */
551 int tsb_remap_ttes = 1;
552 
553 /*
554  * If we have more than this many mappings, allocate a second TSB.
555  * This default is chosen because the I/D fully associative TLBs are
556  * assumed to have at least 8 available entries. Platforms with a
557  * larger fully-associative TLB could probably override the default.
558  */
559 int tsb_sectsb_threshold = 8;
560 
561 /*
562  * kstat data
563  */
564 struct sfmmu_global_stat sfmmu_global_stat;
565 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
566 
567 /*
568  * Global data
569  */
570 sfmmu_t 	*ksfmmup;		/* kernel's hat id */
571 struct ctx 	*kctx;			/* kernel's context */
572 
573 #ifdef DEBUG
574 static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
575 #endif
576 
577 /* sfmmu locking operations */
578 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
579 static int	sfmmu_mlspl_held(struct page *, int);
580 
581 static kmutex_t *sfmmu_page_enter(page_t *);
582 static void	sfmmu_page_exit(kmutex_t *);
583 static int	sfmmu_page_spl_held(struct page *);
584 
585 /* sfmmu internal locking operations - accessed directly */
586 static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
587 				kmutex_t **, kmutex_t **);
588 static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
589 static hatlock_t *
590 		sfmmu_hat_enter(sfmmu_t *);
591 static hatlock_t *
592 		sfmmu_hat_tryenter(sfmmu_t *);
593 static void	sfmmu_hat_exit(hatlock_t *);
594 static void	sfmmu_hat_lock_all(void);
595 static void	sfmmu_hat_unlock_all(void);
596 static void	sfmmu_ismhat_enter(sfmmu_t *, int);
597 static void	sfmmu_ismhat_exit(sfmmu_t *, int);
598 
599 /*
600  * Array of mutexes protecting a page's mapping list and p_nrm field.
601  *
602  * The hash function looks complicated, but is made up so that:
603  *
604  * "pp" not shifted, so adjacent pp values will hash to different cache lines
605  *  (8 byte alignment * 8 bytes/mutex == 64 byte coherency subblock)
606  *
607  * "pp" >> mml_shift, incorporates more source bits into the hash result
608  *
609  *  "& (mml_table_size - 1), should be faster than using remainder "%"
610  *
611  * Hopefully, mml_table, mml_table_sz and mml_shift are all in the same
612  * cacheline, since they get declared next to each other below. We'll trust
613  * ld not to do something random.
614  */
615 #ifdef	DEBUG
616 int mlist_hash_debug = 0;
617 #define	MLIST_HASH(pp)	(mlist_hash_debug ? &mml_table[0] : \
618 	&mml_table[((uintptr_t)(pp) + \
619 	((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
620 #else	/* !DEBUG */
621 #define	MLIST_HASH(pp)   &mml_table[ \
622 	((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
623 #endif	/* !DEBUG */
624 
625 kmutex_t		*mml_table;
626 uint_t			mml_table_sz;	/* must be a power of 2 */
627 uint_t			mml_shift;	/* log2(mml_table_sz) + 3 for align */
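/*
 * Illustration (not compiled): with mml_table_sz = 256, mml_shift is
 * 8 + 3 = 11, and for a hypothetical page_t at 0x7001c2a40:
 *
 *	(0x7001c2a40 + (0x7001c2a40 >> 11)) & 0xff
 *	= (0x40 + 0x85) & 0xff = 0xc5
 *
 * so bits well above the low byte of pp still influence the bucket.
 */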
628 
629 /*
630  * kpm_page lock hash.
631  * All slots should be used equally and 2 adjacent kpm_page_t's
632  * shouldn't have their mutexes in the same cache line.
633  */
634 #ifdef	DEBUG
635 int kpmp_hash_debug = 0;
636 #define	KPMP_HASH(kpp)	(kpmp_hash_debug ? &kpmp_table[0] : &kpmp_table[ \
637 	((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
638 	& (kpmp_table_sz - 1)])
639 #else	/* !DEBUG */
640 #define	KPMP_HASH(kpp)	&kpmp_table[ \
641 	((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
642 	& (kpmp_table_sz - 1)]
643 #endif	/* DEBUG */
644 
645 kpm_hlk_t	*kpmp_table;
646 uint_t		kpmp_table_sz;	/* must be a power of 2 */
647 uchar_t		kpmp_shift;
648 
649 #ifdef	DEBUG
650 #define	KPMP_SHASH(kpp)	(kpmp_hash_debug ? &kpmp_stable[0] : &kpmp_stable[ \
651 	(((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
652 	& (kpmp_stable_sz - 1)])
653 #else	/* !DEBUG */
654 #define	KPMP_SHASH(kpp)	&kpmp_stable[ \
655 	(((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
656 	& (kpmp_stable_sz - 1)]
657 #endif	/* DEBUG */
658 
659 kpm_shlk_t	*kpmp_stable;
660 uint_t		kpmp_stable_sz;	/* must be a power of 2 */
661 
662 /*
663  * SPL_HASH was improved to avoid false cache line sharing
664  */
665 #define	SPL_TABLE_SIZE	128
666 #define	SPL_MASK	(SPL_TABLE_SIZE - 1)
667 #define	SPL_SHIFT	7		/* log2(SPL_TABLE_SIZE) */
668 
669 #define	SPL_INDEX(pp) \
670 	((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
671 	((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
672 	(SPL_TABLE_SIZE - 1))
673 
674 #define	SPL_HASH(pp)    \
675 	(&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)
676 
677 static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];
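/*
 * Illustration (not compiled): SPL_INDEX() XORs two shifted copies of
 * the page_t address,
 *
 *	index = ((pp >> 7) ^ (pp >> 14)) & 127
 *
 * mixing bits 7..13 with bits 14..20 of pp for a better spread, while
 * the pad_mutex_t array keeps each lock on its own cache line.
 */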
678 
679 
680 /*
681  * hat_unload_callback() will group together callbacks in order
682  * to avoid xt_sync() calls.  This is the maximum size of the group.
683  */
684 #define	MAX_CB_ADDR	32
685 
686 #ifdef DEBUG
687 
688 /*
689  * Debugging trace ring buffer for stolen and freed ctxs.  The
690  * stolen_ctxs[] array is protected by the ctx_trace_mutex.
691  */
692 struct ctx_trace stolen_ctxs[TRSIZE];
693 struct ctx_trace *ctx_trace_first = &stolen_ctxs[0];
694 struct ctx_trace *ctx_trace_last = &stolen_ctxs[TRSIZE-1];
695 struct ctx_trace *ctx_trace_ptr = &stolen_ctxs[0];
696 kmutex_t ctx_trace_mutex;
697 uint_t	num_ctx_stolen = 0;
698 
699 int	ism_debug = 0;
700 
701 #endif /* DEBUG */
702 
703 tte_t	hw_tte;
704 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
705 
706 /*
707  * kpm virtual address to physical address
708  */
709 #define	SFMMU_KPM_VTOP(vaddr, paddr) {					\
710 	uintptr_t r, v;							\
711 									\
712 	r = ((vaddr) - kpm_vbase) >> (uintptr_t)kpm_size_shift;		\
713 	(paddr) = (vaddr) - kpm_vbase;					\
714 	if (r != 0) {							\
715 		v = ((uintptr_t)(vaddr) >> MMU_PAGESHIFT) &		\
716 		    vac_colors_mask;					\
717 		(paddr) -= r << kpm_size_shift;				\
718 		if (r > v)						\
719 			(paddr) += (r - v) << MMU_PAGESHIFT;		\
720 		else							\
721 			(paddr) -= r << MMU_PAGESHIFT;			\
722 	}								\
723 }
724 
725 /*
726  * Wrapper for vmem_xalloc since vmem_create only allows limited
727  * parameters for vm_source_alloc functions.  This function allows us
728  * to specify alignment consistent with the size of the object being
729  * allocated.
730  */
731 static void *
732 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
733 {
734 	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
735 }
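/*
 * For example, hat_init() below passes this wrapper to vmem_create()
 * as the source-alloc function for kmem_tsb_arena, so every
 * tsb_slab_size import from heap_arena is also tsb_slab_size aligned,
 * which lets each slab be mapped by a single large TTE.
 */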
736 
737 /* Common code for setting tsb_alloc_hiwater. */
738 #define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
739 		ptob(pages) / tsb_alloc_hiwater_factor
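/*
 * Illustration (not compiled): with the default factor of 32 and 8K
 * pages, a system with physmem = 0x100000 pages (8GB) gets
 * tsb_alloc_hiwater = ptob(0x100000) / 32 = 256MB of TSB headroom.
 */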
740 
741 /*
742  * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
743  * a single TSB.  physmem is the number of physical pages so we need physmem 8K
744  * TTEs to represent all those physical pages.  We round this up by using
745  * 1<<highbit().  To figure out which size code to use, remember that the size
746  * code is just an amount to shift the smallest TSB size to get the size of
747  * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
748  * highbit() - 1) to get the size code for the smallest TSB that can represent
749  * all of physical memory, while erring on the side of too much.
750  *
751  * If the computed size code is less than the current tsb_max_growsize, we set
752  * tsb_max_growsize to the computed size code.  In the case where the computed
753  * size code is greater than tsb_max_growsize, we have these restrictions that
754  * apply to increasing tsb_max_growsize:
755  *	1) TSBs can't grow larger than the TSB slab size
756  *	2) TSBs can't grow larger than UTSB_MAX_SZCODE.
757  */
758 #define	SFMMU_SET_TSB_MAX_GROWSIZE(pages) {				\
759 	int	i, szc;							\
760 									\
761 	i = highbit(pages);						\
762 	if ((1 << (i - 1)) == (pages))					\
763 		i--;		/* 2^n case, round down */		\
764 	szc = i - TSB_START_SIZE;					\
765 	if (szc < tsb_max_growsize)					\
766 		tsb_max_growsize = szc;					\
767 	else if ((szc > tsb_max_growsize) &&				\
768 	    (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \
769 		tsb_max_growsize = MIN(szc, UTSB_MAX_SZCODE);		\
770 }
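/*
 * Illustration (not compiled), assuming TSB_START_SIZE is 9 (i.e. the
 * smallest TSB holds 2^9 entries): for physmem = 2^17 pages of 8K
 * (1GB), highbit() returns 18, the exact power of two drops i to 17,
 * and szc = 17 - 9 = 8; a size code 8 TSB holds 512 << 8 = 2^17
 * entries, one 8K TTE per physical page, so tsb_max_growsize drops
 * to 8 on such a system (if currently larger).
 */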
771 
772 /*
773  * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
774  * tsb_info which handles that TTE size.
775  */
776 #define	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc)			\
777 	(tsbinfop) = (sfmmup)->sfmmu_tsb;				\
778 	ASSERT(sfmmu_hat_lock_held(sfmmup));				\
779 	if ((tte_szc) >= TTE4M)						\
780 		(tsbinfop) = (tsbinfop)->tsb_next;
781 
782 /*
783  * Return the number of mappings present in the HAT
784  * for a particular process and page size.
785  */
786 #define	SFMMU_TTE_CNT(sfmmup, szc)					\
787 	(sfmmup)->sfmmu_iblk?						\
788 	    (sfmmup)->sfmmu_ismttecnt[(szc)] +				\
789 	    (sfmmup)->sfmmu_ttecnt[(szc)] :				\
790 	    (sfmmup)->sfmmu_ttecnt[(szc)];
791 
792 /*
793  * Macro to use to unload entries from the TSB.
794  * It has knowledge of which page sizes get replicated in the TSB
795  * and will call the appropriate unload routine for the appropriate size.
796  */
797 #define	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp)				\
798 {									\
799 	int ttesz = get_hblk_ttesz(hmeblkp);				\
800 	if (ttesz == TTE8K || ttesz == TTE4M) {				\
801 		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
802 	} else {							\
803 		caddr_t sva = (caddr_t)get_hblk_base(hmeblkp);		\
804 		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
805 		ASSERT(addr >= sva && addr < eva);			\
806 		sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);	\
807 	}								\
808 }
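/*
 * A note on the range case above: page sizes other than 8K and 4M are
 * assumed here to be replicated in the TSB, one entry per 8K of
 * virtual span, so the whole hmeblk range must be walked; 8K and 4M
 * translations each occupy a single TSB entry.
 */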
809 
810 
811 /* Update tsb_alloc_hiwater after memory is configured. */
812 /*ARGSUSED*/
813 static void
814 sfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages)
815 {
816 	/* Assumes physmem has already been updated. */
817 	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
818 	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
819 }
820 
821 /*
822  * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
823  * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
824  * deleted.
825  */
826 /*ARGSUSED*/
827 static int
828 sfmmu_update_tsb_pre_del(void *arg, pgcnt_t delta_pages)
829 {
830 	return (0);
831 }
832 
833 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
834 /*ARGSUSED*/
835 static void
836 sfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
837 {
838 	/*
839 	 * Whether the delete was cancelled or not, just go ahead and update
840 	 * tsb_alloc_hiwater and tsb_max_growsize.
841 	 */
842 	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
843 	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
844 }
845 
846 static kphysm_setup_vector_t sfmmu_update_tsb_vec = {
847 	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
848 	sfmmu_update_tsb_post_add,	/* post_add */
849 	sfmmu_update_tsb_pre_del,	/* pre_del */
850 	sfmmu_update_tsb_post_del	/* post_del */
851 };
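/*
 * The vector above is registered in hat_init() below via
 * kphysm_setup_func_register(), so dynamic memory add/delete keeps
 * tsb_alloc_hiwater and tsb_max_growsize in step with physmem.
 */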
852 
853 
854 /*
855  * HME_BLK HASH PRIMITIVES
856  */
857 
858 /*
859  * Enter a hme on the mapping list for page pp.
860  * When large pages are more prevalent in the system we might want to
861  * keep the mapping list in ascending order by the hment size. For now,
862  * small pages are more frequent, so don't slow it down.
863  */
864 #define	HME_ADD(hme, pp)					\
865 {								\
866 	ASSERT(sfmmu_mlist_held(pp));				\
867 								\
868 	hme->hme_prev = NULL;					\
869 	hme->hme_next = pp->p_mapping;				\
870 	hme->hme_page = pp;					\
871 	if (pp->p_mapping) {					\
872 		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
873 		ASSERT(pp->p_share > 0);			\
874 	} else  {						\
875 		/* EMPTY */					\
876 		ASSERT(pp->p_share == 0);			\
877 	}							\
878 	pp->p_mapping = hme;					\
879 	pp->p_share++;						\
880 }
881 
882 /*
883  * Remove a hme from the mapping list for page pp.
884  * If we are unmapping a large translation, we need to make sure that the
885  * change is reflected in the corresponding bit of the p_index field.
886  */
887 #define	HME_SUB(hme, pp)					\
888 {								\
889 	ASSERT(sfmmu_mlist_held(pp));				\
890 	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
891 								\
892 	if (pp->p_mapping == NULL) {				\
893 		panic("hme_remove - no mappings");		\
894 	}							\
895 								\
896 	membar_stst();	/* ensure previous stores finish */	\
897 								\
898 	ASSERT(pp->p_share > 0);				\
899 	pp->p_share--;						\
900 								\
901 	if (hme->hme_prev) {					\
902 		ASSERT(pp->p_mapping != hme);			\
903 		ASSERT(hme->hme_prev->hme_page == pp ||		\
904 			IS_PAHME(hme->hme_prev));		\
905 		hme->hme_prev->hme_next = hme->hme_next;	\
906 	} else {						\
907 		ASSERT(pp->p_mapping == hme);			\
908 		pp->p_mapping = hme->hme_next;			\
909 		ASSERT((pp->p_mapping == NULL) ?		\
910 			(pp->p_share == 0) : 1);		\
911 	}							\
912 								\
913 	if (hme->hme_next) {					\
914 		ASSERT(hme->hme_next->hme_page == pp ||		\
915 			IS_PAHME(hme->hme_next));		\
916 		hme->hme_next->hme_prev = hme->hme_prev;	\
917 	}							\
918 								\
919 	/* zero out the entry */				\
920 	hme->hme_next = NULL;					\
921 	hme->hme_prev = NULL;					\
922 	hme->hme_page = NULL;					\
923 								\
924 	if (hme_size(hme) > TTE8K) {				\
925 		/* remove mappings for remainder of large pg */	\
926 		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
927 	}							\
928 }
929 
930 /*
931  * This macro returns the hment given the hme_blk and a vaddr.
932  * It assumes addr has already been checked to belong to hme_blk's
933  * range.
934  */
935 #define	HBLKTOHME(hment, hmeblkp, addr)					\
936 {									\
937 	int index;							\
938 	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
939 }
940 
941 /*
942  * Version of HBLKTOHME that also returns the index in hmeblkp
943  * of the hment.
944  */
945 #define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
946 {									\
947 	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
948 									\
949 	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
950 		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
951 	} else								\
952 		idx = 0;						\
953 									\
954 	(hment) = &(hmeblkp)->hblk_hme[idx];				\
955 }
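/*
 * Illustration (not compiled), assuming NHMENTS = 8 and 8K pages
 * (MMU_PAGESHIFT = 13): in a 64K-aligned 8K-TTE hmeblk, an addr
 * 0x6000 bytes into the block's range gives
 *
 *	idx = (addr >> 13) & 7 = 3
 *
 * i.e. hblk_hme[3]; blocks holding larger page sizes always use
 * hblk_hme[0].
 */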
956 
957 /*
958  * Disable any page sizes not supported by the CPU
959  */
960 void
961 hat_init_pagesizes()
962 {
963 	int 		i;
964 
965 	mmu_exported_page_sizes = 0;
966 	for (i = TTE8K; i < max_mmu_page_sizes; i++) {
967 		extern int	disable_text_largepages;
968 		extern int	disable_initdata_largepages;
969 
970 		szc_2_userszc[i] = (uint_t)-1;
971 		userszc_2_szc[i] = (uint_t)-1;
972 
973 		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
974 			disable_large_pages |= (1 << i);
975 			disable_ism_large_pages |= (1 << i);
976 			disable_text_largepages |= (1 << i);
977 			disable_initdata_largepages |= (1 << i);
978 		} else {
979 			szc_2_userszc[i] = mmu_exported_page_sizes;
980 			userszc_2_szc[mmu_exported_page_sizes] = i;
981 			mmu_exported_page_sizes++;
982 		}
983 	}
984 
985 	disable_auto_large_pages = disable_large_pages;
986 
987 	/*
988 	 * Initialize mmu-specific large page sizes.
989 	 */
990 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
991 	    (&mmu_large_pages_disabled)) {
992 		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
993 		disable_ism_large_pages |=
994 		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
995 		disable_auto_large_pages |=
996 		    mmu_large_pages_disabled(HAT_LOAD_AUTOLPG);
997 	}
998 
999 }
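/*
 * Illustration (not compiled): if the CPU exports 8K, 64K and 4M (and
 * nothing larger) but not 512K, the loop above leaves
 * mmu_exported_page_sizes = 3 with
 *
 *	szc_2_userszc[TTE8K]  = 0	userszc_2_szc[0] = TTE8K
 *	szc_2_userszc[TTE64K] = 1	userszc_2_szc[1] = TTE64K
 *	szc_2_userszc[TTE4M]  = 2	userszc_2_szc[2] = TTE4M
 *
 * szc_2_userszc[TTE512K] stays (uint_t)-1 and the TTE512K bit is set
 * in each disable_*_pages mask.
 */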
1000 
1001 /*
1002  * Initialize the hardware address translation structures.
1003  */
1004 void
1005 hat_init(void)
1006 {
1007 	struct ctx	*ctx;
1008 	struct ctx	*cur_ctx = NULL;
1009 	int 		i;
1010 
1011 	hat_lock_init();
1012 	hat_kstat_init();
1013 
1014 	/*
1015 	 * Hardware-only bits in a TTE
1016 	 */
1017 	MAKE_TTE_MASK(&hw_tte);
1018 
1019 	hat_init_pagesizes();
1020 
1021 	/* Initialize the hash locks */
1022 	for (i = 0; i < khmehash_num; i++) {
1023 		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1024 		    MUTEX_DEFAULT, NULL);
1025 	}
1026 	for (i = 0; i < uhmehash_num; i++) {
1027 		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1028 		    MUTEX_DEFAULT, NULL);
1029 	}
1030 	khmehash_num--;		/* make sure counter starts from 0 */
1031 	uhmehash_num--;		/* make sure counter starts from 0 */
1032 
1033 	/*
1034 	 * Initialize ctx structures and list lock.
1035 	 * We keep two lists of ctxs. The "free" list contains contexts
1036 	 * ready to use.  The "dirty" list contains contexts that are OK
1037 	 * to use after flushing the TLBs of any stale mappings.
1038 	 */
1039 	mutex_init(&ctx_list_lock, NULL, MUTEX_DEFAULT, NULL);
1040 	kctx = &ctxs[KCONTEXT];
1041 	ctx = &ctxs[NUM_LOCKED_CTXS];
1042 	ctxhand = ctxfree = ctx;		/* head of free list */
1043 	ctxdirty = NULL;
1044 	for (i = NUM_LOCKED_CTXS; i < nctxs; i++) {
1045 		cur_ctx = &ctxs[i];
1046 		cur_ctx->ctx_flags = CTX_FREE_FLAG;
1047 		cur_ctx->ctx_free = &ctxs[i + 1];
1048 	}
1049 	cur_ctx->ctx_free = NULL;		/* tail of free list */
1050 
1051 	/*
1052  * Initialize the ism mapping list lock.
1053 	 */
1054 	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1055 
1056 	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", sizeof (sfmmu_t),
1057 	    0, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1058 	    NULL, NULL, NULL, 0);
1059 
1060 	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1061 	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1062 
1063 	/*
1064 	 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1065 	 * from the heap when low on memory or when TSB_FORCEALLOC is
1066 	 * specified, don't use magazines to cache them--we want to return
1067 	 * them to the system as quickly as possible.
1068 	 */
1069 	sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1070 	    MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1071 	    static_arena, KMC_NOMAGAZINE);
1072 
1073 	/*
1074 	 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1075 	 * memory, which corresponds to the old static reserve for TSBs.
1076 	 * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
1077 	 * memory we'll allocate for TSB slabs; beyond this point TSB
1078 	 * allocations will be taken from the kernel heap (via
1079 	 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1080 	 * consumer.
1081 	 */
1082 	if (tsb_alloc_hiwater_factor == 0) {
1083 		tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1084 	}
1085 	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1086 
1087 	/* Set tsb_max_growsize. */
1088 	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1089 
1090 	/*
1091 	 * On smaller memory systems, allocate TSB memory in 512K chunks
1092 	 * instead of the default 4M slab size.  The trap handlers need to
1093 	 * be patched with the final slab shift since they need to be able
1094 	 * to construct the TSB pointer at runtime.
1095 	 */
1096 	if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1097 	    !(disable_large_pages & (1 << TTE512K))) {
1098 		tsb_slab_size = MMU_PAGESIZE512K;
1099 		tsb_slab_shift = MMU_PAGESHIFT512K;
1100 		tsb_slab_ttesz = TTE512K;
1101 		tsb_slab_mask = 0x3f;	/* 512K page alignment for 8K pfn */
1102 	}
1103 
1104 	/*
1105 	 * Set up memory callback to update tsb_alloc_hiwater and
1106 	 * tsb_max_growsize.
1107 	 */
1108 	i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0);
1109 	ASSERT(i == 0);
1110 
1111 	/*
1112 	 * kmem_tsb_arena is the source from which large TSB slabs are
1113 	 * drawn.  The quantum of this arena corresponds to the largest
1114 	 * TSB size we can dynamically allocate for user processes.
1115 	 * Currently it must also be a supported page size since we
1116 	 * use exactly one translation entry to map each slab page.
1117 	 *
1118 	 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1119 	 * which most TSBs are allocated.  Since most TSB allocations are
1120 	 * typically 8K we have a kmem cache we stack on top of each
1121 	 * kmem_tsb_default_arena to speed up those allocations.
1122 	 *
1123 	 * Note the two-level scheme of arenas is required only
1124 	 * because vmem_create doesn't allow us to specify alignment
1125 	 * requirements.  If this ever changes the code could be
1126 	 * simplified to use only one level of arenas.
1127 	 */
1128 	kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1129 	    sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena,
1130 	    0, VM_SLEEP);
1131 
1132 	if (tsb_lgrp_affinity) {
1133 		char s[50];
1134 		for (i = 0; i < NLGRPS_MAX; i++) {
1135 			(void) sprintf(s, "kmem_tsb_lgrp%d", i);
1136 			kmem_tsb_default_arena[i] =
1137 			    vmem_create(s, NULL, 0, PAGESIZE,
1138 			    sfmmu_tsb_segkmem_alloc, sfmmu_tsb_segkmem_free,
1139 			    kmem_tsb_arena, 0, VM_SLEEP | VM_BESTFIT);
1140 			(void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1141 			sfmmu_tsb_cache[i] = kmem_cache_create(s, PAGESIZE,
1142 			    PAGESIZE, NULL, NULL, NULL, NULL,
1143 			    kmem_tsb_default_arena[i], 0);
1144 		}
1145 	} else {
1146 		kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1147 		    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1148 		    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1149 		    VM_SLEEP | VM_BESTFIT);
1150 
1151 		sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1152 		    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1153 		    kmem_tsb_default_arena[0], 0);
1154 	}
1155 
1156 	sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1157 		HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1158 		sfmmu_hblkcache_destructor,
1159 		sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1160 		hat_memload_arena, KMC_NOHASH);
1161 
1162 	hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1163 	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
1164 
1165 	sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1166 		HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1167 		sfmmu_hblkcache_destructor,
1168 		NULL, (void *)HME1BLK_SZ,
1169 		hat_memload1_arena, KMC_NOHASH);
1170 
1171 	pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1172 		0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1173 
1174 	ism_blk_cache = kmem_cache_create("ism_blk_cache",
1175 		sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1176 		NULL, NULL, static_arena, KMC_NOHASH);
1177 
1178 	ism_ment_cache = kmem_cache_create("ism_ment_cache",
1179 		sizeof (ism_ment_t), 0, NULL, NULL,
1180 		NULL, NULL, NULL, 0);
1181 
1182 	/*
1183  * We grab the first hat for the kernel.
1184 	 */
1185 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
1186 	kas.a_hat = hat_alloc(&kas);
1187 	AS_LOCK_EXIT(&kas, &kas.a_lock);
1188 
1189 	/*
1190 	 * Initialize hblk_reserve.
1191 	 */
1192 	((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1193 				va_to_pa((caddr_t)hblk_reserve);
1194 
1195 #ifndef sun4v
1196 	/*
1197 	 * Reserve some kernel virtual address space for the locked TTEs
1198 	 * that allow us to probe the TSB from TL>0.
1199 	 */
1200 	utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1201 		0, 0, NULL, NULL, VM_SLEEP);
1202 	utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1203 		0, 0, NULL, NULL, VM_SLEEP);
1204 #endif
1205 
1206 	/*
1207 	 * The big page VAC handling code assumes VAC
1208 	 * will not be bigger than the smallest big
1209  * page, which is 64K.
1210 	 */
1211 	if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1212 		cmn_err(CE_PANIC, "VAC too big!");
1213 	}
1214 
1215 	(void) xhat_init();
1216 
1217 	uhme_hash_pa = va_to_pa(uhme_hash);
1218 	khme_hash_pa = va_to_pa(khme_hash);
1219 
1220 	/*
1221 	 * Initialize relocation locks. kpr_suspendlock is held
1222 	 * at PIL_MAX to prevent interrupts from pinning the holder
1223  * of a suspended TTE; an interrupt handler that touched the
1224  * suspended TTE could otherwise deadlock.
1225 	 */
1226 	mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1227 	mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1228 }
1229 
1230 /*
1231  * Initialize locking for the hat layer, called early during boot.
1232  */
1233 static void
1234 hat_lock_init()
1235 {
1236 	int i;
1237 	struct ctx *ctx;
1238 
1239 	/*
1240 	 * initialize the array of mutexes protecting a page's mapping
1241 	 * list and p_nrm field.
1242 	 */
1243 	for (i = 0; i < mml_table_sz; i++)
1244 		mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL);
1245 
1246 	if (kpm_enable) {
1247 		for (i = 0; i < kpmp_table_sz; i++) {
1248 			mutex_init(&kpmp_table[i].khl_mutex, NULL,
1249 			    MUTEX_DEFAULT, NULL);
1250 		}
1251 	}
1252 
1253 	/*
1254 	 * Initialize array of mutex locks that protects sfmmu fields and
1255 	 * TSB lists.
1256 	 */
1257 	for (i = 0; i < SFMMU_NUM_LOCK; i++)
1258 		mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1259 		    NULL);
1260 
1261 #ifdef	DEBUG
1262 	mutex_init(&ctx_trace_mutex, NULL, MUTEX_DEFAULT, NULL);
1263 #endif	/* DEBUG */
1264 
1265 	for (ctx = ctxs, i = 0; i < nctxs; i++, ctx++) {
1266 		rw_init(&ctx->ctx_rwlock, NULL, RW_DEFAULT, NULL);
1267 	}
1268 }
1269 
1270 extern caddr_t kmem64_base, kmem64_end;
1271 
1272 #define	SFMMU_KERNEL_MAXVA \
1273 	(kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1274 
1275 /*
1276  * Allocate a hat structure.
1277  * Called when an address space first uses a hat.
1278  */
1279 struct hat *
1280 hat_alloc(struct as *as)
1281 {
1282 	sfmmu_t *sfmmup;
1283 	struct ctx *ctx;
1284 	int i;
1285 	extern uint_t get_color_start(struct as *);
1286 
1287 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
1288 	sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1289 	sfmmup->sfmmu_as = as;
1290 	sfmmup->sfmmu_flags = 0;
1291 
1292 	if (as == &kas) {
1293 		ctx = kctx;
1294 		ksfmmup = sfmmup;
1295 		sfmmup->sfmmu_cnum = ctxtoctxnum(ctx);
1296 		ASSERT(sfmmup->sfmmu_cnum == KCONTEXT);
1297 		sfmmup->sfmmu_cext = 0;
1298 		ctx->ctx_sfmmu = sfmmup;
1299 		ctx->ctx_flags = 0;
1300 		sfmmup->sfmmu_clrstart = 0;
1301 		sfmmup->sfmmu_tsb = NULL;
1302 		/*
1303 		 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1304 		 * to setup tsb_info for ksfmmup.
1305 		 */
1306 	} else {
1307 
1308 		/*
1309 		 * Just set to invalid ctx. When it faults, it will
1310  * get a valid ctx. This avoids the situation where
1311  * we get a ctx here, it gets stolen before we run,
1312  * and we then fault anyway and have to get
1313  * another ctx.
1314 		 */
1315 		sfmmup->sfmmu_cnum = INVALID_CONTEXT;
1316 		sfmmup->sfmmu_cext = 0;
1317 		/* initialize original physical page coloring bin */
1318 		sfmmup->sfmmu_clrstart = get_color_start(as);
1319 #ifdef DEBUG
1320 		if (tsb_random_size) {
1321 			uint32_t randval = (uint32_t)gettick() >> 4;
1322 			int size = randval % (tsb_max_growsize + 1);
1323 
1324 			/* chose a random tsb size for stress testing */
1325 			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1326 			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
1327 		} else
1328 #endif /* DEBUG */
1329 			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1330 			    default_tsb_size,
1331 			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
1332 		sfmmup->sfmmu_flags = HAT_SWAPPED;
1333 		ASSERT(sfmmup->sfmmu_tsb != NULL);
1334 	}
1335 	sfmmu_setup_tsbinfo(sfmmup);
1336 	for (i = 0; i < max_mmu_page_sizes; i++) {
1337 		sfmmup->sfmmu_ttecnt[i] = 0;
1338 		sfmmup->sfmmu_ismttecnt[i] = 0;
1339 		sfmmup->sfmmu_pgsz[i] = TTE8K;
1340 	}
1341 
1342 	sfmmup->sfmmu_iblk = NULL;
1343 	sfmmup->sfmmu_ismhat = 0;
1344 	sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1345 	if (sfmmup == ksfmmup) {
1346 		CPUSET_ALL(sfmmup->sfmmu_cpusran);
1347 	} else {
1348 		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1349 	}
1350 	sfmmup->sfmmu_free = 0;
1351 	sfmmup->sfmmu_rmstat = 0;
1352 	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1353 	sfmmup->sfmmu_xhat_provider = NULL;
1354 	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1355 	return (sfmmup);
1356 }
1357 
1358 /*
1359  * hat_setup() makes an address space context the current active one.
1360  * In sfmmu this translates to setting the secondary context with the
1361  * corresponding context.
1362  */
1363 void
1364 hat_setup(struct hat *sfmmup, int allocflag)
1365 {
1366 	struct ctx *ctx;
1367 	uint_t ctx_num;
1368 	hatlock_t *hatlockp;
1369 
1370 	/* Init needs some special treatment. */
1371 	if (allocflag == HAT_INIT) {
1372 		/*
1373 		 * Make sure that we have
1374 		 * 1. a TSB
1375 		 * 2. a valid ctx that doesn't get stolen after this point.
1376 		 */
1377 		hatlockp = sfmmu_hat_enter(sfmmup);
1378 
1379 		/*
1380 		 * Swap in the TSB.  hat_init() allocates tsbinfos without
1381 		 * TSBs, but we need one for init, since the kernel does some
1382 		 * special things to set up its stack and needs the TSB to
1383 		 * resolve page faults.
1384 		 */
1385 		sfmmu_tsb_swapin(sfmmup, hatlockp);
1386 
1387 		sfmmu_disallow_ctx_steal(sfmmup);
1388 
1389 		kpreempt_disable();
1390 
1391 		ctx = sfmmutoctx(sfmmup);
1392 		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1393 		ctx_num = ctxtoctxnum(ctx);
1394 		ASSERT(sfmmup == ctx->ctx_sfmmu);
1395 		ASSERT(ctx_num >= NUM_LOCKED_CTXS);
1396 		sfmmu_setctx_sec(ctx_num);
1397 		sfmmu_load_mmustate(sfmmup);
1398 
1399 		kpreempt_enable();
1400 
1401 		/*
1402 		 * Allow ctx to be stolen.
1403 		 */
1404 		sfmmu_allow_ctx_steal(sfmmup);
1405 		sfmmu_hat_exit(hatlockp);
1406 	} else {
1407 		ASSERT(allocflag == HAT_ALLOC);
1408 
1409 		hatlockp = sfmmu_hat_enter(sfmmup);
1410 		kpreempt_disable();
1411 
1412 		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1413 		sfmmu_setctx_sec(INVALID_CONTEXT);
1414 		sfmmu_clear_utsbinfo();
1415 
1416 		kpreempt_enable();
1417 		sfmmu_hat_exit(hatlockp);
1418 	}
1419 }
1420 
1421 /*
1422  * Free all the translation resources for the specified address space.
1423  * Called from as_free when an address space is being destroyed.
1424  */
1425 void
1426 hat_free_start(struct hat *sfmmup)
1427 {
1428 	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
1429 	ASSERT(sfmmup != ksfmmup);
1430 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1431 
1432 	sfmmup->sfmmu_free = 1;
1433 }
1434 
1435 void
1436 hat_free_end(struct hat *sfmmup)
1437 {
1438 	int i;
1439 
1440 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1441 	if (sfmmup->sfmmu_ismhat) {
1442 		for (i = 0; i < mmu_page_sizes; i++) {
1443 			sfmmup->sfmmu_ttecnt[i] = 0;
1444 			sfmmup->sfmmu_ismttecnt[i] = 0;
1445 		}
1446 	} else {
1447 		/* EMPTY */
1448 		ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1449 		ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1450 		ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1451 		ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1452 		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1453 		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1454 	}
1455 
1456 	if (sfmmup->sfmmu_rmstat) {
1457 		hat_freestat(sfmmup->sfmmu_as, NULL);
1458 	}
1459 	if (!delay_tlb_flush) {
1460 		sfmmu_tlb_ctx_demap(sfmmup);
1461 		xt_sync(sfmmup->sfmmu_cpusran);
1462 	} else {
1463 		SFMMU_STAT(sf_tlbflush_deferred);
1464 	}
1465 	sfmmu_free_ctx(sfmmup, sfmmutoctx(sfmmup));
1466 	while (sfmmup->sfmmu_tsb != NULL) {
1467 		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1468 		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1469 		sfmmup->sfmmu_tsb = next;
1470 	}
1471 	sfmmu_free_sfmmu(sfmmup);
1472 
1473 	kmem_cache_free(sfmmuid_cache, sfmmup);
1474 }
1475 
1476 /*
1477  * Set up any translation structures, for the specified address space,
1478  * that are needed or preferred when the process is being swapped in.
1479  */
1480 /* ARGSUSED */
1481 void
1482 hat_swapin(struct hat *hat)
1483 {
1484 	ASSERT(hat->sfmmu_xhat_provider == NULL);
1485 }
1486 
1487 /*
1488  * Free all of the translation resources, for the specified address space,
1489  * that can be freed while the process is swapped out. Called from as_swapout.
1490  * Also, free up the ctx that this process was using.
1491  */
1492 void
1493 hat_swapout(struct hat *sfmmup)
1494 {
1495 	struct hmehash_bucket *hmebp;
1496 	struct hme_blk *hmeblkp;
1497 	struct hme_blk *pr_hblk = NULL;
1498 	struct hme_blk *nx_hblk;
1499 	struct ctx *ctx;
1500 	int cnum;
1501 	int i;
1502 	uint64_t hblkpa, prevpa, nx_pa;
1503 	struct hme_blk *list = NULL;
1504 	hatlock_t *hatlockp;
1505 	struct tsb_info *tsbinfop;
1506 	struct free_tsb {
1507 		struct free_tsb *next;
1508 		struct tsb_info *tsbinfop;
1509 	};			/* free list of TSBs */
1510 	struct free_tsb *freelist, *last, *next;
1511 
1512 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1513 	SFMMU_STAT(sf_swapout);
1514 
1515 	/*
1516 	 * There is no way to go from an as to all its translations in sfmmu.
1517 	 * Here is one of the times when we take the big hit and traverse
1518 	 * the hash looking for hme_blks to free up.  Not only do we free up
1519  * this as's hme_blks but also any that are already free.  We are obviously
1520 	 * swapping because we need memory so let's free up as much
1521 	 * as we can.
1522 	 *
1523 	 * Note that we don't flush TLB/TSB here -- it's not necessary
1524 	 * because:
1525 	 *  1) we free the ctx we're using and throw away the TSB(s);
1526 	 *  2) processes aren't runnable while being swapped out.
1527 	 */
1528 	ASSERT(sfmmup != KHATID);
1529 	for (i = 0; i <= UHMEHASH_SZ; i++) {
1530 		hmebp = &uhme_hash[i];
1531 		SFMMU_HASH_LOCK(hmebp);
1532 		hmeblkp = hmebp->hmeblkp;
1533 		hblkpa = hmebp->hmeh_nextpa;
1534 		prevpa = 0;
1535 		pr_hblk = NULL;
1536 		while (hmeblkp) {
1537 
1538 			ASSERT(!hmeblkp->hblk_xhat_bit);
1539 
1540 			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
1541 			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
1542 				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
1543 					(caddr_t)get_hblk_base(hmeblkp),
1544 					get_hblk_endaddr(hmeblkp),
1545 					NULL, HAT_UNLOAD);
1546 			}
1547 			nx_hblk = hmeblkp->hblk_next;
1548 			nx_pa = hmeblkp->hblk_nextpa;
1549 			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
1550 				ASSERT(!hmeblkp->hblk_lckcnt);
1551 				sfmmu_hblk_hash_rm(hmebp, hmeblkp,
1552 					prevpa, pr_hblk);
1553 				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
1554 			} else {
1555 				pr_hblk = hmeblkp;
1556 				prevpa = hblkpa;
1557 			}
1558 			hmeblkp = nx_hblk;
1559 			hblkpa = nx_pa;
1560 		}
1561 		SFMMU_HASH_UNLOCK(hmebp);
1562 	}
1563 
1564 	sfmmu_hblks_list_purge(&list);
1565 
1566 	/*
1567 	 * Now free up the ctx so that others can reuse it.
1568 	 */
1569 	hatlockp = sfmmu_hat_enter(sfmmup);
1570 	ctx = sfmmutoctx(sfmmup);
1571 	cnum = ctxtoctxnum(ctx);
1572 
1573 	if (cnum != INVALID_CONTEXT) {
1574 		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
1575 		if (sfmmup->sfmmu_cnum == cnum) {
1576 			sfmmu_reuse_ctx(ctx, sfmmup);
1577 			/*
1578 			 * Put ctx back to the free list.
1579 			 */
1580 			mutex_enter(&ctx_list_lock);
1581 			CTX_SET_FLAGS(ctx, CTX_FREE_FLAG);
1582 			ctx->ctx_free = ctxfree;
1583 			ctxfree = ctx;
1584 			mutex_exit(&ctx_list_lock);
1585 		}
1586 		rw_exit(&ctx->ctx_rwlock);
1587 	}
1588 
1589 	/*
1590 	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
1591 	 * If TSBs were never swapped in, just return.
1592 	 * This implies that we don't support partial swapping
1593 	 * of TSBs -- either all are swapped out, or none are.
1594 	 *
1595 	 * We must hold the HAT lock here to prevent racing with another
1596 	 * thread trying to unmap TTEs from the TSB or running the post-
1597 	 * relocator after relocating the TSB's memory.  Unfortunately, we
1598 	 * can't free memory while holding the HAT lock or we could
1599 	 * deadlock, so we build a list of TSBs to be freed after marking
1600 	 * the tsbinfos as swapped out and free them after dropping the
1601 	 * lock.
1602 	 */
1603 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
1604 		sfmmu_hat_exit(hatlockp);
1605 		return;
1606 	}
1607 
1608 	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
1609 	last = freelist = NULL;
1610 	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
1611 	    tsbinfop = tsbinfop->tsb_next) {
1612 		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
1613 
1614 		/*
1615 		 * Cast the TSB into a struct free_tsb and put it on the free
1616 		 * list.
1617 		 */
1618 		if (freelist == NULL) {
1619 			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
1620 		} else {
1621 			last->next = (struct free_tsb *)tsbinfop->tsb_va;
1622 			last = last->next;
1623 		}
1624 		last->next = NULL;
1625 		last->tsbinfop = tsbinfop;
1626 		tsbinfop->tsb_flags |= TSB_SWAPPED;
1627 		/*
1628 		 * Zero out the TTE to clear the valid bit.
1629 		 * Note we can't use a value like 0xbad because we want to
1630 		 * ensure diagnostic bits are NEVER set on TTEs that might
1631 		 * be loaded.  The intent is to catch any invalid access
1632 		 * to the swapped TSB, such as a thread running with a valid
1633 		 * context without first calling sfmmu_tsb_swapin() to
1634 		 * allocate TSB memory.
1635 		 */
1636 		tsbinfop->tsb_tte.ll = 0;
1637 	}
1638 
1639 	/* Now we can drop the lock and free the TSB memory. */
1640 	sfmmu_hat_exit(hatlockp);
1641 	for (; freelist != NULL; freelist = next) {
1642 		next = freelist->next;
1643 		sfmmu_tsb_free(freelist->tsbinfop);
1644 	}
1645 }
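
/*
 * [Editorial sketch -- not part of the original source.]  The deferred-
 * free pattern used by hat_swapout() above -- threading a list through
 * the very buffers that are about to be freed -- is useful whenever
 * memory must not be freed while a lock is held.  A minimal generic
 * version, assuming only that each buffer is at least as large as a
 * pointer:
 */
#if 0
struct deferred {
	struct deferred *next;		/* overlays the buffer itself */
};

/* Push buf onto the list; no allocation is needed while locked. */
static struct deferred *
defer_free(struct deferred *head, void *buf)
{
	struct deferred *d = (struct deferred *)buf;

	d->next = head;
	return (d);
}

/* After dropping the lock, walk the list and free for real. */
static void
drain_deferred(struct deferred *head, void (*freefn)(void *))
{
	struct deferred *next;

	for (; head != NULL; head = next) {
		next = head->next;	/* read before the buffer is freed */
		freefn(head);
	}
}
#endif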
1646 
1647 /*
1648  * Duplicate the translations of an as into another newas
1649  */
1650 /* ARGSUSED */
1651 int
1652 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
1653 	uint_t flag)
1654 {
1655 	ASSERT(hat->sfmmu_xhat_provider == NULL);
1656 	ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW));
1657 
1658 	if (flag == HAT_DUP_COW) {
1659 		panic("hat_dup: HAT_DUP_COW not supported");
1660 	}
1661 	return (0);
1662 }
1663 
1664 /*
1665  * Set up addr to map to page pp with protection prot.
1666  * As an optimization we also load the TSB with the
1667  * corresponding tte, but it is no big deal if the tte gets kicked out.
1668  */
1669 void
1670 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
1671 	uint_t attr, uint_t flags)
1672 {
1673 	tte_t tte;
1674 
1675 
1676 	ASSERT(hat != NULL);
1677 	ASSERT(PAGE_LOCKED(pp));
1678 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
1679 	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
1680 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
1681 
1682 	if (PP_ISFREE(pp)) {
1683 		panic("hat_memload: loading a mapping to free page %p",
1684 		    (void *)pp);
1685 	}
1686 
1687 	if (hat->sfmmu_xhat_provider) {
1688 		XHAT_MEMLOAD(hat, addr, pp, attr, flags);
1689 		return;
1690 	}
1691 
1692 	ASSERT((hat == ksfmmup) ||
1693 		AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
1694 
1695 	if (flags & ~SFMMU_LOAD_ALLFLAG)
1696 		cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
1697 		    flags & ~SFMMU_LOAD_ALLFLAG);
1698 
1699 	if (hat->sfmmu_rmstat)
1700 		hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
1701 
1702 #if defined(SF_ERRATA_57)
1703 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
1704 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
1705 	    !(flags & HAT_LOAD_SHARE)) {
1706 		cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
1707 		    " page executable");
1708 		attr &= ~PROT_EXEC;
1709 	}
1710 #endif
1711 
1712 	sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
1713 	(void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags);
1714 
1715 	/*
1716 	 * Check TSB and TLB page sizes.
1717 	 */
1718 	if ((flags & HAT_LOAD_SHARE) == 0) {
1719 		sfmmu_check_page_sizes(hat, 1);
1720 	}
1721 }
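
/*
 * [Editorial example -- not part of the original source.]  A typical
 * hat_memload() call from a fault path; "seg", "addr", and "pp" are
 * hypothetical names from the surrounding segment driver code, which
 * must hold the page lock as asserted above.
 */
#if 0
	ASSERT(PAGE_LOCKED(pp));
	hat_memload(seg->s_as->a_hat, addr, pp,
	    PROT_READ | PROT_WRITE, HAT_LOAD);
#endif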
1722 
1723 /*
1724  * hat_devload can be called to map real memory (e.g.,
1725  * /dev/kmem); even though hat_devload will determine that the pfn
1726  * is for memory, it will be unable to get a shared lock on the
1727  * page (because someone else has it exclusively) and will
1728  * pass dp = NULL.  If tteload doesn't get a non-NULL
1729  * page pointer, it can't cache the memory.
1730  */
1731 void
1732 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
1733 	uint_t attr, int flags)
1734 {
1735 	tte_t tte;
1736 	struct page *pp = NULL;
1737 	int use_lgpg = 0;
1738 
1739 	ASSERT(hat != NULL);
1740 
1741 	if (hat->sfmmu_xhat_provider) {
1742 		XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
1743 		return;
1744 	}
1745 
1746 	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
1747 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
1748 	ASSERT((hat == ksfmmup) ||
1749 		AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
1750 	if (len == 0)
1751 		panic("hat_devload: zero len");
1752 	if (flags & ~SFMMU_LOAD_ALLFLAG)
1753 		cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
1754 		    flags & ~SFMMU_LOAD_ALLFLAG);
1755 
1756 #if defined(SF_ERRATA_57)
1757 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
1758 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
1759 	    !(flags & HAT_LOAD_SHARE)) {
1760 		cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
1761 		    " page executable");
1762 		attr &= ~PROT_EXEC;
1763 	}
1764 #endif
1765 
1766 	/*
1767 	 * If it's a memory page, find its pp.
1768 	 */
1769 	if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
1770 		pp = page_numtopp_nolock(pfn);
1771 		if (pp == NULL) {
1772 			flags |= HAT_LOAD_NOCONSIST;
1773 		} else {
1774 			if (PP_ISFREE(pp)) {
1775 				panic("hat_memload: loading "
1776 				    "a mapping to free page %p",
1777 				    (void *)pp);
1778 			}
1779 			if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1780 				panic("hat_memload: loading a mapping "
1781 				    "to unlocked relocatable page %p",
1782 				    (void *)pp);
1783 			}
1784 			ASSERT(len == MMU_PAGESIZE);
1785 		}
1786 	}
1787 
1788 	if (hat->sfmmu_rmstat)
1789 		hat_resvstat(len, hat->sfmmu_as, addr);
1790 
1791 	if (flags & HAT_LOAD_NOCONSIST) {
1792 		attr |= SFMMU_UNCACHEVTTE;
1793 		use_lgpg = 1;
1794 	}
1795 	if (!pf_is_memory(pfn)) {
1796 		attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
1797 		use_lgpg = 1;
1798 		switch (attr & HAT_ORDER_MASK) {
1799 			case HAT_STRICTORDER:
1800 			case HAT_UNORDERED_OK:
1801 				/*
1802 				 * We set the side-effect bit for all non-
1803 				 * memory mappings unless merging is OK.
1804 				 */
1805 				attr |= SFMMU_SIDEFFECT;
1806 				break;
1807 			case HAT_MERGING_OK:
1808 			case HAT_LOADCACHING_OK:
1809 			case HAT_STORECACHING_OK:
1810 				break;
1811 			default:
1812 				panic("hat_devload: bad attr");
1813 				break;
1814 		}
1815 	}
1816 	while (len) {
1817 		if (!use_lgpg) {
1818 			sfmmu_memtte(&tte, pfn, attr, TTE8K);
1819 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1820 			    flags);
1821 			len -= MMU_PAGESIZE;
1822 			addr += MMU_PAGESIZE;
1823 			pfn++;
1824 			continue;
1825 		}
1826 		/*
1827 		 * Try to use large pages; check va/pa alignments.
1828 		 * Note that 32M/256M page sizes are not (yet) supported.
1829 		 */
1830 		if ((len >= MMU_PAGESIZE4M) &&
1831 		    !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
1832 		    !(disable_large_pages & (1 << TTE4M)) &&
1833 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
1834 			sfmmu_memtte(&tte, pfn, attr, TTE4M);
1835 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1836 			    flags);
1837 			len -= MMU_PAGESIZE4M;
1838 			addr += MMU_PAGESIZE4M;
1839 			pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
1840 		} else if ((len >= MMU_PAGESIZE512K) &&
1841 		    !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
1842 		    !(disable_large_pages & (1 << TTE512K)) &&
1843 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
1844 			sfmmu_memtte(&tte, pfn, attr, TTE512K);
1845 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1846 			    flags);
1847 			len -= MMU_PAGESIZE512K;
1848 			addr += MMU_PAGESIZE512K;
1849 			pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
1850 		} else if ((len >= MMU_PAGESIZE64K) &&
1851 		    !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
1852 		    !(disable_large_pages & (1 << TTE64K)) &&
1853 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
1854 			sfmmu_memtte(&tte, pfn, attr, TTE64K);
1855 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1856 			    flags);
1857 			len -= MMU_PAGESIZE64K;
1858 			addr += MMU_PAGESIZE64K;
1859 			pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
1860 		} else {
1861 			sfmmu_memtte(&tte, pfn, attr, TTE8K);
1862 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1863 			    flags);
1864 			len -= MMU_PAGESIZE;
1865 			addr += MMU_PAGESIZE;
1866 			pfn++;
1867 		}
1868 	}
1869 
1870 	/*
1871 	 * Check TSB and TLB page sizes.
1872 	 */
1873 	if ((flags & HAT_LOAD_SHARE) == 0) {
1874 		sfmmu_check_page_sizes(hat, 1);
1875 	}
1876 }
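
/*
 * [Editorial sketch -- not part of the original source.]  The size
 * cascade in hat_devload() above amounts to picking, at each step, the
 * largest enabled page size whose alignment constraints hold for both
 * the virtual and the physical address and that still fits in the
 * remaining length.  Expressed as a loop, using TTEBYTES() and
 * TTE_PAGE_OFFSET() as they are used elsewhere in this file:
 */
#if 0
static int
pick_pagesize(caddr_t addr, pfn_t pfn, size_t len)
{
	int sz;

	for (sz = TTE4M; sz > TTE8K; sz--) {
		if ((disable_large_pages & (1 << sz)) ||
		    (len < TTEBYTES(sz)) ||
		    ((uintptr_t)addr & TTE_PAGE_OFFSET(sz)) ||
		    (mmu_ptob(pfn) & TTE_PAGE_OFFSET(sz)))
			continue;	/* constraint failed; try smaller */
		return (sz);
	}
	return (TTE8K);			/* always legal */
}
#endif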
1877 
1878 /*
1879  * Map the largest extent possible out of the page array.  The array
1880  * may NOT be in order.  The largest possible mapping a page can have
1881  * is specified in the p_szc field.  The p_szc field
1882  * cannot change as long as there are any mappings (large or small)
1883  * to any of the pages that make up the large page (i.e., any
1884  * promotion/demotion of page size is not up to the hat but up to
1885  * the page free list manager).  The array
1886  * should consist of properly aligned, contiguous pages that are
1887  * part of a big page, for a large mapping to be
1888  * created.
1889 void
1890 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
1891 	struct page **pps, uint_t attr, uint_t flags)
1892 {
1893 	int  ttesz;
1894 	size_t mapsz;
1895 	pgcnt_t	numpg, npgs;
1896 	tte_t tte;
1897 	page_t *pp;
1898 	int large_pages_disable;
1899 
1900 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
1901 
1902 	if (hat->sfmmu_xhat_provider) {
1903 		XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
1904 		return;
1905 	}
1906 
1907 	if (hat->sfmmu_rmstat)
1908 		hat_resvstat(len, hat->sfmmu_as, addr);
1909 
1910 #if defined(SF_ERRATA_57)
1911 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
1912 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
1913 	    !(flags & HAT_LOAD_SHARE)) {
1914 		cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
1915 		    "user page executable");
1916 		attr &= ~PROT_EXEC;
1917 	}
1918 #endif
1919 
1920 	/* Get number of pages */
1921 	npgs = len >> MMU_PAGESHIFT;
1922 
1923 	if (flags & HAT_LOAD_SHARE) {
1924 		large_pages_disable = disable_ism_large_pages;
1925 	} else {
1926 		large_pages_disable = disable_large_pages;
1927 	}
1928 
1929 	if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
1930 		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs);
1931 		return;
1932 	}
1933 
1934 	while (npgs >= NHMENTS) {
1935 		pp = *pps;
1936 		for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
1937 			/*
1938 			 * Check if this page size is disabled.
1939 			 */
1940 			if (large_pages_disable & (1 << ttesz))
1941 				continue;
1942 
1943 			numpg = TTEPAGES(ttesz);
1944 			mapsz = numpg << MMU_PAGESHIFT;
1945 			if ((npgs >= numpg) &&
1946 			    IS_P2ALIGNED(addr, mapsz) &&
1947 			    IS_P2ALIGNED(pp->p_pagenum, numpg)) {
1948 				/*
1949 				 * At this point we have enough pages and
1950 				 * we know the virtual address and the pfn
1951 				 * are properly aligned.  We still need
1952 				 * to check for physical contiguity but since
1953 				 * it is very likely that this is the case
1954 				 * we will assume they are so and undo
1955 				 * the request if necessary.  It would
1956 				 * be great if we could get a hint flag
1957 				 * like HAT_CONTIG which would tell us
1958 				 * the pages are contiguous for sure.
1959 				 */
1960 				sfmmu_memtte(&tte, (*pps)->p_pagenum,
1961 					attr, ttesz);
1962 				if (!sfmmu_tteload_array(hat, &tte, addr,
1963 				    pps, flags)) {
1964 					break;
1965 				}
1966 			}
1967 		}
1968 		if (ttesz == TTE8K) {
1969 			/*
1970 			 * We were not able to map the array using a large
1971 			 * page; batch an hmeblk's worth (or a fraction) at a time.
1972 			 */
1973 			numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
1974 				& (NHMENTS-1);
1975 			numpg = NHMENTS - numpg;
1976 			ASSERT(numpg <= npgs);
1977 			mapsz = numpg * MMU_PAGESIZE;
1978 			sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
1979 							numpg);
1980 		}
1981 		addr += mapsz;
1982 		npgs -= numpg;
1983 		pps += numpg;
1984 	}
1985 
1986 	if (npgs) {
1987 		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs);
1988 	}
1989 
1990 	/*
1991 	 * Check TSB and TLB page sizes.
1992 	 */
1993 	if ((flags & HAT_LOAD_SHARE) == 0) {
1994 		sfmmu_check_page_sizes(hat, 1);
1995 	}
1996 }
1997 
1998 /*
1999  * Function tries to batch 8K pages into the same hme blk.
2000  */
2001 static void
2002 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2003 		    uint_t attr, uint_t flags, pgcnt_t npgs)
2004 {
2005 	tte_t	tte;
2006 	page_t *pp;
2007 	struct hmehash_bucket *hmebp;
2008 	struct hme_blk *hmeblkp;
2009 	int	index;
2010 
2011 	while (npgs) {
2012 		/*
2013 		 * Acquire the hash bucket.
2014 		 */
2015 		hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K);
2016 		ASSERT(hmebp);
2017 
2018 		/*
2019 		 * Find the hment block.
2020 		 */
2021 		hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2022 				TTE8K, flags);
2023 		ASSERT(hmeblkp);
2024 
2025 		do {
2026 			/*
2027 			 * Make the tte.
2028 			 */
2029 			pp = *pps;
2030 			sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2031 
2032 			/*
2033 			 * Add the translation.
2034 			 */
2035 			(void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2036 					vaddr, pps, flags);
2037 
2038 			/*
2039 			 * Go to the next page.
2040 			 */
2041 			pps++;
2042 			npgs--;
2043 
2044 			/*
2045 			 * Go to the next address.
2046 			 */
2047 			vaddr += MMU_PAGESIZE;
2048 
2049 			/*
2050 			 * Don't cross over into a different hmeblk.
2051 			 */
2052 			index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2053 			    (NHMENTS-1));
2054 
2055 		} while (index != 0 && npgs != 0);
2056 
2057 		/*
2058 		 * Release the hash bucket.
2059 		 */
2060 
2061 		sfmmu_tteload_release_hashbucket(hmebp);
2062 	}
2063 }
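
/*
 * [Editorial note -- not part of the original source.]  The loop above
 * stays within one hmeblk by checking the hment index of the *next*
 * address.  With NHMENTS == 8 and 8K pages (MMU_PAGESHIFT == 13), an
 * 8K-size hmeblk covers 8 consecutive pages (64K), so for example:
 *
 *	vaddr = 0x10000  ->  index = (0x10000 >> 13) & 7 = 0
 *	vaddr = 0x1e000  ->  index = (0x1e000 >> 13) & 7 = 7
 *	vaddr = 0x20000  ->  index = 0 again, i.e. a new hmeblk, so the
 *	                     inner loop exits and a fresh hash bucket and
 *	                     hmeblk are looked up for the next batch.
 */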
2064 
2065 /*
2066  * Construct a tte for a page:
2067  *
2068  * tte_valid = 1
2069  * tte_size2 = size & TTE_SZ2_BITS (Panther-only)
2070  * tte_size = size
2071  * tte_nfo = attr & HAT_NOFAULT
2072  * tte_ie = attr & HAT_STRUCTURE_LE
2073  * tte_hmenum = hmenum
2074  * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2075  * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2076  * tte_ref = 1 (optimization)
2077  * tte_wr_perm = attr & PROT_WRITE;
2078  * tte_no_sync = attr & HAT_NOSYNC
2079  * tte_lock = attr & SFMMU_LOCKTTE
2080  * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2081  * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2082  * tte_e = attr & SFMMU_SIDEFFECT
2083  * tte_priv = !(attr & PROT_USER)
2084  * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2085  * tte_glb = 0
2086  */
2087 void
2088 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2089 {
2090 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2091 
2092 	ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2093 	ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2094 
2095 	if (TTE_IS_NOSYNC(ttep)) {
2096 		TTE_SET_REF(ttep);
2097 		if (TTE_IS_WRITABLE(ttep)) {
2098 			TTE_SET_MOD(ttep);
2099 		}
2100 	}
2101 	if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2102 		panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2103 	}
2104 }
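
/*
 * [Editorial example -- not part of the original source.]  Constructing
 * a writable nosync 8K tte; because HAT_NOSYNC is set, sfmmu_memtte()
 * presets the ref and mod bits so no traps are taken later merely to
 * record them ("pp" is a hypothetical locked page).
 */
#if 0
	tte_t tte;

	sfmmu_memtte(&tte, pp->p_pagenum,
	    PROT_READ | PROT_WRITE | HAT_NOSYNC, TTE8K);
	ASSERT(TTE_IS_REF(&tte) && TTE_IS_MOD(&tte));
#endif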
2105 
2106 /*
2107  * This function will add a translation to the hme_blk and allocate the
2108  * hme_blk if one does not exist.
2109  * If a page structure is specified then it will add the
2110  * corresponding hment to the mapping list.
2111  * It will also update the hmenum field for the tte.
2112  */
2113 void
2114 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2115 	uint_t flags)
2116 {
2117 	(void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags);
2118 }
2119 
2120 /*
2121  * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2122  * Assumes that a particular page size may only be resident in one TSB.
2123  */
2124 static void
2125 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2126 {
2127 	struct tsb_info *tsbinfop = NULL;
2128 	uint64_t tag;
2129 	struct tsbe *tsbe_addr;
2130 	uint64_t tsb_base;
2131 	uint_t tsb_size;
2132 	int vpshift = MMU_PAGESHIFT;
2133 	int phys = 0;
2134 
2135 	if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2136 		phys = ktsb_phys;
2137 		if (ttesz >= TTE4M) {
2138 #ifndef sun4v
2139 			ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2140 #endif
2141 			tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2142 			tsb_size = ktsb4m_szcode;
2143 		} else {
2144 			tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2145 			tsb_size = ktsb_szcode;
2146 		}
2147 	} else {
2148 		SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2149 
2150 		/*
2151 		 * If there isn't a TSB for this page size, or the TSB is
2152 		 * swapped out, there is nothing to do.  Note that the latter
2153 		 * case seems impossible but can occur if hat_pageunload()
2154 		 * is called on an ISM mapping while the process is swapped
2155 		 * out.
2156 		 */
2157 		if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2158 			return;
2159 
2160 		/*
2161 		 * If another thread is in the middle of relocating a TSB
2162 		 * we can't unload the entry so set a flag so that the
2163 		 * TSB will be flushed before it can be accessed by the
2164 		 * process.
2165 		 */
2166 		if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2167 			if (ttep == NULL)
2168 				tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2169 			return;
2170 		}
2171 #if defined(UTSB_PHYS)
2172 		phys = 1;
2173 		tsb_base = (uint64_t)tsbinfop->tsb_pa;
2174 #else
2175 		tsb_base = (uint64_t)tsbinfop->tsb_va;
2176 #endif
2177 		tsb_size = tsbinfop->tsb_szc;
2178 	}
2179 	if (ttesz >= TTE4M)
2180 		vpshift = MMU_PAGESHIFT4M;
2181 
2182 	tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2183 	tag = sfmmu_make_tsbtag(vaddr);
2184 
2185 	if (ttep == NULL) {
2186 		sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2187 	} else {
2188 		if (ttesz >= TTE4M) {
2189 			SFMMU_STAT(sf_tsb_load4m);
2190 		} else {
2191 			SFMMU_STAT(sf_tsb_load8k);
2192 		}
2193 
2194 		sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2195 	}
2196 }
2197 
2198 /*
2199  * Unmap all entries from [start, end) matching the given page size.
2200  *
2201  * This function is used primarily to unmap replicated 64K or 512K entries
2202  * from the TSB that are inserted using the base page size TSB pointer, but
2203  * it may also be called to unmap a range of addresses from the TSB.
2204  */
2205 void
2206 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2207 {
2208 	struct tsb_info *tsbinfop;
2209 	uint64_t tag;
2210 	struct tsbe *tsbe_addr;
2211 	caddr_t vaddr;
2212 	uint64_t tsb_base;
2213 	int vpshift, vpgsz;
2214 	uint_t tsb_size;
2215 	int phys = 0;
2216 
2217 	/*
2218 	 * Assumptions:
2219 	 *  If ttesz == 8K, 64K or 512K, we walk through the range 8K
2220 	 *  at a time shooting down any valid entries we encounter.
2221 	 *
2222 	 *  If ttesz >= 4M we walk the range 4M at a time shooting
2223 	 *  down any valid mappings we find.
2224 	 */
2225 	if (sfmmup == ksfmmup) {
2226 		phys = ktsb_phys;
2227 		if (ttesz >= TTE4M) {
2228 #ifndef sun4v
2229 			ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2230 #endif
2231 			tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2232 			tsb_size = ktsb4m_szcode;
2233 		} else {
2234 			tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2235 			tsb_size = ktsb_szcode;
2236 		}
2237 	} else {
2238 		SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2239 
2240 		/*
2241 		 * If there isn't a TSB for this page size, or the TSB is
2242 		 * swapped out, there is nothing to do.  Note that the latter
2243 		 * case seems impossible but can occur if hat_pageunload()
2244 		 * is called on an ISM mapping while the process is swapped
2245 		 * out.
2246 		 */
2247 		if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2248 			return;
2249 
2250 		/*
2251 		 * If another thread is in the middle of relocating a TSB
2252 		 * we can't unload the entry so set a flag so that the
2253 		 * TSB will be flushed before it can be accessed by the
2254 		 * process.
2255 		 */
2256 		if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2257 			tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2258 			return;
2259 		}
2260 #if defined(UTSB_PHYS)
2261 		phys = 1;
2262 		tsb_base = (uint64_t)tsbinfop->tsb_pa;
2263 #else
2264 		tsb_base = (uint64_t)tsbinfop->tsb_va;
2265 #endif
2266 		tsb_size = tsbinfop->tsb_szc;
2267 	}
2268 	if (ttesz >= TTE4M) {
2269 		vpshift = MMU_PAGESHIFT4M;
2270 		vpgsz = MMU_PAGESIZE4M;
2271 	} else {
2272 		vpshift = MMU_PAGESHIFT;
2273 		vpgsz = MMU_PAGESIZE;
2274 	}
2275 
2276 	for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2277 		tag = sfmmu_make_tsbtag(vaddr);
2278 		tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2279 		sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2280 	}
2281 }
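
/*
 * [Editorial note -- not part of the original source.]  The step size
 * above is tied to how entries were inserted: 64K and 512K ttes are
 * replicated at every 8K offset of the base-size TSB, so the walk must
 * probe every 8K boundary, while 4M (and larger) ttes live in the 4M
 * TSB at 4M granularity.  E.g., unloading [0x400000, 0x480000) with
 * ttesz == TTE64K probes 64 TSB entries (one per 8K page), not 8.
 */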
2282 
2283 /*
2284  * Select the optimum TSB size given the number of mappings
2285  * that need to be cached.
2286  */
2287 static int
2288 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2289 {
2290 	int szc = 0;
2291 
2292 #ifdef DEBUG
2293 	if (tsb_grow_stress) {
2294 		uint32_t randval = (uint32_t)gettick() >> 4;
2295 		return (randval % (tsb_max_growsize + 1));
2296 	}
2297 #endif	/* DEBUG */
2298 
2299 	while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2300 		szc++;
2301 	return (szc);
2302 }
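
/*
 * [Editorial note -- not part of the original source.]  The growth loop
 * above returns the smallest size code whose RSS threshold covers
 * pgcnt.  For instance, if SFMMU_RSS_TSBSIZE(0) were 512 and each
 * increment of szc doubled the threshold (the real values are platform
 * tunables), then with tsb_max_growsize >= 4:
 *
 *	pgcnt =  400  ->  szc = 0
 *	pgcnt =  512  ->  szc = 0	(512 is not > 512)
 *	pgcnt =  513  ->  szc = 1
 *	pgcnt = 5000  ->  szc = 4
 */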
2303 
2304 /*
2305  * This function will add a translation to the hme_blk and allocate the
2306  * hme_blk if one does not exist.
2307  * If a page structure is specified then it will add the
2308  * corresponding hment to the mapping list.
2309  * It will also update the hmenum field for the tte.
2310  * Furthermore, it attempts to create a large page translation
2311  * for <addr,hat> at page array pps.  It assumes addr and first
2312  * pp is correctly aligned.  It returns 0 if successful and 1 otherwise.
2313  */
2314 static int
2315 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2316 	page_t **pps, uint_t flags)
2317 {
2318 	struct hmehash_bucket *hmebp;
2319 	struct hme_blk *hmeblkp;
2320 	int 	ret;
2321 	uint_t	size;
2322 
2323 	/*
2324 	 * Get mapping size.
2325 	 */
2326 	size = TTE_CSZ(ttep);
2327 	ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2328 
2329 	/*
2330 	 * Acquire the hash bucket.
2331 	 */
2332 	hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size);
2333 	ASSERT(hmebp);
2334 
2335 	/*
2336 	 * Find the hment block.
2337 	 */
2338 	hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags);
2339 	ASSERT(hmeblkp);
2340 
2341 	/*
2342 	 * Add the translation.
2343 	 */
2344 	ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags);
2345 
2346 	/*
2347 	 * Release the hash bucket.
2348 	 */
2349 	sfmmu_tteload_release_hashbucket(hmebp);
2350 
2351 	return (ret);
2352 }
2353 
2354 /*
2355  * Function locks and returns a pointer to the hash bucket for vaddr and size.
2356  */
2357 static struct hmehash_bucket *
2358 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size)
2359 {
2360 	struct hmehash_bucket *hmebp;
2361 	int hmeshift;
2362 
2363 	hmeshift = HME_HASH_SHIFT(size);
2364 
2365 	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
2366 
2367 	SFMMU_HASH_LOCK(hmebp);
2368 
2369 	return (hmebp);
2370 }
2371 
2372 /*
2373  * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2374  * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is
2375  * allocated.
2376  */
2377 static struct hme_blk *
2378 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2379 	caddr_t vaddr, uint_t size, uint_t flags)
2380 {
2381 	hmeblk_tag hblktag;
2382 	int hmeshift;
2383 	struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2384 	uint64_t hblkpa, prevpa;
2385 	struct kmem_cache *sfmmu_cache;
2386 	uint_t forcefree;
2387 
2388 	hblktag.htag_id = sfmmup;
2389 	hmeshift = HME_HASH_SHIFT(size);
2390 	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2391 	hblktag.htag_rehash = HME_HASH_REHASH(size);
2392 
2393 ttearray_realloc:
2394 
2395 	HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa,
2396 	    pr_hblk, prevpa, &list);
2397 
2398 	/*
2399 	 * We block until hblk_reserve_lock is released; it's held by
2400 	 * the thread temporarily using hblk_reserve, until hblk_reserve is
2401 	 * replaced by a hblk from sfmmu8_cache.
2402 	 */
2403 	if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2404 	    hblk_reserve_thread != curthread) {
2405 		SFMMU_HASH_UNLOCK(hmebp);
2406 		mutex_enter(&hblk_reserve_lock);
2407 		mutex_exit(&hblk_reserve_lock);
2408 		SFMMU_STAT(sf_hblk_reserve_hit);
2409 		SFMMU_HASH_LOCK(hmebp);
2410 		goto ttearray_realloc;
2411 	}
2412 
2413 	if (hmeblkp == NULL) {
2414 		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2415 		    hblktag, flags);
2416 	} else {
2417 		/*
2418 		 * It is possible for 8K and 64K hblks to collide since they
2419 		 * have the same rehash value.  This is because we
2420 		 * lazily free hblks, and 8K/64K blks could be lingering.
2421 		 * If we find a size mismatch, we free the block and try again.
2422 		 */
2423 		if (get_hblk_ttesz(hmeblkp) != size) {
2424 			ASSERT(!hmeblkp->hblk_vcnt);
2425 			ASSERT(!hmeblkp->hblk_hmecnt);
2426 			sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk);
2427 			sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
2428 			goto ttearray_realloc;
2429 		}
2430 		if (hmeblkp->hblk_shw_bit) {
2431 			/*
2432 			 * If the hblk was previously used as a shadow hblk,
2433 			 * then we will change it to a normal hblk.
2434 			 */
2435 			if (hmeblkp->hblk_shw_mask) {
2436 				sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
2437 				ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
2438 				goto ttearray_realloc;
2439 			} else {
2440 				hmeblkp->hblk_shw_bit = 0;
2441 			}
2442 		}
2443 		SFMMU_STAT(sf_hblk_hit);
2444 	}
2445 
2446 	/*
2447 	 * hat_memload() should never call kmem_cache_free(); see block
2448 	 * comment showing the stacktrace in sfmmu_hblk_alloc();
2449 	 * enqueue each hblk in the list to the reserve list if it's created
2450 	 * from sfmmu8_cache *and* sfmmup == KHATID.
2451 	 */
2452 	forcefree = (sfmmup == KHATID) ? 1 : 0;
2453 	while ((pr_hblk = list) != NULL) {
2454 		list = pr_hblk->hblk_next;
2455 		sfmmu_cache = get_hblk_cache(pr_hblk);
2456 		if ((sfmmu_cache == sfmmu8_cache) &&
2457 		    sfmmu_put_free_hblk(pr_hblk, forcefree))
2458 			continue;
2459 
2460 		ASSERT(sfmmup != KHATID);
2461 		kmem_cache_free(sfmmu_cache, pr_hblk);
2462 	}
2463 
2464 	ASSERT(get_hblk_ttesz(hmeblkp) == size);
2465 	ASSERT(!hmeblkp->hblk_shw_bit);
2466 
2467 	return (hmeblkp);
2468 }
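
/*
 * [Editorial sketch -- not part of the original source.]  The empty
 * mutex_enter()/mutex_exit() pair above acts as a barrier rather than a
 * critical section: the caller merely needs to *wait* until the
 * hblk_reserve owner drops hblk_reserve_lock, and then retry the hash
 * search.  The generic shape of the trick (lookup(), lock()/unlock(),
 * RESERVED, reserve_lock, and owner are hypothetical stand-ins):
 */
#if 0
retry:
	obj = lookup(hash, tag);
	if (obj == RESERVED && owner != curthread) {
		unlock(hash);
		mutex_enter(&reserve_lock);	/* blocks until owner is done */
		mutex_exit(&reserve_lock);	/* we never needed to hold it */
		lock(hash);
		goto retry;			/* placeholder is gone now */
	}
#endif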
2469 
2470 /*
2471  * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
2472  * otherwise.
2473  */
2474 static int
2475 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
2476 	caddr_t vaddr, page_t **pps, uint_t flags)
2477 {
2478 	page_t *pp = *pps;
2479 	int hmenum, size, remap;
2480 	tte_t tteold, flush_tte;
2481 #ifdef DEBUG
2482 	tte_t orig_old;
2483 #endif /* DEBUG */
2484 	struct sf_hment *sfhme;
2485 	kmutex_t *pml, *pmtx;
2486 	hatlock_t *hatlockp;
2487 
2488 	/*
2489 	 * remove this panic when we decide to let user virtual address
2490 	 * space be >= USERLIMIT.
2491 	 */
2492 	if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
2493 		panic("user addr %p in kernel space", vaddr);
2494 #if defined(TTE_IS_GLOBAL)
2495 	if (TTE_IS_GLOBAL(ttep))
2496 		panic("sfmmu_tteload: creating global tte");
2497 #endif
2498 
2499 #ifdef DEBUG
2500 	if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
2501 	    !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
2502 		panic("sfmmu_tteload: non cacheable memory tte");
2503 #endif /* DEBUG */
2504 
2505 	if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
2506 	    !TTE_IS_MOD(ttep)) {
2507 		/*
2508 		 * Don't load TSB for dummy as in ISM.  Also don't preload
2509 		 * the TSB if the TTE isn't writable since we're likely to
2510 		 * fault on it again -- preloading can be fairly expensive.
2511 		 */
2512 		flags |= SFMMU_NO_TSBLOAD;
2513 	}
2514 
2515 	size = TTE_CSZ(ttep);
2516 	switch (size) {
2517 	case TTE8K:
2518 		SFMMU_STAT(sf_tteload8k);
2519 		break;
2520 	case TTE64K:
2521 		SFMMU_STAT(sf_tteload64k);
2522 		break;
2523 	case TTE512K:
2524 		SFMMU_STAT(sf_tteload512k);
2525 		break;
2526 	case TTE4M:
2527 		SFMMU_STAT(sf_tteload4m);
2528 		break;
2529 	case (TTE32M):
2530 		SFMMU_STAT(sf_tteload32m);
2531 		ASSERT(mmu_page_sizes == max_mmu_page_sizes);
2532 		break;
2533 	case (TTE256M):
2534 		SFMMU_STAT(sf_tteload256m);
2535 		ASSERT(mmu_page_sizes == max_mmu_page_sizes);
2536 		break;
2537 	}
2538 
2539 	ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2540 
2541 	HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
2542 
2543 	/*
2544 	 * Need to grab mlist lock here so that pageunload
2545 	 * will not change tte behind us.
2546 	 */
2547 	if (pp) {
2548 		pml = sfmmu_mlist_enter(pp);
2549 	}
2550 
2551 	sfmmu_copytte(&sfhme->hme_tte, &tteold);
2552 	/*
2553 	 * Look for corresponding hment and if valid verify
2554 	 * pfns are equal.
2555 	 */
2556 	remap = TTE_IS_VALID(&tteold);
2557 	if (remap) {
2558 		pfn_t	new_pfn, old_pfn;
2559 
2560 		old_pfn = TTE_TO_PFN(vaddr, &tteold);
2561 		new_pfn = TTE_TO_PFN(vaddr, ttep);
2562 
2563 		if (flags & HAT_LOAD_REMAP) {
2564 			/* make sure we are remapping same type of pages */
2565 			if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
2566 				panic("sfmmu_tteload - tte remap io<->memory");
2567 			}
2568 			if (old_pfn != new_pfn &&
2569 			    (pp != NULL || sfhme->hme_page != NULL)) {
2570 				panic("sfmmu_tteload - tte remap pp != NULL");
2571 			}
2572 		} else if (old_pfn != new_pfn) {
2573 			panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
2574 			    (void *)hmeblkp);
2575 		}
2576 		ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
2577 	}
2578 
2579 	if (pp) {
2580 		if (size == TTE8K) {
2581 			/*
2582 			 * Handle VAC consistency
2583 			 */
2584 			if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
2585 				sfmmu_vac_conflict(sfmmup, vaddr, pp);
2586 			}
2587 
2588 			if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
2589 				pmtx = sfmmu_page_enter(pp);
2590 				PP_CLRRO(pp);
2591 				sfmmu_page_exit(pmtx);
2592 			} else if (!PP_ISMAPPED(pp) &&
2593 			    (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
2594 				pmtx = sfmmu_page_enter(pp);
2595 				if (!(PP_ISMOD(pp))) {
2596 					PP_SETRO(pp);
2597 				}
2598 				sfmmu_page_exit(pmtx);
2599 			}
2600 
2601 		} else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
2602 			/*
2603 			 * sfmmu_pagearray_setup failed so return
2604 			 */
2605 			sfmmu_mlist_exit(pml);
2606 			return (1);
2607 		}
2608 	}
2609 
2610 	/*
2611 	 * Make sure hment is not on a mapping list.
2612 	 */
2613 	ASSERT(remap || (sfhme->hme_page == NULL));
2614 
2615 	/* if it is not a remap then hme->next better be NULL */
2616 	ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
2617 
2618 	if (flags & HAT_LOAD_LOCK) {
2619 		if (((int)hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
2620 			panic("too high lckcnt-hmeblk %p",
2621 			    (void *)hmeblkp);
2622 		}
2623 		atomic_add_16(&hmeblkp->hblk_lckcnt, 1);
2624 
2625 		HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
2626 	}
2627 
2628 	if (pp && PP_ISNC(pp)) {
2629 		/*
2630 		 * If the physical page is marked to be uncacheable, like
2631 		 * by a vac conflict, make sure the new mapping is also
2632 		 * uncacheable.
2633 		 */
2634 		TTE_CLR_VCACHEABLE(ttep);
2635 		ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
2636 	}
2637 	ttep->tte_hmenum = hmenum;
2638 
2639 #ifdef DEBUG
2640 	orig_old = tteold;
2641 #endif /* DEBUG */
2642 
2643 	while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
2644 		if ((sfmmup == KHATID) &&
2645 		    (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
2646 			sfmmu_copytte(&sfhme->hme_tte, &tteold);
2647 		}
2648 #ifdef DEBUG
2649 		chk_tte(&orig_old, &tteold, ttep, hmeblkp);
2650 #endif /* DEBUG */
2651 	}
2652 
2653 	if (!TTE_IS_VALID(&tteold)) {
2654 
2655 		atomic_add_16(&hmeblkp->hblk_vcnt, 1);
2656 		atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1);
2657 
2658 		/*
2659 		 * HAT_RELOAD_SHARE has been deprecated with lpg DISM.
2660 		 */
2661 
2662 		if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
2663 		    sfmmup != ksfmmup) {
2664 			/*
2665 			 * If this is the first large mapping for the process
2666 			 * we must force any CPUs running this process to TL=0
2667 			 * where they will reload the HAT flags from the
2668 			 * tsbmiss area.  This is necessary to make the large
2669 			 * mappings we are about to load visible to those CPUs;
2670 			 * otherwise they'll loop forever calling pagefault()
2671 			 * since we don't search large hash chains by default.
2672 			 */
2673 			hatlockp = sfmmu_hat_enter(sfmmup);
2674 			if (size == TTE512K &&
2675 			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_512K_FLAG)) {
2676 				SFMMU_FLAGS_SET(sfmmup, HAT_512K_FLAG);
2677 				sfmmu_sync_mmustate(sfmmup);
2678 			} else if (size == TTE4M &&
2679 			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) {
2680 				SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG);
2681 				sfmmu_sync_mmustate(sfmmup);
2682 			} else if (size == TTE64K &&
2683 			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) {
2684 				SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG);
2685 				/* no sync mmustate; 64K shares 8K hashes */
2686 			} else if (mmu_page_sizes == max_mmu_page_sizes) {
2687 			    if (size == TTE32M &&
2688 				!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) {
2689 				SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG);
2690 				sfmmu_sync_mmustate(sfmmup);
2691 			    } else if (size == TTE256M &&
2692 				!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) {
2693 				SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG);
2694 				sfmmu_sync_mmustate(sfmmup);
2695 			    }
2696 			}
2697 			if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
2698 			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
2699 				SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
2700 			}
2701 			sfmmu_hat_exit(hatlockp);
2702 		}
2703 	}
2704 	ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
2705 
2706 	flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
2707 	    hw_tte.tte_intlo;
2708 	flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
2709 	    hw_tte.tte_inthi;
2710 
2711 	if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
2712 		/*
2713 		 * If remap and new tte differs from old tte we need
2714 		 * to sync the mod bit and flush TLB/TSB.  We don't
2715 		 * need to sync ref bit because we currently always set
2716 		 * ref bit in tteload.
2717 		 */
2718 		ASSERT(TTE_IS_REF(ttep));
2719 		if (TTE_IS_MOD(&tteold)) {
2720 			sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
2721 		}
2722 		sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
2723 		xt_sync(sfmmup->sfmmu_cpusran);
2724 	}
2725 
2726 	if ((flags & SFMMU_NO_TSBLOAD) == 0) {
2727 		/*
2728 		 * We only preload 8K and 4M mappings into the TSB, since
2729 		 * 64K and 512K mappings are replicated and hence don't
2730 		 * have a single, unique TSB entry. Ditto for 32M/256M.
2731 		 */
2732 		if (size == TTE8K || size == TTE4M) {
2733 			hatlockp = sfmmu_hat_enter(sfmmup);
2734 			sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, size);
2735 			sfmmu_hat_exit(hatlockp);
2736 		}
2737 	}
2738 	if (pp) {
2739 		if (!remap) {
2740 			HME_ADD(sfhme, pp);
2741 			atomic_add_16(&hmeblkp->hblk_hmecnt, 1);
2742 			ASSERT(hmeblkp->hblk_hmecnt > 0);
2743 
2744 			/*
2745 			 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
2746 			 * see pageunload() for comment.
2747 			 */
2748 		}
2749 		sfmmu_mlist_exit(pml);
2750 	}
2751 
2752 	return (0);
2753 }
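
/*
 * [Editorial note -- not part of the original source.]  The flush_tte
 * computation above uses a classic trick: XOR the old and new tte words
 * to obtain a mask of the bits that changed, then AND with hw_tte to
 * keep only the bits the MMU actually interprets.  A nonzero result
 * means the hardware-visible translation changed and the TLB/TSB must
 * be flushed; changes confined to software-only bits cost nothing.
 * With illustrative 4-bit values:
 *
 *	old = 1011, new = 1001  ->  old ^ new        = 0010
 *	hw  = 1101              ->  (old ^ new) & hw = 0000  (no flush)
 */
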
2754 /*
2755  * Function unlocks hash bucket.
2756  */
2757 static void
2758 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
2759 {
2760 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
2761 	SFMMU_HASH_UNLOCK(hmebp);
2762 }
2763 
2764 /*
2765  * Function which checks and sets up the page array for a large
2766  * translation.  Will set the p_vcolor, p_index, and p_ro fields.
2767  * Assumes addr and pfnum of first page are properly aligned.
2768  * Will check for physical contiguity.  If the check fails, it
2769  * returns nonzero.
2770  */
2771 static int
2772 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
2773 {
2774 	int 	i, index, ttesz, osz;
2775 	pfn_t	pfnum;
2776 	pgcnt_t	npgs;
2777 	int cflags = 0;
2778 	page_t *pp, *pp1;
2779 	kmutex_t *pmtx;
2780 	int vac_err = 0;
2781 	int newidx = 0;
2782 
2783 	ttesz = TTE_CSZ(ttep);
2784 
2785 	ASSERT(ttesz > TTE8K);
2786 
2787 	npgs = TTEPAGES(ttesz);
2788 	index = PAGESZ_TO_INDEX(ttesz);
2789 
2790 	pfnum = (*pps)->p_pagenum;
2791 	ASSERT(IS_P2ALIGNED(pfnum, npgs));
2792 
2793 	/*
2794 	 * Save the first pp so we can do HAT_TMPNC at the end.
2795 	 */
2796 	pp1 = *pps;
2797 	osz = fnd_mapping_sz(pp1);
2798 
2799 	for (i = 0; i < npgs; i++, pps++) {
2800 		pp = *pps;
2801 		ASSERT(PAGE_LOCKED(pp));
2802 		ASSERT(pp->p_szc >= ttesz);
2803 		ASSERT(pp->p_szc == pp1->p_szc);
2804 		ASSERT(sfmmu_mlist_held(pp));
2805 
2806 		/*
2807 		 * XXX is it possible to maintain P_RO on the root only?
2808 		 */
2809 		if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
2810 			pmtx = sfmmu_page_enter(pp);
2811 			PP_CLRRO(pp);
2812 			sfmmu_page_exit(pmtx);
2813 		} else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
2814 		    !PP_ISMOD(pp)) {
2815 			pmtx = sfmmu_page_enter(pp);
2816 			if (!(PP_ISMOD(pp))) {
2817 				PP_SETRO(pp);
2818 			}
2819 			sfmmu_page_exit(pmtx);
2820 		}
2821 
2822 		/*
2823 		 * If this is a remap we skip vac & contiguity checks.
2824 		 */
2825 		if (remap)
2826 			continue;
2827 
2828 		/*
2829 		 * set p_vcolor and detect any vac conflicts.
2830 		 */
2831 		if (vac_err == 0) {
2832 			vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
2833 
2834 		}
2835 
2836 		/*
2837 		 * Save current index in case we need to undo it.
2838 		 * Note: "PAGESZ_TO_INDEX(sz)	(1 << (sz))"
2839 		 *	"SFMMU_INDEX_SHIFT	6"
2840 		 *	 "SFMMU_INDEX_MASK	((1 << SFMMU_INDEX_SHIFT) - 1)"
2841 		 *	 "PP_MAPINDEX(p_index)	(p_index & SFMMU_INDEX_MASK)"
2842 		 *
2843 		 * So:	index = PAGESZ_TO_INDEX(ttesz);
2844 		 *	if ttesz == 1 then index = 0x2
2845 		 *		    2 then index = 0x4
2846 		 *		    3 then index = 0x8
2847 		 *		    4 then index = 0x10
2848 		 *		    5 then index = 0x20
2849 		 * The code below checks if it's a new pagesize (i.e., newidx)
2850 		 * in case we need to take it back out of p_index,
2851 		 * and then or's the new index into the existing index.
2852 		 */
2853 		if ((PP_MAPINDEX(pp) & index) == 0)
2854 			newidx = 1;
2855 		pp->p_index = (PP_MAPINDEX(pp) | index);
2856 
2857 		/*
2858 		 * contiguity check
2859 		 */
2860 		if (pp->p_pagenum != pfnum) {
2861 			/*
2862 			 * If we fail the contiguity test then
2863 			 * the only thing we need to fix is the p_index field.
2864 			 * We might get a few extra flushes but since this
2865 			 * path is rare that is ok.  The p_ro field will
2866 			 * get automatically fixed on the next tteload to
2867 			 * the page.  NO TNC bit is set yet.
2868 			 */
2869 			while (i >= 0) {
2870 				pp = *pps;
2871 				if (newidx)
2872 					pp->p_index = (PP_MAPINDEX(pp) &
2873 					    ~index);
2874 				pps--;
2875 				i--;
2876 			}
2877 			return (1);
2878 		}
2879 		pfnum++;
2880 		addr += MMU_PAGESIZE;
2881 	}
2882 
2883 	if (vac_err) {
2884 		if (ttesz > osz) {
2885 			/*
2886 			 * There are some smaller mappings that cause vac
2887 			 * conflicts. Convert all existing small mappings to
2888 			 * TNC.
2889 			 */
2890 			SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
2891 			sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
2892 				npgs);
2893 		} else {
2894 			/* EMPTY */
2895 			/*
2896 			 * If there exists a big page mapping,
2897 			 * that means the whole existing big page
2898 			 * has the TNC setting already.  No need to convert
2899 			 * to TNC again.
2900 			 */
2901 			ASSERT(PP_ISTNC(pp1));
2902 		}
2903 	}
2904 
2905 	return (0);
2906 }
2907 
2908 /*
2909  * Routine that checks vac consistency for a large page.  It also
2910  * sets virtual color for all pp's for this big mapping.
2911  */
2912 static int
2913 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
2914 {
2915 	int vcolor, ocolor;
2916 
2917 	ASSERT(sfmmu_mlist_held(pp));
2918 
2919 	if (PP_ISNC(pp)) {
2920 		return (HAT_TMPNC);
2921 	}
2922 
2923 	vcolor = addr_to_vcolor(addr);
2924 	if (PP_NEWPAGE(pp)) {
2925 		PP_SET_VCOLOR(pp, vcolor);
2926 		return (0);
2927 	}
2928 
2929 	ocolor = PP_GET_VCOLOR(pp);
2930 	if (ocolor == vcolor) {
2931 		return (0);
2932 	}
2933 
2934 	if (!PP_ISMAPPED(pp)) {
2935 		/*
2936 		 * Previous user of the page had a different color,
2937 		 * but since there are no current users
2938 		 * we just flush the cache and change the color.
2939 		 * As an optimization for large pages we flush the
2940 		 * entire cache of that color and set a flag.
2941 		 */
2942 		SFMMU_STAT(sf_pgcolor_conflict);
2943 		if (!CacheColor_IsFlushed(*cflags, ocolor)) {
2944 			CacheColor_SetFlushed(*cflags, ocolor);
2945 			sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
2946 		}
2947 		PP_SET_VCOLOR(pp, vcolor);
2948 		return (0);
2949 	}
2950 
2951 	/*
2952 	 * We got a real conflict with a current mapping.
2953 	 * Set flags to start uncaching all mappings
2954 	 * and return failure so we restart looping over
2955 	 * the pp array from the beginning.
2956 	 */
2957 	return (HAT_TMPNC);
2958 }
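
/*
 * [Editorial note -- not part of the original source.]  On a virtually
 * indexed cache, the "color" of a mapping is the set of low-order
 * virtual page-frame bits that take part in cache indexing;
 * conceptually (the exact width is platform-specific):
 *
 *	vcolor = (addr >> MMU_PAGESHIFT) & (number_of_colors - 1);
 *
 * Two mappings of one physical page are consistent only if their colors
 * match, which is why an unmapped page can simply be recolored above --
 * flush the stale color once, record the new one -- while a page with
 * live mappings of another color must go non-cacheable (TNC) instead.
 */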
2959 
2960 /*
2961  * creates a large page shadow hmeblk for a tte.
2962  * The purpose of this routine is to allow us to do quick unloads because
2963  * the vm layer can easily pass a very large but sparsely populated range.
2964  */
2965 static struct hme_blk *
2966 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
2967 {
2968 	struct hmehash_bucket *hmebp;
2969 	hmeblk_tag hblktag;
2970 	int hmeshift, size, vshift;
2971 	uint_t shw_mask, newshw_mask;
2972 	struct hme_blk *hmeblkp;
2973 
2974 	ASSERT(sfmmup != KHATID);
2975 	if (mmu_page_sizes == max_mmu_page_sizes) {
2976 		ASSERT(ttesz < TTE256M);
2977 	} else {
2978 		ASSERT(ttesz < TTE4M);
2979 		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
2980 		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
2981 	}
2982 
2983 	if (ttesz == TTE8K) {
2984 		size = TTE512K;
2985 	} else {
2986 		size = ++ttesz;
2987 	}
2988 
2989 	hblktag.htag_id = sfmmup;
2990 	hmeshift = HME_HASH_SHIFT(size);
2991 	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2992 	hblktag.htag_rehash = HME_HASH_REHASH(size);
2993 	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
2994 
2995 	SFMMU_HASH_LOCK(hmebp);
2996 
2997 	HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
2998 	ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
2999 	if (hmeblkp == NULL) {
3000 		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3001 			hblktag, flags);
3002 	}
3003 	ASSERT(hmeblkp);
3004 	if (!hmeblkp->hblk_shw_mask) {
3005 		/*
3006 		 * If this is an unused hblk, it was just allocated or could
3007 		 * potentially be a previous large page hblk, so we need to
3008 		 * set the shadow bit.
3009 		 */
3010 		hmeblkp->hblk_shw_bit = 1;
3011 	}
3012 	ASSERT(hmeblkp->hblk_shw_bit == 1);
3013 	vshift = vaddr_to_vshift(hblktag, vaddr, size);
3014 	ASSERT(vshift < 8);
3015 	/*
3016 	 * Atomically set shw mask bit
3017 	 */
3018 	do {
3019 		shw_mask = hmeblkp->hblk_shw_mask;
3020 		newshw_mask = shw_mask | (1 << vshift);
3021 		newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask,
3022 		    newshw_mask);
3023 	} while (newshw_mask != shw_mask);
3024 
3025 	SFMMU_HASH_UNLOCK(hmebp);
3026 
3027 	return (hmeblkp);
3028 }
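
/*
 * [Editorial sketch -- not part of the original source.]  The cas32()
 * loop above is the standard lock-free way to OR a bit into a word that
 * other CPUs may update concurrently: cas32() returns the value that
 * was actually in memory, so getting our own snapshot back means the
 * swap took effect.  As a standalone helper it would look like:
 */
#if 0
static void
shw_mask_or(uint_t *wp, uint_t bit)
{
	uint_t old;

	do {
		old = *wp;
	} while (cas32(wp, old, old | bit) != old);
}
#endif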
3029 
3030 /*
3031  * This routine cleans up a previous shadow hmeblk and changes it to
3032  * a regular hblk.  This happens rarely but it is possible
3033  * when a process wants to use large pages and there are hblks still
3034  * lying around from the previous as that used these hmeblks.
3035  * The alternative was to clean up the shadow hblks at unload time,
3036  * but since so few user processes actually use large pages, it is
3037  * better to be lazy and clean up at this time.
3038  */
3039 static void
3040 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3041 	struct hmehash_bucket *hmebp)
3042 {
3043 	caddr_t addr, endaddr;
3044 	int hashno, size;
3045 
3046 	ASSERT(hmeblkp->hblk_shw_bit);
3047 
3048 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3049 
3050 	if (!hmeblkp->hblk_shw_mask) {
3051 		hmeblkp->hblk_shw_bit = 0;
3052 		return;
3053 	}
3054 	addr = (caddr_t)get_hblk_base(hmeblkp);
3055 	endaddr = get_hblk_endaddr(hmeblkp);
3056 	size = get_hblk_ttesz(hmeblkp);
3057 	hashno = size - 1;
3058 	ASSERT(hashno > 0);
3059 	SFMMU_HASH_UNLOCK(hmebp);
3060 
3061 	sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3062 
3063 	SFMMU_HASH_LOCK(hmebp);
3064 }
3065 
3066 static void
3067 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3068 	int hashno)
3069 {
3070 	int hmeshift, shadow = 0;
3071 	hmeblk_tag hblktag;
3072 	struct hmehash_bucket *hmebp;
3073 	struct hme_blk *hmeblkp;
3074 	struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3075 	uint64_t hblkpa, prevpa, nx_pa;
3076 
3077 	ASSERT(hashno > 0);
3078 	hblktag.htag_id = sfmmup;
3079 	hblktag.htag_rehash = hashno;
3080 
3081 	hmeshift = HME_HASH_SHIFT(hashno);
3082 
3083 	while (addr < endaddr) {
3084 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3085 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3086 		SFMMU_HASH_LOCK(hmebp);
3087 		/* inline HME_HASH_SEARCH */
3088 		hmeblkp = hmebp->hmeblkp;
3089 		hblkpa = hmebp->hmeh_nextpa;
3090 		prevpa = 0;
3091 		pr_hblk = NULL;
3092 		while (hmeblkp) {
3093 			ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
3094 			if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3095 				/* found hme_blk */
3096 				if (hmeblkp->hblk_shw_bit) {
3097 					if (hmeblkp->hblk_shw_mask) {
3098 						shadow = 1;
3099 						sfmmu_shadow_hcleanup(sfmmup,
3100 						    hmeblkp, hmebp);
3101 						break;
3102 					} else {
3103 						hmeblkp->hblk_shw_bit = 0;
3104 					}
3105 				}
3106 
3107 				/*
3108 				 * hblk_hmecnt and hblk_vcnt could be nonzero
3109 				 * since hblk_unload() does not guarantee that.
3110 				 *
3111 				 * XXX - this could cause tteload() to spin
3112 				 * where sfmmu_shadow_hcleanup() is called.
3113 				 */
3114 			}
3115 
3116 			nx_hblk = hmeblkp->hblk_next;
3117 			nx_pa = hmeblkp->hblk_nextpa;
3118 			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3119 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa,
3120 					pr_hblk);
3121 				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
3122 			} else {
3123 				pr_hblk = hmeblkp;
3124 				prevpa = hblkpa;
3125 			}
3126 			hmeblkp = nx_hblk;
3127 			hblkpa = nx_pa;
3128 		}
3129 
3130 		SFMMU_HASH_UNLOCK(hmebp);
3131 
3132 		if (shadow) {
3133 			/*
3134 			 * We found another shadow hblk, so we cleaned up
3135 			 * its children.  We need to go back and clean up
3136 			 * the original hblk, so we don't advance
3137 			 * addr.
3138 			 */
3139 			shadow = 0;
3140 		} else {
3141 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
3142 				(1 << hmeshift));
3143 		}
3144 	}
3145 	sfmmu_hblks_list_purge(&list);
3146 }
3147 
3148 /*
3149  * Release one hardware address translation lock on the given address range.
3150  */
3151 void
3152 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3153 {
3154 	struct hmehash_bucket *hmebp;
3155 	hmeblk_tag hblktag;
3156 	int hmeshift, hashno = 1;
3157 	struct hme_blk *hmeblkp, *list = NULL;
3158 	caddr_t endaddr;
3159 
3160 	ASSERT(sfmmup != NULL);
3161 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3162 
3163 	ASSERT((sfmmup == ksfmmup) ||
3164 		AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3165 	ASSERT((len & MMU_PAGEOFFSET) == 0);
3166 	endaddr = addr + len;
3167 	hblktag.htag_id = sfmmup;
3168 
3169 	/*
3170 	 * Spitfire supports 4 page sizes.
3171 	 * Most pages are expected to be of the smallest page size (8K) and
3172 	 * these will not need to be rehashed. 64K pages also don't need to be
3173 	 * rehashed because an hmeblk spans 64K of address space. 512K pages
3174 	 * might need 1 rehash and and 4M pages might need 2 rehashes.
3175 	 */
3176 	while (addr < endaddr) {
3177 		hmeshift = HME_HASH_SHIFT(hashno);
3178 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3179 		hblktag.htag_rehash = hashno;
3180 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3181 
3182 		SFMMU_HASH_LOCK(hmebp);
3183 
3184 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3185 		if (hmeblkp != NULL) {
3186 			/*
3187 			 * If we encounter a shadow hmeblk then
3188 			 * we know there are no valid hmeblks mapping
3189 			 * this address at this size or larger.
3190 			 * Just increment address by the smallest
3191 			 * page size.
3192 			 */
3193 			if (hmeblkp->hblk_shw_bit) {
3194 				addr += MMU_PAGESIZE;
3195 			} else {
3196 				addr = sfmmu_hblk_unlock(hmeblkp, addr,
3197 				    endaddr);
3198 			}
3199 			SFMMU_HASH_UNLOCK(hmebp);
3200 			hashno = 1;
3201 			continue;
3202 		}
3203 		SFMMU_HASH_UNLOCK(hmebp);
3204 
3205 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3206 			/*
3207 			 * We have traversed the whole list and rehashed
3208 			 * if necessary without finding the address to unlock
3209 			 * which should never happen.
3210 			 */
3211 			panic("sfmmu_unlock: addr not found. "
3212 			    "addr %p hat %p", (void *)addr, (void *)sfmmup);
3213 		} else {
3214 			hashno++;
3215 		}
3216 	}
3217 
3218 	sfmmu_hblks_list_purge(&list);
3219 }
3220 
3221 /*
3222  * Function to unlock a range of addresses in an hmeblk.  It returns the
3223  * next address that needs to be unlocked.
3224  * Should be called with the hash lock held.
3225  */
3226 static caddr_t
3227 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
3228 {
3229 	struct sf_hment *sfhme;
3230 	tte_t tteold, ttemod;
3231 	int ttesz, ret;
3232 
3233 	ASSERT(in_hblk_range(hmeblkp, addr));
3234 	ASSERT(hmeblkp->hblk_shw_bit == 0);
3235 
3236 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
3237 	ttesz = get_hblk_ttesz(hmeblkp);
3238 
3239 	HBLKTOHME(sfhme, hmeblkp, addr);
3240 	while (addr < endaddr) {
3241 readtte:
3242 		sfmmu_copytte(&sfhme->hme_tte, &tteold);
3243 		if (TTE_IS_VALID(&tteold)) {
3244 
3245 			ttemod = tteold;
3246 
3247 			ret = sfmmu_modifytte_try(&tteold, &ttemod,
3248 			    &sfhme->hme_tte);
3249 
3250 			if (ret < 0)
3251 				goto readtte;
3252 
3253 			if (hmeblkp->hblk_lckcnt == 0)
3254 				panic("zero hblk lckcnt");
3255 
3256 			if (((uintptr_t)addr + TTEBYTES(ttesz)) >
3257 			    (uintptr_t)endaddr)
3258 				panic("can't unlock large tte");
3259 
3260 			ASSERT(hmeblkp->hblk_lckcnt > 0);
3261 			atomic_add_16(&hmeblkp->hblk_lckcnt, -1);
3262 			HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
3263 		} else {
3264 			panic("sfmmu_hblk_unlock: invalid tte");
3265 		}
3266 		addr += TTEBYTES(ttesz);
3267 		sfhme++;
3268 	}
3269 	return (addr);
3270 }
3271 
3272 /*
3273  * Physical Address Mapping Framework
3274  *
3275  * General rules:
3276  *
3277  * (1) Applies only to seg_kmem memory pages. To make things easier,
3278  *     seg_kpm addresses are also accepted by the routines, but nothing
3279  *     is done with them since by definition their PA mappings are static.
3280  * (2) hat_add_callback() may only be called while holding the page lock
3281  *     SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()).
3282  * (3) prehandler() and posthandler() may not call hat_add_callback() or
3283  *     hat_delete_callback(), nor should they allocate memory. Post quiesce
3284  *     callbacks may not sleep or acquire adaptive mutex locks.
3285  * (4) Either prehandler() or posthandler() (but not both) may be specified
3286  *     as being NULL.  Specifying an errhandler() is optional.
3287  *
3288  * Details of using the framework:
3289  *
3290  * registering a callback (hat_register_callback())
3291  *
3292  *	Pass prehandler, posthandler, errhandler addresses
3293  *	as described below. If capture_cpus argument is nonzero,
3294  *	suspend callback to the prehandler will occur with CPUs
3295  *	captured and executing xc_loop() and CPUs will remain
3296  *	captured until after the posthandler suspend callback
3297  *	occurs.
3298  *
3299  * adding a callback (hat_add_callback())
3300  *
3301  *      as_pagelock();
3302  *	hat_add_callback();
3303  *      save returned pfn in private data structures or program registers;
3304  *      as_pageunlock();
3305  *
3306  * prehandler()
3307  *
3308  *	Stop all accesses by physical address to this memory page.
3309  *	Called twice: the first, PRESUSPEND, is a context safe to acquire
3310  *	adaptive locks. The second, SUSPEND, is called at high PIL with
3311  *	CPUs captured so adaptive locks may NOT be acquired (and all spin
3312  *	locks must be XCALL_PIL or higher locks).
3313  *
3314  *	May return the following errors:
3315  *		EIO:	A fatal error has occurred. This will result in panic.
3316  *		EAGAIN:	The page cannot be suspended. This will fail the
3317  *			relocation.
3318  *		0:	Success.
3319  *
3320  * posthandler()
3321  *
3322  *      Save new pfn in private data structures or program registers;
3323  *	not allowed to fail (non-zero return values will result in panic).
3324  *
3325  * errhandler()
3326  *
3327  *	called when an error occurs related to the callback.  Currently
3328  *	the only such error is HAT_CB_ERR_LEAKED which indicates that
3329  *	a page is being freed, but there are still outstanding callback(s)
3330  *	registered on the page.
3331  *
3332  * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
3333  *
3334  *	stop using physical address
3335  *	hat_delete_callback();
3336  *
3337  */
3338 
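/*
 * [Editorial sketch -- not part of the original source.]  The protocol
 * documented above, as a hypothetical client might code it; MYCB_KEY,
 * the handlers, and the driver state "sp" are illustrative names, and
 * the hat_delete_callback() arguments are assumed from its description.
 */
#if 0
	/* once, at module initialization */
	mycb_id = hat_register_callback(MYCB_KEY, mycb_pre, mycb_post,
	    mycb_err, 0);

	/* per mapping: hold the page lock, as rule (2) requires */
	if (as_pagelock(as, &pplist, vaddr, len, S_WRITE) == 0) {
		(void) hat_add_callback(mycb_id, vaddr, len, HAC_SLEEP,
		    sp, &sp->mycb_pfn);
		as_pageunlock(as, pplist, vaddr, len, S_WRITE);
	}

	/* before freeing the memory: stop using the PA, then remove it */
	hat_delete_callback(vaddr, len, sp, 0);
#endif
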
3339 /*
3340  * Register a callback class.  Each subsystem should do this once and
3341  * cache the id_t returned for use in setting up and tearing down callbacks.
3342  *
3343  * There is no facility for removing callback IDs once they are created;
3344  * the "key" should be unique for each module, so in case a module is unloaded
3345  * and subsequently re-loaded, we can recycle the module's previous entry.
3346  */
3347 id_t
3348 hat_register_callback(int key,
3349 	int (*prehandler)(caddr_t, uint_t, uint_t, void *),
3350 	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
3351 	int (*errhandler)(caddr_t, uint_t, uint_t, void *),
3352 	int capture_cpus)
3353 {
3354 	id_t id;
3355 
3356 	/*
3357 	 * Search the table for a pre-existing callback associated with
3358 	 * the identifier "key".  If one exists, we re-use that entry in
3359 	 * the table for this instance, otherwise we assign the next
3360 	 * available table slot.
3361 	 */
3362 	for (id = 0; id < sfmmu_max_cb_id; id++) {
3363 		if (sfmmu_cb_table[id].key == key)
3364 			break;
3365 	}
3366 
3367 	if (id == sfmmu_max_cb_id) {
3368 		id = sfmmu_cb_nextid++;
3369 		if (id >= sfmmu_max_cb_id)
3370 			panic("hat_register_callback: out of callback IDs");
3371 	}
3372 
3373 	ASSERT(prehandler != NULL || posthandler != NULL);
3374 
3375 	sfmmu_cb_table[id].key = key;
3376 	sfmmu_cb_table[id].prehandler = prehandler;
3377 	sfmmu_cb_table[id].posthandler = posthandler;
3378 	sfmmu_cb_table[id].errhandler = errhandler;
3379 	sfmmu_cb_table[id].capture_cpus = capture_cpus;
3380 
3381 	return (id);
3382 }
3383 
3384 /*
3385  * Add relocation callbacks to the specified addr/len which will be called
3386  * when relocating the associated page.  See the description of pre and
3387  * posthandler above for more details.  IMPT: this operation is only valid
3388  * on seg_kmem pages!!
3389  *
3390  * If HAC_PAGELOCK is included in flags, the underlying memory page is
3391  * locked internally so the caller must be able to deal with the callback
3392  * running even before this function has returned.  If HAC_PAGELOCK is not
3393  * set, it is assumed that the underlying memory pages are locked.
3394  *
3395  * Since the caller must track the individual page boundaries anyway,
3396  * we only allow a callback to be added to a single page (large
3397  * or small).  Thus [addr, addr + len) MUST be contained within a single
3398  * page.
3399  *
3400  * Registering multiple callbacks on the same [addr, addr+len) is supported,
3401  * in which case the corresponding callback will be called once with each
3402  * unique parameter specified. The number of subsequent deletes must match
3403  * since reference counts are held.  If a callback is desired for each
3404  * virtual object with the same parameter specified for multiple callbacks,
3405  * a different virtual address should be specified at the time of
3406  * callback registration.
3407  *
3408  * Returns the pfn of the underlying kernel page in *rpfn
3409  * on success, or PFN_INVALID on failure.
3410  *
3411  * Return values:
3412  *    0:      success
3413  *    ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
3414  *    EINVAL: callback ID is not valid
3415  *    ENXIO:  ["vaddr", "vaddr" + len) is not mapped in the kernel's address
3416  *            space, or crosses a page boundary
3417  */
3418 int
3419 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
3420 	void *pvt, pfn_t *rpfn)
3421 {
3422 	struct 		hmehash_bucket *hmebp;
3423 	hmeblk_tag 	hblktag;
3424 	struct hme_blk	*hmeblkp;
3425 	int 		hmeshift, hashno;
3426 	caddr_t 	saddr, eaddr, baseaddr;
3427 	struct pa_hment *pahmep, *tpahmep;
3428 	struct sf_hment *sfhmep, *osfhmep, *tsfhmep;
3429 	kmutex_t	*pml;
3430 	tte_t   	tte;
3431 	page_t		*pp, *rpp;
3432 	pfn_t		pfn;
3433 	int		kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
3434 	int		locked = 0;
3435 
3436 	/*
3437 	 * For KPM mappings, just return the physical address since we
3438 	 * don't need to register any callbacks.
3439 	 */
3440 	if (IS_KPM_ADDR(vaddr)) {
3441 		uint64_t paddr;
3442 		SFMMU_KPM_VTOP(vaddr, paddr);
3443 		*rpfn = btop(paddr);
3444 		return (0);
3445 	}
3446 
3447 	if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
3448 		*rpfn = PFN_INVALID;
3449 		return (EINVAL);
3450 	}
3451 
3452 	if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
3453 		*rpfn = PFN_INVALID;
3454 		return (ENOMEM);
3455 	}
3456 
3457 	sfhmep = &pahmep->sfment;
3458 
3459 	saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
3460 	eaddr = saddr + len;
3461 
3462 rehash:
3463 	/* Find the mapping(s) for this page */
3464 	for (hashno = TTE64K, hmeblkp = NULL;
3465 	    hmeblkp == NULL && hashno <= mmu_hashcnt;
3466 	    hashno++) {
3467 		hmeshift = HME_HASH_SHIFT(hashno);
3468 		hblktag.htag_id = ksfmmup;
3469 		hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
3470 		hblktag.htag_rehash = hashno;
3471 		hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
3472 
3473 		SFMMU_HASH_LOCK(hmebp);
3474 
3475 		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3476 
3477 		if (hmeblkp == NULL)
3478 			SFMMU_HASH_UNLOCK(hmebp);
3479 	}
3480 
3481 	if (hmeblkp == NULL) {
3482 		kmem_cache_free(pa_hment_cache, pahmep);
3483 		*rpfn = PFN_INVALID;
3484 		return (ENXIO);
3485 	}
3486 
3487 	/*
3488 	 * Make sure the boundaries for the callback fall within this
3489 	 * single mapping.
3490 	 */
3491 	baseaddr = (caddr_t)get_hblk_base(hmeblkp);
3492 	ASSERT(saddr >= baseaddr);
3493 	if (eaddr > (caddr_t)get_hblk_endaddr(hmeblkp)) {
3494 		SFMMU_HASH_UNLOCK(hmebp);
3495 		kmem_cache_free(pa_hment_cache, pahmep);
3496 		*rpfn = PFN_INVALID;
3497 		return (ENXIO);
3498 	}
3499 
3500 	HBLKTOHME(osfhmep, hmeblkp, saddr);
3501 	sfmmu_copytte(&osfhmep->hme_tte, &tte);
3502 
3503 	ASSERT(TTE_IS_VALID(&tte));
3504 	pfn = sfmmu_ttetopfn(&tte, vaddr);
3505 
3506 	/*
3507 	 * The pfn may not have a page_t underneath in which case we
3508 	 * just return it. This can happen if we are doing I/O to a
3509 	 * static portion of the kernel's address space, for instance.
3510 	 */
3511 	pp = osfhmep->hme_page;
3512 	if (pp == NULL) {
3513 		SFMMU_HASH_UNLOCK(hmebp);
3514 		kmem_cache_free(pa_hment_cache, pahmep);
3515 		*rpfn = pfn;
3516 		return (0);
3517 	}
3518 
3519 	pml = sfmmu_mlist_enter(pp);
3520 
3521 	if ((flags & HAC_PAGELOCK) && !locked) {
3522 		if (!page_trylock(pp, SE_SHARED)) {
3523 			/*
3524 			 * Somebody is holding SE_EXCL lock.  Drop all
3525 			 * our locks, lookup the page in &kvp, and
3526 			 * retry.
3527 			 */
3528 			sfmmu_mlist_exit(pml);
3529 			SFMMU_HASH_UNLOCK(hmebp);
3530 			pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
3531 			ASSERT(pp != NULL);
3532 			rpp = PP_PAGEROOT(pp);
3533 			if (rpp != pp) {
3534 				page_unlock(pp);
3535 				(void) page_lock(rpp, SE_SHARED, NULL,
3536 				    P_NO_RECLAIM);
3537 			}
3538 			locked = 1;
3539 			goto rehash;
3540 		}
3541 		locked = 1;
3542 	}
3543 
3544 	if (!PAGE_LOCKED(pp) && !panicstr)
3545 		panic("hat_add_callback: page 0x%p not locked", pp);
3546 
3547 	if (osfhmep->hme_page != pp || pp->p_vnode != &kvp ||
3548 	    pp->p_offset < (u_offset_t)baseaddr ||
3549 	    pp->p_offset > (u_offset_t)eaddr) {
3550 		/*
3551 		 * The page moved before we got our hands on it.  Drop
3552 		 * all the locks and try again.
3553 		 */
3554 		ASSERT((flags & HAC_PAGELOCK) != 0);
3555 		sfmmu_mlist_exit(pml);
3556 		SFMMU_HASH_UNLOCK(hmebp);
3557 		page_unlock(pp);
3558 		locked = 0;
3559 		goto rehash;
3560 	}
3561 
3562 	ASSERT(osfhmep->hme_page == pp);
3563 
3564 	for (tsfhmep = pp->p_mapping; tsfhmep != NULL;
3565 	    tsfhmep = tsfhmep->hme_next) {
3566 
3567 		/*
3568 		 * skip va to pa mappings
3569 		 */
3570 		if (!IS_PAHME(tsfhmep))
3571 			continue;
3572 
3573 		tpahmep = tsfhmep->hme_data;
3574 		ASSERT(tpahmep != NULL);
3575 
3576 		/*
3577 		 * See if the pahment already exists.
3578 		 */
3579 		if ((tpahmep->pvt == pvt) &&
3580 		    (tpahmep->addr == vaddr) &&
3581 		    (tpahmep->len == len)) {
3582 			ASSERT(tpahmep->cb_id == callback_id);
3583 			tpahmep->refcnt++;
3584 			pp->p_share++;
3585 
3586 			sfmmu_mlist_exit(pml);
3587 			SFMMU_HASH_UNLOCK(hmebp);
3588 
3589 			if (locked)
3590 				page_unlock(pp);
3591 
3592 			kmem_cache_free(pa_hment_cache, pahmep);
3593 
3594 			*rpfn = pfn;
3595 			return (0);
3596 		}
3597 	}
3598 
3599 	/*
3600 	 * setup this shiny new pa_hment ..
3601 	 */
3602 	pp->p_share++;
3603 	pahmep->cb_id = callback_id;
3604 	pahmep->addr = vaddr;
3605 	pahmep->len = len;
3606 	pahmep->refcnt = 1;
3607 	pahmep->flags = 0;
3608 	pahmep->pvt = pvt;
3609 
3610 	/*
3611 	 * .. and also set up the sf_hment and link to p_mapping list.
3612 	 */
3613 	sfhmep->hme_tte.ll = 0;
3614 	sfhmep->hme_data = pahmep;
3615 	sfhmep->hme_prev = osfhmep;
3616 	sfhmep->hme_next = osfhmep->hme_next;
3617 
3618 	if (osfhmep->hme_next)
3619 		osfhmep->hme_next->hme_prev = sfhmep;
3620 
3621 	osfhmep->hme_next = sfhmep;
3622 
3623 	sfmmu_mlist_exit(pml);
3624 	SFMMU_HASH_UNLOCK(hmebp);
3625 
3626 	*rpfn = pfn;
3627 	if (locked)
3628 		page_unlock(pp);
3629 
3630 	return (0);
3631 }
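
/*
 * Hedged sketch of error handling for hat_add_callback(); "cb_id" and
 * "pvt" are assumed to come from the caller, and the cases follow the
 * return values documented above:
 *
 *	pfn_t	pfn;
 *
 *	switch (hat_add_callback(cb_id, vaddr, len, HAC_NOSLEEP,
 *	    pvt, &pfn)) {
 *	case 0:
 *		pfn is valid; pair with hat_delete_callback() later.
 *	case ENOMEM:
 *		retry with HAC_SLEEP from a sleepable context.
 *	case EINVAL:
 *		cb_id did not come from hat_register_callback().
 *	case ENXIO:
 *		[vaddr, vaddr + len) is unmapped or crosses a page
 *		boundary; in all failure cases *rpfn is PFN_INVALID.
 *	}
 */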
3632 
3633 /*
3634  * Remove the relocation callbacks from the specified addr/len.
3635  */
3636 void
3637 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags)
3638 {
3639 	struct		hmehash_bucket *hmebp;
3640 	hmeblk_tag	hblktag;
3641 	struct hme_blk	*hmeblkp;
3642 	int		hmeshift, hashno;
3643 	caddr_t		saddr, eaddr, baseaddr;
3644 	struct pa_hment	*pahmep;
3645 	struct sf_hment	*sfhmep, *osfhmep;
3646 	kmutex_t	*pml;
3647 	tte_t		tte;
3648 	page_t		*pp, *rpp;
3649 	int		locked = 0;
3650 
3651 	if (IS_KPM_ADDR(vaddr))
3652 		return;
3653 
3654 	saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
3655 	eaddr = saddr + len;
3656 
3657 rehash:
3658 	/* Find the mapping(s) for this page */
3659 	for (hashno = TTE64K, hmeblkp = NULL;
3660 	    hmeblkp == NULL && hashno <= mmu_hashcnt;
3661 	    hashno++) {
3662 		hmeshift = HME_HASH_SHIFT(hashno);
3663 		hblktag.htag_id = ksfmmup;
3664 		hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
3665 		hblktag.htag_rehash = hashno;
3666 		hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
3667 
3668 		SFMMU_HASH_LOCK(hmebp);
3669 
3670 		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3671 
3672 		if (hmeblkp == NULL)
3673 			SFMMU_HASH_UNLOCK(hmebp);
3674 	}
3675 
3676 	if (hmeblkp == NULL) {
3677 		if (!panicstr) {
3678 			panic("hat_delete_callback: addr 0x%p not found",
3679 			    saddr);
3680 		}
3681 		return;
3682 	}
3683 
3684 	baseaddr = (caddr_t)get_hblk_base(hmeblkp);
3685 	HBLKTOHME(osfhmep, hmeblkp, saddr);
3686 
3687 	sfmmu_copytte(&osfhmep->hme_tte, &tte);
3688 	ASSERT(TTE_IS_VALID(&tte));
3689 
3690 	pp = osfhmep->hme_page;
3691 	if (pp == NULL) {
3692 		SFMMU_HASH_UNLOCK(hmebp);
3693 		return;
3694 	}
3695 
3696 	pml = sfmmu_mlist_enter(pp);
3697 
3698 	if ((flags & HAC_PAGELOCK) && !locked) {
3699 		if (!page_trylock(pp, SE_SHARED)) {
3700 			/*
3701 			 * Somebody is holding SE_EXCL lock.  Drop all
3702 			 * our locks, lookup the page in &kvp, and
3703 			 * retry.
3704 			 */
3705 			sfmmu_mlist_exit(pml);
3706 			SFMMU_HASH_UNLOCK(hmebp);
3707 			pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
3708 			ASSERT(pp != NULL);
3709 			rpp = PP_PAGEROOT(pp);
3710 			if (rpp != pp) {
3711 				page_unlock(pp);
3712 				(void) page_lock(rpp, SE_SHARED, NULL,
3713 				    P_NO_RECLAIM);
3714 			}
3715 			locked = 1;
3716 			goto rehash;
3717 		}
3718 		locked = 1;
3719 	}
3720 
3721 	ASSERT(PAGE_LOCKED(pp));
3722 
3723 	if (osfhmep->hme_page != pp || pp->p_vnode != &kvp ||
3724 	    pp->p_offset < (u_offset_t)baseaddr ||
3725 	    pp->p_offset > (u_offset_t)eaddr) {
3726 		/*
3727 		 * The page moved before we got our hands on it.  Drop
3728 		 * all the locks and try again.
3729 		 */
3730 		ASSERT((flags & HAC_PAGELOCK) != 0);
3731 		sfmmu_mlist_exit(pml);
3732 		SFMMU_HASH_UNLOCK(hmebp);
3733 		page_unlock(pp);
3734 		locked = 0;
3735 		goto rehash;
3736 	}
3737 
3738 	ASSERT(osfhmep->hme_page == pp);
3739 
3740 	for (sfhmep = pp->p_mapping; sfhmep != NULL;
3741 	    sfhmep = sfhmep->hme_next) {
3742 
3743 		/*
3744 		 * skip va<->pa mappings
3745 		 */
3746 		if (!IS_PAHME(sfhmep))
3747 			continue;
3748 
3749 		pahmep = sfhmep->hme_data;
3750 		ASSERT(pahmep != NULL);
3751 
3752 		/*
3753 		 * if pa_hment matches, remove it
3754 		 */
3755 		if ((pahmep->pvt == pvt) &&
3756 		    (pahmep->addr == vaddr) &&
3757 		    (pahmep->len == len)) {
3758 			break;
3759 		}
3760 	}
3761 
3762 	if (sfhmep == NULL) {
3763 		if (!panicstr) {
3764 			panic("hat_delete_callback: pa_hment not found, pp %p",
3765 			    (void *)pp);
3766 		}
3767 		return;
3768 	}
3769 
3770 	/*
3771 	 * Note: at this point a valid kernel mapping must still be
3772 	 * present on this page.
3773 	 */
3774 	pp->p_share--;
3775 	if (pp->p_share <= 0)
3776 		panic("hat_delete_callback: zero p_share");
3777 
3778 	if (--pahmep->refcnt == 0) {
3779 		if (pahmep->flags != 0)
3780 			panic("hat_delete_callback: pa_hment is busy");
3781 
3782 		/*
3783 		 * Remove sfhmep from the mapping list for the page.
3784 		 */
3785 		if (sfhmep->hme_prev) {
3786 			sfhmep->hme_prev->hme_next = sfhmep->hme_next;
3787 		} else {
3788 			pp->p_mapping = sfhmep->hme_next;
3789 		}
3790 
3791 		if (sfhmep->hme_next)
3792 			sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
3793 
3794 		sfmmu_mlist_exit(pml);
3795 		SFMMU_HASH_UNLOCK(hmebp);
3796 
3797 		if (locked)
3798 			page_unlock(pp);
3799 
3800 		kmem_cache_free(pa_hment_cache, pahmep);
3801 		return;
3802 	}
3803 
3804 	sfmmu_mlist_exit(pml);
3805 	SFMMU_HASH_UNLOCK(hmebp);
3806 	if (locked)
3807 		page_unlock(pp);
3808 }
3809 
3810 /*
3811  * hat_probe returns 1 if the translation for the address 'addr' is
3812  * loaded, zero otherwise.
3813  *
3814  * hat_probe should be used only for advisory purposes because it may
3815  * occasionally return the wrong value. The implementation must guarantee that
3816  * returning the wrong value is a very rare event. hat_probe is used
3817  * to implement optimizations in the segment drivers.
3818  *
3819  */
3820 int
3821 hat_probe(struct hat *sfmmup, caddr_t addr)
3822 {
3823 	pfn_t pfn;
3824 	tte_t tte;
3825 
3826 	ASSERT(sfmmup != NULL);
3827 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3828 
3829 	ASSERT((sfmmup == ksfmmup) ||
3830 		AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3831 
3832 	if (sfmmup == ksfmmup) {
3833 		while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
3834 		    == PFN_SUSPENDED) {
3835 			sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
3836 		}
3837 	} else {
3838 		pfn = sfmmu_uvatopfn(addr, sfmmup);
3839 	}
3840 
3841 	if (pfn != PFN_INVALID)
3842 		return (1);
3843 	else
3844 		return (0);
3845 }
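
/*
 * Minimal sketch of the advisory usage pattern (illustrative caller,
 * not a real one): since hat_probe() may rarely be wrong, its answer
 * may only gate an optimization, never correctness:
 *
 *	if (!hat_probe(as->a_hat, addr))
 *		preload the translation now; a fault would
 *		load it anyway:
 *		hat_memload(as->a_hat, addr, pp, prot, HAT_LOAD);
 */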
3846 
3847 ssize_t
3848 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
3849 {
3850 	tte_t tte;
3851 
3852 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3853 
3854 	sfmmu_gettte(sfmmup, addr, &tte);
3855 	if (TTE_IS_VALID(&tte)) {
3856 		return (TTEBYTES(TTE_CSZ(&tte)));
3857 	}
3858 	return (-1);
3859 }
3860 
3861 static void
3862 sfmmu_gettte(struct hat *sfmmup, caddr_t addr, tte_t *ttep)
3863 {
3864 	struct hmehash_bucket *hmebp;
3865 	hmeblk_tag hblktag;
3866 	int hmeshift, hashno = 1;
3867 	struct hme_blk *hmeblkp, *list = NULL;
3868 	struct sf_hment *sfhmep;
3869 
3870 	/* support for ISM */
3871 	ism_map_t	*ism_map;
3872 	ism_blk_t	*ism_blkp;
3873 	int		i;
3874 	sfmmu_t		*ism_hatid = NULL;
3875 	sfmmu_t		*locked_hatid = NULL;
3876 
3877 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
3878 
3879 	ism_blkp = sfmmup->sfmmu_iblk;
3880 	if (ism_blkp) {
3881 		sfmmu_ismhat_enter(sfmmup, 0);
3882 		locked_hatid = sfmmup;
3883 	}
3884 	while (ism_blkp && ism_hatid == NULL) {
3885 		ism_map = ism_blkp->iblk_maps;
3886 		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
3887 			if (addr >= ism_start(ism_map[i]) &&
3888 			    addr < ism_end(ism_map[i])) {
3889 				sfmmup = ism_hatid = ism_map[i].imap_ismhat;
3890 				addr = (caddr_t)(addr -
3891 					ism_start(ism_map[i]));
3892 				break;
3893 			}
3894 		}
3895 		ism_blkp = ism_blkp->iblk_next;
3896 	}
3897 	if (locked_hatid) {
3898 		sfmmu_ismhat_exit(locked_hatid, 0);
3899 	}
3900 
3901 	hblktag.htag_id = sfmmup;
3902 	ttep->ll = 0;
3903 
3904 	do {
3905 		hmeshift = HME_HASH_SHIFT(hashno);
3906 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3907 		hblktag.htag_rehash = hashno;
3908 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3909 
3910 		SFMMU_HASH_LOCK(hmebp);
3911 
3912 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3913 		if (hmeblkp != NULL) {
3914 			HBLKTOHME(sfhmep, hmeblkp, addr);
3915 			sfmmu_copytte(&sfhmep->hme_tte, ttep);
3916 			SFMMU_HASH_UNLOCK(hmebp);
3917 			break;
3918 		}
3919 		SFMMU_HASH_UNLOCK(hmebp);
3920 		hashno++;
3921 	} while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
3922 
3923 	sfmmu_hblks_list_purge(&list);
3924 }
3925 
3926 uint_t
3927 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
3928 {
3929 	tte_t tte;
3930 
3931 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3932 
3933 	sfmmu_gettte(sfmmup, addr, &tte);
3934 	if (TTE_IS_VALID(&tte)) {
3935 		*attr = sfmmu_ptov_attr(&tte);
3936 		return (0);
3937 	}
3938 	*attr = 0;
3939 	return ((uint_t)0xffffffff);
3940 }
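
/*
 * Hedged example of querying an existing mapping with the two calls
 * above (caller-supplied "hat" and "addr" assumed):
 *
 *	ssize_t sz = hat_getpagesize(hat, addr);	-1 if no valid tte
 *	uint_t attr;
 *
 *	if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE))
 *		the translation is valid and writable; sz gives its
 *		page size in bytes (e.g. MMU_PAGESIZE for an 8K tte).
 */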
3941 
3942 /*
3943  * Enables more attributes on the specified address range (i.e. logical OR)
3944  */
3945 void
3946 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
3947 {
3948 	if (hat->sfmmu_xhat_provider) {
3949 		XHAT_SETATTR(hat, addr, len, attr);
3950 		return;
3951 	} else {
3952 		/*
3953 		 * This must be a CPU HAT. If the address space has
3954 		 * XHATs attached, change attributes for all of them,
3955 		 * just in case
3956 		 */
3957 		ASSERT(hat->sfmmu_as != NULL);
3958 		if (hat->sfmmu_as->a_xhat != NULL)
3959 			xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
3960 	}
3961 
3962 	sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
3963 }
3964 
3965 /*
3966  * Assigns attributes to the specified address range.  All the attributes
3967  * are specified.
3968  */
3969 void
3970 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
3971 {
3972 	if (hat->sfmmu_xhat_provider) {
3973 		XHAT_CHGATTR(hat, addr, len, attr);
3974 		return;
3975 	} else {
3976 		/*
3977 		 * This must be a CPU HAT. If the address space has
3978 		 * XHATs attached, change attributes for all of them,
3979 		 * just in case
3980 		 */
3981 		ASSERT(hat->sfmmu_as != NULL);
3982 		if (hat->sfmmu_as->a_xhat != NULL)
3983 			xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
3984 	}
3985 
3986 	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
3987 }
3988 
3989 /*
3990  * Removes attributes on the specified address range (i.e. logical NAND)
3991  */
3992 void
3993 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
3994 {
3995 	if (hat->sfmmu_xhat_provider) {
3996 		XHAT_CLRATTR(hat, addr, len, attr);
3997 		return;
3998 	} else {
3999 		/*
4000 		 * This must be a CPU HAT. If the address space has
4001 		 * XHATs attached, change attributes for all of them,
4002 		 * just in case
4003 		 */
4004 		ASSERT(hat->sfmmu_as != NULL);
4005 		if (hat->sfmmu_as->a_xhat != NULL)
4006 			xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
4007 	}
4008 
4009 	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4010 }
4011 
4012 /*
4013  * Change attributes on an address range to that specified by attr and mode.
4014  */
4015 static void
4016 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4017 	int mode)
4018 {
4019 	struct hmehash_bucket *hmebp;
4020 	hmeblk_tag hblktag;
4021 	int hmeshift, hashno = 1;
4022 	struct hme_blk *hmeblkp, *list = NULL;
4023 	caddr_t endaddr;
4024 	cpuset_t cpuset;
4025 	demap_range_t dmr;
4026 
4027 	CPUSET_ZERO(cpuset);
4028 
4029 	ASSERT((sfmmup == ksfmmup) ||
4030 		AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4031 	ASSERT((len & MMU_PAGEOFFSET) == 0);
4032 	ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4033 
4034 	if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4035 	    ((addr + len) > (caddr_t)USERLIMIT)) {
4036 		panic("user addr %p in kernel space",
4037 		    (void *)addr);
4038 	}
4039 
4040 	endaddr = addr + len;
4041 	hblktag.htag_id = sfmmup;
4042 	DEMAP_RANGE_INIT(sfmmup, &dmr);
4043 
4044 	while (addr < endaddr) {
4045 		hmeshift = HME_HASH_SHIFT(hashno);
4046 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4047 		hblktag.htag_rehash = hashno;
4048 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4049 
4050 		SFMMU_HASH_LOCK(hmebp);
4051 
4052 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4053 		if (hmeblkp != NULL) {
4054 			/*
4055 			 * We've encountered a shadow hmeblk so skip the range
4056 			 * of the next smaller mapping size.
4057 			 */
4058 			if (hmeblkp->hblk_shw_bit) {
4059 				ASSERT(sfmmup != ksfmmup);
4060 				ASSERT(hashno > 1);
4061 				addr = (caddr_t)P2END((uintptr_t)addr,
4062 					    TTEBYTES(hashno - 1));
4063 			} else {
4064 				addr = sfmmu_hblk_chgattr(sfmmup,
4065 				    hmeblkp, addr, endaddr, &dmr, attr, mode);
4066 			}
4067 			SFMMU_HASH_UNLOCK(hmebp);
4068 			hashno = 1;
4069 			continue;
4070 		}
4071 		SFMMU_HASH_UNLOCK(hmebp);
4072 
4073 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4074 			/*
4075 			 * We have traversed the whole list and rehashed
4076 			 * if necessary without finding the address to chgattr.
4077 			 * This is ok, so we increment the address by the
4078 			 * smallest hmeblk range for kernel mappings (and for
4079 			 * user mappings with no large pages), or by the
4080 			 * largest hmeblk range, to account for shadow hmeblks,
4081 			 * for user mappings with large pages, and continue.
4082 			 */
4083 			if (sfmmup == ksfmmup)
4084 				addr = (caddr_t)P2END((uintptr_t)addr,
4085 					    TTEBYTES(1));
4086 			else
4087 				addr = (caddr_t)P2END((uintptr_t)addr,
4088 					    TTEBYTES(hashno));
4089 			hashno = 1;
4090 		} else {
4091 			hashno++;
4092 		}
4093 	}
4094 
4095 	sfmmu_hblks_list_purge(&list);
4096 	DEMAP_RANGE_FLUSH(&dmr);
4097 	cpuset = sfmmup->sfmmu_cpusran;
4098 	xt_sync(cpuset);
4099 }
4100 
4101 /*
4102  * This function chgattrs a range of addresses in an hmeblk.  It returns the
4103  * next address that needs to be chgattr.
4104  * It should be called with the hash lock held.
4105  * XXX It should be possible to optimize chgattr by not flushing every time but
4106  * on the other hand:
4107  * 1. do one flush crosscall.
4108  * 2. only flush if we are increasing permissions (make sure this will work)
4109  */
4110 static caddr_t
4111 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4112 	caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4113 {
4114 	tte_t tte, tteattr, tteflags, ttemod;
4115 	struct sf_hment *sfhmep;
4116 	int ttesz;
4117 	struct page *pp = NULL;
4118 	kmutex_t *pml, *pmtx;
4119 	int ret;
4120 	int use_demap_range;
4121 #if defined(SF_ERRATA_57)
4122 	int check_exec;
4123 #endif
4124 
4125 	ASSERT(in_hblk_range(hmeblkp, addr));
4126 	ASSERT(hmeblkp->hblk_shw_bit == 0);
4127 
4128 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4129 	ttesz = get_hblk_ttesz(hmeblkp);
4130 
4131 	/*
4132 	 * Flush the current demap region if addresses have been
4133 	 * skipped or the page size doesn't match.
4134 	 */
4135 	use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
4136 	if (use_demap_range) {
4137 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4138 	} else {
4139 		DEMAP_RANGE_FLUSH(dmrp);
4140 	}
4141 
4142 	tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
4143 #if defined(SF_ERRATA_57)
4144 	check_exec = (sfmmup != ksfmmup) &&
4145 	    AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4146 	    TTE_IS_EXECUTABLE(&tteattr);
4147 #endif
4148 	HBLKTOHME(sfhmep, hmeblkp, addr);
4149 	while (addr < endaddr) {
4150 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
4151 		if (TTE_IS_VALID(&tte)) {
4152 			if ((tte.ll & tteflags.ll) == tteattr.ll) {
4153 				/*
4154 				 * if the new attr is the same as old
4155 				 * continue
4156 				 */
4157 				goto next_addr;
4158 			}
4159 			if (!TTE_IS_WRITABLE(&tteattr)) {
4160 				/*
4161 				 * make sure we clear hw modify bit if we
4162 				 * removing write protections
4163 				 */
4164 				tteflags.tte_intlo |= TTE_HWWR_INT;
4165 			}
4166 
4167 			pml = NULL;
4168 			pp = sfhmep->hme_page;
4169 			if (pp) {
4170 				pml = sfmmu_mlist_enter(pp);
4171 			}
4172 
4173 			if (pp != sfhmep->hme_page) {
4174 				/*
4175 				 * tte must have been unloaded.
4176 				 */
4177 				ASSERT(pml);
4178 				sfmmu_mlist_exit(pml);
4179 				continue;
4180 			}
4181 
4182 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
4183 
4184 			ttemod = tte;
4185 			ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
4186 			ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
4187 
4188 #if defined(SF_ERRATA_57)
4189 			if (check_exec && addr < errata57_limit)
4190 				ttemod.tte_exec_perm = 0;
4191 #endif
4192 			ret = sfmmu_modifytte_try(&tte, &ttemod,
4193 			    &sfhmep->hme_tte);
4194 
4195 			if (ret < 0) {
4196 				/* tte changed underneath us */
4197 				if (pml) {
4198 					sfmmu_mlist_exit(pml);
4199 				}
4200 				continue;
4201 			}
4202 
4203 			if (tteflags.tte_intlo & TTE_HWWR_INT) {
4204 				/*
4205 				 * need to sync if we are clearing modify bit.
4206 				 */
4207 				sfmmu_ttesync(sfmmup, addr, &tte, pp);
4208 			}
4209 
4210 			if (pp && PP_ISRO(pp)) {
4211 				if (tteattr.tte_intlo & TTE_WRPRM_INT) {
4212 					pmtx = sfmmu_page_enter(pp);
4213 					PP_CLRRO(pp);
4214 					sfmmu_page_exit(pmtx);
4215 				}
4216 			}
4217 
4218 			if (ret > 0 && use_demap_range) {
4219 				DEMAP_RANGE_MARKPG(dmrp, addr);
4220 			} else if (ret > 0) {
4221 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
4222 			}
4223 
4224 			if (pml) {
4225 				sfmmu_mlist_exit(pml);
4226 			}
4227 		}
4228 next_addr:
4229 		addr += TTEBYTES(ttesz);
4230 		sfhmep++;
4231 		DEMAP_RANGE_NEXTPG(dmrp);
4232 	}
4233 	return (addr);
4234 }
4235 
4236 /*
4237  * This routine converts virtual attributes to physical ones.  It will
4238  * update the tteflags field with the tte mask corresponding to the attributes
4239  * affected and it returns the new attributes.  It will also clear the modify
4240  * bit if we are taking away write permission.  This is necessary since the
4241  * modify bit is the hardware permission bit and we need to clear it in order
4242  * to detect write faults.
4243  */
4244 static uint64_t
4245 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
4246 {
4247 	tte_t ttevalue;
4248 
4249 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
4250 
4251 	switch (mode) {
4252 	case SFMMU_CHGATTR:
4253 		/* all attributes specified */
4254 		ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
4255 		ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
4256 		ttemaskp->tte_inthi = TTEINTHI_ATTR;
4257 		ttemaskp->tte_intlo = TTEINTLO_ATTR;
4258 		break;
4259 	case SFMMU_SETATTR:
4260 		ASSERT(!(attr & ~HAT_PROT_MASK));
4261 		ttemaskp->ll = 0;
4262 		ttevalue.ll = 0;
4263 		/*
4264 		 * a valid tte implies exec and read for sfmmu
4265 		 * so no need to do anything about them.
4266 		 * since privileged access implies user access,
4267 		 * PROT_USER doesn't make sense either.
4268 		 */
4269 		if (attr & PROT_WRITE) {
4270 			ttemaskp->tte_intlo |= TTE_WRPRM_INT;
4271 			ttevalue.tte_intlo |= TTE_WRPRM_INT;
4272 		}
4273 		break;
4274 	case SFMMU_CLRATTR:
4275 		/* attributes will be nand with current ones */
4276 		if (attr & ~(PROT_WRITE | PROT_USER)) {
4277 			panic("sfmmu: attr %x not supported", attr);
4278 		}
4279 		ttemaskp->ll = 0;
4280 		ttevalue.ll = 0;
4281 		if (attr & PROT_WRITE) {
4282 			/* clear both writable and modify bit */
4283 			ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
4284 		}
4285 		if (attr & PROT_USER) {
4286 			ttemaskp->tte_intlo |= TTE_PRIV_INT;
4287 			ttevalue.tte_intlo |= TTE_PRIV_INT;
4288 		}
4289 		break;
4290 	default:
4291 		panic("sfmmu_vtop_attr: bad mode %x", mode);
4292 	}
4293 	ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
4294 	return (ttevalue.ll);
4295 }
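
/*
 * Worked example of the conversion above: clearing write permission
 * via sfmmu_vtop_attr(PROT_WRITE, SFMMU_CLRATTR, &mask) returns a tte
 * value of 0 with mask.tte_intlo covering TTE_WRPRM_INT | TTE_HWWR_INT,
 * so applying (tte & ~mask) | value drops both the writable bit and
 * the hardware modify bit; clearing the modify bit is what re-arms
 * write-fault detection.
 */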
4296 
4297 static uint_t
4298 sfmmu_ptov_attr(tte_t *ttep)
4299 {
4300 	uint_t attr;
4301 
4302 	ASSERT(TTE_IS_VALID(ttep));
4303 
4304 	attr = PROT_READ;
4305 
4306 	if (TTE_IS_WRITABLE(ttep)) {
4307 		attr |= PROT_WRITE;
4308 	}
4309 	if (TTE_IS_EXECUTABLE(ttep)) {
4310 		attr |= PROT_EXEC;
4311 	}
4312 	if (!TTE_IS_PRIVILEGED(ttep)) {
4313 		attr |= PROT_USER;
4314 	}
4315 	if (TTE_IS_NFO(ttep)) {
4316 		attr |= HAT_NOFAULT;
4317 	}
4318 	if (TTE_IS_NOSYNC(ttep)) {
4319 		attr |= HAT_NOSYNC;
4320 	}
4321 	if (TTE_IS_SIDEFFECT(ttep)) {
4322 		attr |= SFMMU_SIDEFFECT;
4323 	}
4324 	if (!TTE_IS_VCACHEABLE(ttep)) {
4325 		attr |= SFMMU_UNCACHEVTTE;
4326 	}
4327 	if (!TTE_IS_PCACHEABLE(ttep)) {
4328 		attr |= SFMMU_UNCACHEPTTE;
4329 	}
4330 	return (attr);
4331 }
4332 
4333 /*
4334  * hat_chgprot is a deprecated hat call.  New segment drivers
4335  * should store all attributes and use hat_*attr calls.
4336  *
4337  * Change the protections in the virtual address range
4338  * given to the specified virtual protection.  If vprot is ~PROT_WRITE,
4339  * then remove write permission, leaving the other
4340  * permissions unchanged.  If vprot is ~PROT_USER, remove user permissions.
4341  *
4342  */
4343 void
4344 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
4345 {
4346 	struct hmehash_bucket *hmebp;
4347 	hmeblk_tag hblktag;
4348 	int hmeshift, hashno = 1;
4349 	struct hme_blk *hmeblkp, *list = NULL;
4350 	caddr_t endaddr;
4351 	cpuset_t cpuset;
4352 	demap_range_t dmr;
4353 
4354 	ASSERT((len & MMU_PAGEOFFSET) == 0);
4355 	ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4356 
4357 	if (sfmmup->sfmmu_xhat_provider) {
4358 		XHAT_CHGPROT(sfmmup, addr, len, vprot);
4359 		return;
4360 	} else {
4361 		/*
4362 		 * This must be a CPU HAT. If the address space has
4363 		 * XHATs attached, change attributes for all of them,
4364 		 * just in case
4365 		 */
4366 		ASSERT(sfmmup->sfmmu_as != NULL);
4367 		if (sfmmup->sfmmu_as->a_xhat != NULL)
4368 			xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
4369 	}
4370 
4371 	CPUSET_ZERO(cpuset);
4372 
4373 	if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
4374 	    ((addr + len) > (caddr_t)USERLIMIT)) {
4375 		panic("user addr %p vprot %x in kernel space",
4376 		    (void *)addr, vprot);
4377 	}
4378 	endaddr = addr + len;
4379 	hblktag.htag_id = sfmmup;
4380 	DEMAP_RANGE_INIT(sfmmup, &dmr);
4381 
4382 	while (addr < endaddr) {
4383 		hmeshift = HME_HASH_SHIFT(hashno);
4384 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4385 		hblktag.htag_rehash = hashno;
4386 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4387 
4388 		SFMMU_HASH_LOCK(hmebp);
4389 
4390 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4391 		if (hmeblkp != NULL) {
4392 			/*
4393 			 * We've encountered a shadow hmeblk so skip the range
4394 			 * of the next smaller mapping size.
4395 			 */
4396 			if (hmeblkp->hblk_shw_bit) {
4397 				ASSERT(sfmmup != ksfmmup);
4398 				ASSERT(hashno > 1);
4399 				addr = (caddr_t)P2END((uintptr_t)addr,
4400 					    TTEBYTES(hashno - 1));
4401 			} else {
4402 				addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
4403 					addr, endaddr, &dmr, vprot);
4404 			}
4405 			SFMMU_HASH_UNLOCK(hmebp);
4406 			hashno = 1;
4407 			continue;
4408 		}
4409 		SFMMU_HASH_UNLOCK(hmebp);
4410 
4411 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4412 			/*
4413 			 * We have traversed the whole list and rehashed
4414 			 * if necessary without finding the address to chgprot.
4415 			 * This is ok, so we increment the address by the
4416 			 * smallest hmeblk range for kernel mappings, or by the
4417 			 * largest hmeblk range, to account for shadow hmeblks,
4418 			 * for user mappings, and continue.
4419 			 */
4420 			if (sfmmup == ksfmmup)
4421 				addr = (caddr_t)P2END((uintptr_t)addr,
4422 					    TTEBYTES(1));
4423 			else
4424 				addr = (caddr_t)P2END((uintptr_t)addr,
4425 					    TTEBYTES(hashno));
4426 			hashno = 1;
4427 		} else {
4428 			hashno++;
4429 		}
4430 	}
4431 
4432 	sfmmu_hblks_list_purge(&list);
4433 	DEMAP_RANGE_FLUSH(&dmr);
4434 	cpuset = sfmmup->sfmmu_cpusran;
4435 	xt_sync(cpuset);
4436 }
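
/*
 * Hedged example of the deprecated special protections described
 * above (caller-supplied "as" and "addr" assumed): to strip write
 * access from one page while leaving the other permissions alone,
 *
 *	hat_chgprot(as->a_hat, addr, MMU_PAGESIZE, (uint_t)~PROT_WRITE);
 *
 * new code would use hat_clrattr(as->a_hat, addr, MMU_PAGESIZE,
 * PROT_WRITE) instead.
 */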
4437 
4438 /*
4439  * This function chgprots a range of addresses in an hmeblk.  It returns the
4440  * next address that needs to be chgprot.
4441  * It should be called with the hash lock held.
4442  * XXX It should be possible to optimize chgprot by not flushing every time but
4443  * on the other hand:
4444  * 1. do one flush crosscall.
4445  * 2. only flush if we are increasing permissions (make sure this will work)
4446  */
4447 static caddr_t
4448 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4449 	caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
4450 {
4451 	uint_t pprot;
4452 	tte_t tte, ttemod;
4453 	struct sf_hment *sfhmep;
4454 	uint_t tteflags;
4455 	int ttesz;
4456 	struct page *pp = NULL;
4457 	kmutex_t *pml, *pmtx;
4458 	int ret;
4459 	int use_demap_range;
4460 #if defined(SF_ERRATA_57)
4461 	int check_exec;
4462 #endif
4463 
4464 	ASSERT(in_hblk_range(hmeblkp, addr));
4465 	ASSERT(hmeblkp->hblk_shw_bit == 0);
4466 
4467 #ifdef DEBUG
4468 	if (get_hblk_ttesz(hmeblkp) != TTE8K &&
4469 	    (endaddr < get_hblk_endaddr(hmeblkp))) {
4470 		panic("sfmmu_hblk_chgprot: partial chgprot of large page");
4471 	}
4472 #endif /* DEBUG */
4473 
4474 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4475 	ttesz = get_hblk_ttesz(hmeblkp);
4476 
4477 	pprot = sfmmu_vtop_prot(vprot, &tteflags);
4478 #if defined(SF_ERRATA_57)
4479 	check_exec = (sfmmup != ksfmmup) &&
4480 	    AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4481 	    ((vprot & PROT_EXEC) == PROT_EXEC);
4482 #endif
4483 	HBLKTOHME(sfhmep, hmeblkp, addr);
4484 
4485 	/*
4486 	 * Flush the current demap region if addresses have been
4487 	 * skipped or the page size doesn't match.
4488 	 */
4489 	use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
4490 	if (use_demap_range) {
4491 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4492 	} else {
4493 		DEMAP_RANGE_FLUSH(dmrp);
4494 	}
4495 
4496 	while (addr < endaddr) {
4497 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
4498 		if (TTE_IS_VALID(&tte)) {
4499 			if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
4500 				/*
4501 				 * if the new protection is the same as old
4502 				 * continue
4503 				 */
4504 				goto next_addr;
4505 			}
4506 			pml = NULL;
4507 			pp = sfhmep->hme_page;
4508 			if (pp) {
4509 				pml = sfmmu_mlist_enter(pp);
4510 			}
4511 			if (pp != sfhmep->hme_page) {
4512 				/*
4513 				 * tte must have been unloaded
4514 				 * underneath us.  Recheck.
4515 				 */
4516 				ASSERT(pml);
4517 				sfmmu_mlist_exit(pml);
4518 				continue;
4519 			}
4520 
4521 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
4522 
4523 			ttemod = tte;
4524 			TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
4525 #if defined(SF_ERRATA_57)
4526 			if (check_exec && addr < errata57_limit)
4527 				ttemod.tte_exec_perm = 0;
4528 #endif
4529 			ret = sfmmu_modifytte_try(&tte, &ttemod,
4530 			    &sfhmep->hme_tte);
4531 
4532 			if (ret < 0) {
4533 				/* tte changed underneath us */
4534 				if (pml) {
4535 					sfmmu_mlist_exit(pml);
4536 				}
4537 				continue;
4538 			}
4539 
4540 			if (tteflags & TTE_HWWR_INT) {
4541 				/*
4542 				 * need to sync if we are clearing modify bit.
4543 				 */
4544 				sfmmu_ttesync(sfmmup, addr, &tte, pp);
4545 			}
4546 
4547 			if (pp && PP_ISRO(pp)) {
4548 				if (pprot & TTE_WRPRM_INT) {
4549 					pmtx = sfmmu_page_enter(pp);
4550 					PP_CLRRO(pp);
4551 					sfmmu_page_exit(pmtx);
4552 				}
4553 			}
4554 
4555 			if (ret > 0 && use_demap_range) {
4556 				DEMAP_RANGE_MARKPG(dmrp, addr);
4557 			} else if (ret > 0) {
4558 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
4559 			}
4560 
4561 			if (pml) {
4562 				sfmmu_mlist_exit(pml);
4563 			}
4564 		}
4565 next_addr:
4566 		addr += TTEBYTES(ttesz);
4567 		sfhmep++;
4568 		DEMAP_RANGE_NEXTPG(dmrp);
4569 	}
4570 	return (addr);
4571 }
4572 
4573 /*
4574  * This routine is deprecated and should only be used by hat_chgprot.
4575  * The correct routine is sfmmu_vtop_attr.
4576  * This routine converts virtual page protections to physical ones.  It will
4577  * update the tteflags field with the tte mask corresponding to the protections
4578  * affected and it returns the new protections.  It will also clear the modify
4579  * bit if we are taking away write permission.  This is necessary since the
4580  * modify bit is the hardware permission bit and we need to clear it in order
4581  * to detect write faults.
4582  * It accepts the following special protections:
4583  * ~PROT_WRITE = remove write permissions.
4584  * ~PROT_USER = remove user permissions.
4585  */
4586 static uint_t
4587 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
4588 {
4589 	if (vprot == (uint_t)~PROT_WRITE) {
4590 		*tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
4591 		return (0);		/* will cause wrprm to be cleared */
4592 	}
4593 	if (vprot == (uint_t)~PROT_USER) {
4594 		*tteflagsp = TTE_PRIV_INT;
4595 		return (0);		/* will cause privprm to be cleared */
4596 	}
4597 	if ((vprot == 0) || (vprot == PROT_USER) ||
4598 		((vprot & PROT_ALL) != vprot)) {
4599 		panic("sfmmu_vtop_prot -- bad prot %x", vprot);
4600 	}
4601 
4602 	switch (vprot) {
4603 	case (PROT_READ):
4604 	case (PROT_EXEC):
4605 	case (PROT_EXEC | PROT_READ):
4606 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
4607 		return (TTE_PRIV_INT); 		/* set prv and clr wrt */
4608 	case (PROT_WRITE):
4609 	case (PROT_WRITE | PROT_READ):
4610 	case (PROT_EXEC | PROT_WRITE):
4611 	case (PROT_EXEC | PROT_WRITE | PROT_READ):
4612 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
4613 		return (TTE_PRIV_INT | TTE_WRPRM_INT); 	/* set prv and wrt */
4614 	case (PROT_USER | PROT_READ):
4615 	case (PROT_USER | PROT_EXEC):
4616 	case (PROT_USER | PROT_EXEC | PROT_READ):
4617 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
4618 		return (0); 			/* clr prv and wrt */
4619 	case (PROT_USER | PROT_WRITE):
4620 	case (PROT_USER | PROT_WRITE | PROT_READ):
4621 	case (PROT_USER | PROT_EXEC | PROT_WRITE):
4622 	case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
4623 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
4624 		return (TTE_WRPRM_INT); 	/* clr prv and set wrt */
4625 	default:
4626 		panic("sfmmu_vtop_prot -- bad prot %x", vprot);
4627 	}
4628 	return (0);
4629 }
4630 
4631 /*
4632  * Alternate unload for very large virtual ranges. With a true 64 bit VA,
4633  * the normal algorithm would take too long for a very large VA range with
4634  * few real mappings. This routine just walks thru all HMEs in the global
4635  * hash table to find and remove mappings.
4636  */
4637 static void
4638 hat_unload_large_virtual(
4639 	struct hat		*sfmmup,
4640 	caddr_t			startaddr,
4641 	size_t			len,
4642 	uint_t			flags,
4643 	hat_callback_t		*callback)
4644 {
4645 	struct hmehash_bucket *hmebp;
4646 	struct hme_blk *hmeblkp;
4647 	struct hme_blk *pr_hblk = NULL;
4648 	struct hme_blk *nx_hblk;
4649 	struct hme_blk *list = NULL;
4650 	int i;
4651 	uint64_t hblkpa, prevpa, nx_pa;
4652 	hatlock_t	*hatlockp;
4653 	struct tsb_info	*tsbinfop;
4654 	struct ctx	*ctx;
4655 	caddr_t	endaddr = startaddr + len;
4656 	caddr_t	sa;
4657 	caddr_t	ea;
4658 	caddr_t	cb_sa[MAX_CB_ADDR];
4659 	caddr_t	cb_ea[MAX_CB_ADDR];
4660 	int	addr_cnt = 0;
4661 	int	a = 0;
4662 	int	cnum;
4663 
4664 	hatlockp = sfmmu_hat_enter(sfmmup);
4665 
4666 	/*
4667 	 * Since we know we're unmapping a huge range of addresses,
4668 	 * just throw away the context and switch to another.  It's
4669 	 * cheaper than trying to unmap all of the TTEs we may find
4670 	 * from the TLB individually, which is too expensive in terms
4671 	 * of xcalls.  Better yet, if we're exiting, no need to flush
4672 	 * anything at all!
4673 	 */
4674 	if (!sfmmup->sfmmu_free) {
4675 		ctx = sfmmutoctx(sfmmup);
4676 		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
4677 		cnum = sfmmutoctxnum(sfmmup);
4678 		if (cnum != INVALID_CONTEXT) {
4679 			sfmmu_tlb_swap_ctx(sfmmup, ctx);
4680 		}
4681 		rw_exit(&ctx->ctx_rwlock);
4682 
4683 		for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
4684 		    tsbinfop = tsbinfop->tsb_next) {
4685 			if (tsbinfop->tsb_flags & TSB_SWAPPED)
4686 				continue;
4687 			sfmmu_inv_tsb(tsbinfop->tsb_va,
4688 			    TSB_BYTES(tsbinfop->tsb_szc));
4689 		}
4690 	}
4691 
4692 	/*
4693 	 * Loop through all the hash buckets of HME blocks looking for matches.
4694 	 */
4695 	for (i = 0; i <= UHMEHASH_SZ; i++) {
4696 		hmebp = &uhme_hash[i];
4697 		SFMMU_HASH_LOCK(hmebp);
4698 		hmeblkp = hmebp->hmeblkp;
4699 		hblkpa = hmebp->hmeh_nextpa;
4700 		prevpa = 0;
4701 		pr_hblk = NULL;
4702 		while (hmeblkp) {
4703 			nx_hblk = hmeblkp->hblk_next;
4704 			nx_pa = hmeblkp->hblk_nextpa;
4705 
4706 			/*
4707 			 * skip if not this context, if it is a shadow block, or
4708 			 * if the mapping is not in the requested range
4709 			 */
4710 			if (hmeblkp->hblk_tag.htag_id != sfmmup ||
4711 			    hmeblkp->hblk_shw_bit ||
4712 			    (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
4713 			    (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
4714 				pr_hblk = hmeblkp;
4715 				prevpa = hblkpa;
4716 				goto next_block;
4717 			}
4718 
4719 			/*
4720 			 * unload if there are any current valid mappings
4721 			 */
4722 			if (hmeblkp->hblk_vcnt != 0 ||
4723 			    hmeblkp->hblk_hmecnt != 0)
4724 				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
4725 				    sa, ea, NULL, flags);
4726 
4727 			/*
4728 			 * on unmap we also release the HME block itself, once
4729 			 * all mappings are gone.
4730 			 */
4731 			if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
4732 			    !hmeblkp->hblk_vcnt &&
4733 			    !hmeblkp->hblk_hmecnt) {
4734 				ASSERT(!hmeblkp->hblk_lckcnt);
4735 				sfmmu_hblk_hash_rm(hmebp, hmeblkp,
4736 					prevpa, pr_hblk);
4737 				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
4738 			} else {
4739 				pr_hblk = hmeblkp;
4740 				prevpa = hblkpa;
4741 			}
4742 
4743 			if (callback == NULL)
4744 				goto next_block;
4745 
4746 			/*
4747 			 * HME blocks may span more than one page, but we may be
4748 			 * unmapping only one page, so check for a smaller range
4749 			 * for the callback
4750 			 */
4751 			if (sa < startaddr)
4752 				sa = startaddr;
4753 			if (--ea > endaddr)
4754 				ea = endaddr - 1;
4755 
4756 			cb_sa[addr_cnt] = sa;
4757 			cb_ea[addr_cnt] = ea;
4758 			if (++addr_cnt == MAX_CB_ADDR) {
4759 				for (a = 0; a < MAX_CB_ADDR; ++a) {
4760 					callback->hcb_start_addr = cb_sa[a];
4761 					callback->hcb_end_addr = cb_ea[a];
4762 					callback->hcb_function(callback);
4763 				}
4764 				addr_cnt = 0;
4765 			}
4766 
4767 next_block:
4768 			hmeblkp = nx_hblk;
4769 			hblkpa = nx_pa;
4770 		}
4771 		SFMMU_HASH_UNLOCK(hmebp);
4772 	}
4773 
4774 	sfmmu_hblks_list_purge(&list);
4775 
4776 	for (a = 0; a < addr_cnt; ++a) {
4777 		callback->hcb_start_addr = cb_sa[a];
4778 		callback->hcb_end_addr = cb_ea[a];
4779 		callback->hcb_function(callback);
4780 	}
4781 
4782 	sfmmu_hat_exit(hatlockp);
4783 
4784 	/*
4785 	 * Check TSB and TLB page sizes if the process isn't exiting.
4786 	 */
4787 	if (!sfmmup->sfmmu_free)
4788 		sfmmu_check_page_sizes(sfmmup, 0);
4789 }
4790 
4791 
4792 /*
4793  * Unload all the mappings in the range [addr..addr+len). addr and len must
4794  * be MMU_PAGESIZE aligned.
4795  */
4796 
4797 extern struct seg *segkmap;
4798 #define	ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
4799 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
4800 
4801 
4802 void
4803 hat_unload_callback(
4804 	struct hat *sfmmup,
4805 	caddr_t addr,
4806 	size_t len,
4807 	uint_t flags,
4808 	hat_callback_t *callback)
4809 {
4810 	struct hmehash_bucket *hmebp;
4811 	hmeblk_tag hblktag;
4812 	int hmeshift, hashno, iskernel;
4813 	struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
4814 	caddr_t endaddr;
4815 	cpuset_t cpuset;
4816 	uint64_t hblkpa, prevpa;
4817 	int addr_count = 0;
4818 	int a;
4819 	caddr_t cb_start_addr[MAX_CB_ADDR];
4820 	caddr_t cb_end_addr[MAX_CB_ADDR];
4821 	int issegkmap = ISSEGKMAP(sfmmup, addr);
4822 	demap_range_t dmr, *dmrp;
4823 
4824 	if (sfmmup->sfmmu_xhat_provider) {
4825 		XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
4826 		return;
4827 	} else {
4828 		/*
4829 		 * This must be a CPU HAT. If the address space has
4830 		 * XHATs attached, unload the mappings for all of them,
4831 		 * just in case
4832 		 */
4833 		ASSERT(sfmmup->sfmmu_as != NULL);
4834 		if (sfmmup->sfmmu_as->a_xhat != NULL)
4835 			xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
4836 			    len, flags, callback);
4837 	}
4838 
4839 	ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
4840 	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4841 
4842 	ASSERT(sfmmup != NULL);
4843 	ASSERT((len & MMU_PAGEOFFSET) == 0);
4844 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
4845 
4846 	/*
4847 	 * Probing through a large VA range (say 63 bits) will be slow, even
4848 	 * at 4 Meg steps between the probes. So, when the virtual address range
4849 	 * is very large, search the HME entries for what to unload.
4850 	 *
4851 	 *	len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
4852 	 *
4853 	 *	UHMEHASH_SZ is number of hash buckets to examine
4854 	 *
4855 	 */
4856 	if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
4857 		hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
4858 		return;
4859 	}
4860 
4861 	CPUSET_ZERO(cpuset);
4862 
4863 	/*
4864 	 * If the process is exiting, we can save a lot of fuss since
4865 	 * we'll flush the TLB when we free the ctx anyway.
4866 	 */
4867 	if (sfmmup->sfmmu_free)
4868 		dmrp = NULL;
4869 	else
4870 		dmrp = &dmr;
4871 
4872 	DEMAP_RANGE_INIT(sfmmup, dmrp);
4873 	endaddr = addr + len;
4874 	hblktag.htag_id = sfmmup;
4875 
4876 	/*
4877 	 * It is likely for the vm to call unload over a wide range of
4878 	 * addresses that are actually very sparsely populated by
4879 	 * translations.  In order to speed this up the sfmmu hat supports
4880 	 * the concept of shadow hmeblks. Dummy large page hmeblks that
4881 	 * correspond to actual small translations are allocated at tteload
4882 	 * time and are referred to as shadow hmeblks.  Now, during unload
4883 	 * time, we first check if we have a shadow hmeblk for that
4884 	 * translation.  The absence of one means the corresponding address
4885 	 * range is empty and can be skipped.
4886 	 *
4887 	 * The kernel is an exception to above statement and that is why
4888 	 * we don't use shadow hmeblks and hash starting from the smallest
4889 	 * page size.
4890 	 */
4891 	if (sfmmup == KHATID) {
4892 		iskernel = 1;
4893 		hashno = TTE64K;
4894 	} else {
4895 		iskernel = 0;
4896 		if (mmu_page_sizes == max_mmu_page_sizes) {
4897 			hashno = TTE256M;
4898 		} else {
4899 			hashno = TTE4M;
4900 		}
4901 	}
4902 	while (addr < endaddr) {
4903 		hmeshift = HME_HASH_SHIFT(hashno);
4904 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4905 		hblktag.htag_rehash = hashno;
4906 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4907 
4908 		SFMMU_HASH_LOCK(hmebp);
4909 
4910 		HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk,
4911 			prevpa, &list);
4912 		if (hmeblkp == NULL) {
4913 			/*
4914 			 * didn't find an hmeblk. skip the appropriate
4915 			 * address range.
4916 			 */
4917 			SFMMU_HASH_UNLOCK(hmebp);
4918 			if (iskernel) {
4919 				if (hashno < mmu_hashcnt) {
4920 					hashno++;
4921 					continue;
4922 				} else {
4923 					hashno = TTE64K;
4924 					addr = (caddr_t)roundup((uintptr_t)addr
4925 						+ 1, MMU_PAGESIZE64K);
4926 					continue;
4927 				}
4928 			}
4929 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
4930 				(1 << hmeshift));
4931 			if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
4932 				ASSERT(hashno == TTE64K);
4933 				continue;
4934 			}
4935 			if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
4936 				hashno = TTE512K;
4937 				continue;
4938 			}
4939 			if (mmu_page_sizes == max_mmu_page_sizes) {
4940 				if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
4941 					hashno = TTE4M;
4942 					continue;
4943 				}
4944 				if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
4945 					hashno = TTE32M;
4946 					continue;
4947 				}
4948 				hashno = TTE256M;
4949 				continue;
4950 			} else {
4951 				hashno = TTE4M;
4952 				continue;
4953 			}
4954 		}
4955 		ASSERT(hmeblkp);
4956 		if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
4957 			/*
4958 			 * If the valid count is zero we can skip the range
4959 			 * mapped by this hmeblk.
4960 			 * We free hblks in the case of HAT_UNLOAD_UNMAP.
4961 			 * HAT_UNLOAD_UNMAP is used by segment drivers as a hint
4962 			 * that the mapping resource won't be used any longer.
4963 			 * The best example of this is during exit().
4964 			 */
4965 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
4966 				get_hblk_span(hmeblkp));
4967 			if ((flags & HAT_UNLOAD_UNMAP) ||
4968 			    (iskernel && !issegkmap)) {
4969 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa,
4970 				    pr_hblk);
4971 				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
4972 			}
4973 			SFMMU_HASH_UNLOCK(hmebp);
4974 
4975 			if (iskernel) {
4976 				hashno = TTE64K;
4977 				continue;
4978 			}
4979 			if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
4980 				ASSERT(hashno == TTE64K);
4981 				continue;
4982 			}
4983 			if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
4984 				hashno = TTE512K;
4985 				continue;
4986 			}
4987 			if (mmu_page_sizes == max_mmu_page_sizes) {
4988 				if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
4989 					hashno = TTE4M;
4990 					continue;
4991 				}
4992 				if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
4993 					hashno = TTE32M;
4994 					continue;
4995 				}
4996 				hashno = TTE256M;
4997 				continue;
4998 			} else {
4999 				hashno = TTE4M;
5000 				continue;
5001 			}
5002 		}
5003 		if (hmeblkp->hblk_shw_bit) {
5004 			/*
5005 			 * If we encounter a shadow hmeblk we know there are
5006 			 * smaller sized hmeblks mapping the same address space.
5007 			 * Decrement the hash size and rehash.
5008 			 */
5009 			ASSERT(sfmmup != KHATID);
5010 			hashno--;
5011 			SFMMU_HASH_UNLOCK(hmebp);
5012 			continue;
5013 		}
5014 
5015 		/*
5016 		 * track callback address ranges.
5017 		 * only start a new range when it's not contiguous
5018 		 */
5019 		if (callback != NULL) {
5020 			if (addr_count > 0 &&
5021 			    addr == cb_end_addr[addr_count - 1])
5022 				--addr_count;
5023 			else
5024 				cb_start_addr[addr_count] = addr;
5025 		}
5026 
5027 		addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5028 				dmrp, flags);
5029 
5030 		if (callback != NULL)
5031 			cb_end_addr[addr_count++] = addr;
5032 
5033 		if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5034 		    !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5035 			sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa,
5036 			    pr_hblk);
5037 			sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
5038 		}
5039 		SFMMU_HASH_UNLOCK(hmebp);
5040 
5041 		/*
5042 		 * Notify our caller as to exactly which pages
5043 		 * have been unloaded. We do these in clumps,
5044 		 * to minimize the number of xt_sync()s that need to occur.
5045 		 */
5046 		if (callback != NULL && addr_count == MAX_CB_ADDR) {
5047 			DEMAP_RANGE_FLUSH(dmrp);
5048 			if (dmrp != NULL) {
5049 				cpuset = sfmmup->sfmmu_cpusran;
5050 				xt_sync(cpuset);
5051 			}
5052 
5053 			for (a = 0; a < MAX_CB_ADDR; ++a) {
5054 				callback->hcb_start_addr = cb_start_addr[a];
5055 				callback->hcb_end_addr = cb_end_addr[a];
5056 				callback->hcb_function(callback);
5057 			}
5058 			addr_count = 0;
5059 		}
5060 		if (iskernel) {
5061 			hashno = TTE64K;
5062 			continue;
5063 		}
5064 		if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5065 			ASSERT(hashno == TTE64K);
5066 			continue;
5067 		}
5068 		if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5069 			hashno = TTE512K;
5070 			continue;
5071 		}
5072 		if (mmu_page_sizes == max_mmu_page_sizes) {
5073 			if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5074 				hashno = TTE4M;
5075 				continue;
5076 			}
5077 			if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5078 				hashno = TTE32M;
5079 				continue;
5080 			}
5081 			hashno = TTE256M;
5082 		} else {
5083 			hashno = TTE4M;
5084 		}
5085 	}
5086 
5087 	sfmmu_hblks_list_purge(&list);
5088 	DEMAP_RANGE_FLUSH(dmrp);
5089 	if (dmrp != NULL) {
5090 		cpuset = sfmmup->sfmmu_cpusran;
5091 		xt_sync(cpuset);
5092 	}
5093 	if (callback && addr_count != 0) {
5094 		for (a = 0; a < addr_count; ++a) {
5095 			callback->hcb_start_addr = cb_start_addr[a];
5096 			callback->hcb_end_addr = cb_end_addr[a];
5097 			callback->hcb_function(callback);
5098 		}
5099 	}
5100 
5101 	/*
5102 	 * Check TSB and TLB page sizes if the process isn't exiting.
5103 	 */
5104 	if (!sfmmup->sfmmu_free)
5105 		sfmmu_check_page_sizes(sfmmup, 0);
5106 }
5107 
5108 /*
5109  * Unload all the mappings in the range [addr..addr+len). addr and len must
5110  * be MMU_PAGESIZE aligned.
5111  */
5112 void
5113 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5114 {
5115 	if (sfmmup->sfmmu_xhat_provider) {
5116 		XHAT_UNLOAD(sfmmup, addr, len, flags);
5117 		return;
5118 	}
5119 	hat_unload_callback(sfmmup, addr, len, flags, NULL);
5120 }
5121 
5122 
5123 /*
5124  * Find the largest mapping size for this page.
5125  */
5126 static int
5127 fnd_mapping_sz(page_t *pp)
5128 {
5129 	int sz;
5130 	int p_index;
5131 
5132 	p_index = PP_MAPINDEX(pp);
5133 
5134 	sz = 0;
5135 	p_index >>= 1;	/* don't care about 8K bit */
5136 	for (; p_index; p_index >>= 1) {
5137 		sz++;
5138 	}
5139 
5140 	return (sz);
5141 }
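
/*
 * Worked example, assuming bit i of the map index tracks page size
 * TTE8K + i: for p_index == 0x6 (64K and 512K mappings present), the
 * initial shift discards the 8K bit leaving 0x3, the loop counts two
 * shifts, and fnd_mapping_sz() returns 2, i.e. TTE512K, the largest
 * mapping size present on the page.
 */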
5142 
5143 /*
5144  * This function unloads a range of addresses for an hmeblk.
5145  * It returns the next address to be unloaded.
5146  * It should be called with the hash lock held.
5147  */
5148 static caddr_t
5149 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5150 	caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5151 {
5152 	tte_t	tte, ttemod;
5153 	struct	sf_hment *sfhmep;
5154 	int	ttesz;
5155 	long	ttecnt;
5156 	page_t *pp;
5157 	kmutex_t *pml;
5158 	int ret;
5159 	int use_demap_range;
5160 
5161 	ASSERT(in_hblk_range(hmeblkp, addr));
5162 	ASSERT(!hmeblkp->hblk_shw_bit);
5163 #ifdef DEBUG
5164 	if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5165 	    (endaddr < get_hblk_endaddr(hmeblkp))) {
5166 		panic("sfmmu_hblk_unload: partial unload of large page");
5167 	}
5168 #endif /* DEBUG */
5169 
5170 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5171 	ttesz = get_hblk_ttesz(hmeblkp);
5172 
5173 	use_demap_range = (do_virtual_coloring &&
5174 				TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
5175 	if (use_demap_range) {
5176 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5177 	} else {
5178 		DEMAP_RANGE_FLUSH(dmrp);
5179 	}
5180 	ttecnt = 0;
5181 	HBLKTOHME(sfhmep, hmeblkp, addr);
5182 
5183 	while (addr < endaddr) {
5184 		pml = NULL;
5185 again:
5186 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
5187 		if (TTE_IS_VALID(&tte)) {
5188 			pp = sfhmep->hme_page;
5189 			if (pp && pml == NULL) {
5190 				pml = sfmmu_mlist_enter(pp);
5191 			}
5192 
5193 			/*
5194 			 * Verify if hme still points to 'pp' now that
5195 			 * we have p_mapping lock.
5196 			 */
5197 			if (sfhmep->hme_page != pp) {
5198 				if (pp != NULL && sfhmep->hme_page != NULL) {
5199 					if (pml) {
5200 						sfmmu_mlist_exit(pml);
5201 					}
5202 					/* Re-start this iteration. */
5203 					continue;
5204 				}
5205 				ASSERT((pp != NULL) &&
5206 				    (sfhmep->hme_page == NULL));
5207 				goto tte_unloaded;
5208 			}
5209 
5210 			/*
5211 			 * This point on we have both HASH and p_mapping
5212 			 * lock.
5213 			 */
5214 			ASSERT(pp == sfhmep->hme_page);
5215 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5216 
5217 			/*
5218 			 * We need to loop on modify tte because it is
5219 			 * possible for pagesync to come along and
5220 			 * change the software bits beneath us.
5221 			 *
5222 			 * Page_unload can also invalidate the tte after
5223 			 * we read tte outside of p_mapping lock.
5224 			 */
5225 			ttemod = tte;
5226 
5227 			TTE_SET_INVALID(&ttemod);
5228 			ret = sfmmu_modifytte_try(&tte, &ttemod,
5229 			    &sfhmep->hme_tte);
5230 
5231 			if (ret <= 0) {
5232 				if (TTE_IS_VALID(&tte)) {
5233 					goto again;
5234 				} else {
5235 					/*
5236 					 * We read in a valid pte, but it
5237 					 * was unloaded by page_unload.
5238 					 * hme_page has become NULL and
5239 					 * we hold no p_mapping lock.
5240 					 */
5241 					ASSERT(pp == NULL && pml == NULL);
5242 					goto tte_unloaded;
5243 				}
5244 			}
5245 
5246 			if (!(flags & HAT_UNLOAD_NOSYNC)) {
5247 				sfmmu_ttesync(sfmmup, addr, &tte, pp);
5248 			}
5249 
5250 			/*
5251 			 * Ok- we invalidated the tte. Do the rest of the job.
5252 			 */
5253 			ttecnt++;
5254 
5255 			if (flags & HAT_UNLOAD_UNLOCK) {
5256 				ASSERT(hmeblkp->hblk_lckcnt > 0);
5257 				atomic_add_16(&hmeblkp->hblk_lckcnt, -1);
5258 				HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
5259 			}
5260 
5261 			/*
5262 			 * Normally we would need to flush the page
5263 			 * from the virtual cache at this point in
5264 			 * order to prevent a potential cache alias
5265 			 * inconsistency.
5266 			 * The particular scenario we need to worry
5267 			 * about is:
5268 			 * Given:  va1 and va2 are two virtual addresses
5269 			 * that alias and map the same physical
5270 			 * address.
5271 			 * 1.	mapping exists from va1 to pa and data
5272 			 * has been read into the cache.
5273 			 * 2.	unload va1.
5274 			 * 3.	load va2 and modify data using va2.
5275 			 * 4.	unload va2.
5276 			 * 5.	load va1 and reference data.  Unless we
5277 			 * flush the data cache when we unload we will
5278 			 * get stale data.
5279 			 * Fortunately, page coloring eliminates the
5280 			 * above scenario by remembering the color a
5281 			 * physical page was last or is currently
5282 			 * mapped to.  Now, we delay the flush until
5283 			 * the loading of translations.  Only when the
5284 			 * new translation is of a different color
5285 			 * are we forced to flush.
5286 			 */
5287 			if (use_demap_range) {
5288 				/*
5289 				 * Mark this page as needing a demap.
5290 				 */
5291 				DEMAP_RANGE_MARKPG(dmrp, addr);
5292 			} else {
5293 				if (do_virtual_coloring) {
5294 					sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
5295 					    sfmmup->sfmmu_free, 0);
5296 				} else {
5297 					pfn_t pfnum;
5298 
5299 					pfnum = TTE_TO_PFN(addr, &tte);
5300 					sfmmu_tlbcache_demap(addr, sfmmup,
5301 					    hmeblkp, pfnum, sfmmup->sfmmu_free,
5302 					    FLUSH_NECESSARY_CPUS,
5303 					    CACHE_FLUSH, 0);
5304 				}
5305 			}
5306 
5307 			if (pp) {
5308 				/*
5309 				 * Remove the hment from the mapping list
5310 				 */
5311 				ASSERT(hmeblkp->hblk_hmecnt > 0);
5312 
5313 				/*
5314 				 * Again, we cannot
5315 				 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
5316 				 */
5317 				HME_SUB(sfhmep, pp);
5318 				membar_stst();
5319 				atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
5320 			}
5321 
5322 			ASSERT(hmeblkp->hblk_vcnt > 0);
5323 			atomic_add_16(&hmeblkp->hblk_vcnt, -1);
5324 
5325 			ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
5326 			    !hmeblkp->hblk_lckcnt);
5327 
5328 			if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
5329 				if (PP_ISTNC(pp)) {
5330 					/*
5331 					 * If the page was temporarily
5332 					 * uncached, try to recache
5333 					 * it. Note that HME_SUB() was
5334 					 * called above so p_index and
5335 					 * the mlist have been updated.
5336 					 */
5337 					conv_tnc(pp, ttesz);
5338 				} else if (pp->p_mapping == NULL) {
5339 					ASSERT(kpm_enable);
5340 					/*
5341 					 * Page is marked to be in VAC conflict
5342 					 * to an existing kpm mapping and/or is
5343 					 * kpm mapped using only the regular
5344 					 * pagesize.
5345 					 */
5346 					sfmmu_kpm_hme_unload(pp);
5347 				}
5348 			}
5349 		} else if ((pp = sfhmep->hme_page) != NULL) {
5350 			/*
5351 			 * TTE is invalid but the hme
5352 			 * still exists.  Let pageunload
5353 			 * complete its job.
5354 			 */
5355 			ASSERT(pml == NULL);
5356 			pml = sfmmu_mlist_enter(pp);
5357 			if (sfhmep->hme_page != NULL) {
5358 				sfmmu_mlist_exit(pml);
5359 				pml = NULL;
5360 				goto again;
5361 			}
5362 			ASSERT(sfhmep->hme_page == NULL);
5363 		} else if (hmeblkp->hblk_hmecnt != 0) {
5364 			/*
5365 			 * pageunload may not have finished decrementing
5366 			 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
5367 			 * wait for pageunload to finish. Rely on pageunload
5368 			 * to decrement hblk_hmecnt after hblk_vcnt.
5369 			 */
5370 			pfn_t pfn = TTE_TO_TTEPFN(&tte);
5371 			ASSERT(pml == NULL);
5372 			if (pf_is_memory(pfn)) {
5373 				pp = page_numtopp_nolock(pfn);
5374 				if (pp != NULL) {
5375 					pml = sfmmu_mlist_enter(pp);
5376 					sfmmu_mlist_exit(pml);
5377 					pml = NULL;
5378 				}
5379 			}
5380 		}
5381 
5382 tte_unloaded:
5383 		/*
5384 		 * At this point, the tte we are looking at
5385 		 * should be unloaded, and hme has been unlinked
5386 		 * from page too. This is important because in
5387 		 * pageunload, it does ttesync() then HME_SUB.
5388 		 * We need to make sure HME_SUB has been completed
5389 		 * so we know ttesync() has been completed. Otherwise,
5390 		 * at exit time, after return from hat layer, VM will
5391 		 * release the as structure which hat_setstat() (called
5392 		 * by ttesync()) needs.
5393 		 */
5394 #ifdef DEBUG
5395 		{
5396 			tte_t	dtte;
5397 
5398 			ASSERT(sfhmep->hme_page == NULL);
5399 
5400 			sfmmu_copytte(&sfhmep->hme_tte, &dtte);
5401 			ASSERT(!TTE_IS_VALID(&dtte));
5402 		}
5403 #endif
5404 
5405 		if (pml) {
5406 			sfmmu_mlist_exit(pml);
5407 		}
5408 
5409 		addr += TTEBYTES(ttesz);
5410 		sfhmep++;
5411 		DEMAP_RANGE_NEXTPG(dmrp);
5412 	}
5413 	if (ttecnt > 0)
5414 		atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
5415 	return (addr);
5416 }
5417 
5418 /*
5419  * Synchronize all the mappings in the range [addr..addr+len).
5420  * Can be called with clearflag having two states:
5421  * HAT_SYNC_DONTZERO means just return the rm stats
5422  * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
5423  */
5424 void
5425 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
5426 {
5427 	struct hmehash_bucket *hmebp;
5428 	hmeblk_tag hblktag;
5429 	int hmeshift, hashno = 1;
5430 	struct hme_blk *hmeblkp, *list = NULL;
5431 	caddr_t endaddr;
5432 	cpuset_t cpuset;
5433 
5434 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
5435 	ASSERT((sfmmup == ksfmmup) ||
5436 		AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5437 	ASSERT((len & MMU_PAGEOFFSET) == 0);
5438 	ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
5439 		(clearflag == HAT_SYNC_ZERORM));
5440 
5441 	CPUSET_ZERO(cpuset);
5442 
5443 	endaddr = addr + len;
5444 	hblktag.htag_id = sfmmup;
5445 	/*
5446 	 * Spitfire supports 4 page sizes.
5447 	 * Most pages are expected to be of the smallest page
5448 	 * size (8K) and these will not need to be rehashed. 64K
5449 	 * pages also don't need to be rehashed because an hmeblk
5450 	 * spans 64K of address space. 512K pages might need 1 rehash
5451 	 * and 4M pages 2 rehashes.
5452 	 */
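	/*
	 * Worked example (illustrative): to sync a 512K mapping at addr,
	 * the hashno = 1 pass probes the hash using the 64K-aligned base
	 * page and misses, because the mapping lives in a larger hmeblk;
	 * the hashno = 2 pass probes with the 512K shift and finds it.
	 * A 4M mapping would need one further rehash at hashno = 3.
	 */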
5453 	while (addr < endaddr) {
5454 		hmeshift = HME_HASH_SHIFT(hashno);
5455 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5456 		hblktag.htag_rehash = hashno;
5457 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5458 
5459 		SFMMU_HASH_LOCK(hmebp);
5460 
5461 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5462 		if (hmeblkp != NULL) {
5463 			/*
5464 			 * We've encountered a shadow hmeblk so skip the range
5465 			 * of the next smaller mapping size.
5466 			 */
5467 			if (hmeblkp->hblk_shw_bit) {
5468 				ASSERT(sfmmup != ksfmmup);
5469 				ASSERT(hashno > 1);
5470 				addr = (caddr_t)P2END((uintptr_t)addr,
5471 					    TTEBYTES(hashno - 1));
5472 			} else {
5473 				addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
5474 				    addr, endaddr, clearflag);
5475 			}
5476 			SFMMU_HASH_UNLOCK(hmebp);
5477 			hashno = 1;
5478 			continue;
5479 		}
5480 		SFMMU_HASH_UNLOCK(hmebp);
5481 
5482 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5483 			/*
5484 			 * We have traversed the whole list and rehashed
5485 			 * if necessary without finding the address to sync.
5486 			 * This is ok, so we increment the address by the
5487 			 * smallest hmeblk range for kernel mappings, and by
5488 			 * the largest hmeblk range (to account for shadow
5489 			 * hmeblks) for user mappings, and continue.
5490 			 */
5491 			if (sfmmup == ksfmmup)
5492 				addr = (caddr_t)P2END((uintptr_t)addr,
5493 					    TTEBYTES(1));
5494 			else
5495 				addr = (caddr_t)P2END((uintptr_t)addr,
5496 					    TTEBYTES(hashno));
5497 			hashno = 1;
5498 		} else {
5499 			hashno++;
5500 		}
5501 	}
5502 	sfmmu_hblks_list_purge(&list);
5503 	cpuset = sfmmup->sfmmu_cpusran;
5504 	xt_sync(cpuset);
5505 }
5506 
5507 static caddr_t
5508 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5509 	caddr_t endaddr, int clearflag)
5510 {
5511 	tte_t	tte, ttemod;
5512 	struct sf_hment *sfhmep;
5513 	int ttesz;
5514 	struct page *pp;
5515 	kmutex_t *pml;
5516 	int ret;
5517 
5518 	ASSERT(hmeblkp->hblk_shw_bit == 0);
5519 
5520 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5521 
5522 	ttesz = get_hblk_ttesz(hmeblkp);
5523 	HBLKTOHME(sfhmep, hmeblkp, addr);
5524 
5525 	while (addr < endaddr) {
5526 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
5527 		if (TTE_IS_VALID(&tte)) {
5528 			pml = NULL;
5529 			pp = sfhmep->hme_page;
5530 			if (pp) {
5531 				pml = sfmmu_mlist_enter(pp);
5532 			}
5533 			if (pp != sfhmep->hme_page) {
5534 				/*
5535 				 * The tte must have been unloaded
5536 				 * underneath us; recheck.
5537 				 */
5538 				ASSERT(pml);
5539 				sfmmu_mlist_exit(pml);
5540 				continue;
5541 			}
5542 
5543 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5544 
5545 			if (clearflag == HAT_SYNC_ZERORM) {
5546 				ttemod = tte;
5547 				TTE_CLR_RM(&ttemod);
5548 				ret = sfmmu_modifytte_try(&tte, &ttemod,
5549 				    &sfhmep->hme_tte);
5550 				if (ret < 0) {
5551 					if (pml) {
5552 						sfmmu_mlist_exit(pml);
5553 					}
5554 					continue;
5555 				}
5556 
5557 				if (ret > 0) {
5558 					sfmmu_tlb_demap(addr, sfmmup,
5559 						hmeblkp, 0, 0);
5560 				}
5561 			}
5562 			sfmmu_ttesync(sfmmup, addr, &tte, pp);
5563 			if (pml) {
5564 				sfmmu_mlist_exit(pml);
5565 			}
5566 		}
5567 		addr += TTEBYTES(ttesz);
5568 		sfhmep++;
5569 	}
5570 	return (addr);
5571 }
5572 
5573 /*
5574  * This function will sync a tte to the page struct and it will
5575  * update the hat stats. Currently it allows us to pass a NULL pp
5576  * and we will simply update the stats.  We may want to change this
5577  * so we only keep stats for pages backed by pp's.
5578  */
5579 static void
5580 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
5581 {
5582 	uint_t rm = 0;
5583 	int   	sz;
5584 	pgcnt_t	npgs;
5585 
5586 	ASSERT(TTE_IS_VALID(ttep));
5587 
5588 	if (TTE_IS_NOSYNC(ttep)) {
5589 		return;
5590 	}
5591 
5592 	if (TTE_IS_REF(ttep))  {
5593 		rm = P_REF;
5594 	}
5595 	if (TTE_IS_MOD(ttep))  {
5596 		rm |= P_MOD;
5597 	}
5598 
5599 	if (rm == 0) {
5600 		return;
5601 	}
5602 
5603 	sz = TTE_CSZ(ttep);
5604 	if (sfmmup->sfmmu_rmstat) {
5605 		int i;
5606 		caddr_t	vaddr = addr;
5607 
5608 		for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
5609 			hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
5610 		}
5611 
5612 	}
5613 
5614 	/*
5615 	 * XXX I want to use cas to update nrm bits but they
5616 	 * currently belong in common/vm and not in hat where
5617 	 * they should be.
5618 	 * The nrm bits are protected by the same mutex as
5619 	 * the one that protects the page's mapping list.
5620 	 */
5621 	if (!pp)
5622 		return;
5623 	ASSERT(sfmmu_mlist_held(pp));
5624 	/*
5625 	 * If the tte is for a large page, we need to sync all the
5626 	 * pages covered by the tte.
5627 	 */
5628 	if (sz != TTE8K) {
5629 		ASSERT(pp->p_szc != 0);
5630 		pp = PP_GROUPLEADER(pp, sz);
5631 		ASSERT(sfmmu_mlist_held(pp));
5632 	}
5633 
5634 	/* Get number of pages from tte size. */
5635 	npgs = TTEPAGES(sz);
5636 
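	/*
	 * Illustrative numbers, assuming the usual 8K base page size:
	 * for a 512K tte, npgs = TTEPAGES(TTE512K) = 64, so the loop
	 * below visits the 64 constituent page_t's starting from the
	 * group leader, propagating the ref/mod bits to each of them.
	 */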
5637 	do {
5638 		ASSERT(pp);
5639 		ASSERT(sfmmu_mlist_held(pp));
5640 		if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
5641 		    ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
5642 			hat_page_setattr(pp, rm);
5643 
5644 		/*
5645 		 * Are we done? If not, we must have a large mapping.
5646 		 * For large mappings we need to sync the rest of the pages
5647 		 * covered by this tte; goto the next page.
5648 		 */
5649 	} while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
5650 }
5651 
5652 /*
5653  * Execute pre-callback handler of each pa_hment linked to pp
5654  *
5655  * Inputs:
5656  *   flag: either HAT_PRESUSPEND or HAT_SUSPEND.
5657  *   capture_cpus: pointer to return value (below)
5658  *
5659  * Returns:
5660  *   Propagates the subsystem callback return values back to the caller;
5661  *   returns 0 on success.  If capture_cpus is non-NULL, the value returned
5662  *   is zero if all of the pa_hments are of a type that do not require
5663  *   capturing CPUs prior to suspending the mapping, else it is 1.
5664  */
5665 static int
5666 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
5667 {
5668 	struct sf_hment	*sfhmep;
5669 	struct pa_hment *pahmep;
5670 	int (*f)(caddr_t, uint_t, uint_t, void *);
5671 	int		ret;
5672 	id_t		id;
5673 	int		locked = 0;
5674 	kmutex_t	*pml;
5675 
5676 	ASSERT(PAGE_EXCL(pp));
5677 	if (!sfmmu_mlist_held(pp)) {
5678 		pml = sfmmu_mlist_enter(pp);
5679 		locked = 1;
5680 	}
5681 
5682 	if (capture_cpus)
5683 		*capture_cpus = 0;
5684 
5685 top:
5686 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
5687 		/*
5688 		 * skip sf_hments corresponding to VA<->PA mappings;
5689 		 * for pa_hment's, hme_tte.ll is zero
5690 		 */
5691 		if (!IS_PAHME(sfhmep))
5692 			continue;
5693 
5694 		pahmep = sfhmep->hme_data;
5695 		ASSERT(pahmep != NULL);
5696 
5697 		/*
5698 		 * skip if pre-handler has been called earlier in this loop
5699 		 */
5700 		if (pahmep->flags & flag)
5701 			continue;
5702 
5703 		id = pahmep->cb_id;
5704 		ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
5705 		if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
5706 			*capture_cpus = 1;
5707 		if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
5708 			pahmep->flags |= flag;
5709 			continue;
5710 		}
5711 
5712 		/*
5713 		 * Drop the mapping list lock to avoid locking order issues.
5714 		 */
5715 		if (locked)
5716 			sfmmu_mlist_exit(pml);
5717 
5718 		ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
5719 		if (ret != 0)
5720 			return (ret);	/* caller must do the cleanup */
5721 
5722 		if (locked) {
5723 			pml = sfmmu_mlist_enter(pp);
5724 			pahmep->flags |= flag;
5725 			goto top;
5726 		}
5727 
5728 		pahmep->flags |= flag;
5729 	}
5730 
5731 	if (locked)
5732 		sfmmu_mlist_exit(pml);
5733 
5734 	return (0);
5735 }
5736 
5737 /*
5738  * Execute post-callback handler of each pa_hment linked to pp
5739  *
5740  * Same overall assumptions and restrictions apply as for
5741  * hat_pageprocess_precallbacks().
5742  */
5743 static void
5744 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
5745 {
5746 	pfn_t pgpfn = pp->p_pagenum;
5747 	pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
5748 	pfn_t newpfn;
5749 	struct sf_hment *sfhmep;
5750 	struct pa_hment *pahmep;
5751 	int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
5752 	id_t	id;
5753 	int	locked = 0;
5754 	kmutex_t *pml;
5755 
5756 	ASSERT(PAGE_EXCL(pp));
5757 	if (!sfmmu_mlist_held(pp)) {
5758 		pml = sfmmu_mlist_enter(pp);
5759 		locked = 1;
5760 	}
5761 
5762 top:
5763 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
5764 		/*
5765 		 * skip sf_hments corresponding to VA<->PA mappings;
5766 		 * for pa_hment's, hme_tte.ll is zero
5767 		 */
5768 		if (!IS_PAHME(sfhmep))
5769 			continue;
5770 
5771 		pahmep = sfhmep->hme_data;
5772 		ASSERT(pahmep != NULL);
5773 
5774 		if ((pahmep->flags & flag) == 0)
5775 			continue;
5776 
5777 		pahmep->flags &= ~flag;
5778 
5779 		id = pahmep->cb_id;
5780 		ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
5781 		if ((f = sfmmu_cb_table[id].posthandler) == NULL)
5782 			continue;
5783 
5784 		/*
5785 		 * Convert the base page PFN into the constituent PFN
5786 		 * which is needed by the callback handler.
5787 		 */
5788 		newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
5789 
5790 		/*
5791 		 * Drop the mapping list lock to avoid locking order issues.
5792 		 */
5793 		if (locked)
5794 			sfmmu_mlist_exit(pml);
5795 
5796 		if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
5797 		    != 0)
5798 			panic("sfmmu: posthandler failed");
5799 
5800 		if (locked) {
5801 			pml = sfmmu_mlist_enter(pp);
5802 			goto top;
5803 		}
5804 	}
5805 
5806 	if (locked)
5807 		sfmmu_mlist_exit(pml);
5808 }
5809 
5810 /*
5811  * Suspend locked kernel mapping
5812  */
5813 void
5814 hat_pagesuspend(struct page *pp)
5815 {
5816 	struct sf_hment *sfhmep;
5817 	sfmmu_t *sfmmup;
5818 	tte_t tte, ttemod;
5819 	struct hme_blk *hmeblkp;
5820 	caddr_t addr;
5821 	int index, cons;
5822 	cpuset_t cpuset;
5823 
5824 	ASSERT(PAGE_EXCL(pp));
5825 	ASSERT(sfmmu_mlist_held(pp));
5826 
5827 	mutex_enter(&kpr_suspendlock);
5828 
5829 	/*
5830 	 * Call into dtrace to tell it we're about to suspend a
5831 	 * kernel mapping. This prevents us from running into issues
5832 	 * with probe context trying to touch a suspended page
5833 	 * in the relocation codepath itself.
5834 	 */
5835 	if (dtrace_kreloc_init)
5836 		(*dtrace_kreloc_init)();
5837 
5838 	index = PP_MAPINDEX(pp);
5839 	cons = TTE8K;
5840 
5841 retry:
5842 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
5843 
5844 		if (IS_PAHME(sfhmep))
5845 			continue;
5846 
5847 		if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
5848 			continue;
5849 
5850 		/*
5851 		 * Loop until we successfully set the suspend bit in
5852 		 * the TTE.
5853 		 */
5854 again:
5855 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
5856 		ASSERT(TTE_IS_VALID(&tte));
5857 
5858 		ttemod = tte;
5859 		TTE_SET_SUSPEND(&ttemod);
5860 		if (sfmmu_modifytte_try(&tte, &ttemod,
5861 		    &sfhmep->hme_tte) < 0)
5862 			goto again;
5863 
5864 		/*
5865 		 * Invalidate TSB entry
5866 		 */
5867 		hmeblkp = sfmmu_hmetohblk(sfhmep);
5868 
5869 		sfmmup = hblktosfmmu(hmeblkp);
5870 		ASSERT(sfmmup == ksfmmup);
5871 
5872 		addr = tte_to_vaddr(hmeblkp, tte);
5873 
5874 		/*
5875 		 * No need to make sure that the TSB for this sfmmu is
5876 		 * not being relocated since it is ksfmmup and thus it
5877 		 * will never be relocated.
5878 		 */
5879 		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp);
5880 
5881 		/*
5882 		 * Update xcall stats
5883 		 */
5884 		cpuset = cpu_ready_set;
5885 		CPUSET_DEL(cpuset, CPU->cpu_id);
5886 
5887 		/* LINTED: constant in conditional context */
5888 		SFMMU_XCALL_STATS(KCONTEXT);
5889 
5890 		/*
5891 		 * Flush TLB entry on remote CPU's
5892 		 */
5893 		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, KCONTEXT);
5894 		xt_sync(cpuset);
5895 
5896 		/*
5897 		 * Flush TLB entry on local CPU
5898 		 */
5899 		vtag_flushpage(addr, KCONTEXT);
5900 	}
5901 
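	/*
	 * A sketch of the index walk below: PP_MAPINDEX() has one bit
	 * per large mapping size present on the page (bit 1 for 64K,
	 * bit 2 for 512K, bit 3 for 4M).  An index of 0xa (binary 1010)
	 * sends us back through "retry" once for the 64K group leader
	 * and once more for the 4M group leader.
	 */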
5902 	while (index != 0) {
5903 		index = index >> 1;
5904 		if (index != 0)
5905 			cons++;
5906 		if (index & 0x1) {
5907 			pp = PP_GROUPLEADER(pp, cons);
5908 			goto retry;
5909 		}
5910 	}
5911 }
5912 
5913 #ifdef	DEBUG
5914 
5915 #define	N_PRLE	1024
5916 struct prle {
5917 	page_t *targ;
5918 	page_t *repl;
5919 	int status;
5920 	int pausecpus;
5921 	hrtime_t whence;
5922 };
5923 
5924 static struct prle page_relocate_log[N_PRLE];
5925 static int prl_entry;
5926 static kmutex_t prl_mutex;
5927 
5928 #define	PAGE_RELOCATE_LOG(t, r, s, p)	do {				\
5929 	mutex_enter(&prl_mutex);					\
5930 	page_relocate_log[prl_entry].targ = *(t);			\
5931 	page_relocate_log[prl_entry].repl = *(r);			\
5932 	page_relocate_log[prl_entry].status = (s);			\
5933 	page_relocate_log[prl_entry].pausecpus = (p);			\
5934 	page_relocate_log[prl_entry].whence = gethrtime();		\
5935 	prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1;	\
5936 	mutex_exit(&prl_mutex); } while (0)
5937 
5938 #else	/* !DEBUG */
5939 #define	PAGE_RELOCATE_LOG(t, r, s, p)
5940 #endif
5941 
5942 /*
5943  * Core Kernel Page Relocation Algorithm
5944  *
5945  * Input:
5946  *
5947  * target : 	constituent pages are SE_EXCL locked.
5948  * replacement:	constituent pages are SE_EXCL locked.
5949  *
5950  * Output:
5951  *
5952  * nrelocp:	number of pages relocated
5953  */
5954 int
5955 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
5956 {
5957 	page_t		*targ, *repl;
5958 	page_t		*tpp, *rpp;
5959 	kmutex_t	*low, *high;
5960 	spgcnt_t	npages, i;
5961 	page_t		*pl = NULL;
5962 	int		old_pil;
5963 	cpuset_t	cpuset;
5964 	int		cap_cpus;
5965 	int		ret;
5966 
5967 	if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) {
5968 		PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
5969 		return (EAGAIN);
5970 	}
5971 
5972 	mutex_enter(&kpr_mutex);
5973 	kreloc_thread = curthread;
5974 
5975 	targ = *target;
5976 	repl = *replacement;
5977 	ASSERT(repl != NULL);
5978 	ASSERT(targ->p_szc == repl->p_szc);
5979 
5980 	npages = page_get_pagecnt(targ->p_szc);
5981 
5982 	/*
5983 	 * unload VA<->PA mappings that are not locked
5984 	 */
5985 	tpp = targ;
5986 	for (i = 0; i < npages; i++) {
5987 		(void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
5988 		tpp++;
5989 	}
5990 
5991 	/*
5992 	 * Do "presuspend" callbacks, in a context from which we can still
5993 	 * block as needed. Note that we don't hold the mapping list lock
5994 	 * of "targ" at this point due to potential locking order issues;
5995 	 * we assume that between the hat_pageunload() above and holding
5996 	 * the SE_EXCL lock that the mapping list *cannot* change at this
5997 	 * point.
5998 	 */
5999 	ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6000 	if (ret != 0) {
6001 		/*
6002 		 * EIO translates to a fatal error; for all others, clean
6003 		 * up and return EAGAIN.
6004 		 */
6005 		ASSERT(ret != EIO);
6006 		hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6007 		PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6008 		kreloc_thread = NULL;
6009 		mutex_exit(&kpr_mutex);
6010 		return (EAGAIN);
6011 	}
6012 
6013 	/*
6014 	 * acquire p_mapping list lock for both the target and replacement
6015 	 * root pages.
6016 	 *
6017 	 * low and high refer to the need to grab the mlist locks in a
6018 	 * specific order in order to prevent race conditions.  Thus the
6019 	 * lower lock must be grabbed before the higher lock.
6020 	 *
6021 	 * This will block hat_unload's accessing p_mapping list.  Since
6022 	 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6023 	 * blocked.  Thus, no one else will be accessing the p_mapping list
6024 	 * while we suspend and reload the locked mapping below.
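	 *
	 * A minimal sketch of the ordering idiom (names hypothetical;
	 * the real sfmmu_mlist_reloc_enter() may differ in detail):
	 *
	 *	m1 = mlist_mutex_of(tpp);
	 *	m2 = mlist_mutex_of(rpp);
	 *	low = (m1 < m2) ? m1 : m2;	(ordered by address)
	 *	high = (m1 < m2) ? m2 : m1;
	 *	mutex_enter(low);
	 *	mutex_enter(high);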
6025 	 */
6026 	tpp = targ;
6027 	rpp = repl;
6028 	sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6029 
6030 	kpreempt_disable();
6031 
6032 	/*
6033 	 * If the replacement page is of a different virtual color
6034 	 * than the page it is replacing, we need to handle the VAC
6035 	 * consistency for it just as we would if we were setting up
6036 	 * a new mapping to a page.
6037 	 */
6038 	if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) {
6039 		if (tpp->p_vcolor != rpp->p_vcolor) {
6040 			sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
6041 			    rpp->p_pagenum);
6042 		}
6043 	}
6044 
6045 	/*
6046 	 * We raise our PIL to 13 so that we don't get captured by
6047 	 * another CPU or pinned by an interrupt thread.  We can't go to
6048 	 * PIL 14 since the nexus driver(s) may need to interrupt at
6049 	 * that level in the case of IOMMU pseudo mappings.
6050 	 */
6051 	cpuset = cpu_ready_set;
6052 	CPUSET_DEL(cpuset, CPU->cpu_id);
6053 	if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6054 		old_pil = splr(XCALL_PIL);
6055 	} else {
6056 		old_pil = -1;
6057 		xc_attention(cpuset);
6058 	}
6059 	ASSERT(getpil() == XCALL_PIL);
6060 
6061 	/*
6062 	 * Now do suspend callbacks. In the case of an IOMMU mapping
6063 	 * this will suspend all DMA activity to the page while it is
6064 	 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6065 	 * may be captured at this point we should have acquired any needed
6066 	 * locks in the presuspend callback.
6067 	 */
6068 	ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6069 	if (ret != 0) {
6070 		repl = targ;
6071 		goto suspend_fail;
6072 	}
6073 
6074 	/*
6075 	 * Raise the PIL yet again, this time to block all high-level
6076 	 * interrupts on this CPU. This is necessary to prevent an
6077 	 * interrupt routine from pinning the thread which holds the
6078 	 * mapping suspended and then touching the suspended page.
6079 	 *
6080 	 * Once the page is suspended we also need to be careful to
6081 	 * avoid calling any functions which touch any seg_kmem memory
6082 	 * since that memory may be backed by the very page we are
6083 	 * relocating in here!
6084 	 */
6085 	hat_pagesuspend(targ);
6086 
6087 	/*
6088 	 * Now that we are confident everybody has stopped using this page,
6089 	 * copy the page contents.  Note we use a physical copy to prevent
6090 	 * locking issues and to avoid fpRAS because we can't handle it in
6091 	 * this context.
6092 	 */
6093 	for (i = 0; i < npages; i++, tpp++, rpp++) {
6094 		/*
6095 		 * Copy the contents of the page.
6096 		 */
6097 		ppcopy_kernel(tpp, rpp);
6098 	}
6099 
6100 	tpp = targ;
6101 	rpp = repl;
6102 	for (i = 0; i < npages; i++, tpp++, rpp++) {
6103 		/*
6104 		 * Copy attributes.  VAC consistency was handled above,
6105 		 * if required.
6106 		 */
6107 		rpp->p_nrm = tpp->p_nrm;
6108 		tpp->p_nrm = 0;
6109 		rpp->p_index = tpp->p_index;
6110 		tpp->p_index = 0;
6111 		rpp->p_vcolor = tpp->p_vcolor;
6112 	}
6113 
6114 	/*
6115 	 * First, unsuspend the page, if we set the suspend bit, and transfer
6116 	 * the mapping list from the target page to the replacement page.
6117 	 * Next process postcallbacks; since pa_hment's are linked only to the
6118 	 * p_mapping list of the root page, we don't iterate over the constituent
6119 	 * pages.
6120 	 */
6121 	hat_pagereload(targ, repl);
6122 
6123 suspend_fail:
6124 	hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
6125 
6126 	/*
6127 	 * Now lower our PIL and release any captured CPUs since we
6128 	 * are out of the "danger zone".  After this it will again be
6129 	 * safe to acquire adaptive mutex locks, or to drop them...
6130 	 */
6131 	if (old_pil != -1) {
6132 		splx(old_pil);
6133 	} else {
6134 		xc_dismissed(cpuset);
6135 	}
6136 
6137 	kpreempt_enable();
6138 
6139 	sfmmu_mlist_reloc_exit(low, high);
6140 
6141 	/*
6142 	 * Postsuspend callbacks should drop any locks held across
6143 	 * the suspend callbacks.  As before, we don't hold the mapping
6144 	 * list lock at this point; our assumption is that the mapping
6145 	 * list still can't change due to our holding the SE_EXCL lock and
6146 	 * there being no unlocked mappings left. Hence the restriction
6147 	 * on the calling context of hat_delete_callback().
6148 	 */
6149 	hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
6150 	if (ret != 0) {
6151 		/*
6152 		 * The second presuspend call failed: we got here through
6153 		 * the suspend_fail label above.
6154 		 */
6155 		ASSERT(ret != EIO);
6156 		PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
6157 		kreloc_thread = NULL;
6158 		mutex_exit(&kpr_mutex);
6159 		return (EAGAIN);
6160 	}
6161 
6162 	/*
6163 	 * Now that we're out of the performance-critical section we can
6164 	 * take care of updating the hash table.  Since we still
6165 	 * hold all the pages locked SE_EXCL at this point, we
6166 	 * needn't worry about things changing out from under us.
6167 	 */
6168 	tpp = targ;
6169 	rpp = repl;
6170 	for (i = 0; i < npages; i++, tpp++, rpp++) {
6171 
6172 		/*
6173 		 * replace targ with replacement in page_hash table
6174 		 */
6175 		targ = tpp;
6176 		page_relocate_hash(rpp, targ);
6177 
6178 		/*
6179 		 * concatenate target; caller of platform_page_relocate()
6180 		 * expects target to be concatenated after returning.
6181 		 */
6182 		ASSERT(targ->p_next == targ);
6183 		ASSERT(targ->p_prev == targ);
6184 		page_list_concat(&pl, &targ);
6185 	}
6186 
6187 	ASSERT(*target == pl);
6188 	*nrelocp = npages;
6189 	PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
6190 	kreloc_thread = NULL;
6191 	mutex_exit(&kpr_mutex);
6192 	return (0);
6193 }
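/*
 * Minimal caller sketch for the above (hypothetical; locking and page
 * selection elided -- a real caller must hold every constituent page
 * SE_EXCL, as noted in the header comment):
 *
 *	spgcnt_t nreloc;
 *	int err;
 *
 *	err = hat_page_relocate(&targ, &repl, &nreloc);
 *	if (err == 0) {
 *		targ now heads the concatenated list of old pages
 *	} else {
 *		EAGAIN: relocation refused or a callback failed;
 *		pick a new replacement and retry, or give up
 *	}
 */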
6194 
6195 /*
6196  * Called when stray pa_hments are found attached to a page which is
6197  * being freed.  Notify the subsystem which attached the pa_hment of
6198  * the error if it registered a suitable handler, else panic.
6199  */
6200 static void
6201 sfmmu_pahment_leaked(struct pa_hment *pahmep)
6202 {
6203 	id_t cb_id = pahmep->cb_id;
6204 
6205 	ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
6206 	if (sfmmu_cb_table[cb_id].errhandler != NULL) {
6207 		if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
6208 		    HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
6209 			return;		/* non-fatal */
6210 	}
6211 	panic("pa_hment leaked: 0x%p", pahmep);
6212 }
6213 
6214 /*
6215  * Remove all mappings to page 'pp'.
6216  */
6217 int
6218 hat_pageunload(struct page *pp, uint_t forceflag)
6219 {
6220 	struct page *origpp = pp;
6221 	struct sf_hment *sfhme, *tmphme;
6222 	struct hme_blk *hmeblkp;
6223 	kmutex_t *pml, *pmtx;
6224 	cpuset_t cpuset, tset;
6225 	int index, cons;
6226 	int xhme_blks;
6227 	int pa_hments;
6228 
6229 	ASSERT(PAGE_EXCL(pp));
6230 
6231 retry_xhat:
6232 	tmphme = NULL;
6233 	xhme_blks = 0;
6234 	pa_hments = 0;
6235 	CPUSET_ZERO(cpuset);
6236 
6237 	pml = sfmmu_mlist_enter(pp);
6238 
6239 	if (pp->p_kpmref)
6240 		sfmmu_kpm_pageunload(pp);
6241 	ASSERT(!PP_ISMAPPED_KPM(pp));
6242 
6243 	index = PP_MAPINDEX(pp);
6244 	cons = TTE8K;
6245 retry:
6246 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6247 		tmphme = sfhme->hme_next;
6248 
6249 		if (IS_PAHME(sfhme)) {
6250 			ASSERT(sfhme->hme_data != NULL);
6251 			pa_hments++;
6252 			continue;
6253 		}
6254 
6255 		hmeblkp = sfmmu_hmetohblk(sfhme);
6256 		if (hmeblkp->hblk_xhat_bit) {
6257 			struct xhat_hme_blk *xblk =
6258 			    (struct xhat_hme_blk *)hmeblkp;
6259 
6260 			(void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
6261 			    pp, forceflag, XBLK2PROVBLK(xblk));
6262 
6263 			xhme_blks = 1;
6264 			continue;
6265 		}
6266 
6267 		/*
6268 		 * If there are kernel mappings don't unload them, they will
6269 		 * be suspended.
6270 		 */
6271 		if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
6272 		    hmeblkp->hblk_tag.htag_id == ksfmmup)
6273 			continue;
6274 
6275 		tset = sfmmu_pageunload(pp, sfhme, cons);
6276 		CPUSET_OR(cpuset, tset);
6277 	}
6278 
6279 	while (index != 0) {
6280 		index = index >> 1;
6281 		if (index != 0)
6282 			cons++;
6283 		if (index & 0x1) {
6284 			/* Go to leading page */
6285 			pp = PP_GROUPLEADER(pp, cons);
6286 			ASSERT(sfmmu_mlist_held(pp));
6287 			goto retry;
6288 		}
6289 	}
6290 
6291 	/*
6292 	 * cpuset may be empty if the page was only mapped by segkpm,
6293 	 * in which case we won't actually cross-trap.
6294 	 */
6295 	xt_sync(cpuset);
6296 
6297 	/*
6298 	 * The page should have no mappings at this point, unless
6299 	 * we were called from hat_page_relocate() in which case we
6300 	 * leave the locked mappings which will be suspended later.
6301 	 */
6302 	ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
6303 	    (forceflag == SFMMU_KERNEL_RELOC));
6304 
6305 	if (PP_ISTNC(pp)) {
6306 		if (cons == TTE8K) {
6307 			pmtx = sfmmu_page_enter(pp);
6308 			PP_CLRTNC(pp);
6309 			sfmmu_page_exit(pmtx);
6310 		} else {
6311 			conv_tnc(pp, cons);
6312 		}
6313 	}
6314 
6315 	if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
6316 		/*
6317 		 * Unlink any pa_hments and free them, calling back
6318 		 * the responsible subsystem to notify it of the error.
6319 		 * This can occur in situations such as drivers leaking
6320 		 * DMA handles: naughty, but common enough that we'd like
6321 		 * to keep the system running rather than bringing it
6322 		 * down with an obscure error like "pa_hment leaked"
6323 		 * which doesn't aid the user in debugging their driver.
6324 		 */
6325 		for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6326 			tmphme = sfhme->hme_next;
6327 			if (IS_PAHME(sfhme)) {
6328 				struct pa_hment *pahmep = sfhme->hme_data;
6329 				sfmmu_pahment_leaked(pahmep);
6330 				HME_SUB(sfhme, pp);
6331 				kmem_cache_free(pa_hment_cache, pahmep);
6332 			}
6333 		}
6334 
6335 		ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
6336 	}
6337 
6338 	sfmmu_mlist_exit(pml);
6339 
6340 	/*
6341 	 * XHAT may not have finished unloading pages
6342 	 * because some other thread was waiting for
6343 	 * mlist lock and XHAT_PAGEUNLOAD let it do
6344 	 * the job.
6345 	 */
6346 	if (xhme_blks) {
6347 		pp = origpp;
6348 		goto retry_xhat;
6349 	}
6350 
6351 	return (0);
6352 }
6353 
6354 static cpuset_t
6355 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
6356 {
6357 	struct hme_blk *hmeblkp;
6358 	sfmmu_t *sfmmup;
6359 	tte_t tte, ttemod;
6360 #ifdef DEBUG
6361 	tte_t orig_old;
6362 #endif /* DEBUG */
6363 	caddr_t addr;
6364 	int ttesz;
6365 	int ret;
6366 	cpuset_t cpuset;
6367 
6368 	ASSERT(pp != NULL);
6369 	ASSERT(sfmmu_mlist_held(pp));
6370 	ASSERT(pp->p_vnode != &kvp);
6371 
6372 	CPUSET_ZERO(cpuset);
6373 
6374 	hmeblkp = sfmmu_hmetohblk(sfhme);
6375 
6376 readtte:
6377 	sfmmu_copytte(&sfhme->hme_tte, &tte);
6378 	if (TTE_IS_VALID(&tte)) {
6379 		sfmmup = hblktosfmmu(hmeblkp);
6380 		ttesz = get_hblk_ttesz(hmeblkp);
6381 		/*
6382 		 * Only unload mappings of 'cons' size.
6383 		 */
6384 		if (ttesz != cons)
6385 			return (cpuset);
6386 
6387 		/*
6388 		 * Note that we have p_mapping lock, but no hash lock here.
6389 		 * hblk_unload() has to have both hash lock AND p_mapping
6390 		 * lock before it tries to modify tte. So, the tte could
6391 		 * not become invalid in the sfmmu_modifytte_try() below.
6392 		 */
6393 		ttemod = tte;
6394 #ifdef DEBUG
6395 		orig_old = tte;
6396 #endif /* DEBUG */
6397 
6398 		TTE_SET_INVALID(&ttemod);
6399 		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
6400 		if (ret < 0) {
6401 #ifdef DEBUG
6402 			/* only R/M bits can change. */
6403 			chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
6404 #endif /* DEBUG */
6405 			goto readtte;
6406 		}
6407 
6408 		if (ret == 0) {
6409 			panic("pageunload: cas failed?");
6410 		}
6411 
6412 		addr = tte_to_vaddr(hmeblkp, tte);
6413 
6414 		sfmmu_ttesync(sfmmup, addr, &tte, pp);
6415 
6416 		atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1);
6417 
6418 		/*
6419 		 * We need to flush the page from the virtual cache
6420 		 * in order to prevent a virtual cache alias
6421 		 * inconsistency. The particular scenario we need
6422 		 * to worry about is:
6423 		 * Given:  va1 and va2 are two virtual addresses that
6424 		 * alias and will map the same physical address.
6425 		 * 1.	mapping exists from va1 to pa and data has
6426 		 *	been read into the cache.
6427 		 * 2.	unload va1.
6428 		 * 3.	load va2 and modify data using va2.
6429 		 * 4.	unload va2.
6430 		 * 5.	load va1 and reference data.  Unless we flush
6431 		 *	the data cache when we unload we will get
6432 		 *	stale data.
6433 		 * This scenario is taken care of by using virtual
6434 		 * page coloring.
6435 		 */
6436 		if (sfmmup->sfmmu_ismhat) {
6437 			/*
6438 			 * Flush TSBs, TLBs and caches
6439 			 * of every process
6440 			 * sharing this ism segment.
6441 			 */
6442 			sfmmu_hat_lock_all();
6443 			mutex_enter(&ism_mlist_lock);
6444 			kpreempt_disable();
6445 			if (do_virtual_coloring)
6446 				sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
6447 					pp->p_pagenum, CACHE_NO_FLUSH);
6448 			else
6449 				sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
6450 					pp->p_pagenum, CACHE_FLUSH);
6451 			kpreempt_enable();
6452 			mutex_exit(&ism_mlist_lock);
6453 			sfmmu_hat_unlock_all();
6454 			cpuset = cpu_ready_set;
6455 		} else if (do_virtual_coloring) {
6456 			sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
6457 			cpuset = sfmmup->sfmmu_cpusran;
6458 		} else {
6459 			sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp,
6460 				pp->p_pagenum, 0, FLUSH_NECESSARY_CPUS,
6461 				CACHE_FLUSH, 0);
6462 			cpuset = sfmmup->sfmmu_cpusran;
6463 		}
6464 
6465 		/*
6466 		 * Hme_sub has to run after ttesync() and a_rss update.
6467 		 * See hblk_unload().
6468 		 */
6469 		HME_SUB(sfhme, pp);
6470 		membar_stst();
6471 
6472 		/*
6473 		 * We cannot make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
6474 		 * since pteload may have done a HME_ADD() right after
6475 		 * we did the HME_SUB() above. Hmecnt is now maintained
6476 		 * by cas only; no lock guarantees its value. The only
6477 		 * guarantee we have is that the hmecnt will not drop below
6478 		 * what it should be, so the hblk will not be taken away.
6479 		 * It's also important that we decrement the hmecnt only
6480 		 * after we are done with hmeblkp, so that this hmeblk
6481 		 * won't be stolen.
6482 		 */
6483 		ASSERT(hmeblkp->hblk_hmecnt > 0);
6484 		ASSERT(hmeblkp->hblk_vcnt > 0);
6485 		atomic_add_16(&hmeblkp->hblk_vcnt, -1);
6486 		atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
6487 		/*
6488 		 * This is bug 4063182.
6489 		 * XXX: fixme
6490 		 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6491 		 *	!hmeblkp->hblk_lckcnt);
6492 		 */
6493 	} else {
6494 		panic("invalid tte? pp %p &tte %p",
6495 		    (void *)pp, (void *)&tte);
6496 	}
6497 
6498 	return (cpuset);
6499 }
6500 
6501 /*
6502  * While relocating a kernel page, this function will move the mappings
6503  * from tpp to dpp and modify any associated data with these mappings.
6504  * It also unsuspends the suspended kernel mapping.
6505  */
6506 static void
6507 hat_pagereload(struct page *tpp, struct page *dpp)
6508 {
6509 	struct sf_hment *sfhme;
6510 	tte_t tte, ttemod;
6511 	int index, cons;
6512 
6513 	ASSERT(getpil() == PIL_MAX);
6514 	ASSERT(sfmmu_mlist_held(tpp));
6515 	ASSERT(sfmmu_mlist_held(dpp));
6516 
6517 	index = PP_MAPINDEX(tpp);
6518 	cons = TTE8K;
6519 
6520 	/* Update real mappings to the page */
6521 retry:
6522 	for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
6523 		if (IS_PAHME(sfhme))
6524 			continue;
6525 		sfmmu_copytte(&sfhme->hme_tte, &tte);
6526 		ttemod = tte;
6527 
6528 		/*
6529 		 * replace old pfn with new pfn in TTE
6530 		 */
6531 		PFN_TO_TTE(ttemod, dpp->p_pagenum);
6532 
6533 		/*
6534 		 * clear suspend bit
6535 		 */
6536 		ASSERT(TTE_IS_SUSPEND(&ttemod));
6537 		TTE_CLR_SUSPEND(&ttemod);
6538 
6539 		if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
6540 			panic("hat_pagereload(): sfmmu_modifytte_try() failed");
6541 
6542 		/*
6543 		 * set hme_page point to new page
6544 		 */
6545 		sfhme->hme_page = dpp;
6546 	}
6547 
6548 	/*
6549 	 * move p_mapping list from old page to new page
6550 	 */
6551 	dpp->p_mapping = tpp->p_mapping;
6552 	tpp->p_mapping = NULL;
6553 	dpp->p_share = tpp->p_share;
6554 	tpp->p_share = 0;
6555 
6556 	while (index != 0) {
6557 		index = index >> 1;
6558 		if (index != 0)
6559 			cons++;
6560 		if (index & 0x1) {
6561 			tpp = PP_GROUPLEADER(tpp, cons);
6562 			dpp = PP_GROUPLEADER(dpp, cons);
6563 			goto retry;
6564 		}
6565 	}
6566 
6567 	if (dtrace_kreloc_fini)
6568 		(*dtrace_kreloc_fini)();
6569 	mutex_exit(&kpr_suspendlock);
6570 }
6571 
6572 uint_t
6573 hat_pagesync(struct page *pp, uint_t clearflag)
6574 {
6575 	struct sf_hment *sfhme, *tmphme = NULL;
6576 	struct hme_blk *hmeblkp;
6577 	kmutex_t *pml;
6578 	cpuset_t cpuset, tset;
6579 	int	index, cons;
6580 	extern	ulong_t po_share;
6581 	page_t	*save_pp = pp;
6582 
6583 	CPUSET_ZERO(cpuset);
6584 
6585 	if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
6586 		return (PP_GENERIC_ATTR(pp));
6587 	}
6588 
6589 	if ((clearflag == (HAT_SYNC_STOPON_REF | HAT_SYNC_DONTZERO)) &&
6590 	    PP_ISREF(pp)) {
6591 		return (PP_GENERIC_ATTR(pp));
6592 	}
6593 
6594 	if ((clearflag == (HAT_SYNC_STOPON_MOD | HAT_SYNC_DONTZERO)) &&
6595 	    PP_ISMOD(pp)) {
6596 		return (PP_GENERIC_ATTR(pp));
6597 	}
6598 
6599 	if ((clearflag & HAT_SYNC_STOPON_SHARED) != 0 &&
6600 	    (pp->p_share > po_share) &&
6601 	    !(clearflag & HAT_SYNC_ZERORM)) {
6602 		if (PP_ISRO(pp))
6603 			hat_page_setattr(pp, P_REF);
6604 		return (PP_GENERIC_ATTR(pp));
6605 	}
6606 
6607 	clearflag &= ~HAT_SYNC_STOPON_SHARED;
6608 	pml = sfmmu_mlist_enter(pp);
6609 	index = PP_MAPINDEX(pp);
6610 	cons = TTE8K;
6611 retry:
6612 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6613 		/*
6614 		 * We need to save the next hment on the list since
6615 		 * it is possible for pagesync to remove an invalid hment
6616 		 * from the list.
6617 		 */
6618 		tmphme = sfhme->hme_next;
6619 		/*
6620 		 * If we are looking for large mappings and this hme doesn't
6621 		 * reach the range we are seeking, just ignore it.
6622 		 */
6623 		hmeblkp = sfmmu_hmetohblk(sfhme);
6624 		if (hmeblkp->hblk_xhat_bit)
6625 			continue;
6626 
6627 		if (hme_size(sfhme) < cons)
6628 			continue;
6629 		tset = sfmmu_pagesync(pp, sfhme,
6630 			clearflag & ~HAT_SYNC_STOPON_RM);
6631 		CPUSET_OR(cpuset, tset);
6632 		/*
6633 		 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
6634 		 * as the "ref" or "mod" is set.
6635 		 */
6636 		if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
6637 		    (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
6638 		    ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
6639 			index = 0;
6640 			break;
6641 		}
6642 	}
6643 
6644 	while (index) {
6645 		index = index >> 1;
6646 		cons++;
6647 		if (index & 0x1) {
6648 			/* Go to leading page */
6649 			pp = PP_GROUPLEADER(pp, cons);
6650 			goto retry;
6651 		}
6652 	}
6653 
6654 	xt_sync(cpuset);
6655 	sfmmu_mlist_exit(pml);
6656 	return (PP_GENERIC_ATTR(save_pp));
6657 }
6658 
6659 /*
6660  * Get all the hardware dependent attributes for a page struct
6661  */
6662 static cpuset_t
6663 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
6664 	uint_t clearflag)
6665 {
6666 	caddr_t addr;
6667 	tte_t tte, ttemod;
6668 	struct hme_blk *hmeblkp;
6669 	int ret;
6670 	sfmmu_t *sfmmup;
6671 	cpuset_t cpuset;
6672 
6673 	ASSERT(pp != NULL);
6674 	ASSERT(sfmmu_mlist_held(pp));
6675 	ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6676 		(clearflag == HAT_SYNC_ZERORM));
6677 
6678 	SFMMU_STAT(sf_pagesync);
6679 
6680 	CPUSET_ZERO(cpuset);
6681 
6682 sfmmu_pagesync_retry:
6683 
6684 	sfmmu_copytte(&sfhme->hme_tte, &tte);
6685 	if (TTE_IS_VALID(&tte)) {
6686 		hmeblkp = sfmmu_hmetohblk(sfhme);
6687 		sfmmup = hblktosfmmu(hmeblkp);
6688 		addr = tte_to_vaddr(hmeblkp, tte);
6689 		if (clearflag == HAT_SYNC_ZERORM) {
6690 			ttemod = tte;
6691 			TTE_CLR_RM(&ttemod);
6692 			ret = sfmmu_modifytte_try(&tte, &ttemod,
6693 				&sfhme->hme_tte);
6694 			if (ret < 0) {
6695 				/*
6696 				 * cas failed and the new value is not what
6697 				 * we want.
6698 				 */
6699 				goto sfmmu_pagesync_retry;
6700 			}
6701 
6702 			if (ret > 0) {
6703 				/* we win the cas */
6704 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
6705 				cpuset = sfmmup->sfmmu_cpusran;
6706 			}
6707 		}
6708 
6709 		sfmmu_ttesync(sfmmup, addr, &tte, pp);
6710 	}
6711 	return (cpuset);
6712 }
6713 
6714 /*
6715  * Remove write permission from a mapping to a page, so that
6716  * we can detect the next modification of it. This requires modifying
6717  * the TTE then invalidating (demap) any TLB entry using that TTE.
6718  * This code is similar to sfmmu_pagesync().
6719  */
6720 static cpuset_t
6721 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
6722 {
6723 	caddr_t addr;
6724 	tte_t tte;
6725 	tte_t ttemod;
6726 	struct hme_blk *hmeblkp;
6727 	int ret;
6728 	sfmmu_t *sfmmup;
6729 	cpuset_t cpuset;
6730 
6731 	ASSERT(pp != NULL);
6732 	ASSERT(sfmmu_mlist_held(pp));
6733 
6734 	CPUSET_ZERO(cpuset);
6735 	SFMMU_STAT(sf_clrwrt);
6736 
6737 retry:
6738 
6739 	sfmmu_copytte(&sfhme->hme_tte, &tte);
6740 	if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
6741 		hmeblkp = sfmmu_hmetohblk(sfhme);
6742 
6743 		/*
6744 		 * xhat mappings should never be to a VMODSORT page.
6745 		 */
6746 		ASSERT(hmeblkp->hblk_xhat_bit == 0);
6747 
6748 		sfmmup = hblktosfmmu(hmeblkp);
6749 		addr = tte_to_vaddr(hmeblkp, tte);
6750 
6751 		ttemod = tte;
6752 		TTE_CLR_WRT(&ttemod);
6753 		TTE_CLR_MOD(&ttemod);
6754 		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
6755 
6756 		/*
6757 		 * If the cas failed and the new value is not what
6758 		 * we want, retry.
6759 		 */
6760 		if (ret < 0)
6761 			goto retry;
6762 
6763 		/* we win the cas */
6764 		if (ret > 0) {
6765 			sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
6766 			cpuset = sfmmup->sfmmu_cpusran;
6767 		}
6768 	}
6769 
6770 	return (cpuset);
6771 }
6772 
6773 /*
6774  * Walk all mappings of a page, removing write permission and clearing the
6775  * ref/mod bits. This code is similar to hat_pagesync()
6776  */
6777 static void
6778 hat_page_clrwrt(page_t *pp)
6779 {
6780 	struct sf_hment *sfhme;
6781 	struct sf_hment *tmphme = NULL;
6782 	kmutex_t *pml;
6783 	cpuset_t cpuset;
6784 	cpuset_t tset;
6785 	int	index;
6786 	int	 cons;
6787 
6788 	CPUSET_ZERO(cpuset);
6789 
6790 	pml = sfmmu_mlist_enter(pp);
6791 	index = PP_MAPINDEX(pp);
6792 	cons = TTE8K;
6793 retry:
6794 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6795 		tmphme = sfhme->hme_next;
6796 
6797 		/*
6798 		 * If we are looking for large mappings and this hme doesn't
6799 		 * reach the range we are seeking, just ignore it.
6800 		 */
6801 
6802 		if (hme_size(sfhme) < cons)
6803 			continue;
6804 
6805 		tset = sfmmu_pageclrwrt(pp, sfhme);
6806 		CPUSET_OR(cpuset, tset);
6807 	}
6808 
6809 	while (index) {
6810 		index = index >> 1;
6811 		cons++;
6812 		if (index & 0x1) {
6813 			/* Go to leading page */
6814 			pp = PP_GROUPLEADER(pp, cons);
6815 			goto retry;
6816 		}
6817 	}
6818 
6819 	xt_sync(cpuset);
6820 	sfmmu_mlist_exit(pml);
6821 }
6822 
6823 /*
6824  * Set the given REF/MOD/RO bits for the given page.
6825  * For a vnode with a sorted v_pages list, we need to change
6826  * the attributes and the v_pages list together under page_vnode_mutex.
6827  */
6828 void
6829 hat_page_setattr(page_t *pp, uint_t flag)
6830 {
6831 	vnode_t		*vp = pp->p_vnode;
6832 	page_t		**listp;
6833 	kmutex_t	*pmtx;
6834 	kmutex_t	*vphm = NULL;
6835 
6836 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
6837 
6838 	/*
6839 	 * nothing to do if attribute already set
6840 	 */
6841 	if ((pp->p_nrm & flag) == flag)
6842 		return;
6843 
6844 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
6845 		vphm = page_vnode_mutex(vp);
6846 		mutex_enter(vphm);
6847 	}
6848 
6849 	pmtx = sfmmu_page_enter(pp);
6850 	pp->p_nrm |= flag;
6851 	sfmmu_page_exit(pmtx);
6852 
6853 	if (vphm != NULL) {
6854 		/*
6855 		 * Some file systems examine v_pages for NULL without
6856 		 * grabbing the vphm mutex. Must not let it become NULL when
6857 		 * pp is the only page on the list.
6858 		 */
6859 		if (pp->p_vpnext != pp) {
6860 			page_vpsub(&vp->v_pages, pp);
6861 			if (vp->v_pages != NULL)
6862 				listp = &vp->v_pages->p_vpprev->p_vpnext;
6863 			else
6864 				listp = &vp->v_pages;
6865 			page_vpadd(listp, pp);
6866 		}
6867 		mutex_exit(vphm);
6868 	}
6869 }
6870 
6871 void
6872 hat_page_clrattr(page_t *pp, uint_t flag)
6873 {
6874 	vnode_t		*vp = pp->p_vnode;
6875 	kmutex_t	*vphm = NULL;
6876 	kmutex_t	*pmtx;
6877 
6878 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
6879 
6880 	/*
6881 	 * For vnode with a sorted v_pages list, we need to change
6882 	 * the attributes and the v_pages list together under page_vnode_mutex.
6883 	 */
6884 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
6885 		vphm = page_vnode_mutex(vp);
6886 		mutex_enter(vphm);
6887 	}
6888 
6889 	pmtx = sfmmu_page_enter(pp);
6890 	pp->p_nrm &= ~flag;
6891 	sfmmu_page_exit(pmtx);
6892 
6893 	if (vphm != NULL) {
6894 		/*
6895 		 * Some file systems examine v_pages for NULL without
6896 		 * grabbing the vphm mutex. Must not let it become NULL when
6897 		 * pp is the only page on the list.
6898 		 */
6899 		if (pp->p_vpnext != pp) {
6900 			page_vpsub(&vp->v_pages, pp);
6901 			page_vpadd(&vp->v_pages, pp);
6902 		}
6903 		mutex_exit(vphm);
6904 
6905 		/*
6906 		 * VMODSORT works by removing write permissions and getting
6907 		 * a fault when a page is made dirty. At this point
6908 		 * we need to remove write permission from all mappings
6909 		 * to this page.
6910 		 */
6911 		hat_page_clrwrt(pp);
6912 	}
6913 }
6914 
6915 
6916 uint_t
6917 hat_page_getattr(page_t *pp, uint_t flag)
6918 {
6919 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
6920 	return ((uint_t)(pp->p_nrm & flag));
6921 }
6922 
6923 /*
6924  * DEBUG kernels: verify that a kernel va<->pa translation
6925  * is safe by checking the underlying page_t is in a page
6926  * relocation-safe state.
6927  */
6928 #ifdef	DEBUG
6929 void
6930 sfmmu_check_kpfn(pfn_t pfn)
6931 {
6932 	page_t *pp;
6933 	int index, cons;
6934 
6935 	if (hat_check_vtop == 0)
6936 		return;
6937 
6938 	if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr)
6939 		return;
6940 
6941 	pp = page_numtopp_nolock(pfn);
6942 	if (!pp)
6943 		return;
6944 
6945 	if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
6946 		return;
6947 
6948 	/*
6949 	 * Handed a large kernel page, we dig up the root page since we
6950 	 * know the root page might have the lock also.
6951 	 */
6952 	if (pp->p_szc != 0) {
6953 		index = PP_MAPINDEX(pp);
6954 		cons = TTE8K;
6955 again:
6956 		while (index != 0) {
6957 			index >>= 1;
6958 			if (index != 0)
6959 				cons++;
6960 			if (index & 0x1) {
6961 				pp = PP_GROUPLEADER(pp, cons);
6962 				goto again;
6963 			}
6964 		}
6965 	}
6966 
6967 	if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
6968 		return;
6969 
6970 	/*
6971 	 * Pages need to be locked or allocated "permanent" (either from
6972 	 * static_arena arena or explicitly setting PG_NORELOC when calling
6973 	 * page_create_va()) for VA->PA translations to be valid.
6974 	 */
6975 	if (!PP_ISNORELOC(pp))
6976 		panic("Illegal VA->PA translation, pp 0x%p not permanent", pp);
6977 	else
6978 		panic("Illegal VA->PA translation, pp 0x%p not locked", pp);
6979 }
6980 #endif	/* DEBUG */
6981 
6982 /*
6983  * Returns a page frame number for a given virtual address.
6984  * Returns PFN_INVALID to indicate an invalid mapping
6985  */
6986 pfn_t
6987 hat_getpfnum(struct hat *hat, caddr_t addr)
6988 {
6989 	pfn_t pfn;
6990 	tte_t tte;
6991 
6992 	/*
6993 	 * We would like to
6994 	 * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
6995 	 * but we can't because the iommu driver will call this
6996 	 * routine at interrupt time and it can't grab the as lock
6997 	 * or it will deadlock: A thread could have the as lock
6998 	 * and be waiting for io.  The io can't complete
6999 	 * because the interrupt thread is blocked trying to grab
7000 	 * the as lock.
7001 	 */
7002 
7003 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7004 
7005 	if (hat == ksfmmup) {
7006 		if (segkpm && IS_KPM_ADDR(addr))
7007 			return (sfmmu_kpm_vatopfn(addr));
7008 		while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7009 		    == PFN_SUSPENDED) {
7010 			sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7011 		}
7012 		sfmmu_check_kpfn(pfn);
7013 		return (pfn);
7014 	} else {
7015 		return (sfmmu_uvatopfn(addr, hat));
7016 	}
7017 }
7018 
7019 /*
7020  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
7021  * Use hat_getpfnum(kas.a_hat, ...) instead.
7022  *
7023  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
7024  * but can't right now due to the fact that some software has grown to use
7025  * this interface incorrectly. So for now when the interface is misused,
7026  * return a warning to the user that in the future it won't work in the
7027  * way they're abusing it, and carry on (after disabling page relocation).
7028  */
7029 pfn_t
7030 hat_getkpfnum(caddr_t addr)
7031 {
7032 	pfn_t pfn;
7033 	tte_t tte;
7034 	int badcaller = 0;
7035 	extern int segkmem_reloc;
7036 
7037 	if (segkpm && IS_KPM_ADDR(addr)) {
7038 		badcaller = 1;
7039 		pfn = sfmmu_kpm_vatopfn(addr);
7040 	} else {
7041 		while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7042 		    == PFN_SUSPENDED) {
7043 			sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7044 		}
7045 		badcaller = pf_is_memory(pfn);
7046 	}
7047 
7048 	if (badcaller) {
7049 		/*
7050 		 * We can't return PFN_INVALID or the caller may panic
7051 		 * or corrupt the system.  The only alternative is to
7052 		 * disable page relocation at this point for all kernel
7053 		 * memory.  This will impact any callers of page_relocate()
7054 		 * such as FMA or DR.
7055 		 *
7056 		 * RFE: Add junk here to spit out an ereport so the sysadmin
7057 		 * can be advised that he should upgrade his device driver
7058 		 * so that this doesn't happen.
7059 		 */
7060 		hat_getkpfnum_badcall(caller());
7061 		if (hat_kpr_enabled && segkmem_reloc) {
7062 			hat_kpr_enabled = 0;
7063 			segkmem_reloc = 0;
7064 			cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED");
7065 		}
7066 	}
7067 	return (pfn);
7068 }
7069 
7070 pfn_t
7071 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup)
7072 {
7073 	struct hmehash_bucket *hmebp;
7074 	hmeblk_tag hblktag;
7075 	int hmeshift, hashno = 1;
7076 	struct hme_blk *hmeblkp = NULL;
7077 
7078 	struct sf_hment *sfhmep;
7079 	tte_t tte;
7080 	pfn_t pfn;
7081 
7082 	/* support for ISM */
7083 	ism_map_t	*ism_map;
7084 	ism_blk_t	*ism_blkp;
7085 	int		i;
7086 	sfmmu_t *ism_hatid = NULL;
7087 	sfmmu_t *locked_hatid = NULL;
7088 
7089 
7090 	ASSERT(sfmmup != ksfmmup);
7091 	SFMMU_STAT(sf_user_vtop);
7092 	/*
7093 	 * Set ism_hatid if vaddr falls in a ISM segment.
7094 	 */
7095 	ism_blkp = sfmmup->sfmmu_iblk;
7096 	if (ism_blkp) {
7097 		sfmmu_ismhat_enter(sfmmup, 0);
7098 		locked_hatid = sfmmup;
7099 	}
7100 	while (ism_blkp && ism_hatid == NULL) {
7101 		ism_map = ism_blkp->iblk_maps;
7102 		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
7103 			if (vaddr >= ism_start(ism_map[i]) &&
7104 			    vaddr < ism_end(ism_map[i])) {
7105 				sfmmup = ism_hatid = ism_map[i].imap_ismhat;
7106 				vaddr = (caddr_t)(vaddr -
7107 					ism_start(ism_map[i]));
7108 				break;
7109 			}
7110 		}
7111 		ism_blkp = ism_blkp->iblk_next;
7112 	}
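	/*
	 * Example of the rebasing above (addresses made up): if an ISM
	 * segment is attached at [0x80000000, 0x80400000) and vaddr is
	 * 0x80002000, the lookup continues in the ISM hat with vaddr
	 * rebased to 0x2000, its offset within the segment.
	 */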
7113 	if (locked_hatid) {
7114 		sfmmu_ismhat_exit(locked_hatid, 0);
7115 	}
7116 
7117 	hblktag.htag_id = sfmmup;
7118 	do {
7119 		hmeshift = HME_HASH_SHIFT(hashno);
7120 		hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
7121 		hblktag.htag_rehash = hashno;
7122 		hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
7123 
7124 		SFMMU_HASH_LOCK(hmebp);
7125 
7126 		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7127 		if (hmeblkp != NULL) {
7128 			HBLKTOHME(sfhmep, hmeblkp, vaddr);
7129 			sfmmu_copytte(&sfhmep->hme_tte, &tte);
7130 			if (TTE_IS_VALID(&tte)) {
7131 				pfn = TTE_TO_PFN(vaddr, &tte);
7132 			} else {
7133 				pfn = PFN_INVALID;
7134 			}
7135 			SFMMU_HASH_UNLOCK(hmebp);
7136 			return (pfn);
7137 		}
7138 		SFMMU_HASH_UNLOCK(hmebp);
7139 		hashno++;
7140 	} while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
7141 	return (PFN_INVALID);
7142 }
7143 
7144 
7145 /*
7146  * For compatibility with AT&T and later optimizations
7147  */
7148 /* ARGSUSED */
7149 void
7150 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
7151 {
7152 	ASSERT(hat != NULL);
7153 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7154 }
7155 
7156 /*
7157  * Return the number of mappings to a particular page.
7158  * This number is an approximation of the number of
7159  * processes sharing the page.
7160  */
7161 ulong_t
7162 hat_page_getshare(page_t *pp)
7163 {
7164 	page_t *spp = pp;	/* start page */
7165 	kmutex_t *pml;
7166 	ulong_t	cnt;
7167 	int index, sz = TTE64K;
7168 
7169 	/*
7170 	 * We need to grab the mlist lock to make sure any outstanding
7171 	 * load/unloads complete.  Otherwise we could return zero
7172 	 * even though an unload hasn't finished yet.
7173 	 */
7174 	pml = sfmmu_mlist_enter(spp);
7175 	cnt = spp->p_share;
7176 
7177 	if (kpm_enable)
7178 		cnt += spp->p_kpmref;
7179 
7180 	/*
7181 	 * If we have any large mappings, we count the number of
7182 	 * mappings that this large page is part of.
7183 	 */
7184 	index = PP_MAPINDEX(spp);
7185 	index >>= 1;
7186 	while (index) {
7187 		pp = PP_GROUPLEADER(spp, sz);
7188 		if ((index & 0x1) && pp != spp) {
7189 			cnt += pp->p_share;
7190 			spp = pp;
7191 		}
7192 		index >>= 1;
7193 		sz++;
7194 	}
7195 	sfmmu_mlist_exit(pml);
7196 	return (cnt);
7197 }
7198 
7199 /*
7200  * Unload all large mappings to the pp and reset the p_szc field of every
7201  * constituent page according to the remaining mappings.
7202  *
7203  * pp must be locked SE_EXCL. Even though no other constituent pages are
7204  * locked it's legal to unload the large mappings to the pp because all
7205  * constituent pages of large locked mappings have to be locked SE_SHARED.
7206  * This means if we have SE_EXCL lock on one of constituent pages none of the
7207  * large mappings to pp are locked.
7208  *
7209  * Decrease p_szc field starting from the last constituent page and ending
7210  * with the root page. This method is used because other threads rely on the
7211  * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
7212  * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This
7213  * ensures that p_szc changes of the constituent pages appears atomic for all
7214  * threads that use sfmmu_mlspl_enter() to examine p_szc field.
7215  *
7216  * This mechanism is only used for file system pages where it's not always
7217  * possible to get SE_EXCL locks on all constituent pages to demote the size
7218  * code (as is done for anonymous or kernel large pages).
7219  *
7220  * See more comments in front of sfmmu_mlspl_enter().
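 *
 * Worked example (illustrative): demoting a 4M page (p_szc = 3, with
 * 512 constituent 8K pages) that has no remaining large mappings sets
 * p_szc to 0 on constituent pages 511, 510, ... 1 first and on the
 * root page last; by the time the root's p_szc drops, every
 * constituent page has already been updated, so the change appears
 * atomic to sfmmu_mlspl_enter() users.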
7221  */
7222 void
7223 hat_page_demote(page_t *pp)
7224 {
7225 	int index;
7226 	int sz;
7227 	cpuset_t cpuset;
7228 	int sync = 0;
7229 	page_t *rootpp;
7230 	struct sf_hment *sfhme;
7231 	struct sf_hment *tmphme = NULL;
7232 	struct hme_blk *hmeblkp;
7233 	uint_t pszc;
7234 	page_t *lastpp;
7235 	cpuset_t tset;
7236 	pgcnt_t npgs;
7237 	kmutex_t *pml;
7238 	kmutex_t *pmtx = NULL;
7239 
7240 	ASSERT(PAGE_EXCL(pp));
7241 	ASSERT(!PP_ISFREE(pp));
7242 	ASSERT(page_szc_lock_assert(pp));
7243 	pml = sfmmu_mlist_enter(pp);
7244 
7245 	pszc = pp->p_szc;
7246 	if (pszc == 0) {
7247 		goto out;
7248 	}
7249 
7250 	index = PP_MAPINDEX(pp) >> 1;
7251 
7252 	if (index) {
7253 		CPUSET_ZERO(cpuset);
7254 		sz = TTE64K;
7255 		sync = 1;
7256 	}
7257 
7258 	while (index) {
7259 		if (!(index & 0x1)) {
7260 			index >>= 1;
7261 			sz++;
7262 			continue;
7263 		}
7264 		ASSERT(sz <= pszc);
7265 		rootpp = PP_GROUPLEADER(pp, sz);
7266 		for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
7267 			tmphme = sfhme->hme_next;
7268 			hmeblkp = sfmmu_hmetohblk(sfhme);
7269 			if (hme_size(sfhme) != sz) {
7270 				continue;
7271 			}
7272 			if (hmeblkp->hblk_xhat_bit) {
7273 				cmn_err(CE_PANIC,
7274 				    "hat_page_demote: xhat hmeblk");
7275 			}
7276 			tset = sfmmu_pageunload(rootpp, sfhme, sz);
7277 			CPUSET_OR(cpuset, tset);
7278 		}
7279 		if (index >>= 1) {
7280 			sz++;
7281 		}
7282 	}
7283 
7284 	ASSERT(!PP_ISMAPPED_LARGE(pp));
7285 
7286 	if (sync) {
7287 		xt_sync(cpuset);
7288 		if (PP_ISTNC(pp)) {
7289 			conv_tnc(rootpp, sz);
7290 		}
7291 	}
7292 
7293 	pmtx = sfmmu_page_enter(pp);
7294 
7295 	ASSERT(pp->p_szc == pszc);
7296 	rootpp = PP_PAGEROOT(pp);
7297 	ASSERT(rootpp->p_szc == pszc);
7298 	lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
7299 
7300 	while (lastpp != rootpp) {
7301 		sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
7302 		ASSERT(sz < pszc);
7303 		npgs = (sz == 0) ? 1 : TTEPAGES(sz);
7304 		ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
7305 		while (--npgs > 0) {
7306 			lastpp->p_szc = (uchar_t)sz;
7307 			lastpp = PP_PAGEPREV(lastpp);
7308 		}
7309 		if (sz) {
7310 			/*
7311 			 * Make sure all updates to the constituent pages'
7312 			 * p_szc fields are globally visible before the
7313 			 * current root's p_szc is updated.
7314 			 */
7315 			membar_producer();
7316 		}
7317 		lastpp->p_szc = sz;
7318 		ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
7319 		if (lastpp != rootpp) {
7320 			lastpp = PP_PAGEPREV(lastpp);
7321 		}
7322 	}
7323 	if (sz == 0) {
7324 		/* the loop above doesn't cover this case */
7325 		rootpp->p_szc = 0;
7326 	}
7327 out:
7328 	ASSERT(pp->p_szc == 0);
7329 	if (pmtx != NULL) {
7330 		sfmmu_page_exit(pmtx);
7331 	}
7332 	sfmmu_mlist_exit(pml);
7333 }
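
/*
 * Worked example (a sketch of the ordering argument above, not new
 * behavior): demoting a 512K page with no remaining large mappings,
 * TTEPAGES(TTE512K) == 64, so the loop walks lastpp from constituent
 * 63 down toward the root, storing p_szc = 0 into each page; only the
 * final store drops the root's p_szc, so a thread keying off the root
 * in sfmmu_mlspl_enter() never sees a constituent with a stale size.
 */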
7334 
7335 /*
7336  * Refresh the HAT ismttecnt[] element for size szc.
7337  * Caller must have set ISM busy flag to prevent mapping
7338  * lists from changing while we're traversing them.
7339  */
7340 pgcnt_t
7341 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
7342 {
7343 	ism_blk_t	*ism_blkp = sfmmup->sfmmu_iblk;
7344 	ism_map_t	*ism_map;
7345 	pgcnt_t		npgs = 0;
7346 	int		j;
7347 
7348 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
7349 	for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
7350 		ism_map = ism_blkp->iblk_maps;
7351 		for (j = 0; j < ISM_MAP_SLOTS && ism_map[j].imap_ismhat; j++)
7352 			npgs += ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
7353 	}
7354 	sfmmup->sfmmu_ismttecnt[szc] = npgs;
7355 	return (npgs);
7356 }
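
/*
 * Usage sketch (mirrors the callers below): the ISM busy flag keeps the
 * iblk chain stable while the counters are refreshed:
 *
 *	sfmmu_ismhat_enter(sfmmup, 0);
 *	for (i = 0; i <= ismszc; i++)
 *		if (!(disable_ism_large_pages & (1 << i)))
 *			(void) ism_tsb_entries(sfmmup, i);
 *	sfmmu_ismhat_exit(sfmmup, 0);
 */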
7357 
7358 /*
7359  * Yield the memory claim requirement for an address space.
7360  *
7361  * This is currently implemented as the number of bytes that have active
7362  * hardware translations that have page structures.  Therefore, it can
7363  * underestimate the traditional resident set size, e.g., if the
7364  * physical page is present and the hardware translation is missing;
7365  * and it can overestimate the RSS, e.g., if there are active
7366  * translations to a frame buffer with page structs.
7367  * Also, it does not take sharing into account.
7368  *
7369  * Note that we don't acquire locks here since this function is most often
7370  * called from the clock thread.
7371  */
7372 size_t
7373 hat_get_mapped_size(struct hat *hat)
7374 {
7375 	size_t		assize = 0;
7376 	int 		i;
7377 
7378 	if (hat == NULL)
7379 		return (0);
7380 
7381 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7382 
7383 	for (i = 0; i < mmu_page_sizes; i++)
7384 		assize += (pgcnt_t)hat->sfmmu_ttecnt[i] * TTEBYTES(i);
7385 
7386 	if (hat->sfmmu_iblk == NULL)
7387 		return (assize);
7388 
7389 	for (i = 0; i < mmu_page_sizes; i++)
7390 		assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] * TTEBYTES(i);
7391 
7392 	return (assize);
7393 }
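
/*
 * Worked example (illustrative counts): a hat with
 * sfmmu_ttecnt[TTE8K] == 100 and sfmmu_ttecnt[TTE4M] == 2 yields
 *
 *	assize = 100 * 8192 + 2 * 4194304 = 9207808 bytes (~8.8MB)
 *
 * before any ISM contribution is added in.
 */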
7394 
7395 int
7396 hat_stats_enable(struct hat *hat)
7397 {
7398 	hatlock_t	*hatlockp;
7399 
7400 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7401 
7402 	hatlockp = sfmmu_hat_enter(hat);
7403 	hat->sfmmu_rmstat++;
7404 	sfmmu_hat_exit(hatlockp);
7405 	return (1);
7406 }
7407 
7408 void
7409 hat_stats_disable(struct hat *hat)
7410 {
7411 	hatlock_t	*hatlockp;
7412 
7413 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7414 
7415 	hatlockp = sfmmu_hat_enter(hat);
7416 	hat->sfmmu_rmstat--;
7417 	sfmmu_hat_exit(hatlockp);
7418 }
7419 
7420 /*
7421  * Routines for entering or removing ourselves from the
7422  * ism_hat's mapping list.
7423  */
7424 static void
7425 iment_add(struct ism_ment *iment, struct hat *ism_hat)
7426 {
7427 	ASSERT(MUTEX_HELD(&ism_mlist_lock));
7428 
7429 	iment->iment_prev = NULL;
7430 	iment->iment_next = ism_hat->sfmmu_iment;
7431 	if (ism_hat->sfmmu_iment) {
7432 		ism_hat->sfmmu_iment->iment_prev = iment;
7433 	}
7434 	ism_hat->sfmmu_iment = iment;
7435 }
7436 
7437 static void
7438 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
7439 {
7440 	ASSERT(MUTEX_HELD(&ism_mlist_lock));
7441 
7442 	if (ism_hat->sfmmu_iment == NULL) {
7443 		panic("ism map entry remove - no entries");
7444 	}
7445 
7446 	if (iment->iment_prev) {
7447 		ASSERT(ism_hat->sfmmu_iment != iment);
7448 		iment->iment_prev->iment_next = iment->iment_next;
7449 	} else {
7450 		ASSERT(ism_hat->sfmmu_iment == iment);
7451 		ism_hat->sfmmu_iment = iment->iment_next;
7452 	}
7453 
7454 	if (iment->iment_next) {
7455 		iment->iment_next->iment_prev = iment->iment_prev;
7456 	}
7457 
7458 	/*
7459 	 * zero out the entry
7460 	 */
7461 	iment->iment_next = NULL;
7462 	iment->iment_prev = NULL;
7463 	iment->iment_hat =  NULL;
7464 }
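
/*
 * Usage sketch: both routines assume the caller holds ism_mlist_lock,
 * exactly as hat_share()/hat_unshare() below do:
 *
 *	mutex_enter(&ism_mlist_lock);
 *	iment_add(ism_ment, ism_hatid);
 *	mutex_exit(&ism_mlist_lock);
 */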
7465 
7466 /*
7467  * Hat_share()/unshare() return a (non-zero) error
7468  * when saddr and daddr are not properly aligned.
7469  *
7470  * The top level mapping element determines the alignment
7471  * requirement for saddr and daddr, depending on different
7472  * architectures.
7473  *
7474  * When hat_share()/unshare() are not supported,
7475  * HATOP_SHARE()/UNSHARE() return 0
7476  */
7477 int
7478 hat_share(struct hat *sfmmup, caddr_t addr,
7479 	struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
7480 {
7481 	ism_blk_t	*ism_blkp;
7482 	ism_blk_t	*new_iblk;
7483 	ism_map_t 	*ism_map;
7484 	ism_ment_t	*ism_ment;
7485 	int		i, added;
7486 	hatlock_t	*hatlockp;
7487 	int		reload_mmu = 0;
7488 	uint_t		ismshift = page_get_shift(ismszc);
7489 	size_t		ismpgsz = page_get_pagesize(ismszc);
7490 	uint_t		ismmask = (uint_t)ismpgsz - 1;
7491 	size_t		sh_size = ISM_SHIFT(ismshift, len);
7492 	ushort_t	ismhatflag;
7493 
7494 #ifdef DEBUG
7495 	caddr_t		eaddr = addr + len;
7496 #endif /* DEBUG */
7497 
7498 	ASSERT(ism_hatid != NULL && sfmmup != NULL);
7499 	ASSERT(sptaddr == ISMID_STARTADDR);
7500 	/*
7501 	 * Check the alignment.
7502 	 */
7503 	if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
7504 		return (EINVAL);
7505 
7506 	/*
7507 	 * Check size alignment.
7508 	 */
7509 	if (!ISM_ALIGNED(ismshift, len))
7510 		return (EINVAL);
7511 
7512 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
7513 
7514 	/*
7515 	 * Allocate ism_ment for the ism_hat's mapping list, and an
7516 	 * ism map blk in case we need one.  We must do our
7517 	 * allocations before acquiring locks to prevent a deadlock
7518 	 * in the kmem allocator on the mapping list lock.
7519 	 */
7520 	new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
7521 	ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
7522 
7523 	/*
7524 	 * Serialize ISM mappings with the ISM busy flag, and also the
7525 	 * trap handlers.
7526 	 */
7527 	sfmmu_ismhat_enter(sfmmup, 0);
7528 
7529 	/*
7530 	 * Allocate an ism map blk if necessary.
7531 	 */
7532 	if (sfmmup->sfmmu_iblk == NULL) {
7533 		sfmmup->sfmmu_iblk = new_iblk;
7534 		bzero(new_iblk, sizeof (*new_iblk));
7535 		new_iblk->iblk_nextpa = (uint64_t)-1;
7536 		membar_stst();	/* make sure next ptr visible to all CPUs */
7537 		sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
7538 		reload_mmu = 1;
7539 		new_iblk = NULL;
7540 	}
7541 
7542 #ifdef DEBUG
7543 	/*
7544 	 * Make sure mapping does not already exist.
7545 	 */
7546 	ism_blkp = sfmmup->sfmmu_iblk;
7547 	while (ism_blkp) {
7548 		ism_map = ism_blkp->iblk_maps;
7549 		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
7550 			if ((addr >= ism_start(ism_map[i]) &&
7551 			    addr < ism_end(ism_map[i])) ||
7552 			    (eaddr > ism_start(ism_map[i]) &&
7553 			    eaddr <= ism_end(ism_map[i]))) {
7554 				panic("sfmmu_share: Already mapped!");
7555 			}
7556 		}
7557 		ism_blkp = ism_blkp->iblk_next;
7558 	}
7559 #endif /* DEBUG */
7560 
7561 	ASSERT(ismszc >= TTE4M);
7562 	if (ismszc == TTE4M) {
7563 		ismhatflag = HAT_4M_FLAG;
7564 	} else if (ismszc == TTE32M) {
7565 		ismhatflag = HAT_32M_FLAG;
7566 	} else if (ismszc == TTE256M) {
7567 		ismhatflag = HAT_256M_FLAG;
7568 	}
7569 	/*
7570 	 * Add mapping to first available mapping slot.
7571 	 */
7572 	ism_blkp = sfmmup->sfmmu_iblk;
7573 	added = 0;
7574 	while (!added) {
7575 		ism_map = ism_blkp->iblk_maps;
7576 		for (i = 0; i < ISM_MAP_SLOTS; i++)  {
7577 			if (ism_map[i].imap_ismhat == NULL) {
7578 
7579 				ism_map[i].imap_ismhat = ism_hatid;
7580 				ism_map[i].imap_vb_shift = (ushort_t)ismshift;
7581 				ism_map[i].imap_hatflags = ismhatflag;
7582 				ism_map[i].imap_sz_mask = ismmask;
7583 				/*
7584 				 * imap_seg is checked in ISM_CHECK to see if it is
7585 				 * non-NULL; if so, the other fields are assumed valid.
7586 				 */
7587 				membar_stst();
7588 				ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
7589 				ism_map[i].imap_ment = ism_ment;
7590 
7591 				/*
7592 				 * Now add ourselves to the ism_hat's
7593 				 * mapping list.
7594 				 */
7595 				ism_ment->iment_hat = sfmmup;
7596 				ism_ment->iment_base_va = addr;
7597 				ism_hatid->sfmmu_ismhat = 1;
7598 				ism_hatid->sfmmu_flags = 0;
7599 				mutex_enter(&ism_mlist_lock);
7600 				iment_add(ism_ment, ism_hatid);
7601 				mutex_exit(&ism_mlist_lock);
7602 				added = 1;
7603 				break;
7604 			}
7605 		}
7606 		if (!added && ism_blkp->iblk_next == NULL) {
7607 			ism_blkp->iblk_next = new_iblk;
7608 			new_iblk = NULL;
7609 			bzero(ism_blkp->iblk_next,
7610 			    sizeof (*ism_blkp->iblk_next));
7611 			ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
7612 			membar_stst();
7613 			ism_blkp->iblk_nextpa =
7614 				va_to_pa((caddr_t)ism_blkp->iblk_next);
7615 		}
7616 		ism_blkp = ism_blkp->iblk_next;
7617 	}
7618 
7619 	/*
7620 	 * Update our counters for this sfmmup's ism mappings.
7621 	 */
7622 	for (i = 0; i <= ismszc; i++) {
7623 		if (!(disable_ism_large_pages & (1 << i)))
7624 			(void) ism_tsb_entries(sfmmup, i);
7625 	}
7626 
7627 	hatlockp = sfmmu_hat_enter(sfmmup);
7628 
7629 	/*
7630 	 * For ISM and DISM we do not support 512K pages, so we search only
7631 	 * the 4M and 8K/64K hashes on 4-pagesize cpus, and the 256M or 32M,
7632 	 * 4M and 8K/64K hashes on 6-pagesize cpus.
7633 	 */
7634 	ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
7635 
7636 	if (ismszc > TTE4M && !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG))
7637 		SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG);
7638 
7639 	if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG))
7640 		SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG);
7641 
7642 	/*
7643 	 * If we updated the ismblkpa for this HAT or we need
7644 	 * to start searching the 256M or 32M or 4M hash, we must
7645 	 * make sure all CPUs running this process reload their
7646 	 * tsbmiss area.  Otherwise they will fail to load the mappings
7647 	 * in the tsbmiss handler and will loop calling pagefault().
7648 	 */
7649 	switch (ismszc) {
7650 	case TTE256M:
7651 		if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) {
7652 			SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG);
7653 			sfmmu_sync_mmustate(sfmmup);
7654 		}
7655 		break;
7656 	case TTE32M:
7657 		if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) {
7658 			SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG);
7659 			sfmmu_sync_mmustate(sfmmup);
7660 		}
7661 		break;
7662 	case TTE4M:
7663 		if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) {
7664 			SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG);
7665 			sfmmu_sync_mmustate(sfmmup);
7666 		}
7667 		break;
7668 	default:
7669 		break;
7670 	}
7671 
7672 	/*
7673 	 * Now we can drop the locks.
7674 	 */
7675 	sfmmu_ismhat_exit(sfmmup, 1);
7676 	sfmmu_hat_exit(hatlockp);
7677 
7678 	/*
7679 	 * Free up ismblk if we didn't use it.
7680 	 */
7681 	if (new_iblk != NULL)
7682 		kmem_cache_free(ism_blk_cache, new_iblk);
7683 
7684 	/*
7685 	 * Check TSB and TLB page sizes.
7686 	 */
7687 	sfmmu_check_page_sizes(sfmmup, 1);
7688 
7689 	return (0);
7690 }
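
/*
 * Alignment sketch (illustrative numbers): for a 4M ISM page size,
 * ismshift is 22, so the ISM_ALIGNED() checks above require the low
 * 22 bits of addr, sptaddr and len to be zero; e.g. addr 0x10400000
 * passes while addr 0x10400123 makes hat_share() return EINVAL.
 */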
7691 
7692 /*
7693  * hat_unshare removes exactly one ism_map from
7694  * this process's as.  It expects multiple calls
7695  * to hat_unshare for multiple shm segments.
7696  */
7697 void
7698 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
7699 {
7700 	ism_map_t 	*ism_map;
7701 	ism_ment_t	*free_ment = NULL;
7702 	ism_blk_t	*ism_blkp;
7703 	struct hat	*ism_hatid;
7704 	struct ctx	*ctx;
7705 	int 		cnum, found, i;
7706 	hatlock_t	*hatlockp;
7707 	struct tsb_info	*tsbinfo;
7708 	uint_t		ismshift = page_get_shift(ismszc);
7709 	size_t		sh_size = ISM_SHIFT(ismshift, len);
7710 
7711 	ASSERT(ISM_ALIGNED(ismshift, addr));
7712 	ASSERT(ISM_ALIGNED(ismshift, len));
7713 	ASSERT(sfmmup != NULL);
7714 	ASSERT(sfmmup != ksfmmup);
7715 
7716 	if (sfmmup->sfmmu_xhat_provider) {
7717 		XHAT_UNSHARE(sfmmup, addr, len);
7718 		return;
7719 	} else {
7720 		/*
7721 		 * This must be a CPU HAT. If the address space has
7722 		 * XHATs attached, inform all XHATs that ISM segment
7723 		 * is going away
7724 		 */
7725 		ASSERT(sfmmup->sfmmu_as != NULL);
7726 		if (sfmmup->sfmmu_as->a_xhat != NULL)
7727 			xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
7728 	}
7729 
7730 	/*
7731 	 * Make sure that during the entire time ISM mappings are removed,
7732 	 * the trap handlers serialize behind us, and that no one else
7733 	 * can be mucking with ISM mappings.  This also lets us get away
7734 	 * with not doing expensive cross calls to flush the TLB -- we
7735 	 * just discard the context, flush the entire TSB, and call it
7736 	 * a day.
7737 	 */
7738 	sfmmu_ismhat_enter(sfmmup, 0);
7739 
7740 	/*
7741 	 * Remove the mapping.
7742 	 *
7743 	 * We can't have any holes in the ism map.
7744 	 * The tsb miss code, while searching the ism map, will
7745 	 * stop on an empty map slot, so we must move everything
7746 	 * past the hole up by one slot.
7747 	 *
7748 	 * Also, empty ism map blks are not freed until the
7749 	 * process exits. This is to prevent an MT race condition
7750 	 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
7751 	 */
7752 	found = 0;
7753 	ism_blkp = sfmmup->sfmmu_iblk;
7754 	while (!found && ism_blkp) {
7755 		ism_map = ism_blkp->iblk_maps;
7756 		for (i = 0; i < ISM_MAP_SLOTS; i++) {
7757 			if (addr == ism_start(ism_map[i]) &&
7758 			    sh_size == (size_t)(ism_size(ism_map[i]))) {
7759 				found = 1;
7760 				break;
7761 			}
7762 		}
7763 		if (!found)
7764 			ism_blkp = ism_blkp->iblk_next;
7765 	}
7766 
7767 	if (found) {
7768 		ism_hatid = ism_map[i].imap_ismhat;
7769 		ASSERT(ism_hatid != NULL);
7770 		ASSERT(ism_hatid->sfmmu_ismhat == 1);
7771 		ASSERT(ism_hatid->sfmmu_cnum == INVALID_CONTEXT);
7772 
7773 		/*
7774 		 * First remove ourselves from the ism mapping list.
7775 		 */
7776 		mutex_enter(&ism_mlist_lock);
7777 		iment_sub(ism_map[i].imap_ment, ism_hatid);
7778 		mutex_exit(&ism_mlist_lock);
7779 		free_ment = ism_map[i].imap_ment;
7780 
7781 		/*
7782 		 * Now guarantee that any other cpu
7783 		 * that tries to process an ISM miss
7784 		 * will go to tl=0.
7785 		 */
7786 		hatlockp = sfmmu_hat_enter(sfmmup);
7787 		ctx = sfmmutoctx(sfmmup);
7788 		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
7789 		cnum = sfmmutoctxnum(sfmmup);
7790 
7791 		if (cnum != INVALID_CONTEXT) {
7792 			sfmmu_tlb_swap_ctx(sfmmup, ctx);
7793 		}
7794 		rw_exit(&ctx->ctx_rwlock);
7795 		sfmmu_hat_exit(hatlockp);
7796 
7797 		/*
7798 		 * We delete the ism map by copying
7799 		 * the next map over the current one.
7800 		 * We will take the next one in the maps
7801 		 * array or from the next ism_blk.
7802 		 */
7803 		while (ism_blkp) {
7804 			ism_map = ism_blkp->iblk_maps;
7805 			while (i < (ISM_MAP_SLOTS - 1)) {
7806 				ism_map[i] = ism_map[i + 1];
7807 				i++;
7808 			}
7809 			/* i == (ISM_MAP_SLOTS - 1) */
7810 			ism_blkp = ism_blkp->iblk_next;
7811 			if (ism_blkp) {
7812 				ism_map[i] = ism_blkp->iblk_maps[0];
7813 				i = 0;
7814 			} else {
7815 				ism_map[i].imap_seg = 0;
7816 				ism_map[i].imap_vb_shift = 0;
7817 				ism_map[i].imap_hatflags = 0;
7818 				ism_map[i].imap_sz_mask = 0;
7819 				ism_map[i].imap_ismhat = NULL;
7820 				ism_map[i].imap_ment = NULL;
7821 			}
7822 		}
7823 
7824 		/*
7825 		 * Now flush entire TSB for the process, since
7826 		 * demapping page by page can be too expensive.
7827 		 * We don't have to flush the TLB here anymore
7828 		 * since we switch to a new TLB ctx instead.
7829 		 * Also, there is no need to flush if the process
7830 		 * is exiting since the TSB will be freed later.
7831 		 */
7832 		if (!sfmmup->sfmmu_free) {
7833 			hatlockp = sfmmu_hat_enter(sfmmup);
7834 			for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
7835 			    tsbinfo = tsbinfo->tsb_next) {
7836 				if (tsbinfo->tsb_flags & TSB_SWAPPED)
7837 					continue;
7838 				sfmmu_inv_tsb(tsbinfo->tsb_va,
7839 				    TSB_BYTES(tsbinfo->tsb_szc));
7840 			}
7841 			sfmmu_hat_exit(hatlockp);
7842 		}
7843 	}
7844 
7845 	/*
7846 	 * Update our counters for this sfmmup's ism mappings.
7847 	 */
7848 	for (i = 0; i <= ismszc; i++) {
7849 		if (!(disable_ism_large_pages & (1 << i)))
7850 			(void) ism_tsb_entries(sfmmup, i);
7851 	}
7852 
7853 	sfmmu_ismhat_exit(sfmmup, 0);
7854 
7855 	/*
7856 	 * We must do our freeing here after dropping locks
7857 	 * to prevent a deadlock in the kmem allocator on the
7858 	 * mapping list lock.
7859 	 */
7860 	if (free_ment != NULL)
7861 		kmem_cache_free(ism_ment_cache, free_ment);
7862 
7863 	/*
7864 	 * Check TSB and TLB page sizes if the process isn't exiting.
7865 	 */
7866 	if (!sfmmup->sfmmu_free)
7867 		sfmmu_check_page_sizes(sfmmup, 0);
7868 }
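
/*
 * Compaction sketch: deleting slot i shifts the remaining entries left
 * so the tsbmiss search never encounters a hole.  With a hypothetical
 * ISM_MAP_SLOTS of 4 and a delete at i == 1:
 *
 *	before: [A][B][C][D]	after: [A][C][D][first map of next blk]
 *
 * The freed last slot is refilled from the next ism_blk, or zeroed out
 * when the chain ends.
 */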
7869 
7870 /* ARGSUSED */
7871 static int
7872 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
7873 {
7874 	/* void *buf is sfmmu_t pointer */
7875 	return (0);
7876 }
7877 
7878 /* ARGSUSED */
7879 static void
7880 sfmmu_idcache_destructor(void *buf, void *cdrarg)
7881 {
7882 	/* void *buf is sfmmu_t pointer */
7883 }
7884 
7885 /*
7886  * Set up kmem hmeblks by bzeroing all members and initializing the nextpa
7887  * field to be the pa of this hmeblk
7888  */
7889 /* ARGSUSED */
7890 static int
7891 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
7892 {
7893 	struct hme_blk *hmeblkp;
7894 
7895 	bzero(buf, (size_t)cdrarg);
7896 	hmeblkp = (struct hme_blk *)buf;
7897 	hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
7898 
7899 #ifdef	HBLK_TRACE
7900 	mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
7901 #endif	/* HBLK_TRACE */
7902 
7903 	return (0);
7904 }
7905 
7906 /* ARGSUSED */
7907 static void
7908 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
7909 {
7910 
7911 #ifdef	HBLK_TRACE
7912 
7913 	struct hme_blk *hmeblkp;
7914 
7915 	hmeblkp = (struct hme_blk *)buf;
7916 	mutex_destroy(&hmeblkp->hblk_audit_lock);
7917 
7918 #endif	/* HBLK_TRACE */
7919 }
7920 
7921 #define	SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
7922 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
7923 /*
7924  * The kmem allocator will callback into our reclaim routine when the system
7925  * is running low in memory.  We traverse the hash and free up all unused but
7926  * still cached hme_blks.  We also traverse the free list and free them up
7927  * as well.
7928  */
7929 /*ARGSUSED*/
7930 static void
7931 sfmmu_hblkcache_reclaim(void *cdrarg)
7932 {
7933 	int i;
7934 	uint64_t hblkpa, prevpa, nx_pa;
7935 	struct hmehash_bucket *hmebp;
7936 	struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
7937 	static struct hmehash_bucket *uhmehash_reclaim_hand;
7938 	static struct hmehash_bucket *khmehash_reclaim_hand;
7939 	struct hme_blk *list = NULL;
7940 
7941 	hmebp = uhmehash_reclaim_hand;
7942 	if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
7943 		uhmehash_reclaim_hand = hmebp = uhme_hash;
7944 	uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
7945 
7946 	for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
7947 		if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
7948 			hmeblkp = hmebp->hmeblkp;
7949 			hblkpa = hmebp->hmeh_nextpa;
7950 			prevpa = 0;
7951 			pr_hblk = NULL;
7952 			while (hmeblkp) {
7953 				nx_hblk = hmeblkp->hblk_next;
7954 				nx_pa = hmeblkp->hblk_nextpa;
7955 				if (!hmeblkp->hblk_vcnt &&
7956 				    !hmeblkp->hblk_hmecnt) {
7957 					sfmmu_hblk_hash_rm(hmebp, hmeblkp,
7958 						prevpa, pr_hblk);
7959 					sfmmu_hblk_free(hmebp, hmeblkp,
7960 					    hblkpa, &list);
7961 				} else {
7962 					pr_hblk = hmeblkp;
7963 					prevpa = hblkpa;
7964 				}
7965 				hmeblkp = nx_hblk;
7966 				hblkpa = nx_pa;
7967 			}
7968 			SFMMU_HASH_UNLOCK(hmebp);
7969 		}
7970 		if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
7971 			hmebp = uhme_hash;
7972 	}
7973 
7974 	hmebp = khmehash_reclaim_hand;
7975 	if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
7976 		khmehash_reclaim_hand = hmebp = khme_hash;
7977 	khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
7978 
7979 	for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
7980 		if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
7981 			hmeblkp = hmebp->hmeblkp;
7982 			hblkpa = hmebp->hmeh_nextpa;
7983 			prevpa = 0;
7984 			pr_hblk = NULL;
7985 			while (hmeblkp) {
7986 				nx_hblk = hmeblkp->hblk_next;
7987 				nx_pa = hmeblkp->hblk_nextpa;
7988 				if (!hmeblkp->hblk_vcnt &&
7989 				    !hmeblkp->hblk_hmecnt) {
7990 					sfmmu_hblk_hash_rm(hmebp, hmeblkp,
7991 						prevpa, pr_hblk);
7992 					sfmmu_hblk_free(hmebp, hmeblkp,
7993 					    hblkpa, &list);
7994 				} else {
7995 					pr_hblk = hmeblkp;
7996 					prevpa = hblkpa;
7997 				}
7998 				hmeblkp = nx_hblk;
7999 				hblkpa = nx_pa;
8000 			}
8001 			SFMMU_HASH_UNLOCK(hmebp);
8002 		}
8003 		if (hmebp++ == &khme_hash[KHMEHASH_SZ])
8004 			hmebp = khme_hash;
8005 	}
8006 	sfmmu_hblks_list_purge(&list);
8007 }
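
/*
 * Registration sketch (how these callbacks are presumably wired to the
 * kmem allocator during hat initialization; the actual call is made
 * elsewhere in this file):
 *
 *	cache = kmem_cache_create("sfmmu8_cache", hme8blk_sz, 0,
 *	    sfmmu_hblkcache_constructor, sfmmu_hblkcache_destructor,
 *	    sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, NULL, 0);
 *
 * kmem then calls the reclaim routine whenever memory runs low.
 */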
8008 
8009 /*
8010  * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
8011  * same goes for sfmmu_get_addrvcolor().
8012  *
8013  * This function will return the virtual color for the specified page. The
8014  * virtual color corresponds to this page's current mapping or its last mapping.
8015  * It is used by memory allocators to choose addresses with the correct
8016  * alignment so vac consistency is automatically maintained.  If the page
8017  * has no color it returns -1.
8018  */
8019 int
8020 sfmmu_get_ppvcolor(struct page *pp)
8021 {
8022 	int color;
8023 
8024 	if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
8025 		return (-1);
8026 	}
8027 	color = PP_GET_VCOLOR(pp);
8028 	ASSERT(color < mmu_btop(shm_alignment));
8029 	return (color);
8030 }
8031 
8032 /*
8033  * This function will return the desired alignment for vac consistency
8034  * (vac color) given a virtual address.  If no vac is present it returns -1.
8035  */
8036 int
8037 sfmmu_get_addrvcolor(caddr_t vaddr)
8038 {
8039 	if (cache & CACHE_VAC) {
8040 		return (addr_to_vcolor(vaddr));
8041 	} else {
8042 		return (-1);
8043 	}
8044 
8045 }
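
/*
 * Color arithmetic sketch (an assumption about the platform macro's
 * shape, not its definition): with 8K pages and a shm_alignment of 64K
 * there are 8 virtual colors, and addr_to_vcolor() reduces to
 *
 *	color = (addr >> MMU_PAGESHIFT) & (ncolors - 1);
 *
 * so addresses that may alias in the VAC get distinct colors.
 */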
8046 
8047 /*
8048  * Check for conflicts.
8049  * A conflict exists if the new and existing mappings do not match in
8050  * their "shm_alignment" fields. If conflicts exist, the existing mappings
8051  * are flushed unless one of them is locked. If one of them is locked, then
8052  * the mappings are flushed and converted to non-cacheable mappings.
8053  */
8054 static void
8055 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
8056 {
8057 	struct hat *tmphat;
8058 	struct sf_hment *sfhmep, *tmphme = NULL;
8059 	struct hme_blk *hmeblkp;
8060 	int vcolor;
8061 	tte_t tte;
8062 
8063 	ASSERT(sfmmu_mlist_held(pp));
8064 	ASSERT(!PP_ISNC(pp));		/* page better be cacheable */
8065 
8066 	vcolor = addr_to_vcolor(addr);
8067 	if (PP_NEWPAGE(pp)) {
8068 		PP_SET_VCOLOR(pp, vcolor);
8069 		return;
8070 	}
8071 
8072 	if (PP_GET_VCOLOR(pp) == vcolor) {
8073 		return;
8074 	}
8075 
8076 	if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
8077 		/*
8078 		 * Previous user of page had a different color
8079 		 * but since there are no current users
8080 		 * we just flush the cache and change the color.
8081 		 */
8082 		SFMMU_STAT(sf_pgcolor_conflict);
8083 		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
8084 		PP_SET_VCOLOR(pp, vcolor);
8085 		return;
8086 	}
8087 
8088 	/*
8089 	 * If we get here we have a vac conflict with a current
8090 	 * mapping.  VAC conflict policy is as follows.
8091 	 * - The default is to unload the other mappings, unless:
8092 	 * - If we have a large mapping, we uncache the page.
8093 	 *   We need to uncache the rest of the large page too.
8094 	 * - If any of the mappings are locked, we uncache the page.
8095 	 * - If the requested mapping is inconsistent
8096 	 * with another mapping and that mapping
8097 	 * is in the same address space we have to
8098 	 * make it non-cached.  The default thing
8099 	 * to do is unload the inconsistent mapping
8100 	 * but if they are in the same address space
8101 	 * we run the risk of unmapping the pc or the
8102 	 * stack which we will use as we return to the user,
8103 	 * in which case we can then fault on the thing
8104 	 * we just unloaded and get into an infinite loop.
8105 	 */
8106 	if (PP_ISMAPPED_LARGE(pp)) {
8107 		int sz;
8108 
8109 		/*
8110 		 * Existing mapping is for big pages. We don't unload
8111 		 * existing big mappings to satisfy new mappings.
8112 		 * Always convert all mappings to TNC.
8113 		 */
8114 		sz = fnd_mapping_sz(pp);
8115 		pp = PP_GROUPLEADER(pp, sz);
8116 		SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
8117 		sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
8118 			TTEPAGES(sz));
8119 
8120 		return;
8121 	}
8122 
8123 	/*
8124 	 * check if any mapping is in same as or if it is locked
8125 	 * since in that case we need to uncache.
8126 	 */
8127 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
8128 		tmphme = sfhmep->hme_next;
8129 		hmeblkp = sfmmu_hmetohblk(sfhmep);
8130 		if (hmeblkp->hblk_xhat_bit)
8131 			continue;
8132 		tmphat = hblktosfmmu(hmeblkp);
8133 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
8134 		ASSERT(TTE_IS_VALID(&tte));
8135 		if ((tmphat == hat) || hmeblkp->hblk_lckcnt) {
8136 			/*
8137 			 * We have an uncache conflict
8138 			 */
8139 			SFMMU_STAT(sf_uncache_conflict);
8140 			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
8141 			return;
8142 		}
8143 	}
8144 
8145 	/*
8146 	 * We have an unload conflict
8147 	 * We have already checked for LARGE mappings, therefore
8148 	 * the remaining mapping(s) must be TTE8K.
8149 	 */
8150 	SFMMU_STAT(sf_unload_conflict);
8151 
8152 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
8153 		tmphme = sfhmep->hme_next;
8154 		hmeblkp = sfmmu_hmetohblk(sfhmep);
8155 		if (hmeblkp->hblk_xhat_bit)
8156 			continue;
8157 		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
8158 	}
8159 
8160 	if (PP_ISMAPPED_KPM(pp))
8161 		sfmmu_kpm_vac_unload(pp, addr);
8162 
8163 	/*
8164 	 * Unloads only do TLB flushes so we need to flush the
8165 	 * cache here.
8166 	 */
8167 	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
8168 	PP_SET_VCOLOR(pp, vcolor);
8169 }
8170 
8171 /*
8172  * Whenever a mapping is unloaded and the page is in TNC state,
8173  * we see if the page can be made cacheable again. 'pp' is
8174  * the page that we just unloaded a mapping from, the size
8175  * of mapping that was unloaded is 'ottesz'.
8176  * Remark:
8177  * The recache policy for mpss pages can leave a performance problem
8178  * under the following circumstances:
8179  * . A large page in uncached mode has just been unmapped.
8180  * . All constituent pages are TNC due to a conflicting small mapping.
8181  * . There are many other, non conflicting, small mappings around for
8182  *   a lot of the constituent pages.
8183  * . We're called w/ the "old" groupleader page and the old ottesz,
8184  *   but this is irrelevant, since we're no longer "PP_ISMAPPED_LARGE", so
8185  *   we end up w/ TTE8K or npages == 1.
8186  * . We call tst_tnc w/ the old groupleader only, and if there is no
8187  *   conflict, we re-cache only this page.
8188  * . All other small mappings are not checked and will be left in TNC mode.
8189  * The problem is not very serious because:
8190  * . mpss is actually only defined for heap and stack, so the probability
8191  *   is not very high that a large page mapping exists in parallel to a small
8192  *   one (this is possible, but seems to be bad programming style in the
8193  *   appl).
8194  * . The problem gets a little bit more serious, when those TNC pages
8195  *   have to be mapped into kernel space, e.g. for networking.
8196  * . When VAC alias conflicts occur in applications, this is regarded
8197  *   as an application bug. So if kstats show them, the appl should
8198  *   be changed anyway.
8199  */
8200 static void
8201 conv_tnc(page_t *pp, int ottesz)
8202 {
8203 	int cursz, dosz;
8204 	pgcnt_t curnpgs, dopgs;
8205 	pgcnt_t pg64k;
8206 	page_t *pp2;
8207 
8208 	/*
8209 	 * Determine how big a range we check for TNC and find
8210 	 * leader page. cursz is the size of the biggest
8211 	 * mapping that still exist on 'pp'.
8212 	 */
8213 	if (PP_ISMAPPED_LARGE(pp)) {
8214 		cursz = fnd_mapping_sz(pp);
8215 	} else {
8216 		cursz = TTE8K;
8217 	}
8218 
8219 	if (ottesz >= cursz) {
8220 		dosz = ottesz;
8221 		pp2 = pp;
8222 	} else {
8223 		dosz = cursz;
8224 		pp2 = PP_GROUPLEADER(pp, dosz);
8225 	}
8226 
8227 	pg64k = TTEPAGES(TTE64K);
8228 	dopgs = TTEPAGES(dosz);
8229 
8230 	ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
8231 
8232 	while (dopgs != 0) {
8233 		curnpgs = TTEPAGES(cursz);
8234 		if (tst_tnc(pp2, curnpgs)) {
8235 			SFMMU_STAT_ADD(sf_recache, curnpgs);
8236 			sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
8237 				curnpgs);
8238 		}
8239 
8240 		ASSERT(dopgs >= curnpgs);
8241 		dopgs -= curnpgs;
8242 
8243 		if (dopgs == 0) {
8244 			break;
8245 		}
8246 
8247 		pp2 = PP_PAGENEXT_N(pp2, curnpgs);
8248 		if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
8249 			cursz = fnd_mapping_sz(pp2);
8250 		} else {
8251 			cursz = TTE8K;
8252 		}
8253 	}
8254 }
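
/*
 * Walk sketch (hypothetical sizes): with ottesz == TTE512K and only 8K
 * mappings remaining, dopgs starts at TTEPAGES(TTE512K) == 64 and each
 * pass tests a single page (curnpgs == 1) via tst_tnc(), re-caching it
 * on success and stepping pp2 forward with PP_PAGENEXT_N() until all
 * 64 constituents have been considered.
 */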
8255 
8256 /*
8257  * Returns 1 if the page(s) can be converted from TNC to a cacheable
8258  * setting, and 0 otherwise.
8260  */
8261 static int
8262 tst_tnc(page_t *pp, pgcnt_t npages)
8263 {
8264 	struct	sf_hment *sfhme;
8265 	struct	hme_blk *hmeblkp;
8266 	tte_t	tte;
8267 	caddr_t	vaddr;
8268 	int	clr_valid = 0;
8269 	int 	color, color1, bcolor;
8270 	int	i, ncolors;
8271 
8272 	ASSERT(pp != NULL);
8273 	ASSERT(!(cache & CACHE_WRITEBACK));
8274 
8275 	if (npages > 1) {
8276 		ncolors = CACHE_NUM_COLOR;
8277 	}
8278 
8279 	for (i = 0; i < npages; i++) {
8280 		ASSERT(sfmmu_mlist_held(pp));
8281 		ASSERT(PP_ISTNC(pp));
8282 		ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
8283 
8284 		if (PP_ISPNC(pp)) {
8285 			return (0);
8286 		}
8287 
8288 		clr_valid = 0;
8289 		if (PP_ISMAPPED_KPM(pp)) {
8290 			caddr_t kpmvaddr;
8291 
8292 			ASSERT(kpm_enable);
8293 			kpmvaddr = hat_kpm_page2va(pp, 1);
8294 			ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
8295 			color1 = addr_to_vcolor(kpmvaddr);
8296 			clr_valid = 1;
8297 		}
8298 
8299 		for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
8300 			hmeblkp = sfmmu_hmetohblk(sfhme);
8301 			if (hmeblkp->hblk_xhat_bit)
8302 				continue;
8303 
8304 			sfmmu_copytte(&sfhme->hme_tte, &tte);
8305 			ASSERT(TTE_IS_VALID(&tte));
8306 
8307 			vaddr = tte_to_vaddr(hmeblkp, tte);
8308 			color = addr_to_vcolor(vaddr);
8309 
8310 			if (npages > 1) {
8311 				/*
8312 				 * If there is a big mapping, make sure
8313 				 * 8K mapping is consistent with the big
8314 				 * mapping.
8315 				 */
8316 				bcolor = i % ncolors;
8317 				if (color != bcolor) {
8318 					return (0);
8319 				}
8320 			}
8321 			if (!clr_valid) {
8322 				clr_valid = 1;
8323 				color1 = color;
8324 			}
8325 
8326 			if (color1 != color) {
8327 				return (0);
8328 			}
8329 		}
8330 
8331 		pp = PP_PAGENEXT(pp);
8332 	}
8333 
8334 	return (1);
8335 }
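
/*
 * Consistency example (illustrative): for npages == 8 and 8 cache
 * colors, every mapping of constituent i must sit at virtual color
 * i % ncolors; a single mapping at the wrong color, or any PP_ISPNC()
 * page, returns 0 and the whole range stays TNC.
 */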
8336 
8337 static void
8338 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
8339 	pgcnt_t npages)
8340 {
8341 	kmutex_t *pmtx;
8342 	int i, ncolors, bcolor;
8343 	kpm_hlk_t *kpmp;
8344 	cpuset_t cpuset;
8345 
8346 	ASSERT(pp != NULL);
8347 	ASSERT(!(cache & CACHE_WRITEBACK));
8348 
8349 	kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
8350 	pmtx = sfmmu_page_enter(pp);
8351 
8352 	/*
8353 	 * Fast path caching single unmapped page
8354 	 */
8355 	if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
8356 	    flags == HAT_CACHE) {
8357 		PP_CLRTNC(pp);
8358 		PP_CLRPNC(pp);
8359 		sfmmu_page_exit(pmtx);
8360 		sfmmu_kpm_kpmp_exit(kpmp);
8361 		return;
8362 	}
8363 
8364 	/*
8365 	 * We need to capture all cpus in order to change cacheability
8366 	 * because we can't allow one cpu to access the same physical
8367 	 * page using a cacheable and a non-cacheable mapping at the same
8368 	 * time. Since we may end up walking the ism mapping list, we
8369 	 * have to grab its lock now, because we can't do so after all the
8370 	 * cpus have been captured.
8371 	 */
8372 	sfmmu_hat_lock_all();
8373 	mutex_enter(&ism_mlist_lock);
8374 	kpreempt_disable();
8375 	cpuset = cpu_ready_set;
8376 	xc_attention(cpuset);
8377 
8378 	if (npages > 1) {
8379 		/*
8380 		 * Make sure all colors are flushed since
8381 		 * sfmmu_page_cache() only flushes one color;
8382 		 * it does not know about big pages.
8383 		 */
8384 		ncolors = CACHE_NUM_COLOR;
8385 		if (flags & HAT_TMPNC) {
8386 			for (i = 0; i < ncolors; i++) {
8387 				sfmmu_cache_flushcolor(i, pp->p_pagenum);
8388 			}
8389 			cache_flush_flag = CACHE_NO_FLUSH;
8390 		}
8391 	}
8392 
8393 	for (i = 0; i < npages; i++) {
8394 
8395 		ASSERT(sfmmu_mlist_held(pp));
8396 
8397 		if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
8398 
8399 			if (npages > 1) {
8400 				bcolor = i % ncolors;
8401 			} else {
8402 				bcolor = NO_VCOLOR;
8403 			}
8404 
8405 			sfmmu_page_cache(pp, flags, cache_flush_flag,
8406 			    bcolor);
8407 		}
8408 
8409 		pp = PP_PAGENEXT(pp);
8410 	}
8411 
8412 	xt_sync(cpuset);
8413 	xc_dismissed(cpuset);
8414 	mutex_exit(&ism_mlist_lock);
8415 	sfmmu_hat_unlock_all();
8416 	sfmmu_page_exit(pmtx);
8417 	sfmmu_kpm_kpmp_exit(kpmp);
8418 	kpreempt_enable();
8419 }
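
/*
 * Capture protocol sketch (restating the bracket used above): all ready
 * cpus are held at attention while the ttes are rewritten, so no cpu
 * can hold cacheable and non-cacheable views of the same line at once:
 *
 *	kpreempt_disable();
 *	cpuset = cpu_ready_set;
 *	xc_attention(cpuset);
 *	...rewrite ttes via sfmmu_page_cache()...
 *	xt_sync(cpuset);
 *	xc_dismissed(cpuset);
 *	kpreempt_enable();
 */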
8420 
8421 /*
8422  * This function changes the virtual cacheability of all mappings to a
8423  * particular page.  When changing from uncache to cacheable the mappings will
8424  * only be changed if all of them have the same virtual color.
8425  * We need to flush the cache in all cpus.  It is possible that
8426  * a process referenced a page as cacheable but has since exited
8427  * and cleared the mapping list.  We still need to flush it, but have no
8428  * state, so flushing on all cpus is the only alternative.
8429  */
8430 static void
8431 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
8432 {
8433 	struct	sf_hment *sfhme;
8434 	struct	hme_blk *hmeblkp;
8435 	sfmmu_t *sfmmup;
8436 	tte_t	tte, ttemod;
8437 	caddr_t	vaddr;
8438 	int	ret, color;
8439 	pfn_t	pfn;
8440 
8441 	color = bcolor;
8442 	pfn = pp->p_pagenum;
8443 
8444 	for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
8445 
8446 		hmeblkp = sfmmu_hmetohblk(sfhme);
8447 
8448 		if (hmeblkp->hblk_xhat_bit)
8449 			continue;
8450 
8451 		sfmmu_copytte(&sfhme->hme_tte, &tte);
8452 		ASSERT(TTE_IS_VALID(&tte));
8453 		vaddr = tte_to_vaddr(hmeblkp, tte);
8454 		color = addr_to_vcolor(vaddr);
8455 
8456 #ifdef DEBUG
8457 		if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
8458 			ASSERT(color == bcolor);
8459 		}
8460 #endif
8461 
8462 		ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
8463 
8464 		ttemod = tte;
8465 		if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
8466 			TTE_CLR_VCACHEABLE(&ttemod);
8467 		} else {	/* flags & HAT_CACHE */
8468 			TTE_SET_VCACHEABLE(&ttemod);
8469 		}
8470 		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
8471 		if (ret < 0) {
8472 			/*
8473 			 * Since all cpus are captured modifytte should not
8474 			 * fail.
8475 			 */
8476 			panic("sfmmu_page_cache: write to tte failed");
8477 		}
8478 
8479 		sfmmup = hblktosfmmu(hmeblkp);
8480 		if (cache_flush_flag == CACHE_FLUSH) {
8481 			/*
8482 			 * Flush TSBs, TLBs and caches
8483 			 */
8484 			if (sfmmup->sfmmu_ismhat) {
8485 				if (flags & HAT_CACHE) {
8486 					SFMMU_STAT(sf_ism_recache);
8487 				} else {
8488 					SFMMU_STAT(sf_ism_uncache);
8489 				}
8490 				sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
8491 				    pfn, CACHE_FLUSH);
8492 			} else {
8493 				sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
8494 				    pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
8495 			}
8496 
8497 			/*
8498 			 * all cache entries belonging to this pfn are
8499 			 * now flushed.
8500 			 */
8501 			cache_flush_flag = CACHE_NO_FLUSH;
8502 		} else {
8503 
8504 			/*
8505 			 * Flush only TSBs and TLBs.
8506 			 */
8507 			if (sfmmup->sfmmu_ismhat) {
8508 				if (flags & HAT_CACHE) {
8509 					SFMMU_STAT(sf_ism_recache);
8510 				} else {
8511 					SFMMU_STAT(sf_ism_uncache);
8512 				}
8513 				sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
8514 				    pfn, CACHE_NO_FLUSH);
8515 			} else {
8516 				sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
8517 			}
8518 		}
8519 	}
8520 
8521 	if (PP_ISMAPPED_KPM(pp))
8522 		sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
8523 
8524 	switch (flags) {
8525 
8526 		default:
8527 			panic("sfmmu_pagecache: unknown flags");
8528 			break;
8529 
8530 		case HAT_CACHE:
8531 			PP_CLRTNC(pp);
8532 			PP_CLRPNC(pp);
8533 			PP_SET_VCOLOR(pp, color);
8534 			break;
8535 
8536 		case HAT_TMPNC:
8537 			PP_SETTNC(pp);
8538 			PP_SET_VCOLOR(pp, NO_VCOLOR);
8539 			break;
8540 
8541 		case HAT_UNCACHE:
8542 			PP_SETPNC(pp);
8543 			PP_CLRTNC(pp);
8544 			PP_SET_VCOLOR(pp, NO_VCOLOR);
8545 			break;
8546 	}
8547 }
8548 
8549 /*
8550  * This routine gets called when the system has run out of free contexts.
8551  * It will simply choose the context passed to it to be stolen and reused.
8552  */
8553 /* ARGSUSED */
8554 static void
8555 sfmmu_reuse_ctx(struct ctx *ctx, sfmmu_t *sfmmup)
8556 {
8557 	sfmmu_t *stolen_sfmmup;
8558 	cpuset_t cpuset;
8559 	ushort_t	cnum = ctxtoctxnum(ctx);
8560 
8561 	ASSERT(cnum != KCONTEXT);
8562 	ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0);	/* write locked */
8563 
8564 	/*
8565 	 * simply steal and reuse the ctx passed to us.
8566 	 */
8567 	stolen_sfmmup = ctx->ctx_sfmmu;
8568 	ASSERT(sfmmu_hat_lock_held(sfmmup));
8569 	ASSERT(stolen_sfmmup->sfmmu_cnum == cnum);
8570 	ASSERT(stolen_sfmmup != ksfmmup);
8571 
8572 	TRACE_CTXS(&ctx_trace_mutex, ctx_trace_ptr, cnum, stolen_sfmmup,
8573 	    sfmmup, CTX_TRC_STEAL);
8574 	SFMMU_STAT(sf_ctxsteal);
8575 
8576 	/*
8577 	 * Update sfmmu and ctx structs. After this point all threads
8578 	 * belonging to this hat/proc will fault and not use the ctx
8579 	 * being stolen.
8580 	 */
8581 	kpreempt_disable();
8582 	/*
8583 	 * Enforce reverse order of assignments from sfmmu_get_ctx().  This
8584 	 * is done to prevent a race where a thread faults with the context
8585 	 * but the TSB has changed.
8586 	 */
8587 	stolen_sfmmup->sfmmu_cnum = INVALID_CONTEXT;
8588 	membar_enter();
8589 	ctx->ctx_sfmmu = NULL;
8590 
8591 	/*
8592 	 * 1. flush TLB in all CPUs that ran the process whose ctx
8593 	 * we are stealing.
8594 	 * 2. change context for all other CPUs to INVALID_CONTEXT,
8595 	 * if they are running in the context that we are going to steal.
8596 	 */
8597 	cpuset = stolen_sfmmup->sfmmu_cpusran;
8598 	CPUSET_DEL(cpuset, CPU->cpu_id);
8599 	CPUSET_AND(cpuset, cpu_ready_set);
8600 	SFMMU_XCALL_STATS(cnum);
8601 	xt_some(cpuset, sfmmu_ctx_steal_tl1, cnum, INVALID_CONTEXT);
8602 	xt_sync(cpuset);
8603 
8604 	/*
8605 	 * flush TLB of local processor
8606 	 */
8607 	vtag_flushctx(cnum);
8608 
8609 	/*
8610 	 * If we just stole the ctx from the current process
8611 	 * on local cpu then we also invalidate his context
8612 	 * here.
8613 	 */
8614 	if (sfmmu_getctx_sec() == cnum) {
8615 		sfmmu_setctx_sec(INVALID_CONTEXT);
8616 		sfmmu_clear_utsbinfo();
8617 	}
8618 
8619 	kpreempt_enable();
8620 	SFMMU_STAT(sf_tlbflush_ctx);
8621 }
8622 
8623 /*
8624  * Returns a context with the reader lock held.
8625  *
8626  * We maintain 2 different lists of contexts.  The first list
8627  * is the free list and it is headed by ctxfree.  These contexts
8628  * are ready to use.  The second list is the dirty list and is
8629  * headed by ctxdirty. These contexts have been freed but haven't
8630  * been flushed from the TLB.
8631  *
8632  * It's the responsibility of the caller to guarantee that the
8633  * process serializes on calls here by taking the HAT lock for
8634  * the hat.
8635  *
8636  * Changing the page size is a rather complicated process, so
8637  * rather than jump through lots of hoops to special case it,
8638  * the easiest way to go about it is to tell the MMU we want
8639  * to change page sizes and then switch to using a different
8640  * context.  When we program the context registers for the
8641  * process, we can take care of setting up the (new) page size
8642  * for that context at that point.
8643  */
8644 
8645 static struct ctx *
8646 sfmmu_get_ctx(sfmmu_t *sfmmup)
8647 {
8648 	struct ctx *ctx;
8649 	ushort_t cnum;
8650 	struct ctx *lastctx = &ctxs[nctxs-1];
8651 	struct ctx *firstctx = &ctxs[NUM_LOCKED_CTXS];
8652 	uint_t	found_stealable_ctx;
8653 	uint_t	retry_count = 0;
8654 
8655 #define	NEXT_CTX(ctx)   (((ctx) >= lastctx) ? firstctx : ((ctx) + 1))
8656 
8657 retry:
8658 
8659 	ASSERT(sfmmup->sfmmu_cnum != KCONTEXT);
8660 	/*
8661 	 * Check to see if this process has already got a ctx.
8662 	 * In that case just set the sec-ctx, grab a readers lock, and
8663 	 * return.
8664 	 *
8665 	 * We have to double check after we get the readers lock on the
8666 	 * context, since it could be stolen in this short window.
8667 	 */
8668 	if (sfmmup->sfmmu_cnum >= NUM_LOCKED_CTXS) {
8669 		ctx = sfmmutoctx(sfmmup);
8670 		rw_enter(&ctx->ctx_rwlock, RW_READER);
8671 		if (ctx->ctx_sfmmu == sfmmup) {
8672 			return (ctx);
8673 		} else {
8674 			rw_exit(&ctx->ctx_rwlock);
8675 		}
8676 	}
8677 
8678 	found_stealable_ctx = 0;
8679 	mutex_enter(&ctx_list_lock);
8680 	if ((ctx = ctxfree) != NULL) {
8681 		/*
8682 		 * Found a ctx in free list. Delete it from the list and
8683 		 * use it.  There's a short window where the stealer can
8684 		 * look at the context before we grab the lock on the
8685 		 * context, so we have to handle that with the free flag.
8686 		 */
8687 		SFMMU_STAT(sf_ctxfree);
8688 		ctxfree = ctx->ctx_free;
8689 		ctx->ctx_sfmmu = NULL;
8690 		mutex_exit(&ctx_list_lock);
8691 		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
8692 		ASSERT(ctx->ctx_sfmmu == NULL);
8693 		ASSERT((ctx->ctx_flags & CTX_FREE_FLAG) != 0);
8694 	} else if ((ctx = ctxdirty) != NULL) {
8695 		/*
8696 		 * No free contexts.  If we have at least one dirty ctx
8697 		 * then flush the TLBs on all cpus if necessary and move
8698 		 * the dirty list to the free list.
8699 		 */
8700 		SFMMU_STAT(sf_ctxdirty);
8701 		ctxdirty = NULL;
8702 		if (delay_tlb_flush)
8703 			sfmmu_tlb_all_demap();
8704 		ctxfree = ctx->ctx_free;
8705 		ctx->ctx_sfmmu = NULL;
8706 		mutex_exit(&ctx_list_lock);
8707 		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
8708 		ASSERT(ctx->ctx_sfmmu == NULL);
8709 		ASSERT((ctx->ctx_flags & CTX_FREE_FLAG) != 0);
8710 	} else {
8711 		/*
8712 		 * No free context available, so steal one.
8713 		 *
8714 		 * The policy to choose the appropriate context is simple;
8715 		 * just sweep all the ctxs using ctxhand. This will steal
8716 		 * the LRU ctx.
8717 		 *
8718 		 * We however only steal a non-free context that can be
8719 		 * write locked.  Keep searching till we find a stealable
8720 		 * ctx.
8721 		 */
8722 		mutex_exit(&ctx_list_lock);
8723 		ctx = ctxhand;
8724 		do {
8725 			/*
8726 			 * If you get the writers lock, and the ctx isn't
8727 			 * a free ctx, THEN you can steal this ctx.
8728 			 */
8729 			if ((ctx->ctx_flags & CTX_FREE_FLAG) == 0 &&
8730 			    rw_tryenter(&ctx->ctx_rwlock, RW_WRITER) != 0) {
8731 				if (ctx->ctx_flags & CTX_FREE_FLAG) {
8732 					/* let the first guy have it */
8733 					rw_exit(&ctx->ctx_rwlock);
8734 				} else {
8735 					found_stealable_ctx = 1;
8736 					break;
8737 				}
8738 			}
8739 			ctx = NEXT_CTX(ctx);
8740 		} while (ctx != ctxhand);
8741 
8742 		if (found_stealable_ctx) {
8743 			/*
8744 			 * Try and reuse the ctx.
8745 			 */
8746 			sfmmu_reuse_ctx(ctx, sfmmup);
8747 
8748 		} else if (retry_count++ < GET_CTX_RETRY_CNT) {
8749 			goto retry;
8750 
8751 		} else {
8752 			panic("Can't find any stealable context");
8753 		}
8754 	}
8755 
8756 	ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0);	/* write locked */
8757 	ctx->ctx_sfmmu = sfmmup;
8758 
8759 	/*
8760 	 * Clear the ctx_flags field.
8761 	 */
8762 	ctx->ctx_flags = 0;
8763 
8764 	cnum = ctxtoctxnum(ctx);
8765 	membar_exit();
8766 	sfmmup->sfmmu_cnum = cnum;
8767 
8768 	/*
8769 	 * Let the MMU set up the page sizes to use for
8770 	 * this context in the TLB. Don't program 2nd dtlb for ism hat.
8771 	 */
8772 	if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0))
8773 		mmu_set_ctx_page_sizes(sfmmup);
8774 
8775 	/*
8776 	 * Downgrade to reader's lock.
8777 	 */
8778 	rw_downgrade(&ctx->ctx_rwlock);
8779 
8780 	/*
8781 	 * If this value doesn't get set to what we want
8782 	 * it won't matter, so don't worry about locking.
8783 	 */
8784 	ctxhand = NEXT_CTX(ctx);
8785 
8786 	/*
8787 	 * Better not have been stolen while we held the ctx's
8788 	 * lock or we're hosed.
8789 	 */
8790 	ASSERT(sfmmup == sfmmutoctx(sfmmup)->ctx_sfmmu);
8791 
8792 	return (ctx);
8793 
8794 #undef NEXT_CTX
8795 }
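
/*
 * Allocation order sketch: sfmmu_get_ctx() tries, in order,
 *
 *	1. the hat's current ctx, re-validated under the reader lock;
 *	2. the head of the ctxfree list;
 *	3. the ctxdirty list, flushing all TLBs first when flushes
 *	   were delayed;
 *	4. stealing the LRU ctx by sweeping from ctxhand;
 *
 * and panics only after GET_CTX_RETRY_CNT failed steal attempts.
 */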
8796 
8797 
8798 /*
8799  * Set the process context to INVALID_CONTEXT (but
8800  * without stealing the ctx) so that it faults and
8801  * reloads the MMU state from TL=0.  Caller must
8802  * hold the hat lock since we don't acquire it here.
8803  */
8804 static void
8805 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
8806 {
8807 	int cnum;
8808 	cpuset_t cpuset;
8809 
8810 	ASSERT(sfmmup != ksfmmup);
8811 	ASSERT(sfmmu_hat_lock_held(sfmmup));
8812 
8813 	kpreempt_disable();
8814 
8815 	cnum = sfmmutoctxnum(sfmmup);
8816 	if (cnum != INVALID_CONTEXT) {
8817 		cpuset = sfmmup->sfmmu_cpusran;
8818 		CPUSET_DEL(cpuset, CPU->cpu_id);
8819 		CPUSET_AND(cpuset, cpu_ready_set);
8820 		SFMMU_XCALL_STATS(cnum);
8821 
8822 		xt_some(cpuset, sfmmu_raise_tsb_exception,
8823 		    cnum, INVALID_CONTEXT);
8824 		xt_sync(cpuset);
8825 
8826 		/*
8827 		 * If the process is running on the local CPU
8828 		 * we need to update the MMU state here as well.
8829 		 */
8830 		if (sfmmu_getctx_sec() == cnum)
8831 			sfmmu_load_mmustate(sfmmup);
8832 
8833 		SFMMU_STAT(sf_tsb_raise_exception);
8834 	}
8835 
8836 	kpreempt_enable();
8837 }
8838 
8839 
8840 /*
8841  * Replace the specified TSB with a new TSB.  This function gets called when
8842  * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
8843  * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
8844  * (8K).
8845  *
8846  * Caller must hold the HAT lock, but should assume any tsb_info
8847  * pointers it has are no longer valid after calling this function.
8848  *
8849  * Return values:
8850  *	TSB_ALLOCFAIL	Failed to allocate a TSB, due to memory constraints
8851  *	TSB_LOSTRACE	HAT is busy, i.e. another thread is already doing
8852  *			something to this tsbinfo/TSB
8853  *	TSB_SUCCESS	Operation succeeded
8854  */
8855 static tsb_replace_rc_t
8856 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
8857     hatlock_t *hatlockp, uint_t flags)
8858 {
8859 	struct tsb_info *new_tsbinfo = NULL;
8860 	struct tsb_info *curtsb, *prevtsb;
8861 	uint_t tte_sz_mask;
8862 	cpuset_t cpuset;
8863 	struct ctx *ctx = NULL;
8864 	int ctxnum;
8865 
8866 	ASSERT(sfmmup != ksfmmup);
8867 	ASSERT(sfmmup->sfmmu_ismhat == 0);
8868 	ASSERT(sfmmu_hat_lock_held(sfmmup));
8869 	ASSERT(szc <= tsb_max_growsize);
8870 
8871 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
8872 		return (TSB_LOSTRACE);
8873 
8874 	/*
8875 	 * Find the tsb_info ahead of this one in the list, and
8876 	 * also make sure that the tsb_info passed in really
8877 	 * exists!
8878 	 */
8879 	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
8880 	    curtsb != old_tsbinfo && curtsb != NULL;
8881 	    prevtsb = curtsb, curtsb = curtsb->tsb_next);
8882 	ASSERT(curtsb != NULL);
8883 
8884 	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
8885 		/*
8886 		 * The process is swapped out, so just set the new size
8887 		 * code.  When it swaps back in, we'll allocate a new one
8888 		 * of the new chosen size.
8889 		 */
8890 		curtsb->tsb_szc = szc;
8891 		return (TSB_SUCCESS);
8892 	}
8893 	SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
8894 
8895 	tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
8896 
8897 	/*
8898 	 * All initialization is done inside of sfmmu_tsbinfo_alloc().
8899 	 * If we fail to allocate a TSB, exit.
8900 	 */
8901 	sfmmu_hat_exit(hatlockp);
8902 	if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, tte_sz_mask,
8903 	    flags, sfmmup)) {
8904 		(void) sfmmu_hat_enter(sfmmup);
8905 		if (!(flags & TSB_SWAPIN))
8906 			SFMMU_STAT(sf_tsb_resize_failures);
8907 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
8908 		return (TSB_ALLOCFAIL);
8909 	}
8910 	(void) sfmmu_hat_enter(sfmmup);
8911 
8912 	/*
8913 	 * Re-check to make sure somebody else didn't muck with us while we
8914 	 * didn't hold the HAT lock.  If the process swapped out, fine, just
8915 	 * exit; this can happen if we try to shrink the TSB from the context
8916 	 * of another process (such as on an ISM unmap), though it is rare.
8917 	 */
8918 	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
8919 		SFMMU_STAT(sf_tsb_resize_failures);
8920 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
8921 		sfmmu_hat_exit(hatlockp);
8922 		sfmmu_tsbinfo_free(new_tsbinfo);
8923 		(void) sfmmu_hat_enter(sfmmup);
8924 		return (TSB_LOSTRACE);
8925 	}
8926 
8927 #ifdef	DEBUG
8928 	/* Reverify that the tsb_info still exists.. for debugging only */
8929 	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
8930 	    curtsb != old_tsbinfo && curtsb != NULL;
8931 	    prevtsb = curtsb, curtsb = curtsb->tsb_next);
8932 	ASSERT(curtsb != NULL);
8933 #endif	/* DEBUG */
8934 
8935 	/*
8936 	 * Quiesce any CPUs running this process on their next TLB miss
8937 	 * so they atomically see the new tsb_info.  We temporarily set the
8938 	 * context to invalid context so new threads that come on processor
8939 	 * after we do the xcall to cpusran will also serialize behind the
8940 	 * HAT lock on TLB miss and will see the new TSB.  Since this short
8941 	 * race with a new thread coming on processor is relatively rare,
8942 	 * this synchronization mechanism should be cheaper than always
8943 	 * pausing all CPUs for the duration of the setup, which is what
8944 	 * the old implementation did.  This is particularly true if we are
8945 	 * copying a huge chunk of memory around during that window.
8946 	 *
8947 	 * The memory barriers are to make sure things stay consistent
8948 	 * with resume() since it does not hold the HAT lock while
8949 	 * walking the list of tsb_info structures.
8950 	 */
8951 	if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
8952 		/* The TSB is either growing or shrinking. */
8953 		ctx = sfmmutoctx(sfmmup);
8954 		rw_enter(&ctx->ctx_rwlock, RW_WRITER);
8955 
8956 		ctxnum = sfmmutoctxnum(sfmmup);
8957 		sfmmup->sfmmu_cnum = INVALID_CONTEXT;
8958 		membar_enter();	/* make sure visible on all CPUs */
8959 
8960 		kpreempt_disable();
8961 		if (ctxnum != INVALID_CONTEXT) {
8962 			cpuset = sfmmup->sfmmu_cpusran;
8963 			CPUSET_DEL(cpuset, CPU->cpu_id);
8964 			CPUSET_AND(cpuset, cpu_ready_set);
8965 			SFMMU_XCALL_STATS(ctxnum);
8966 
8967 			xt_some(cpuset, sfmmu_raise_tsb_exception,
8968 			    ctxnum, INVALID_CONTEXT);
8969 			xt_sync(cpuset);
8970 
8971 			SFMMU_STAT(sf_tsb_raise_exception);
8972 		}
8973 		kpreempt_enable();
8974 	} else {
8975 		/*
8976 		 * It is illegal to swap in TSBs from a process other
8977 		 * than a process being swapped in.  This in turn
8978 		 * implies we do not have a valid MMU context here
8979 		 * since a process needs one to resolve translation
8980 		 * misses.
8981 		 */
8982 		ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
8983 		ASSERT(sfmmutoctxnum(sfmmup) == INVALID_CONTEXT);
8984 	}
8985 
8986 	new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
8987 	membar_stst();	/* strict ordering required */
8988 	if (prevtsb)
8989 		prevtsb->tsb_next = new_tsbinfo;
8990 	else
8991 		sfmmup->sfmmu_tsb = new_tsbinfo;
8992 	membar_enter();	/* make sure new TSB globally visible */
8993 	sfmmu_setup_tsbinfo(sfmmup);
8994 
8995 	/*
8996 	 * We need to migrate TSB entries from the old TSB to the new TSB
8997 	 * if tsb_remap_ttes is set and the TSB is growing.
8998 	 */
8999 	if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
9000 		sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
9001 
9002 	if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
9003 		kpreempt_disable();
9004 		membar_exit();
9005 		sfmmup->sfmmu_cnum = ctxnum;
9006 		if (ctxnum != INVALID_CONTEXT &&
9007 		    sfmmu_getctx_sec() == ctxnum) {
9008 			sfmmu_load_mmustate(sfmmup);
9009 		}
9010 		kpreempt_enable();
9011 		rw_exit(&ctx->ctx_rwlock);
9012 	}
9013 
9014 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9015 
9016 	/*
9017 	 * Drop the HAT lock to free our old tsb_info.
9018 	 */
9019 	sfmmu_hat_exit(hatlockp);
9020 
9021 	if ((flags & TSB_GROW) == TSB_GROW) {
9022 		SFMMU_STAT(sf_tsb_grow);
9023 	} else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
9024 		SFMMU_STAT(sf_tsb_shrink);
9025 	}
9026 
9027 	sfmmu_tsbinfo_free(old_tsbinfo);
9028 
9029 	(void) sfmmu_hat_enter(sfmmup);
9030 	return (TSB_SUCCESS);
9031 }
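
/*
 * Splice ordering sketch (the lock-free reader contract above):
 *
 *	new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
 *	membar_stst();			strict store ordering
 *	prevtsb->tsb_next = new_tsbinfo; (or sfmmup->sfmmu_tsb = ...)
 *	membar_enter();			globally visible
 *
 * so resume(), which walks the tsb_info list without the HAT lock,
 * sees either the fully linked old or new element, never a partial one.
 */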
9032 
9033 /*
9034  * Steal context from process, forcing the process to switch to another
9035  * context on the next TLB miss, and therefore start using the TLB that
9036  * is reprogrammed for the new page sizes.
9037  */
9038 void
9039 sfmmu_steal_context(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
9040 {
9041 	struct ctx *ctx;
9042 	int i, cnum;
9043 	hatlock_t *hatlockp = NULL;
9044 
9045 	hatlockp = sfmmu_hat_enter(sfmmup);
9046 	/* USIII+-IV+ optimization, requires hat lock */
9047 	if (tmp_pgsz) {
9048 		for (i = 0; i < mmu_page_sizes; i++)
9049 			sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
9050 	}
9051 	SFMMU_STAT(sf_tlb_reprog_pgsz);
9052 	ctx = sfmmutoctx(sfmmup);
9053 	rw_enter(&ctx->ctx_rwlock, RW_WRITER);
9054 	cnum = sfmmutoctxnum(sfmmup);
9055 
9056 	if (cnum != INVALID_CONTEXT) {
9057 		sfmmu_tlb_swap_ctx(sfmmup, ctx);
9058 	}
9059 	rw_exit(&ctx->ctx_rwlock);
9060 	sfmmu_hat_exit(hatlockp);
9061 }
9062 
9063 /*
9064  * This function assumes that there are either four or six supported page
9065  * sizes and at most two programmable TLBs, so we need to decide which
9066  * page sizes are most important and then tell the MMU layer so it
9067  * can adjust the TLB page sizes accordingly (if supported).
9068  *
9069  * If these assumptions change, this function will need to be
9070  * updated to support whatever the new limits are.
9071  *
9072  * The growing flag is nonzero if we are growing the address space,
9073  * and zero if it is shrinking.  This allows us to decide whether
9074  * to grow or shrink our TSB, depending upon available memory
9075  * conditions.
9076  */
9077 static void
9078 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
9079 {
9080 	uint64_t ttecnt[MMU_PAGE_SIZES];
9081 	uint64_t tte8k_cnt, tte4m_cnt;
9082 	uint8_t i;
9083 	int sectsb_thresh;
9084 
9085 	/*
9086 	 * Kernel threads, processes with small address spaces not using
9087 	 * large pages, and dummy ISM HATs need not apply.
9088 	 */
9089 	if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
9090 		return;
9091 
9092 	if ((sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) == 0 &&
9093 	    sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
9094 		return;
9095 
9096 	for (i = 0; i < mmu_page_sizes; i++) {
9097 		ttecnt[i] = SFMMU_TTE_CNT(sfmmup, i);
9098 	}
9099 
9100 	/* Check pagesizes in use, and possibly reprogram DTLB. */
9101 	if (&mmu_check_page_sizes)
9102 		mmu_check_page_sizes(sfmmup, ttecnt);
9103 
9104 	/*
9105 	 * Calculate the number of 8k ttes to represent the span of these
9106 	 * pages.
9107 	 */
9108 	tte8k_cnt = ttecnt[TTE8K] +
9109 	    (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
9110 	    (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
9111 	if (mmu_page_sizes == max_mmu_page_sizes) {
9112 		tte4m_cnt = ttecnt[TTE4M] +
9113 		    (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
9114 		    (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
9115 	} else {
9116 		tte4m_cnt = ttecnt[TTE4M];
9117 	}
9118 
9119 	/*
9120 	 * Inflate TSB sizes by a factor of 2 if this process
9121 	 * uses 4M text pages to minimize extra conflict misses
9122 	 * in the first TSB since without counting text pages
9123 	 * 8K TSB may become too small.
9124 	 *
9125 	 * Also double the size of the second TSB to minimize
9126 	 * extra conflict misses due to competition between 4M text pages
9127 	 * and data pages.
9128 	 *
9129 	 * We need to adjust the second TSB allocation threshold by the
9130 	 * inflation factor, since there is no point in creating a second
9131 	 * TSB when we know all the mappings can fit in the I/D TLBs.
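	 *
	 * Illustratively, with HAT_4MTEXT_FLAG set both tte4m_cnt and
	 * sectsb_thresh are doubled, so the decision to create a second
	 * TSB is unchanged while the size code eventually selected for it
	 * reflects the doubled count.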
9132 	 */
9133 	sectsb_thresh = tsb_sectsb_threshold;
9134 	if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
9135 		tte8k_cnt <<= 1;
9136 		tte4m_cnt <<= 1;
9137 		sectsb_thresh <<= 1;
9138 	}
9139 
9140 	/*
9141 	 * Check to see if our TSB is the right size; we may need to
9142 	 * grow or shrink it.  If the process is small, our work is
9143 	 * finished at this point.
9144 	 */
9145 	if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
9146 		return;
9147 	}
9148 	sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
9149 }
9150 
9151 static void
9152 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
9153 	uint64_t tte4m_cnt, int sectsb_thresh)
9154 {
9155 	int tsb_bits;
9156 	uint_t tsb_szc;
9157 	struct tsb_info *tsbinfop;
9158 	hatlock_t *hatlockp = NULL;
9159 
9160 	hatlockp = sfmmu_hat_enter(sfmmup);
9161 	ASSERT(hatlockp != NULL);
9162 	tsbinfop = sfmmup->sfmmu_tsb;
9163 	ASSERT(tsbinfop != NULL);
9164 
9165 	/*
9166 	 * If we're growing, select the size based on RSS.  If we're
9167 	 * shrinking, leave some room so we don't have to turn around and
9168 	 * grow again immediately.
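	 * ("Room" here means the shrink path sizes the TSB for twice the
	 * current tte count, so a small dip in RSS will not trigger a
	 * shrink followed by an immediate re-grow.)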
9169 	 */
9170 	if (growing)
9171 		tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
9172 	else
9173 		tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
9174 
9175 	if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
9176 	    (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
9177 		(void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
9178 		    hatlockp, TSB_SHRINK);
9179 	} else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
9180 		(void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
9181 		    hatlockp, TSB_GROW);
9182 	}
9183 	tsbinfop = sfmmup->sfmmu_tsb;
9184 
9185 	/*
9186 	 * With the TLB and first TSB out of the way, we need to see if
9187 	 * we need a second TSB for 4M pages.  If we managed to reprogram
9188 	 * the TLB page sizes above, the process will start using this new
9189 	 * TSB right away; otherwise, it will start using it on the next
9190 	 * context switch.  Either way, it's no big deal so there's no
9191 	 * synchronization with the trap handlers here unless we grow the
9192 	 * TSB (in which case it's required to prevent using the old one
9193 	 * after it's freed). Note: second tsb is required for 32M/256M
9194 	 * page sizes.
9195 	 */
9196 	if (tte4m_cnt > sectsb_thresh) {
9197 		/*
9198 		 * If we're growing, select the size based on RSS.  If we're
9199 		 * shrinking, leave some room so we don't have to turn
9200 		 * around and grow again immediately.
9201 		 */
9202 		if (growing)
9203 			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
9204 		else
9205 			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
9206 		if (tsbinfop->tsb_next == NULL) {
9207 			struct tsb_info *newtsb;
9208 			int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
9209 			    0 : TSB_ALLOC;
9210 
9211 			sfmmu_hat_exit(hatlockp);
9212 
9213 			/*
9214 			 * Try to allocate a TSB for 4[32|256]M pages.  If we
9215 			 * can't get the size we want, retry w/a minimum sized
9216 			 * TSB.  If that still didn't work, give up; we can
9217 			 * still run without one.
9218 			 */
9219 			tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
9220 			    TSB4M|TSB32M|TSB256M:TSB4M;
9221 			if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
9222 			    allocflags, sfmmup) != 0) &&
9223 			    (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
9224 			    tsb_bits, allocflags, sfmmup) != 0)) {
9225 				return;
9226 			}
9227 
9228 			hatlockp = sfmmu_hat_enter(sfmmup);
9229 
9230 			if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
9231 				sfmmup->sfmmu_tsb->tsb_next = newtsb;
9232 				SFMMU_STAT(sf_tsb_sectsb_create);
9233 				sfmmu_setup_tsbinfo(sfmmup);
9234 				sfmmu_hat_exit(hatlockp);
9235 				return;
9236 			} else {
9237 				/*
9238 				 * It's annoying, but possible for us
9239 				 * to get here: we dropped the HAT lock
9240 				 * because of locking order in the kmem
9241 				 * allocator, and while we were off getting
9242 				 * our memory, some other thread decided to
9243 				 * do us a favor and won the race to get a
9244 				 * second TSB for this process.  Sigh.
9245 				 */
9246 				sfmmu_hat_exit(hatlockp);
9247 				sfmmu_tsbinfo_free(newtsb);
9248 				return;
9249 			}
9250 		}
9251 
9252 		/*
9253 		 * We have a second TSB, see if it's big enough.
9254 		 */
9255 		tsbinfop = tsbinfop->tsb_next;
9256 
9257 		/*
9258 		 * Check to see if our second TSB is the right size;
9259 		 * we may need to grow or shrink it.
9260 		 * To prevent thrashing (e.g. growing the TSB on a
9261 		 * subsequent map operation), only try to shrink if
9262 		 * the TSB reach exceeds twice the virtual address
9263 		 * space size.
9264 		 */
9265 		if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
9266 		    (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
9267 			(void) sfmmu_replace_tsb(sfmmup, tsbinfop,
9268 			    tsb_szc, hatlockp, TSB_SHRINK);
9269 		} else if (growing && tsb_szc > tsbinfop->tsb_szc &&
9270 		    TSB_OK_GROW()) {
9271 			(void) sfmmu_replace_tsb(sfmmup, tsbinfop,
9272 			    tsb_szc, hatlockp, TSB_GROW);
9273 		}
9274 	}
9275 
9276 	sfmmu_hat_exit(hatlockp);
9277 }
9278 
9279 /*
9280  * Get the preferred page size code for a hat.
9281  * This is only advice, so locking is not done;
9282  * this transitory information could change
9283  * following the call anyway.  This interface is
9284  * sun4 private.
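 *
 * An illustrative case: an ISM mapping of length 5M whose base address
 * is 4M-aligned yields TTE4M (assuming that size is not disabled), since
 * 5M covers a whole 4M page but no larger supported size fits both the
 * length and the alignment.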
9285  */
9286 /*ARGSUSED*/
9287 uint_t
9288 hat_preferred_pgsz(struct hat *hat, caddr_t vaddr, size_t maplen, int maptype)
9289 {
9290 	sfmmu_t *sfmmup = (sfmmu_t *)hat;
9291 	uint_t szc, maxszc = mmu_page_sizes - 1;
9292 	size_t pgsz;
9293 
9294 	if (maptype == MAPPGSZ_ISM) {
9295 		for (szc = maxszc; szc >= TTE4M; szc--) {
9296 			if (disable_ism_large_pages & (1 << szc))
9297 				continue;
9298 
9299 			pgsz = hw_page_array[szc].hp_size;
9300 			if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz))
9301 				return (szc);
9302 		}
9303 		return (TTE4M);
9304 	} else if (&mmu_preferred_pgsz) { /* USIII+-USIV+ */
9305 		return (mmu_preferred_pgsz(sfmmup, vaddr, maplen));
9306 	} else {	/* USIII, USII, Niagara */
9307 		for (szc = maxszc; szc > TTE8K; szc--) {
9308 			if (disable_large_pages & (1 << szc))
9309 				continue;
9310 
9311 			pgsz = hw_page_array[szc].hp_size;
9312 			if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz))
9313 				return (szc);
9314 		}
9315 		return (TTE8K);
9316 	}
9317 }
9318 
9319 /*
9320  * Free up a ctx
9321  */
9322 static void
9323 sfmmu_free_ctx(sfmmu_t *sfmmup, struct ctx *ctx)
9324 {
9325 	int ctxnum;
9326 
9327 	rw_enter(&ctx->ctx_rwlock, RW_WRITER);
9328 
9329 	TRACE_CTXS(&ctx_trace_mutex, ctx_trace_ptr, sfmmup->sfmmu_cnum,
9330 	    sfmmup, 0, CTX_TRC_FREE);
9331 
9332 	if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) {
9333 		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
9334 		rw_exit(&ctx->ctx_rwlock);
9335 		return;
9336 	}
9337 
9338 	ASSERT(sfmmup == ctx->ctx_sfmmu);
9339 
9340 	ctx->ctx_sfmmu = NULL;
9341 	ctx->ctx_flags = 0;
9342 	sfmmup->sfmmu_cnum = INVALID_CONTEXT;
9343 	membar_enter();
9344 	CPUSET_ZERO(sfmmup->sfmmu_cpusran);
9345 	ctxnum = sfmmu_getctx_sec();
9346 	if (ctxnum == ctxtoctxnum(ctx)) {
9347 		sfmmu_setctx_sec(INVALID_CONTEXT);
9348 		sfmmu_clear_utsbinfo();
9349 	}
9350 
9351 	/*
9352 	 * Put the freed ctx on the dirty list
9353 	 */
9354 	mutex_enter(&ctx_list_lock);
9355 	CTX_SET_FLAGS(ctx, CTX_FREE_FLAG);
9356 	ctx->ctx_free = ctxdirty;
9357 	ctxdirty = ctx;
9358 	mutex_exit(&ctx_list_lock);
9359 
9360 	rw_exit(&ctx->ctx_rwlock);
9361 }
9362 
9363 /*
9364  * Free up a sfmmu
9365  * Since the sfmmu is currently embedded in the hat struct we simply zero
9366  * out our fields and free up the ism map blk list if any.
9367  */
9368 static void
9369 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
9370 {
9371 	ism_blk_t	*blkp, *nx_blkp;
9372 #ifdef	DEBUG
9373 	ism_map_t	*map;
9374 	int 		i;
9375 #endif
9376 
9377 	ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
9378 	ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
9379 	ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
9380 	ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
9381 	ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
9382 	ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
9383 	ASSERT(sfmmup->sfmmu_cnum == INVALID_CONTEXT);
9384 	sfmmup->sfmmu_free = 0;
9385 	sfmmup->sfmmu_ismhat = 0;
9386 
9387 	blkp = sfmmup->sfmmu_iblk;
9388 	sfmmup->sfmmu_iblk = NULL;
9389 
9390 	while (blkp) {
9391 #ifdef	DEBUG
9392 		map = blkp->iblk_maps;
9393 		for (i = 0; i < ISM_MAP_SLOTS; i++) {
9394 			ASSERT(map[i].imap_seg == 0);
9395 			ASSERT(map[i].imap_ismhat == NULL);
9396 			ASSERT(map[i].imap_ment == NULL);
9397 		}
9398 #endif
9399 		nx_blkp = blkp->iblk_next;
9400 		blkp->iblk_next = NULL;
9401 		blkp->iblk_nextpa = (uint64_t)-1;
9402 		kmem_cache_free(ism_blk_cache, blkp);
9403 		blkp = nx_blkp;
9404 	}
9405 }
9406 
9407 /*
9408  * Locking primitives accessed by HATLOCK macros
9409  */
9410 
9411 #define	SFMMU_SPL_MTX	(0x0)
9412 #define	SFMMU_ML_MTX	(0x1)
9413 
9414 #define	SFMMU_MLSPL_MTX(type, pg)	(((type) == SFMMU_SPL_MTX) ? \
9415 					    SPL_HASH(pg) : MLIST_HASH(pg))
9416 
9417 kmutex_t *
9418 sfmmu_page_enter(struct page *pp)
9419 {
9420 	return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
9421 }
9422 
9423 static void
9424 sfmmu_page_exit(kmutex_t *spl)
9425 {
9426 	mutex_exit(spl);
9427 }
9428 
9429 static int
9430 sfmmu_page_spl_held(struct page *pp)
9431 {
9432 	return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
9433 }
9434 
9435 kmutex_t *
9436 sfmmu_mlist_enter(struct page *pp)
9437 {
9438 	return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
9439 }
9440 
9441 void
9442 sfmmu_mlist_exit(kmutex_t *mml)
9443 {
9444 	mutex_exit(mml);
9445 }
9446 
9447 int
9448 sfmmu_mlist_held(struct page *pp)
9449 {
9450 
9451 	return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
9452 }
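
/*
 * Typical pairing of the mapping list lock (a minimal sketch; callers
 * such as sfmmu_hblk_swap() below follow this pattern):
 *
 *	pml = sfmmu_mlist_enter(pp);
 *	... examine or modify pp's p_mapping list ...
 *	sfmmu_mlist_exit(pml);
 */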
9453 
9454 /*
9455  * Common code for sfmmu_mlist_enter() and sfmmu_page_enter().  For
9456  * sfmmu_mlist_enter() case mml_table lock array is used and for
9457  * sfmmu_page_enter() sfmmu_page_lock lock array is used.
9458  *
9459  * The lock is taken on a root page so that it protects an operation on all
9460  * constituent pages of a large page pp belongs to.
9461  *
9462  * The routine takes a lock from the appropriate array. The lock is determined
9463  * by hashing the root page. After taking the lock this routine checks if the
9464  * root page has the same size code that was used to determine the root (i.e.,
9465  * the root hasn't changed).  If the root page has the expected p_szc field we
9466  * have the right lock and it's returned to the caller. If root's p_szc
9467  * decreased we release the lock and retry from the beginning.  This case can
9468  * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
9469  * value and taking the lock. The number of retries due to p_szc decrease is
9470  * limited by the maximum p_szc value. If p_szc is 0 we return the lock
9471  * determined by hashing pp itself.
9472  *
9473  * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
9474  * possible that p_szc can increase. To increase p_szc a thread has to lock
9475  * all constituent pages EXCL and do hat_pageunload() on all of them. All the
9476  * callers that don't hold a page locked recheck if hmeblk through which pp
9477  * was found still maps this pp.  If it doesn't map it anymore returned lock
9478  * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
9479  * p_szc increase after taking the lock it returns this lock without further
9480  * retries because in this case the caller doesn't care about which lock was
9481  * taken. The caller will drop it right away.
9482  *
9483  * After the routine returns it's guaranteed that hat_page_demote() can't
9484  * change p_szc field of any of constituent pages of a large page pp belongs
9485  * to as long as pp was either locked at least SHARED prior to this call or
9486  * the caller finds that hment that pointed to this pp still references this
9487  * pp (this also assumes that the caller holds hme hash bucket lock so that
9488  * the same pp can't be remapped into the same hmeblk after it was unmapped by
9489  * hat_pageunload()).
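 *
 * An illustrative retry (assumed szc values): we read pszc == 2 and take
 * the lock hashed from the 2-group root, but hat_page_demote() has since
 * dropped that root's p_szc to 1.  Then rszc (1) < pszc (2); if pp's own
 * p_szc shrank as well we drop the lock and retry with the smaller pszc,
 * otherwise p_szc must have increased again and the current lock is
 * returned for the caller to drop.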
9490  */
9491 static kmutex_t *
9492 sfmmu_mlspl_enter(struct page *pp, int type)
9493 {
9494 	kmutex_t	*mtx;
9495 	uint_t		prev_rszc = UINT_MAX;
9496 	page_t		*rootpp;
9497 	uint_t		szc;
9498 	uint_t		rszc;
9499 	uint_t		pszc = pp->p_szc;
9500 
9501 	ASSERT(pp != NULL);
9502 
9503 again:
9504 	if (pszc == 0) {
9505 		mtx = SFMMU_MLSPL_MTX(type, pp);
9506 		mutex_enter(mtx);
9507 		return (mtx);
9508 	}
9509 
9510 	/* The lock lives in the root page */
9511 	rootpp = PP_GROUPLEADER(pp, pszc);
9512 	mtx = SFMMU_MLSPL_MTX(type, rootpp);
9513 	mutex_enter(mtx);
9514 
9515 	/*
9516 	 * Return mtx in the following 3 cases:
9517 	 *
9518 	 * 1) If pp itself is the root: if its p_szc decreased before we took
9519 	 * the lock, pp is still the root of a smaller szc page; and if its
9520 	 * p_szc increased, it doesn't matter which lock we return (see the
9521 	 * comment in front of this routine).
9522 	 *
9523 	 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
9524 	 * large page we have the right lock since any previous potential
9525 	 * hat_page_demote() is done demoting from greater than current root's
9526 	 * p_szc because hat_page_demote() changes root's p_szc last. No
9527 	 * further hat_page_demote() can start or be in progress since it
9528 	 * would need the same lock we currently hold.
9529 	 *
9530 	 * 3) If rootpp's p_szc increased since previous iteration it doesn't
9531 	 * matter what lock we return (see comment in front of this routine).
9532 	 */
9533 	if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
9534 	    rszc >= prev_rszc) {
9535 		return (mtx);
9536 	}
9537 
9538 	/*
9539 	 * hat_page_demote() could have decreased root's p_szc.
9540 	 * In this case pp's p_szc must also be smaller than pszc.
9541 	 * Retry.
9542 	 */
9543 	if (rszc < pszc) {
9544 		szc = pp->p_szc;
9545 		if (szc < pszc) {
9546 			mutex_exit(mtx);
9547 			pszc = szc;
9548 			goto again;
9549 		}
9550 		/*
9551 		 * pp's p_szc increased after it was decreased.
9552 		 * page cannot be mapped. Return current lock. The caller
9553 		 * will drop it right away.
9554 		 */
9555 		return (mtx);
9556 	}
9557 
9558 	/*
9559 	 * root's p_szc is greater than pp's p_szc.
9560 	 * hat_page_demote() is not done with all pages
9561 	 * yet. Wait for it to complete.
9562 	 */
9563 	mutex_exit(mtx);
9564 	rootpp = PP_GROUPLEADER(rootpp, rszc);
9565 	mtx = SFMMU_MLSPL_MTX(type, rootpp);
9566 	mutex_enter(mtx);
9567 	mutex_exit(mtx);
9568 	prev_rszc = rszc;
9569 	goto again;
9570 }
9571 
9572 static int
9573 sfmmu_mlspl_held(struct page *pp, int type)
9574 {
9575 	kmutex_t	*mtx;
9576 
9577 	ASSERT(pp != NULL);
9578 	/* The lock lives in the root page */
9579 	pp = PP_PAGEROOT(pp);
9580 	ASSERT(pp != NULL);
9581 
9582 	mtx = SFMMU_MLSPL_MTX(type, pp);
9583 	return (MUTEX_HELD(mtx));
9584 }
9585 
9586 static uint_t
9587 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
9588 {
9589 	struct  hme_blk *hblkp;
9590 
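	/*
	 * freehblkp is tested without the lock first and then re-tested
	 * under freehblkp_lock; the unlocked read merely avoids taking
	 * the mutex when the free list is obviously empty.
	 */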
9591 	if (freehblkp != NULL) {
9592 		mutex_enter(&freehblkp_lock);
9593 		if (freehblkp != NULL) {
9594 			/*
9595 			 * If the current thread owns hblk_reserve, let it
9596 			 * succeed even if freehblkcnt is really low.
9597 			 */
9598 			if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
9599 				SFMMU_STAT(sf_get_free_throttle);
9600 				mutex_exit(&freehblkp_lock);
9601 				return (0);
9602 			}
9603 			freehblkcnt--;
9604 			*hmeblkpp = freehblkp;
9605 			hblkp = *hmeblkpp;
9606 			freehblkp = hblkp->hblk_next;
9607 			mutex_exit(&freehblkp_lock);
9608 			hblkp->hblk_next = NULL;
9609 			SFMMU_STAT(sf_get_free_success);
9610 			return (1);
9611 		}
9612 		mutex_exit(&freehblkp_lock);
9613 	}
9614 	SFMMU_STAT(sf_get_free_fail);
9615 	return (0);
9616 }
9617 
9618 static uint_t
9619 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
9620 {
9621 	struct  hme_blk *hblkp;
9622 
9623 	/*
9624 	 * If the current thread is mapping into kernel space,
9625 	 * let it succeed even if freehblkcnt is at its max,
9626 	 * so that we avoid freeing the hmeblk to kmem.
9627 	 * This will prevent stack overflow due to
9628 	 * possible recursion since kmem_cache_free()
9629 	 * might require creation of a slab which
9630 	 * in turn needs an hmeblk to map that slab;
9631 	 * let's break this vicious chain at the first
9632 	 * opportunity.
9633 	 */
9634 	if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
9635 		mutex_enter(&freehblkp_lock);
9636 		if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
9637 			SFMMU_STAT(sf_put_free_success);
9638 			freehblkcnt++;
9639 			hmeblkp->hblk_next = freehblkp;
9640 			freehblkp = hmeblkp;
9641 			mutex_exit(&freehblkp_lock);
9642 			return (1);
9643 		}
9644 		mutex_exit(&freehblkp_lock);
9645 	}
9646 
9647 	/*
9648 	 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
9649 	 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
9650 	 * we are not in the process of mapping into kernel space.
9651 	 */
9652 	ASSERT(!critical);
9653 	while (freehblkcnt > HBLK_RESERVE_CNT) {
9654 		mutex_enter(&freehblkp_lock);
9655 		if (freehblkcnt > HBLK_RESERVE_CNT) {
9656 			freehblkcnt--;
9657 			hblkp = freehblkp;
9658 			freehblkp = hblkp->hblk_next;
9659 			mutex_exit(&freehblkp_lock);
9660 			ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
9661 			kmem_cache_free(sfmmu8_cache, hblkp);
9662 			continue;
9663 		}
9664 		mutex_exit(&freehblkp_lock);
9665 	}
9666 	SFMMU_STAT(sf_put_free_fail);
9667 	return (0);
9668 }
9669 
9670 static void
9671 sfmmu_hblk_swap(struct hme_blk *new)
9672 {
9673 	struct hme_blk *old, *hblkp, *prev;
9674 	uint64_t hblkpa, prevpa, newpa;
9675 	caddr_t	base, vaddr, endaddr;
9676 	struct hmehash_bucket *hmebp;
9677 	struct sf_hment *osfhme, *nsfhme;
9678 	page_t *pp;
9679 	kmutex_t *pml;
9680 	tte_t tte;
9681 
9682 #ifdef	DEBUG
9683 	hmeblk_tag		hblktag;
9684 	struct hme_blk		*found;
9685 #endif
9686 	old = HBLK_RESERVE;
9687 
9688 	/*
9689 	 * save pa before bcopy clobbers it
9690 	 */
9691 	newpa = new->hblk_nextpa;
9692 
9693 	base = (caddr_t)get_hblk_base(old);
9694 	endaddr = base + get_hblk_span(old);
9695 
9696 	/*
9697 	 * acquire hash bucket lock.
9698 	 */
9699 	hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K);
9700 
9701 	/*
9702 	 * copy contents from old to new
9703 	 */
9704 	bcopy((void *)old, (void *)new, HME8BLK_SZ);
9705 
9706 	/*
9707 	 * add new to hash chain
9708 	 */
9709 	sfmmu_hblk_hash_add(hmebp, new, newpa);
9710 
9711 	/*
9712 	 * search hash chain for hblk_reserve; this needs to be performed
9713 	 * after adding new, otherwise prevpa and prev won't correspond
9714 	 * to the hblk which is prior to old in hash chain when we call
9715 	 * sfmmu_hblk_hash_rm to remove old later.
9716 	 */
9717 	for (prevpa = 0, prev = NULL,
9718 	    hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp;
9719 	    hblkp != NULL && hblkp != old;
9720 	    prevpa = hblkpa, prev = hblkp,
9721 	    hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next);
9722 
9723 	if (hblkp != old)
9724 		panic("sfmmu_hblk_swap: hblk_reserve not found");
9725 
9726 	/*
9727 	 * p_mapping list is still pointing to hments in hblk_reserve;
9728 	 * fix up p_mapping list so that they point to hments in new.
9729 	 *
9730 	 * Since all these mappings are created by hblk_reserve_thread
9731 	 * along the way and it's using at least one of the buffers from each of
9732 	 * the newly minted slabs, there is no danger of any of these
9733 	 * mappings getting unloaded by another thread.
9734 	 *
9735 	 * tsbmiss could only modify ref/mod bits of hments in old/new.
9736 	 * Since all of these hments hold mappings established by segkmem
9737 	 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
9738 	 * have no meaning for the mappings in hblk_reserve.  hments in
9739 	 * old and new are identical except for ref/mod bits.
9740 	 */
9741 	for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
9742 
9743 		HBLKTOHME(osfhme, old, vaddr);
9744 		sfmmu_copytte(&osfhme->hme_tte, &tte);
9745 
9746 		if (TTE_IS_VALID(&tte)) {
9747 			if ((pp = osfhme->hme_page) == NULL)
9748 				panic("sfmmu_hblk_swap: page not mapped");
9749 
9750 			pml = sfmmu_mlist_enter(pp);
9751 
9752 			if (pp != osfhme->hme_page)
9753 				panic("sfmmu_hblk_swap: mapping changed");
9754 
9755 			HBLKTOHME(nsfhme, new, vaddr);
9756 
9757 			HME_ADD(nsfhme, pp);
9758 			HME_SUB(osfhme, pp);
9759 
9760 			sfmmu_mlist_exit(pml);
9761 		}
9762 	}
9763 
9764 	/*
9765 	 * remove old from hash chain
9766 	 */
9767 	sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev);
9768 
9769 #ifdef	DEBUG
9770 
9771 	hblktag.htag_id = ksfmmup;
9772 	hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
9773 	hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
9774 	HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
9775 
9776 	if (found != new)
9777 		panic("sfmmu_hblk_swap: new hblk not found");
9778 #endif
9779 
9780 	SFMMU_HASH_UNLOCK(hmebp);
9781 
9782 	/*
9783 	 * Reset hblk_reserve
9784 	 */
9785 	bzero((void *)old, HME8BLK_SZ);
9786 	old->hblk_nextpa = va_to_pa((caddr_t)old);
9787 }
9788 
9789 /*
9790  * Grab the mlist mutex for both pages passed in.
9791  *
9792  * low and high will be returned as pointers to the mutexes for these pages.
9793  * low refers to the mutex residing in the lower bin of the mlist hash, while
9794  * high refers to the mutex residing in the higher bin of the mlist hash.  This
9795  * is due to the locking order restrictions on the same thread grabbing
9796  * multiple mlist mutexes.  The low lock must be acquired before the high lock.
9797  *
9798  * If both pages hash to the same mutex, only grab that single mutex, and
9799  * high will be returned as NULL
9800  * If the pages hash to different bins in the hash, grab the lower addressed
9801  * lock first and then the higher addressed lock in order to follow the locking
9802  * rules involved with the same thread grabbing multiple mlist mutexes.
9803  * low and high will both have non-NULL values.
9804  */
9805 static void
9806 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
9807     kmutex_t **low, kmutex_t **high)
9808 {
9809 	kmutex_t	*mml_targ, *mml_repl;
9810 
9811 	/*
9812 	 * no need to do the dance around szc as in sfmmu_mlist_enter()
9813 	 * because this routine is only called by hat_page_relocate() and all
9814 	 * targ and repl pages are already locked EXCL so szc can't change.
9815 	 */
9816 
9817 	mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
9818 	mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
9819 
9820 	if (mml_targ == mml_repl) {
9821 		*low = mml_targ;
9822 		*high = NULL;
9823 	} else {
9824 		if (mml_targ < mml_repl) {
9825 			*low = mml_targ;
9826 			*high = mml_repl;
9827 		} else {
9828 			*low = mml_repl;
9829 			*high = mml_targ;
9830 		}
9831 	}
9832 
9833 	mutex_enter(*low);
9834 	if (*high)
9835 		mutex_enter(*high);
9836 }
9837 
9838 static void
9839 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
9840 {
9841 	if (high)
9842 		mutex_exit(high);
9843 	mutex_exit(low);
9844 }
9845 
9846 static hatlock_t *
9847 sfmmu_hat_enter(sfmmu_t *sfmmup)
9848 {
9849 	hatlock_t	*hatlockp;
9850 
9851 	if (sfmmup != ksfmmup) {
9852 		hatlockp = TSB_HASH(sfmmup);
9853 		mutex_enter(HATLOCK_MUTEXP(hatlockp));
9854 		return (hatlockp);
9855 	}
9856 	return (NULL);
9857 }
9858 
9859 static hatlock_t *
9860 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
9861 {
9862 	hatlock_t	*hatlockp;
9863 
9864 	if (sfmmup != ksfmmup) {
9865 		hatlockp = TSB_HASH(sfmmup);
9866 		if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
9867 			return (NULL);
9868 		return (hatlockp);
9869 	}
9870 	return (NULL);
9871 }
9872 
9873 static void
9874 sfmmu_hat_exit(hatlock_t *hatlockp)
9875 {
9876 	if (hatlockp != NULL)
9877 		mutex_exit(HATLOCK_MUTEXP(hatlockp));
9878 }
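
/*
 * A minimal usage sketch: sfmmu_hat_enter() returns NULL for the kernel
 * hat and sfmmu_hat_exit() accepts NULL, so callers need not special-case
 * ksfmmup:
 *
 *	hatlockp = sfmmu_hat_enter(sfmmup);
 *	... examine or update per-hat state ...
 *	sfmmu_hat_exit(hatlockp);
 */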
9879 
9880 static void
9881 sfmmu_hat_lock_all(void)
9882 {
9883 	int i;
9884 	for (i = 0; i < SFMMU_NUM_LOCK; i++)
9885 		mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
9886 }
9887 
9888 static void
9889 sfmmu_hat_unlock_all(void)
9890 {
9891 	int i;
9892 	for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
9893 		mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
9894 }
9895 
9896 int
9897 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
9898 {
9899 	ASSERT(sfmmup != ksfmmup);
9900 	return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
9901 }
9902 
9903 /*
9904  * Locking primitives to provide consistency between ISM unmap
9905  * and other operations.  Since ISM unmap can take a long time, we
9906  * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
9907  * contention on the hatlock buckets while ISM segments are being
9908  * unmapped.  The tradeoff is that the flags don't prevent priority
9909  * inversion from occurring, so we must request kernel priority in
9910  * case we have to sleep to keep from getting buried while holding
9911  * the HAT_ISMBUSY flag set, which in turn could block other kernel
9912  * threads from running (for example, in sfmmu_uvatopfn()).
9913  */
9914 static void
9915 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
9916 {
9917 	hatlock_t *hatlockp;
9918 
9919 	THREAD_KPRI_REQUEST();
9920 	if (!hatlock_held)
9921 		hatlockp = sfmmu_hat_enter(sfmmup);
	else
		hatlockp = TSB_HASH(sfmmup);	/* caller already holds it */
9922 	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
9923 		cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
9924 	SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
9925 	if (!hatlock_held)
9926 		sfmmu_hat_exit(hatlockp);
9927 }
9928 
9929 static void
9930 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
9931 {
9932 	hatlock_t *hatlockp;
9933 
9934 	if (!hatlock_held)
9935 		hatlockp = sfmmu_hat_enter(sfmmup);
9936 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
9937 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
9938 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
9939 	if (!hatlock_held)
9940 		sfmmu_hat_exit(hatlockp);
9941 	THREAD_KPRI_RELEASE();
9942 }
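
/*
 * The ISM busy flag pairs as follows (a sketch; hatlock_held says whether
 * the caller already holds the hat lock):
 *
 *	sfmmu_ismhat_enter(sfmmup, 0);		sets HAT_ISMBUSY
 *	... update or unmap ISM segments ...
 *	sfmmu_ismhat_exit(sfmmup, 0);		clears it and wakes waiters
 */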
9943 
9944 /*
9945  *
9946  * Algorithm:
9947  *
9948  * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
9949  *	hblks.
9950  *
9951  * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
9952  *
9953  * 		(a) try to return an hblk from reserve pool of free hblks;
9954  *		(b) if the reserve pool is empty, acquire hblk_reserve_lock
9955  *		    and return hblk_reserve.
9956  *
9957  * (3) call kmem_cache_alloc() to allocate hblk;
9958  *
9959  *		(a) if hblk_reserve_lock is held by the current thread,
9960  *		    atomically replace hblk_reserve by the hblk that is
9961  *		    returned by kmem_cache_alloc; release hblk_reserve_lock
9962  *		    and call kmem_cache_alloc() again.
9963  *		(b) if reserve pool is not full, add the hblk that is
9964  *		    returned by kmem_cache_alloc to reserve pool and
9965  *		    call kmem_cache_alloc again.
9966  *
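 * Case (2) corresponds to callers passing HAT_NO_KALLOC; calling
 * kmem_cache_alloc() there could recurse back into this allocator.
 *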
9967  */
9968 static struct hme_blk *
9969 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
9970 	struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
9971 	uint_t flags)
9972 {
9973 	struct hme_blk *hmeblkp = NULL;
9974 	struct hme_blk *newhblkp;
9975 	struct hme_blk *shw_hblkp = NULL;
9976 	struct kmem_cache *sfmmu_cache = NULL;
9977 	uint64_t hblkpa;
9978 	ulong_t index;
9979 	uint_t owner;		/* set to 1 if using hblk_reserve */
9980 	uint_t forcefree;
9981 	int sleep;
9982 
9983 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
9984 
9985 	/*
9986 	 * If segkmem is not created yet, allocate from static hmeblks
9987 	 * created at the end of startup_modules().  See the block comment
9988 	 * in startup_modules() describing how we estimate the number of
9989 	 * static hmeblks that will be needed during re-map.
9990 	 */
9991 	if (!hblk_alloc_dynamic) {
9992 
9993 		if (size == TTE8K) {
9994 			index = nucleus_hblk8.index;
9995 			if (index >= nucleus_hblk8.len) {
9996 				/*
9997 				 * If we panic here, see startup_modules() to
9998 				 * make sure that we are calculating the
9999 				 * number of hblk8's that we need correctly.
10000 				 */
10001 				panic("no nucleus hblk8 to allocate");
10002 			}
10003 			hmeblkp =
10004 			    (struct hme_blk *)&nucleus_hblk8.list[index];
10005 			nucleus_hblk8.index++;
10006 			SFMMU_STAT(sf_hblk8_nalloc);
10007 		} else {
10008 			index = nucleus_hblk1.index;
10009 			if (nucleus_hblk1.index >= nucleus_hblk1.len) {
10010 				/*
10011 				 * If we panic here, see startup_modules()
10012 				 * and H8TOH1; most likely you need to
10013 				 * update the calculation of the number
10014 				 * of hblk1's the kernel needs to boot.
10015 				 */
10016 				panic("no nucleus hblk1 to allocate");
10017 			}
10018 			hmeblkp =
10019 			    (struct hme_blk *)&nucleus_hblk1.list[index];
10020 			nucleus_hblk1.index++;
10021 			SFMMU_STAT(sf_hblk1_nalloc);
10022 		}
10023 
10024 		goto hblk_init;
10025 	}
10026 
10027 	SFMMU_HASH_UNLOCK(hmebp);
10028 
10029 	if (sfmmup != KHATID) {
10030 		if (mmu_page_sizes == max_mmu_page_sizes) {
10031 			if (size < TTE256M)
10032 				shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
10033 				    size, flags);
10034 		} else {
10035 			if (size < TTE4M)
10036 				shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
10037 				    size, flags);
10038 		}
10039 	}
10040 
10041 fill_hblk:
10042 	owner = (hblk_reserve_thread == curthread) ? 1 : 0;
10043 
10044 	if (owner && size == TTE8K) {
10045 
10046 		/*
10047 		 * We are really in a tight spot. We already own
10048 		 * hblk_reserve and we need another hblk.  In anticipation
10049 		 * of this kind of scenario, we specifically set aside
10050 		 * HBLK_RESERVE_MIN number of hblks to be used exclusively
10051 		 * by owner of hblk_reserve.
10052 		 */
10053 		SFMMU_STAT(sf_hblk_recurse_cnt);
10054 
10055 		if (!sfmmu_get_free_hblk(&hmeblkp, 1))
10056 			panic("sfmmu_hblk_alloc: reserve list is empty");
10057 
10058 		goto hblk_verify;
10059 	}
10060 
10061 	ASSERT(!owner);
10062 
10063 	if ((flags & HAT_NO_KALLOC) == 0) {
10064 
10065 		sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
10066 		sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
10067 
10068 		if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
10069 			hmeblkp = sfmmu_hblk_steal(size);
10070 		} else {
10071 			/*
10072 			 * if we are the owner of hblk_reserve,
10073 			 * swap hblk_reserve with hmeblkp and
10074 			 * start a fresh life.  Hope things go
10075 			 * better this time.
10076 			 */
10077 			if (hblk_reserve_thread == curthread) {
10078 				ASSERT(sfmmu_cache == sfmmu8_cache);
10079 				sfmmu_hblk_swap(hmeblkp);
10080 				hblk_reserve_thread = NULL;
10081 				mutex_exit(&hblk_reserve_lock);
10082 				goto fill_hblk;
10083 			}
10084 			/*
10085 			 * let's donate this hblk to our reserve list if
10086 			 * we are not mapping kernel range
10087 			 */
10088 			if (size == TTE8K && sfmmup != KHATID)
10089 				if (sfmmu_put_free_hblk(hmeblkp, 0))
10090 					goto fill_hblk;
10091 		}
10092 	} else {
10093 		/*
10094 		 * We are here to map the slab in sfmmu8_cache; let's
10095 		 * check if we could tap our reserve list; if successful,
10096 		 * this will avoid the pain of going thru sfmmu_hblk_swap
10097 		 */
10098 		SFMMU_STAT(sf_hblk_slab_cnt);
10099 		if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
10100 			/*
10101 			 * let's start hblk_reserve dance
10102 			 */
10103 			SFMMU_STAT(sf_hblk_reserve_cnt);
10104 			owner = 1;
10105 			mutex_enter(&hblk_reserve_lock);
10106 			hmeblkp = HBLK_RESERVE;
10107 			hblk_reserve_thread = curthread;
10108 		}
10109 	}
10110 
10111 hblk_verify:
10112 	ASSERT(hmeblkp != NULL);
10113 	set_hblk_sz(hmeblkp, size);
10114 	ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10115 	SFMMU_HASH_LOCK(hmebp);
10116 	HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
10117 	if (newhblkp != NULL) {
10118 		SFMMU_HASH_UNLOCK(hmebp);
10119 		if (hmeblkp != HBLK_RESERVE) {
10120 			/*
10121 			 * This is really tricky!
10122 			 *
10123 			 * vmem_alloc(vmem_seg_arena)
10124 			 *  vmem_alloc(vmem_internal_arena)
10125 			 *   segkmem_alloc(heap_arena)
10126 			 *    vmem_alloc(heap_arena)
10127 			 *    page_create()
10128 			 *    hat_memload()
10129 			 *	kmem_cache_free()
10130 			 *	 kmem_cache_alloc()
10131 			 *	  kmem_slab_create()
10132 			 *	   vmem_alloc(kmem_internal_arena)
10133 			 *	    segkmem_alloc(heap_arena)
10134 			 *		vmem_alloc(heap_arena)
10135 			 *		page_create()
10136 			 *		hat_memload()
10137 			 *		  kmem_cache_free()
10138 			 *		...
10139 			 *
10140 			 * Thus, hat_memload() could call kmem_cache_free
10141 			 * enough times that we could easily
10142 			 * hit the bottom of the stack or run out of reserve
10143 			 * list of vmem_seg structs.  So, we must donate
10144 			 * this hblk to reserve list if it's allocated
10145 			 * from sfmmu8_cache *and* mapping kernel range.
10146 			 * We don't need to worry about freeing hmeblk1's
10147 			 * to kmem since they don't map any kmem slabs.
10148 			 *
10149 			 * Note: When segkmem supports largepages, we must
10150 			 * free hmeblk1's to reserve list as well.
10151 			 */
10152 			forcefree = (sfmmup == KHATID) ? 1 : 0;
10153 			if (size == TTE8K &&
10154 			    sfmmu_put_free_hblk(hmeblkp, forcefree)) {
10155 				goto re_verify;
10156 			}
10157 			ASSERT(sfmmup != KHATID);
10158 			kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
10159 		} else {
10160 			/*
10161 			 * Hey! we don't need hblk_reserve any more.
10162 			 */
10163 			ASSERT(owner);
10164 			hblk_reserve_thread = NULL;
10165 			mutex_exit(&hblk_reserve_lock);
10166 			owner = 0;
10167 		}
10168 re_verify:
10169 		/*
10170 		 * let's check if the goodies are still present
10171 		 */
10172 		SFMMU_HASH_LOCK(hmebp);
10173 		HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
10174 		if (newhblkp != NULL) {
10175 			/*
10176 			 * return newhblkp if it's not hblk_reserve;
10177 			 * if newhblkp is hblk_reserve, return it
10178 			 * _only if_ we are the owner of hblk_reserve.
10179 			 */
10180 			if (newhblkp != HBLK_RESERVE || owner) {
10181 				return (newhblkp);
10182 			} else {
10183 				/*
10184 				 * we just hit hblk_reserve in the hash and
10185 				 * we are not the owner of that;
10186 				 *
10187 				 * block until hblk_reserve_thread completes
10188 				 * swapping hblk_reserve and try the dance
10189 				 * once again.
10190 				 */
10191 				SFMMU_HASH_UNLOCK(hmebp);
10192 				mutex_enter(&hblk_reserve_lock);
10193 				mutex_exit(&hblk_reserve_lock);
10194 				SFMMU_STAT(sf_hblk_reserve_hit);
10195 				goto fill_hblk;
10196 			}
10197 		} else {
10198 			/*
10199 			 * it's no more! try the dance once again.
10200 			 */
10201 			SFMMU_HASH_UNLOCK(hmebp);
10202 			goto fill_hblk;
10203 		}
10204 	}
10205 
10206 hblk_init:
10207 	set_hblk_sz(hmeblkp, size);
10208 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10209 	hmeblkp->hblk_next = (struct hme_blk *)NULL;
10210 	hmeblkp->hblk_tag = hblktag;
10211 	hmeblkp->hblk_shadow = shw_hblkp;
10212 	hblkpa = hmeblkp->hblk_nextpa;
10213 	hmeblkp->hblk_nextpa = 0;
10214 
10215 			 * can't get the size we want, retry with a minimum-
10216 			 * sized TSB.  If that still fails, give up; we can
10217 	ASSERT(hmeblkp->hblk_hmecnt == 0);
10218 	ASSERT(hmeblkp->hblk_vcnt == 0);
10219 	ASSERT(hmeblkp->hblk_lckcnt == 0);
10220 	ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
10221 	sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
10222 	return (hmeblkp);
10223 }
10224 
10225 /*
10226  * This function performs any cleanup required on the hme_blk
10227  * and returns it to the free list.
10228  */
10229 /* ARGSUSED */
10230 static void
10231 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
10232 	uint64_t hblkpa, struct hme_blk **listp)
10233 {
10234 	int shw_size, vshift;
10235 	struct hme_blk *shw_hblkp;
10236 	uint_t		shw_mask, newshw_mask;
10237 	uintptr_t	vaddr;
10238 	int		size;
10239 	uint_t		critical;
10240 
10241 	ASSERT(hmeblkp);
10242 	ASSERT(!hmeblkp->hblk_hmecnt);
10243 	ASSERT(!hmeblkp->hblk_vcnt);
10244 	ASSERT(!hmeblkp->hblk_lckcnt);
10245 	ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
10246 	ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
10247 
10248 	critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
10249 
10250 	size = get_hblk_ttesz(hmeblkp);
10251 	shw_hblkp = hmeblkp->hblk_shadow;
10252 	if (shw_hblkp) {
10253 		ASSERT(hblktosfmmu(hmeblkp) != KHATID);
10254 		if (mmu_page_sizes == max_mmu_page_sizes) {
10255 			ASSERT(size < TTE256M);
10256 		} else {
10257 			ASSERT(size < TTE4M);
10258 		}
10259 
10260 		shw_size = get_hblk_ttesz(shw_hblkp);
10261 		vaddr = get_hblk_base(hmeblkp);
10262 		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
10263 		ASSERT(vshift < 8);
10264 		/*
10265 		 * Atomically clear shadow mask bit
10266 		 */
10267 		do {
10268 			shw_mask = shw_hblkp->hblk_shw_mask;
10269 			ASSERT(shw_mask & (1 << vshift));
10270 			newshw_mask = shw_mask & ~(1 << vshift);
10271 			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
10272 				shw_mask, newshw_mask);
10273 		} while (newshw_mask != shw_mask);
10274 		hmeblkp->hblk_shadow = NULL;
10275 	}
10276 	hmeblkp->hblk_next = NULL;
10277 	hmeblkp->hblk_nextpa = hblkpa;
10278 	hmeblkp->hblk_shw_bit = 0;
10279 
10280 	if (hmeblkp->hblk_nuc_bit == 0) {
10281 
10282 		if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical))
10283 			return;
10284 
10285 		hmeblkp->hblk_next = *listp;
10286 		*listp = hmeblkp;
10287 	}
10288 }
10289 
10290 static void
10291 sfmmu_hblks_list_purge(struct hme_blk **listp)
10292 {
10293 	struct hme_blk	*hmeblkp;
10294 
10295 	while ((hmeblkp = *listp) != NULL) {
10296 		*listp = hmeblkp->hblk_next;
10297 		kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
10298 	}
10299 }
10300 
10301 #define	BUCKETS_TO_SEARCH_BEFORE_UNLOAD	30
10302 
10303 static uint_t sfmmu_hblk_steal_twice;
10304 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
10305 
10306 /*
10307  * Steal a hmeblk
10308  * Enough hmeblks were allocated at startup (nucleus hmeblks) and more
10309  * have been added dynamically, so we should always be able to find one.
10310  * Look for an unused/unlocked hmeblk in the user hash table.
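 *
 * The search is two-tiered: within the first
 * BUCKETS_TO_SEARCH_BEFORE_UNLOAD buckets only completely free hmeblks
 * (hblk_vcnt and hblk_hmecnt both zero) are taken; beyond that, a valid
 * but unlocked, unshared hmeblk will be unloaded and reused.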
10311  */
10312 static struct hme_blk *
10313 sfmmu_hblk_steal(int size)
10314 {
10315 	static struct hmehash_bucket *uhmehash_steal_hand = NULL;
10316 	struct hmehash_bucket *hmebp;
10317 	struct hme_blk *hmeblkp = NULL, *pr_hblk;
10318 	uint64_t hblkpa, prevpa;
10319 	int i;
10320 
10321 	for (;;) {
10322 		hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
10323 			uhmehash_steal_hand;
10324 		ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
10325 
10326 		for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
10327 		    BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
10328 			SFMMU_HASH_LOCK(hmebp);
10329 			hmeblkp = hmebp->hmeblkp;
10330 			hblkpa = hmebp->hmeh_nextpa;
10331 			prevpa = 0;
10332 			pr_hblk = NULL;
10333 			while (hmeblkp) {
10334 				/*
10335 				 * Check for an hmeblk that is not locked
10336 				 * and not shared; skip shadow hmeblks with
10337 				 * shadow_mask set, i.e., valid count nonzero.
10338 				 */
10339 				if ((get_hblk_ttesz(hmeblkp) == size) &&
10340 				    (hmeblkp->hblk_shw_bit == 0 ||
10341 					hmeblkp->hblk_vcnt == 0) &&
10342 				    (hmeblkp->hblk_lckcnt == 0)) {
10343 					/*
10344 					 * there is a high probability that we
10345 					 * will find a free one. search some
10346 					 * buckets for a free hmeblk initially
10347 					 * before unloading a valid hmeblk.
10348 					 */
10349 					if ((hmeblkp->hblk_vcnt == 0 &&
10350 					    hmeblkp->hblk_hmecnt == 0) || (i >=
10351 					    BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
10352 						if (sfmmu_steal_this_hblk(hmebp,
10353 						    hmeblkp, hblkpa, prevpa,
10354 						    pr_hblk)) {
10355 							/*
10356 							 * Hblk is unloaded
10357 							 * successfully
10358 							 */
10359 							break;
10360 						}
10361 					}
10362 				}
10363 				pr_hblk = hmeblkp;
10364 				prevpa = hblkpa;
10365 				hblkpa = hmeblkp->hblk_nextpa;
10366 				hmeblkp = hmeblkp->hblk_next;
10367 			}
10368 
10369 			SFMMU_HASH_UNLOCK(hmebp);
10370 			if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
10371 				hmebp = uhme_hash;
10372 		}
10373 		uhmehash_steal_hand = hmebp;
10374 
10375 		if (hmeblkp != NULL)
10376 			break;
10377 
10378 		/*
10379 		 * in the worst case, look for a free one in the kernel
10380 		 * hash table.
10381 		 */
10382 		for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
10383 			SFMMU_HASH_LOCK(hmebp);
10384 			hmeblkp = hmebp->hmeblkp;
10385 			hblkpa = hmebp->hmeh_nextpa;
10386 			prevpa = 0;
10387 			pr_hblk = NULL;
10388 			while (hmeblkp) {
10389 				/*
10390 				 * check if it is free hmeblk
10391 				 */
10392 				if ((get_hblk_ttesz(hmeblkp) == size) &&
10393 				    (hmeblkp->hblk_lckcnt == 0) &&
10394 				    (hmeblkp->hblk_vcnt == 0) &&
10395 				    (hmeblkp->hblk_hmecnt == 0)) {
10396 					if (sfmmu_steal_this_hblk(hmebp,
10397 					    hmeblkp, hblkpa, prevpa, pr_hblk)) {
10398 						break;
10399 					} else {
10400 						/*
10401 						 * Cannot fail since we have
10402 						 * hash lock.
10403 						 */
10404 						panic("fail to steal?");
10405 					}
10406 				}
10407 
10408 				pr_hblk = hmeblkp;
10409 				prevpa = hblkpa;
10410 				hblkpa = hmeblkp->hblk_nextpa;
10411 				hmeblkp = hmeblkp->hblk_next;
10412 			}
10413 
10414 			SFMMU_HASH_UNLOCK(hmebp);
10415 			if (hmebp++ == &khme_hash[KHMEHASH_SZ])
10416 				hmebp = khme_hash;
10417 		}
10418 
10419 		if (hmeblkp != NULL)
10420 			break;
10421 		sfmmu_hblk_steal_twice++;
10422 	}
10423 	return (hmeblkp);
10424 }
10425 
10426 /*
10427  * This routine does real work to prepare a hblk to be "stolen" by
10428  * unloading the mappings, updating shadow counts ....
10429  * It returns 1 if the block is ready to be reused (stolen), or 0
10430  * if the block cannot be stolen yet because pageunload is still
10431  * working on this hblk.
10432  */
10433 static int
10434 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
10435 	uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk)
10436 {
10437 	int shw_size, vshift;
10438 	struct hme_blk *shw_hblkp;
10439 	uintptr_t vaddr;
10440 	uint_t shw_mask, newshw_mask;
10441 
10442 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10443 
10444 	/*
10445 	 * check if the hmeblk is free, unload if necessary
10446 	 */
10447 	if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
10448 		sfmmu_t *sfmmup;
10449 		demap_range_t dmr;
10450 
10451 		sfmmup = hblktosfmmu(hmeblkp);
10452 		DEMAP_RANGE_INIT(sfmmup, &dmr);
10453 		(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
10454 		    (caddr_t)get_hblk_base(hmeblkp),
10455 		    get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
10456 		DEMAP_RANGE_FLUSH(&dmr);
10457 		if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
10458 			/*
10459 			 * Pageunload is working on the same hblk.
10460 			 */
10461 			return (0);
10462 		}
10463 
10464 		sfmmu_hblk_steal_unload_count++;
10465 	}
10466 
10467 	ASSERT(hmeblkp->hblk_lckcnt == 0);
10468 	ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
10469 
10470 	sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk);
10471 	hmeblkp->hblk_nextpa = hblkpa;
10472 
10473 	shw_hblkp = hmeblkp->hblk_shadow;
10474 	if (shw_hblkp) {
10475 		shw_size = get_hblk_ttesz(shw_hblkp);
10476 		vaddr = get_hblk_base(hmeblkp);
10477 		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
10478 		ASSERT(vshift < 8);
10479 		/*
10480 		 * Atomically clear shadow mask bit
10481 		 */
10482 		do {
10483 			shw_mask = shw_hblkp->hblk_shw_mask;
10484 			ASSERT(shw_mask & (1 << vshift));
10485 			newshw_mask = shw_mask & ~(1 << vshift);
10486 			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
10487 				shw_mask, newshw_mask);
10488 		} while (newshw_mask != shw_mask);
10489 		hmeblkp->hblk_shadow = NULL;
10490 	}
10491 
10492 	/*
10493 	 * remove shadow bit if we are stealing an unused shadow hmeblk.
10494 	 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if
10495 	 * we are indeed allocating a shadow hmeblk.
10496 	 */
10497 	hmeblkp->hblk_shw_bit = 0;
10498 
10499 	sfmmu_hblk_steal_count++;
10500 	SFMMU_STAT(sf_steal_count);
10501 
10502 	return (1);
10503 }
10504 
10505 struct hme_blk *
10506 sfmmu_hmetohblk(struct sf_hment *sfhme)
10507 {
10508 	struct hme_blk *hmeblkp;
10509 	struct sf_hment *sfhme0;
10510 	struct hme_blk *hblk_dummy = 0;
10511 
10512 	/*
10513 	 * No dummy sf_hments, please.
10514 	 */
10515 	ASSERT(sfhme->hme_tte.ll != 0);
10516 
10517 	sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
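	/*
	 * Step back from this hment to hment 0 of its block, then subtract
	 * the offset of hblk_hme[0] within struct hme_blk (computed from
	 * the hblk_dummy null pointer, the classic offsetof idiom) to
	 * recover the enclosing hme_blk.
	 */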
10518 	hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
10519 		(uintptr_t)&hblk_dummy->hblk_hme[0]);
10520 
10521 	return (hmeblkp);
10522 }
10523 
10524 /*
10525  * Make sure that there is a valid ctx, if not get a ctx.
10526  * Also, get a readers lock on the ctx, so that the ctx cannot
10527  * be stolen underneath us.
10528  */
10529 static void
10530 sfmmu_disallow_ctx_steal(sfmmu_t *sfmmup)
10531 {
10532 	struct	ctx *ctx;
10533 
10534 	ASSERT(sfmmup != ksfmmup);
10535 	ASSERT(sfmmup->sfmmu_ismhat == 0);
10536 
10537 	/*
10538 	 * If ctx has been stolen, get a ctx.
10539 	 */
10540 	if (sfmmup->sfmmu_cnum == INVALID_CONTEXT) {
10541 		/*
10542 		 * Our ctx was stolen. Get a ctx with rlock.
10543 		 */
10544 		ctx = sfmmu_get_ctx(sfmmup);
10545 		return;
10546 	} else {
10547 		ctx = sfmmutoctx(sfmmup);
10548 	}
10549 
10550 	/*
10551 	 * Get the reader lock.
10552 	 */
10553 	rw_enter(&ctx->ctx_rwlock, RW_READER);
10554 	if (ctx->ctx_sfmmu != sfmmup) {
10555 		/*
10556 		 * The ctx got stolen, so spin again.
10557 		 */
10558 		rw_exit(&ctx->ctx_rwlock);
10559 		ctx = sfmmu_get_ctx(sfmmup);
10560 	}
10561 
10562 	ASSERT(sfmmup->sfmmu_cnum >= NUM_LOCKED_CTXS);
10563 }
10564 
10565 /*
10566  * Decrement reference count for our ctx. If the reference count
10567  * becomes 0, our ctx can be stolen by someone.
10568  */
10569 static void
10570 sfmmu_allow_ctx_steal(sfmmu_t *sfmmup)
10571 {
10572 	struct	ctx *ctx;
10573 
10574 	ASSERT(sfmmup != ksfmmup);
10575 	ASSERT(sfmmup->sfmmu_ismhat == 0);
10576 	ctx = sfmmutoctx(sfmmup);
10577 
10578 	ASSERT(sfmmup == ctx->ctx_sfmmu);
10579 	ASSERT(sfmmup->sfmmu_cnum != INVALID_CONTEXT);
10580 	rw_exit(&ctx->ctx_rwlock);
10581 }
10582 
10583 /*
10584  * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
10585  * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
10586  * KM_SLEEP allocation.
10589  */
10590 static void
10591 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
10592 {
10593 	struct tsb_info *tsbinfop, *next;
10594 	tsb_replace_rc_t rc;
10595 	boolean_t gotfirst = B_FALSE;
10596 
10597 	ASSERT(sfmmup != ksfmmup);
10598 	ASSERT(sfmmu_hat_lock_held(sfmmup));
10599 
10600 	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
10601 		cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10602 	}
10603 
10604 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10605 		SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
10606 	} else {
10607 		return;
10608 	}
10609 
10610 	ASSERT(sfmmup->sfmmu_tsb != NULL);
10611 
10612 	/*
10613 	 * Loop over all tsbinfo's replacing them with ones that actually have
10614 	 * a TSB.  If any of the replacements ever fail, bail out of the loop.
10615 	 */
10616 	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
10617 		ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
10618 		next = tsbinfop->tsb_next;
10619 		rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
10620 		    hatlockp, TSB_SWAPIN);
10621 		if (rc != TSB_SUCCESS) {
10622 			break;
10623 		}
10624 		gotfirst = B_TRUE;
10625 	}
10626 
10627 	switch (rc) {
10628 	case TSB_SUCCESS:
10629 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
10630 		cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10631 		return;
10632 	case TSB_ALLOCFAIL:
10633 		break;
10634 	default:
10635 		panic("sfmmu_replace_tsb returned unrecognized failure code "
10636 		    "%d", rc);
10637 	}
10638 
10639 	/*
10640 	 * In this case, we failed to get one of our TSBs.  If we failed to
10641 	 * get the first TSB, get one of minimum size (8KB).  Walk the list
10642 	 * and throw away the tsbinfos, starting where the allocation failed;
10643 	 * we can get by with just one TSB as long as we don't leave the
10644 	 * SWAPPED tsbinfo structures lying around.
10645 	 */
10646 	tsbinfop = sfmmup->sfmmu_tsb;
10647 	next = tsbinfop->tsb_next;
10648 	tsbinfop->tsb_next = NULL;
10649 
10650 	sfmmu_hat_exit(hatlockp);
10651 	for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
10652 		next = tsbinfop->tsb_next;
10653 		sfmmu_tsbinfo_free(tsbinfop);
10654 	}
10655 	hatlockp = sfmmu_hat_enter(sfmmup);
10656 
10657 	/*
10658 	 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
10659 	 * pages.
10660 	 */
10661 	if (!gotfirst) {
10662 		tsbinfop = sfmmup->sfmmu_tsb;
10663 		rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
10664 		    hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
10665 		ASSERT(rc == TSB_SUCCESS);
10666 	}
10667 
10668 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
10669 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10670 }
10671 
10672 /*
10673  * Handle exceptions for low level tsb_handler.
10674  *
10675  * There are many scenarios that could land us here:
10676  *
10677  *	1) Process has no context.  In this case, ctx is
10678  *         INVALID_CONTEXT and sfmmup->sfmmu_cnum == 1 so
10679  *         INVALID_CONTEXT and sfmmup->sfmmu_cnum == 1 (i.e., INVALID_CONTEXT) so
10680  *      2) Need to re-load our MMU state.  In this case,
10681  *         ctx is INVALID_CONTEXT and sfmmup->sfmmu_cnum != 1.
10682  *      3) ISM mappings are being updated.  This is handled
10683  *         just like case #2.
10684  *      4) We wish to program a new page size into the TLB.
10685  *         This is handled just like case #1, since changing
10686  *         TLB page size requires us to flush the TLB.
10687  *	5) Window fault and no valid translation found.
10688  *
10689  * Cases 1-4, ctx is INVALID_CONTEXT so we handle it and then
10690  * exit which will retry the trapped instruction.  Case #5 we
10691  * punt to trap() which will raise us a trap level and handle
10692  * the fault before unwinding.
10693  *
10694  * Note that the process will run in INVALID_CONTEXT before
10695  * faulting into here and subsequently loading the MMU registers
10696  * (including the TSB base register) associated with this process.
10697  * For this reason, the trap handlers must all test for
10698  * INVALID_CONTEXT before attempting to access any registers other
10699  * than the context registers.
10700  */
10701 void
10702 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
10703 {
10704 	sfmmu_t *sfmmup;
10705 	uint_t ctxnum;
10706 	klwp_id_t lwp;
10707 	char lwp_save_state;
10708 	hatlock_t *hatlockp;
10709 	struct tsb_info *tsbinfop;
10710 
10711 	SFMMU_STAT(sf_tsb_exceptions);
10712 	sfmmup = astosfmmu(curthread->t_procp->p_as);
10713 	ctxnum = tagaccess & TAGACC_CTX_MASK;
10714 
10715 	ASSERT(sfmmup != ksfmmup && ctxnum != KCONTEXT);
10716 	ASSERT(sfmmup->sfmmu_ismhat == 0);
10717 	/*
10718 	 * First, make sure we come out of here with a valid ctx,
10719 	 * since if we don't get one we'll simply loop on the
10720 	 * faulting instruction.
10721 	 *
10722 	 * If the ISM mappings are changing, the TSB is being relocated, or
10723 	 * the process is swapped out we serialize behind the controlling
10724 	 * thread with the sfmmu_flags and sfmmu_tsb_cv condition variable.
10725 	 * Otherwise we synchronize with the context stealer or the thread
10726 	 * that required us to change out our MMU registers (such
10727 	 * as a thread changing out our TSB while we were running) by
10728 	 * locking the HAT and grabbing the rwlock on the context as a
10729 	 * reader temporarily.
10730 	 */
10731 	if (ctxnum == INVALID_CONTEXT ||
10732 	    SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10733 		/*
10734 		 * Must set lwp state to LWP_SYS before
10735 		 * trying to acquire any adaptive lock
10736 		 */
10737 		lwp = ttolwp(curthread);
10738 		ASSERT(lwp);
10739 		lwp_save_state = lwp->lwp_state;
10740 		lwp->lwp_state = LWP_SYS;
10741 
10742 		hatlockp = sfmmu_hat_enter(sfmmup);
10743 retry:
10744 		for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
10745 		    tsbinfop = tsbinfop->tsb_next) {
10746 			if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
10747 				cv_wait(&sfmmup->sfmmu_tsb_cv,
10748 				    HATLOCK_MUTEXP(hatlockp));
10749 				goto retry;
10750 			}
10751 		}
10752 
10753 		/*
10754 		 * Wait for ISM maps to be updated.
10755 		 */
10756 		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
10757 			cv_wait(&sfmmup->sfmmu_tsb_cv,
10758 			    HATLOCK_MUTEXP(hatlockp));
10759 			goto retry;
10760 		}
10761 
10762 		/*
10763 		 * If we're swapping in, get TSB(s).  Note that we must do
10764 		 * this before we get a ctx or load the MMU state.  Once
10765 		 * we swap in we have to recheck to make sure the TSB(s) and
10766 		 * ISM mappings didn't change while we slept.
10767 		 */
10768 		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10769 			sfmmu_tsb_swapin(sfmmup, hatlockp);
10770 			goto retry;
10771 		}
10772 
10773 		sfmmu_disallow_ctx_steal(sfmmup);
10774 		ctxnum = sfmmup->sfmmu_cnum;
10775 		kpreempt_disable();
10776 		sfmmu_setctx_sec(ctxnum);
10777 		sfmmu_load_mmustate(sfmmup);
10778 		kpreempt_enable();
10779 		sfmmu_allow_ctx_steal(sfmmup);
10780 		sfmmu_hat_exit(hatlockp);
10781 		/*
10782 		 * Must restore lwp_state if not calling
10783 		 * trap() for further processing. Restore
10784 		 * it anyway.
10785 		 */
10786 		lwp->lwp_state = lwp_save_state;
10787 		if (sfmmup->sfmmu_ttecnt[TTE8K] != 0 ||
10788 		    sfmmup->sfmmu_ttecnt[TTE64K] != 0 ||
10789 		    sfmmup->sfmmu_ttecnt[TTE512K] != 0 ||
10790 		    sfmmup->sfmmu_ttecnt[TTE4M] != 0 ||
10791 		    sfmmup->sfmmu_ttecnt[TTE32M] != 0 ||
10792 		    sfmmup->sfmmu_ttecnt[TTE256M] != 0) {
10793 			return;
10794 		}
10795 		if (traptype == T_DATA_PROT) {
10796 			traptype = T_DATA_MMU_MISS;
10797 		}
10798 	}
10799 	trap(rp, (caddr_t)tagaccess, traptype, 0);
10800 }
10801 
10802 /*
10803  * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and
10804  * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
10805  * TTE_SUSPENDED bit is set in the tte.  We block on acquiring a page
10806  * lock rather than spinning, to avoid send-mondo timeouts with
10807  * interrupts enabled.  When the lock is acquired it is immediately
10808  * released and we return to sfmmu_vatopfn just after
10809  */
10810 void
10811 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
10812 {
10813 	struct page	**pp;
10814 
10815 	(void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
10816 	as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
10817 }
10818 
10819 /*
10820  * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and
10821  * the TTE_SUSPENDED bit is set in the tte.  We do this to handle
10822  * cross traps which cannot be handled while spinning in the
10823  * trap handlers. Simply enter and exit the kpr_suspendlock spin
10824  * mutex, which is held by the holder of the suspend bit, and then
10825  * retry the trapped instruction after unwinding.
10826  */
10827 /*ARGSUSED*/
10828 void
10829 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
10830 {
10831 	ASSERT(curthread != kreloc_thread);
10832 	mutex_enter(&kpr_suspendlock);
10833 	mutex_exit(&kpr_suspendlock);
10834 }
10835 
10836 /*
10837  * Special routine to flush out ism mappings- TSBs, TLBs and D-caches.
10838  * This routine may be called with all cpu's captured. Therefore, the
10839  * caller is responsible for holding all locks and disabling kernel
10840  * preemption.
10841  */
10842 /* ARGSUSED */
10843 static void
10844 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
10845 	struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
10846 {
10847 	cpuset_t 	cpuset;
10848 	caddr_t 	va;
10849 	ism_ment_t	*ment;
10850 	sfmmu_t		*sfmmup;
10851 	int 		ctxnum;
10852 	int 		vcolor;
10853 	int		ttesz;
10854 
	/*
	 * Walk the ism_hat's mapping list and flush the page
	 * from every hat sharing this ism_hat.  This routine
	 * may be called while all CPUs have been captured.
	 * Therefore we can't attempt to grab any locks.  For now
	 * this means we will protect the ism mapping list under
	 * a single lock which will be grabbed by the caller.
	 * If hat_share/unshare scalability becomes a performance
	 * problem then we may need to rethink ism mapping list locking.
	 */
10865 	ASSERT(ism_sfmmup->sfmmu_ismhat);
10866 	ASSERT(MUTEX_HELD(&ism_mlist_lock));
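	/*
	 * Convert addr into an offset within the ISM segment; below we
	 * add this offset to each sharing hat's base VA to compute the
	 * address to flush in that hat's address space.
	 */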
10867 	addr = addr - ISMID_STARTADDR;
10868 	for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
10869 
10870 		sfmmup = ment->iment_hat;
10871 		ctxnum = sfmmup->sfmmu_cnum;
10872 		va = ment->iment_base_va;
10873 		va = (caddr_t)((uintptr_t)va  + (uintptr_t)addr);
10874 
10875 		/*
10876 		 * Flush TSB of ISM mappings.
10877 		 */
10878 		ttesz = get_hblk_ttesz(hmeblkp);
10879 		if (ttesz == TTE8K || ttesz == TTE4M) {
10880 			sfmmu_unload_tsb(sfmmup, va, ttesz);
10881 		} else {
10882 			caddr_t sva = va;
10883 			caddr_t eva;
10884 			ASSERT(addr == (caddr_t)get_hblk_base(hmeblkp));
10885 			eva = sva + get_hblk_span(hmeblkp);
10886 			sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);
10887 		}
10888 
10889 		if (ctxnum != INVALID_CONTEXT) {
10890 			/*
10891 			 * Flush TLBs.  We don't need to do this for
10892 			 * invalid context since the flushing is already
10893 			 * done as part of context stealing.
10894 			 */
10895 			cpuset = sfmmup->sfmmu_cpusran;
10896 			CPUSET_AND(cpuset, cpu_ready_set);
10897 			CPUSET_DEL(cpuset, CPU->cpu_id);
10898 			SFMMU_XCALL_STATS(ctxnum);
10899 			xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
10900 			    ctxnum);
10901 			vtag_flushpage(va, ctxnum);
10902 		}
10903 
		/*
		 * Flush the D$.
		 * When flushing the D$ we must flush all
		 * CPUs; see sfmmu_cache_flush().
		 */
10909 		if (cache_flush_flag == CACHE_FLUSH) {
10910 			cpuset = cpu_ready_set;
10911 			CPUSET_DEL(cpuset, CPU->cpu_id);
10912 			SFMMU_XCALL_STATS(ctxnum);
10913 			vcolor = addr_to_vcolor(va);
10914 			xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
10915 			vac_flushpage(pfnum, vcolor);
10916 		}
10917 	}
10918 }
10919 
/*
 * Demaps a particular virtual address from the TSB, flushes the CPU
 * caches, and flushes the TLBs on all CPUs for that address and ctx.
 * If tlb_noflush is set we do not flush the TLB/TSB.  This function
 * may or may not be called with the HAT lock held.
 */
10926 static void
10927 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
10928 	pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
10929 	int hat_lock_held)
10930 {
10931 	int ctxnum, vcolor;
10932 	cpuset_t cpuset;
10933 	hatlock_t *hatlockp;
10934 
10935 	/*
10936 	 * There is no longer a need to protect against ctx being
10937 	 * stolen here since we don't store the ctx in the TSB anymore.
10938 	 */
10939 	vcolor = addr_to_vcolor(addr);
10940 
10941 	kpreempt_disable();
10942 	if (!tlb_noflush) {
10943 		/*
10944 		 * Flush the TSB.
10945 		 */
10946 		if (!hat_lock_held)
10947 			hatlockp = sfmmu_hat_enter(sfmmup);
10948 		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp);
10949 		ctxnum = (int)sfmmutoctxnum(sfmmup);
10950 		if (!hat_lock_held)
10951 			sfmmu_hat_exit(hatlockp);
10952 
10953 		if (ctxnum != INVALID_CONTEXT) {
10954 			/*
10955 			 * Flush TLBs.  We don't need to do this if our
10956 			 * context is invalid context.  Since we hold the
10957 			 * HAT lock the context must have been stolen and
10958 			 * hence will be flushed before re-use.
10959 			 */
10960 			cpuset = sfmmup->sfmmu_cpusran;
10961 			CPUSET_AND(cpuset, cpu_ready_set);
10962 			CPUSET_DEL(cpuset, CPU->cpu_id);
10963 			SFMMU_XCALL_STATS(ctxnum);
10964 			xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
10965 				ctxnum);
10966 			vtag_flushpage(addr, ctxnum);
10967 		}
10968 	}
10969 
10970 	/*
10971 	 * Flush the D$
10972 	 *
10973 	 * Even if the ctx is stolen, we need to flush the
10974 	 * cache. Our ctx stealer only flushes the TLBs.
10975 	 */
10976 	if (cache_flush_flag == CACHE_FLUSH) {
10977 		if (cpu_flag & FLUSH_ALL_CPUS) {
10978 			cpuset = cpu_ready_set;
10979 		} else {
10980 			cpuset = sfmmup->sfmmu_cpusran;
10981 			CPUSET_AND(cpuset, cpu_ready_set);
10982 		}
10983 		CPUSET_DEL(cpuset, CPU->cpu_id);
10984 		SFMMU_XCALL_STATS(sfmmutoctxnum(sfmmup));
10985 		xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
10986 		vac_flushpage(pfnum, vcolor);
10987 	}
10988 	kpreempt_enable();
10989 }
10990 
/*
 * Demaps the TSB and flushes all TLBs on all CPUs for a particular
 * virtual address and ctx.  If tlb_noflush is set we do not currently
 * do anything.  This function may or may not be called with the HAT
 * lock held.
 */
10996 static void
10997 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
10998 	int tlb_noflush, int hat_lock_held)
10999 {
11000 	int ctxnum;
11001 	cpuset_t cpuset;
11002 	hatlock_t *hatlockp;
11003 
11004 	/*
11005 	 * If the process is exiting we have nothing to do.
11006 	 */
11007 	if (tlb_noflush)
11008 		return;
11009 
11010 	/*
11011 	 * Flush TSB.
11012 	 */
11013 	if (!hat_lock_held)
11014 		hatlockp = sfmmu_hat_enter(sfmmup);
11015 	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp);
11016 	ctxnum = sfmmutoctxnum(sfmmup);
11017 	if (!hat_lock_held)
11018 		sfmmu_hat_exit(hatlockp);
11019 
11020 	/*
11021 	 * Flush TLBs.  We don't need to do this if our context is invalid
11022 	 * context.  Since we hold the HAT lock the context must have been
11023 	 * stolen and hence will be flushed before re-use.
11024 	 */
11025 	if (ctxnum != INVALID_CONTEXT) {
11026 		/*
11027 		 * There is no need to protect against ctx being stolen.
11028 		 * If the ctx is stolen we will simply get an extra flush.
11029 		 */
11030 		kpreempt_disable();
11031 		cpuset = sfmmup->sfmmu_cpusran;
11032 		CPUSET_AND(cpuset, cpu_ready_set);
11033 		CPUSET_DEL(cpuset, CPU->cpu_id);
11034 		SFMMU_XCALL_STATS(ctxnum);
11035 		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, ctxnum);
11036 		vtag_flushpage(addr, ctxnum);
11037 		kpreempt_enable();
11038 	}
11039 }
11040 
/*
 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks.  Use an xcall
 * handler that can flush a range of pages to save on xcalls.
 */
11045 static int sfmmu_xcall_save;
11046 
11047 static void
11048 sfmmu_tlb_range_demap(demap_range_t *dmrp)
11049 {
11050 	sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
11051 	int ctxnum;
11052 	hatlock_t *hatlockp;
11053 	cpuset_t cpuset;
11054 	uint64_t ctx_pgcnt;
11055 	pgcnt_t pgcnt = 0;
11056 	int pgunload = 0;
11057 	int dirtypg = 0;
11058 	caddr_t addr = dmrp->dmr_addr;
11059 	caddr_t eaddr;
11060 	uint64_t bitvec = dmrp->dmr_bitvec;
11061 
11062 	ASSERT(bitvec & 1);
11063 
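	/*
	 * dmr_bitvec has one bit per MMU_PAGESIZE page starting at
	 * dmr_addr; a set bit marks a page that was unloaded and needs
	 * to be flushed (e.g. bitvec 0b1011 means pages 0, 1 and 3 of
	 * the range are dirty).  The first page in the range is always
	 * dirty, as asserted above.
	 */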
11064 	/*
11065 	 * Flush TSB and calculate number of pages to flush.
11066 	 */
11067 	while (bitvec != 0) {
11068 		dirtypg = 0;
11069 		/*
11070 		 * Find the first page to flush and then count how many
11071 		 * pages there are after it that also need to be flushed.
11072 		 * This way the number of TSB flushes is minimized.
11073 		 */
11074 		while ((bitvec & 1) == 0) {
11075 			pgcnt++;
11076 			addr += MMU_PAGESIZE;
11077 			bitvec >>= 1;
11078 		}
11079 		while (bitvec & 1) {
11080 			dirtypg++;
11081 			bitvec >>= 1;
11082 		}
11083 		eaddr = addr + ptob(dirtypg);
11084 		hatlockp = sfmmu_hat_enter(sfmmup);
11085 		sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
11086 		sfmmu_hat_exit(hatlockp);
11087 		pgunload += dirtypg;
11088 		addr = eaddr;
11089 		pgcnt += dirtypg;
11090 	}
11091 
	/*
	 * If the context is INVALID_CONTEXT, bail.
	 * We hold the hat lock while checking the ctx to prevent
	 * a race with sfmmu_replace_tsb(), which temporarily sets
	 * the ctx to INVALID_CONTEXT to force processes to enter
	 * sfmmu_tsbmiss_exception().
	 */
11099 	hatlockp = sfmmu_hat_enter(sfmmup);
11100 	ctxnum = sfmmutoctxnum(sfmmup);
11101 	sfmmu_hat_exit(hatlockp);
11102 	if (ctxnum == INVALID_CONTEXT) {
11103 		dmrp->dmr_bitvec = 0;
11104 		return;
11105 	}
11106 
	ASSERT((pgcnt << MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
11108 	if (sfmmup->sfmmu_free == 0) {
11109 		addr = dmrp->dmr_addr;
11110 		bitvec = dmrp->dmr_bitvec;
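		/*
		 * Pack the ctx number and page count into a single
		 * xcall argument, e.g.:
		 *	ctx = ctx_pgcnt >> 16; pgcnt = ctx_pgcnt & 0xffff;
		 * The TL1 handler is expected to decode it this way.
		 */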
11111 		ctx_pgcnt = (uint64_t)((ctxnum << 16) | pgcnt);
11112 		kpreempt_disable();
11113 		cpuset = sfmmup->sfmmu_cpusran;
11114 		CPUSET_AND(cpuset, cpu_ready_set);
11115 		CPUSET_DEL(cpuset, CPU->cpu_id);
11116 		SFMMU_XCALL_STATS(ctxnum);
11117 		xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
11118 			ctx_pgcnt);
11119 		for (; bitvec != 0; bitvec >>= 1) {
11120 			if (bitvec & 1)
11121 				vtag_flushpage(addr, ctxnum);
11122 			addr += MMU_PAGESIZE;
11123 		}
11124 		kpreempt_enable();
		sfmmu_xcall_save += (pgunload - 1);
11126 	}
11127 	dmrp->dmr_bitvec = 0;
11128 }
11129 
11130 /*
11131  * Flushes only TLB.
11132  */
11133 static void
11134 sfmmu_tlb_ctx_demap(sfmmu_t *sfmmup)
11135 {
11136 	int ctxnum;
11137 	cpuset_t cpuset;
11138 
11139 	ctxnum = (int)sfmmutoctxnum(sfmmup);
11140 	if (ctxnum == INVALID_CONTEXT) {
		/*
		 * If the ctx was stolen, simply return;
		 * whoever stole the ctx is responsible for the flush.
		 */
11145 		return;
11146 	}
11147 	ASSERT(ctxnum != KCONTEXT);
11148 	/*
11149 	 * There is no need to protect against ctx being stolen.  If the
11150 	 * ctx is stolen we will simply get an extra flush.
11151 	 */
11152 	kpreempt_disable();
11153 
11154 	cpuset = sfmmup->sfmmu_cpusran;
11155 	CPUSET_DEL(cpuset, CPU->cpu_id);
11156 	CPUSET_AND(cpuset, cpu_ready_set);
11157 	SFMMU_XCALL_STATS(ctxnum);
11158 
	/*
	 * Flush TLB.
	 * RFE: it might be worth delaying the TLB flush as well.  In that
	 * case each CPU would have to traverse the dirty list and flush
	 * each of those ctxs from the TLB.
	 */
11165 	vtag_flushctx(ctxnum);
11166 	xt_some(cpuset, vtag_flushctx_tl1, ctxnum, 0);
11167 
11168 	kpreempt_enable();
11169 	SFMMU_STAT(sf_tlbflush_ctx);
11170 }
11171 
11172 /*
11173  * Flushes all TLBs.
11174  */
11175 static void
11176 sfmmu_tlb_all_demap(void)
11177 {
11178 	cpuset_t cpuset;
11179 
11180 	/*
11181 	 * There is no need to protect against ctx being stolen.  If the
11182 	 * ctx is stolen we will simply get an extra flush.
11183 	 */
11184 	kpreempt_disable();
11185 
11186 	cpuset = cpu_ready_set;
11187 	CPUSET_DEL(cpuset, CPU->cpu_id);
11188 	/* LINTED: constant in conditional context */
11189 	SFMMU_XCALL_STATS(INVALID_CONTEXT);
11190 
11191 	vtag_flushall();
11192 	xt_some(cpuset, vtag_flushall_tl1, 0, 0);
11193 	xt_sync(cpuset);
11194 
11195 	kpreempt_enable();
11196 	SFMMU_STAT(sf_tlbflush_all);
11197 }
11198 
11199 /*
11200  * In cases where we need to synchronize with TLB/TSB miss trap
11201  * handlers, _and_ need to flush the TLB, it's a lot easier to
11202  * steal the context from the process and free it than to do a
11203  * special song and dance to keep things consistent for the
11204  * handlers.
11205  *
11206  * Since the process suddenly ends up without a context and our caller
11207  * holds the hat lock, threads that fault after this function is called
11208  * will pile up on the lock.  We can then do whatever we need to
11209  * atomically from the context of the caller.  The first blocked thread
11210  * to resume executing will get the process a new context, and the
11211  * process will resume executing.
11212  *
11213  * One added advantage of this approach is that on MMUs that
11214  * support a "flush all" operation, we will delay the flush until
11215  * we run out of contexts, and then flush the TLB one time.  This
11216  * is rather rare, so it's a lot less expensive than making 8000
11217  * x-calls to flush the TLB 8000 times.  Another is that we can do
11218  * all of this without pausing CPUs, due to some knowledge of how
11219  * resume() loads processes onto the processor; it sets the thread
11220  * into cpusran, and _then_ looks at cnum.  Because we do things in
11221  * the reverse order here, we guarantee exactly one of the following
11222  * statements is always true:
11223  *
11224  *   1) Nobody is in resume() so we have nothing to worry about anyway.
11225  *   2) The thread in resume() isn't in cpusran when we do the xcall,
11226  *      so we know when it does set itself it'll see cnum is
11227  *      INVALID_CONTEXT.
11228  *   3) The thread in resume() is in cpusran, and already might have
11229  *      looked at the old cnum.  That's OK, because we'll xcall it
11230  *      and, if necessary, flush the TLB along with the rest of the
11231  *      crowd.
11232  */
11233 static void
11234 sfmmu_tlb_swap_ctx(sfmmu_t *sfmmup, struct ctx *ctx)
11235 {
11236 	cpuset_t cpuset;
11237 	int cnum;
11238 
11239 	if (sfmmup->sfmmu_cnum == INVALID_CONTEXT)
11240 		return;
11241 
11242 	SFMMU_STAT(sf_ctx_swap);
11243 
11244 	kpreempt_disable();
11245 
11246 	ASSERT(rw_read_locked(&ctx->ctx_rwlock) == 0);
11247 	ASSERT(ctx->ctx_sfmmu == sfmmup);
11248 
11249 	cnum = ctxtoctxnum(ctx);
11250 	ASSERT(sfmmup->sfmmu_cnum == cnum);
11251 	ASSERT(cnum >= NUM_LOCKED_CTXS);
11252 
11253 	sfmmup->sfmmu_cnum = INVALID_CONTEXT;
11254 	membar_enter();	/* make sure visible on all CPUs */
11255 	ctx->ctx_sfmmu = NULL;
11256 
11257 	cpuset = sfmmup->sfmmu_cpusran;
11258 	CPUSET_DEL(cpuset, CPU->cpu_id);
11259 	CPUSET_AND(cpuset, cpu_ready_set);
11260 	SFMMU_XCALL_STATS(cnum);
11261 
11262 	/*
11263 	 * Force anybody running this process on CPU
11264 	 * to enter sfmmu_tsbmiss_exception() on the
11265 	 * next TLB miss, synchronize behind us on
11266 	 * the HAT lock, and grab a new context.  At
11267 	 * that point the new page size will become
11268 	 * active in the TLB for the new context.
11269 	 * See sfmmu_get_ctx() for details.
11270 	 */
11271 	if (delay_tlb_flush) {
11272 		xt_some(cpuset, sfmmu_raise_tsb_exception,
11273 		    cnum, INVALID_CONTEXT);
11274 		SFMMU_STAT(sf_tlbflush_deferred);
11275 	} else {
11276 		xt_some(cpuset, sfmmu_ctx_steal_tl1, cnum, INVALID_CONTEXT);
11277 		vtag_flushctx(cnum);
11278 		SFMMU_STAT(sf_tlbflush_ctx);
11279 	}
11280 	xt_sync(cpuset);
11281 
	/*
	 * If we just stole the ctx from the current
	 * process on the local CPU we need to invalidate
	 * this CPU's secondary context as well.
	 */
11287 	if (sfmmu_getctx_sec() == cnum) {
11288 		sfmmu_setctx_sec(INVALID_CONTEXT);
11289 		sfmmu_clear_utsbinfo();
11290 	}
11291 
11292 	kpreempt_enable();
11293 
11294 	/*
11295 	 * Now put old ctx on the dirty list since we may not
11296 	 * have flushed the context out of the TLB.  We'll let
11297 	 * the next guy who uses this ctx flush it instead.
11298 	 */
11299 	mutex_enter(&ctx_list_lock);
11300 	CTX_SET_FLAGS(ctx, CTX_FREE_FLAG);
11301 	ctx->ctx_free = ctxdirty;
11302 	ctxdirty = ctx;
11303 	mutex_exit(&ctx_list_lock);
11304 }
11305 
/*
 * We need to flush the cache on all CPUs.  It is possible that
 * a process referenced a page as cacheable but has since exited
 * and cleared the mapping list.  We still need to flush the page,
 * but having no state left, flushing on all CPUs is the only
 * alternative.
 */
11312 void
11313 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
11314 {
11315 	cpuset_t cpuset;
11316 	int	ctxnum = INVALID_CONTEXT;
11317 
11318 	kpreempt_disable();
11319 	cpuset = cpu_ready_set;
11320 	CPUSET_DEL(cpuset, CPU->cpu_id);
11321 	SFMMU_XCALL_STATS(ctxnum);	/* account to any ctx */
11322 	xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
11323 	xt_sync(cpuset);
11324 	vac_flushpage(pfnum, vcolor);
11325 	kpreempt_enable();
11326 }
11327 
11328 void
11329 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
11330 {
11331 	cpuset_t cpuset;
11332 	int	ctxnum = INVALID_CONTEXT;
11333 
11334 	ASSERT(vcolor >= 0);
11335 
11336 	kpreempt_disable();
11337 	cpuset = cpu_ready_set;
11338 	CPUSET_DEL(cpuset, CPU->cpu_id);
11339 	SFMMU_XCALL_STATS(ctxnum);	/* account to any ctx */
11340 	xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
11341 	xt_sync(cpuset);
11342 	vac_flushcolor(vcolor, pfnum);
11343 	kpreempt_enable();
11344 }
11345 
/*
 * We need to prevent processes from accessing the TSB using a cached
 * physical address.  It's fine if they try to access the TSB via a
 * virtual address, since they will just fault on that virtual address
 * once the mapping has been suspended.
 */
11352 #pragma weak sendmondo_in_recover
11353 
11354 /* ARGSUSED */
11355 static int
11356 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
11357 {
11358 	hatlock_t *hatlockp;
11359 	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
11360 	sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
11361 	struct ctx *ctx;
11362 	int cnum;
11363 	extern uint32_t sendmondo_in_recover;
11364 
11365 	if (flags != HAT_PRESUSPEND)
11366 		return (0);
11367 
11368 	hatlockp = sfmmu_hat_enter(sfmmup);
11369 
11370 	tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
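	/*
	 * Setting TSB_RELOC_FLAG makes threads that need this TSB
	 * wait on sfmmu_tsb_cv until relocation completes (see the
	 * cv_broadcast in sfmmu_tsb_post_relocator), and lets
	 * mondo_recover_proc() skip this TSB as described below.
	 */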
11371 
11372 	/*
11373 	 * For Cheetah+ Erratum 25:
11374 	 * Wait for any active recovery to finish.  We can't risk
11375 	 * relocating the TSB of the thread running mondo_recover_proc()
11376 	 * since, if we did that, we would deadlock.  The scenario we are
11377 	 * trying to avoid is as follows:
11378 	 *
11379 	 * THIS CPU			RECOVER CPU
11380 	 * --------			-----------
11381 	 *				Begins recovery, walking through TSB
11382 	 * hat_pagesuspend() TSB TTE
11383 	 *				TLB miss on TSB TTE, spins at TL1
11384 	 * xt_sync()
11385 	 *	send_mondo_timeout()
11386 	 *	mondo_recover_proc()
11387 	 *	((deadlocked))
11388 	 *
11389 	 * The second half of the workaround is that mondo_recover_proc()
11390 	 * checks to see if the tsb_info has the RELOC flag set, and if it
11391 	 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
11392 	 * and hence avoiding the TLB miss that could result in a deadlock.
11393 	 */
11394 	if (&sendmondo_in_recover) {
11395 		membar_enter();	/* make sure RELOC flag visible */
11396 		while (sendmondo_in_recover) {
11397 			drv_usecwait(1);
11398 			membar_consumer();
11399 		}
11400 	}
11401 
11402 	ctx = sfmmutoctx(sfmmup);
11403 	rw_enter(&ctx->ctx_rwlock, RW_WRITER);
11404 	cnum = sfmmutoctxnum(sfmmup);
11405 
11406 	if (cnum != INVALID_CONTEXT) {
11407 		/*
11408 		 * Force all threads for this sfmmu to sfmmu_tsbmiss_exception
11409 		 * on their next TLB miss.
11410 		 */
11411 		sfmmu_tlb_swap_ctx(sfmmup, ctx);
11412 	}
11413 
11414 	rw_exit(&ctx->ctx_rwlock);
11415 
11416 	sfmmu_hat_exit(hatlockp);
11417 
11418 	return (0);
11419 }
11420 
11421 /* ARGSUSED */
11422 static int
11423 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
11424 	void *tsbinfo, pfn_t newpfn)
11425 {
11426 	hatlock_t *hatlockp;
11427 	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
11428 	sfmmu_t	*sfmmup = tsbinfop->tsb_sfmmu;
11429 
11430 	if (flags != HAT_POSTUNSUSPEND)
11431 		return (0);
11432 
11433 	hatlockp = sfmmu_hat_enter(sfmmup);
11434 
11435 	SFMMU_STAT(sf_tsb_reloc);
11436 
11437 	/*
11438 	 * The process may have swapped out while we were relocating one
11439 	 * of its TSBs.  If so, don't bother doing the setup since the
11440 	 * process can't be using the memory anymore.
11441 	 */
11442 	if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
11443 		ASSERT(va == tsbinfop->tsb_va);
11444 		sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
11445 		sfmmu_setup_tsbinfo(sfmmup);
11446 
11447 		if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
11448 			sfmmu_inv_tsb(tsbinfop->tsb_va,
11449 			    TSB_BYTES(tsbinfop->tsb_szc));
11450 			tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
11451 		}
11452 	}
11453 
11454 	membar_exit();
11455 	tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
11456 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11457 
11458 	sfmmu_hat_exit(hatlockp);
11459 
11460 	return (0);
11461 }
11462 
11463 /*
11464  * Allocate and initialize a tsb_info structure.  Note that we may or may not
11465  * allocate a TSB here, depending on the flags passed in.
11466  */
11467 static int
11468 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
11469 	uint_t flags, sfmmu_t *sfmmup)
11470 {
11471 	int err;
11472 
11473 	*tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
11474 	    sfmmu_tsbinfo_cache, KM_SLEEP);
11475 
11476 	if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
11477 	    tsb_szc, flags, sfmmup)) != 0) {
11478 		kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
11479 		SFMMU_STAT(sf_tsb_allocfail);
11480 		*tsbinfopp = NULL;
11481 		return (err);
11482 	}
11483 	SFMMU_STAT(sf_tsb_alloc);
11484 
11485 	/*
11486 	 * Bump the TSB size counters for this TSB size.
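	 * The stat structure is assumed to be laid out as consecutive
	 * int counters, one per TSB size code; the pointer arithmetic
	 * below relies on that layout.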
11487 	 */
11488 	(*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
11489 	return (0);
11490 }
11491 
11492 static void
11493 sfmmu_tsb_free(struct tsb_info *tsbinfo)
11494 {
11495 	caddr_t tsbva = tsbinfo->tsb_va;
11496 	uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
11497 	struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
11498 	vmem_t	*vmp = tsbinfo->tsb_vmp;
11499 
11500 	/*
11501 	 * If we allocated this TSB from relocatable kernel memory, then we
11502 	 * need to uninstall the callback handler.
11503 	 */
11504 	if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
11505 		uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
11506 		caddr_t slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
11507 		page_t **ppl;
11508 		int ret;
11509 
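		/*
		 * Lock the slab's root page, off of which the pseudo
		 * mappings hang (see sfmmu_init_tsbinfo()), before
		 * deleting the relocation callback.
		 */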
11510 		ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
11511 		ASSERT(ret == 0);
11512 		hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
11513 		    0);
11514 		as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
11515 	}
11516 
11517 	if (kmem_cachep != NULL) {
11518 		kmem_cache_free(kmem_cachep, tsbva);
11519 	} else {
11520 		vmem_xfree(vmp, (void *)tsbva, tsb_size);
11521 	}
11522 	tsbinfo->tsb_va = (caddr_t)0xbad00bad;
11523 	atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
11524 }
11525 
11526 static void
11527 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
11528 {
11529 	if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
11530 		sfmmu_tsb_free(tsbinfo);
11531 	}
11532 	kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
11533 
11534 }
11535 
/*
 * Set up all the references to physical memory for this tsbinfo.
 * The underlying page(s) must be locked.
 */
11540 static void
11541 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
11542 {
11543 	ASSERT(pfn != PFN_INVALID);
11544 	ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
11545 
11546 #ifndef sun4v
11547 	if (tsbinfo->tsb_szc == 0) {
11548 		sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
11549 		    PROT_WRITE|PROT_READ, TTE8K);
11550 	} else {
11551 		/*
11552 		 * Round down PA and use a large mapping; the handlers will
11553 		 * compute the TSB pointer at the correct offset into the
11554 		 * big virtual page.  NOTE: this assumes all TSBs larger
11555 		 * than 8K must come from physically contiguous slabs of
11556 		 * size tsb_slab_size.
11557 		 */
11558 		sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
11559 		    PROT_WRITE|PROT_READ, tsb_slab_ttesz);
11560 	}
11561 	tsbinfo->tsb_pa = ptob(pfn);
11562 
11563 	TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
11564 	TTE_SET_MOD(&tsbinfo->tsb_tte);    /* enable writes */
11565 
11566 	ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
11567 	ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
11568 #else /* sun4v */
11569 	tsbinfo->tsb_pa = ptob(pfn);
11570 #endif /* sun4v */
11571 }
11572 
11573 
11574 /*
11575  * Returns zero on success, ENOMEM if over the high water mark,
11576  * or EAGAIN if the caller needs to retry with a smaller TSB
11577  * size (or specify TSB_FORCEALLOC if the allocation can't fail).
11578  *
11579  * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
11580  * is specified and the TSB requested is PAGESIZE, though it
11581  * may sleep waiting for memory if sufficient memory is not
11582  * available.
11583  */
11584 static int
11585 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
11586     int tsbcode, uint_t flags, sfmmu_t *sfmmup)
11587 {
11588 	caddr_t vaddr = NULL;
11589 	caddr_t slab_vaddr;
11590 	uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
11591 	int tsbbytes = TSB_BYTES(tsbcode);
11592 	int lowmem = 0;
11593 	struct kmem_cache *kmem_cachep = NULL;
11594 	vmem_t *vmp = NULL;
11595 	lgrp_id_t lgrpid = LGRP_NONE;
11596 	pfn_t pfn;
11597 	uint_t cbflags = HAC_SLEEP;
11598 	page_t **pplist;
11599 	int ret;
11600 
11601 	if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
11602 		flags |= TSB_ALLOC;
11603 
11604 	ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
11605 
11606 	tsbinfo->tsb_sfmmu = sfmmup;
11607 
11608 	/*
11609 	 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
11610 	 * return.
11611 	 */
11612 	if ((flags & TSB_ALLOC) == 0) {
11613 		tsbinfo->tsb_szc = tsbcode;
11614 		tsbinfo->tsb_ttesz_mask = tteszmask;
11615 		tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
11616 		tsbinfo->tsb_pa = -1;
11617 		tsbinfo->tsb_tte.ll = 0;
11618 		tsbinfo->tsb_next = NULL;
11619 		tsbinfo->tsb_flags = TSB_SWAPPED;
11620 		tsbinfo->tsb_cache = NULL;
11621 		tsbinfo->tsb_vmp = NULL;
11622 		return (0);
11623 	}
11624 
11625 #ifdef DEBUG
11626 	/*
11627 	 * For debugging:
11628 	 * Randomly force allocation failures every tsb_alloc_mtbf
11629 	 * tries if TSB_FORCEALLOC is not specified.  This will
11630 	 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
11631 	 * it is even, to allow testing of both failure paths...
11632 	 */
11633 	if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
11634 	    (tsb_alloc_count++ == tsb_alloc_mtbf)) {
11635 		tsb_alloc_count = 0;
11636 		tsb_alloc_fail_mtbf++;
11637 		return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
11638 	}
11639 #endif	/* DEBUG */
11640 
11641 	/*
11642 	 * Enforce high water mark if we are not doing a forced allocation
11643 	 * and are not shrinking a process' TSB.
11644 	 */
11645 	if ((flags & TSB_SHRINK) == 0 &&
11646 	    (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
11647 		if ((flags & TSB_FORCEALLOC) == 0)
11648 			return (ENOMEM);
11649 		lowmem = 1;
11650 	}
11651 
11652 	/*
11653 	 * Allocate from the correct location based upon the size of the TSB
11654 	 * compared to the base page size, and what memory conditions dictate.
11655 	 * Note we always do nonblocking allocations from the TSB arena since
11656 	 * we don't want memory fragmentation to cause processes to block
11657 	 * indefinitely waiting for memory; until the kernel algorithms that
11658 	 * coalesce large pages are improved this is our best option.
11659 	 *
11660 	 * Algorithm:
11661 	 *	If allocating a "large" TSB (>8K), allocate from the
11662 	 *		appropriate kmem_tsb_default_arena vmem arena
11663 	 *	else if low on memory or the TSB_FORCEALLOC flag is set or
11664 	 *	tsb_forceheap is set
11665 	 *		Allocate from kernel heap via sfmmu_tsb8k_cache with
11666 	 *		KM_SLEEP (never fails)
11667 	 *	else
11668 	 *		Allocate from appropriate sfmmu_tsb_cache with
11669 	 *		KM_NOSLEEP
11670 	 *	endif
11671 	 */
11672 	if (tsb_lgrp_affinity)
11673 		lgrpid = lgrp_home_id(curthread);
11674 	if (lgrpid == LGRP_NONE)
11675 		lgrpid = 0;	/* use lgrp of boot CPU */
11676 
11677 	if (tsbbytes > MMU_PAGESIZE) {
11678 		vmp = kmem_tsb_default_arena[lgrpid];
11679 		vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 0, 0,
11680 		    NULL, NULL, VM_NOSLEEP);
11681 #ifdef	DEBUG
11682 	} else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
11683 #else	/* !DEBUG */
11684 	} else if (lowmem || (flags & TSB_FORCEALLOC)) {
11685 #endif	/* DEBUG */
11686 		kmem_cachep = sfmmu_tsb8k_cache;
11687 		vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
11688 		ASSERT(vaddr != NULL);
11689 	} else {
11690 		kmem_cachep = sfmmu_tsb_cache[lgrpid];
11691 		vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
11692 	}
11693 
11694 	tsbinfo->tsb_cache = kmem_cachep;
11695 	tsbinfo->tsb_vmp = vmp;
11696 
11697 	if (vaddr == NULL) {
11698 		return (EAGAIN);
11699 	}
11700 
11701 	atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
11702 	kmem_cachep = tsbinfo->tsb_cache;
11703 
11704 	/*
11705 	 * If we are allocating from outside the cage, then we need to
11706 	 * register a relocation callback handler.  Note that for now
11707 	 * since pseudo mappings always hang off of the slab's root page,
11708 	 * we need only lock the first 8K of the TSB slab.  This is a bit
11709 	 * hacky but it is good for performance.
11710 	 */
11711 	if (kmem_cachep != sfmmu_tsb8k_cache) {
11712 		slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
11713 		ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
11714 		ASSERT(ret == 0);
11715 		ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
11716 		    cbflags, (void *)tsbinfo, &pfn);
11717 
		/*
		 * If we could not successfully add the callback
		 * function, free up the resources and return an error.
		 */
11722 		if (ret != 0) {
11723 			if (kmem_cachep) {
11724 				kmem_cache_free(kmem_cachep, vaddr);
11725 			} else {
11726 				vmem_xfree(vmp, (void *)vaddr, tsbbytes);
11727 			}
11728 			as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
11729 			    S_WRITE);
11730 			return (EAGAIN);
11731 		}
11732 	} else {
11733 		/*
11734 		 * Since allocation of 8K TSBs from heap is rare and occurs
11735 		 * during memory pressure we allocate them from permanent
11736 		 * memory rather than using callbacks to get the PFN.
11737 		 */
11738 		pfn = hat_getpfnum(kas.a_hat, vaddr);
11739 	}
11740 
11741 	tsbinfo->tsb_va = vaddr;
11742 	tsbinfo->tsb_szc = tsbcode;
11743 	tsbinfo->tsb_ttesz_mask = tteszmask;
11744 	tsbinfo->tsb_next = NULL;
11745 	tsbinfo->tsb_flags = 0;
11746 
11747 	sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
11748 
11749 	if (kmem_cachep != sfmmu_tsb8k_cache) {
11750 		as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
11751 	}
11752 
11753 	sfmmu_inv_tsb(vaddr, tsbbytes);
11754 	return (0);
11755 }
11756 
/*
 * Initialize the per-CPU TSB and per-CPU tsbmiss areas.
 */
11760 void
11761 sfmmu_init_tsbs(void)
11762 {
11763 	int i;
11764 	struct tsbmiss	*tsbmissp;
11765 	struct kpmtsbm	*kpmtsbmp;
11766 #ifndef sun4v
11767 	extern int	dcache_line_mask;
11768 #endif /* sun4v */
11769 	extern uint_t	vac_colors;
11770 
	/*
	 * Initialize the tsbmiss area.
	 */
11774 	tsbmissp = tsbmiss_area;
11775 
11776 	for (i = 0; i < NCPU; tsbmissp++, i++) {
		/*
		 * Initialize the tsbmiss area.
		 * Do this for all possible CPUs as some may be added
		 * while the system is running.  There is no cost to this.
		 */
11782 		tsbmissp->ksfmmup = ksfmmup;
11783 #ifndef sun4v
11784 		tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
11785 #endif /* sun4v */
11786 		tsbmissp->khashstart =
11787 		    (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
11788 		tsbmissp->uhashstart =
11789 		    (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
11790 		tsbmissp->khashsz = khmehash_num;
11791 		tsbmissp->uhashsz = uhmehash_num;
11792 	}
11793 
11794 	if (kpm_enable == 0)
11795 		return;
11796 
11797 	if (kpm_smallpages) {
11798 		/*
11799 		 * If we're using base pagesize pages for seg_kpm
11800 		 * mappings, we use the kernel TSB since we can't afford
11801 		 * to allocate a second huge TSB for these mappings.
11802 		 */
11803 		kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
11804 		kpm_tsbsz = ktsb_szcode;
11805 		kpmsm_tsbbase = kpm_tsbbase;
11806 		kpmsm_tsbsz = kpm_tsbsz;
11807 	} else {
11808 		/*
11809 		 * In VAC conflict case, just put the entries in the
11810 		 * kernel 8K indexed TSB for now so we can find them.
11811 		 * This could really be changed in the future if we feel
11812 		 * the need...
11813 		 */
11814 		kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
11815 		kpmsm_tsbsz = ktsb_szcode;
11816 		kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
11817 		kpm_tsbsz = ktsb4m_szcode;
11818 	}
11819 
11820 	kpmtsbmp = kpmtsbm_area;
11821 	for (i = 0; i < NCPU; kpmtsbmp++, i++) {
11822 		/*
11823 		 * Initialize the kpmtsbm area.
11824 		 * Do this for all possible CPUs as some may be added
11825 		 * while the system is running. There is no cost to this.
11826 		 */
11827 		kpmtsbmp->vbase = kpm_vbase;
11828 		kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
11829 		kpmtsbmp->sz_shift = kpm_size_shift;
11830 		kpmtsbmp->kpmp_shift = kpmp_shift;
11831 		kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
11832 		if (kpm_smallpages == 0) {
11833 			kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
11834 			kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
11835 		} else {
11836 			kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
11837 			kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
11838 		}
11839 		kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
11840 		kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
11841 #ifdef	DEBUG
11842 		kpmtsbmp->flags |= (kpm_tsbmtl) ?  KPMTSBM_TLTSBM_FLAG : 0;
11843 #endif	/* DEBUG */
11844 		if (ktsb_phys)
11845 			kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
11846 	}
11847 
11848 	sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
11849 	    sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
11850 }
11851 
/* Avoid sfmmu_tsbinfo_alloc() here so we don't depend on kmem_alloc */
11853 struct tsb_info ktsb_info[2];
11854 
11855 /*
11856  * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
11857  */
11858 void
11859 sfmmu_init_ktsbinfo()
11860 {
11861 	ASSERT(ksfmmup != NULL);
11862 	ASSERT(ksfmmup->sfmmu_tsb == NULL);
	/*
	 * Allocate tsbinfos for the kernel and copy in the data,
	 * to make debugging easier and sun4v setup easier.
	 */
11867 	ktsb_info[0].tsb_sfmmu = ksfmmup;
11868 	ktsb_info[0].tsb_szc = ktsb_szcode;
11869 	ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
11870 	ktsb_info[0].tsb_va = ktsb_base;
11871 	ktsb_info[0].tsb_pa = ktsb_pbase;
11872 	ktsb_info[0].tsb_flags = 0;
11873 	ktsb_info[0].tsb_tte.ll = 0;
11874 	ktsb_info[0].tsb_cache = NULL;
11875 
11876 	ktsb_info[1].tsb_sfmmu = ksfmmup;
11877 	ktsb_info[1].tsb_szc = ktsb4m_szcode;
11878 	ktsb_info[1].tsb_ttesz_mask = TSB4M;
11879 	ktsb_info[1].tsb_va = ktsb4m_base;
11880 	ktsb_info[1].tsb_pa = ktsb4m_pbase;
11881 	ktsb_info[1].tsb_flags = 0;
11882 	ktsb_info[1].tsb_tte.ll = 0;
11883 	ktsb_info[1].tsb_cache = NULL;
11884 
11885 	/* Link them into ksfmmup. */
11886 	ktsb_info[0].tsb_next = &ktsb_info[1];
11887 	ktsb_info[1].tsb_next = NULL;
11888 	ksfmmup->sfmmu_tsb = &ktsb_info[0];
11889 
11890 	sfmmu_setup_tsbinfo(ksfmmup);
11891 }
11892 
/*
 * Cache the last value returned from va_to_pa().  If the VA specified
 * in the current call to cached_va_to_pa() maps to the same page as the
 * VA in the previous call, then compute the PA using the cached info;
 * otherwise call va_to_pa().
 *
 * Note: this function is neither MT-safe nor consistent in the presence
 * of multiple, interleaved threads.  This function was created to enable
 * an optimization used during boot (at a point when there's only one thread
 * executing on the "boot CPU", and before startup_vm() has been called).
 */
11904 static uint64_t
11905 cached_va_to_pa(void *vaddr)
11906 {
11907 	static uint64_t prev_vaddr_base = 0;
11908 	static uint64_t prev_pfn = 0;
11909 
11910 	if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
11911 		return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
11912 	} else {
11913 		uint64_t pa = va_to_pa(vaddr);
11914 
11915 		if (pa != ((uint64_t)-1)) {
11916 			/*
11917 			 * Computed physical address is valid.  Cache its
11918 			 * related info for the next cached_va_to_pa() call.
11919 			 */
11920 			prev_pfn = pa & MMU_PAGEMASK;
11921 			prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
11922 		}
11923 
11924 		return (pa);
11925 	}
11926 }
11927 
/*
 * Carve up our nucleus hblk region.  We may allocate more hblks than
 * requested due to rounding, but we are guaranteed to have at least
 * enough space to allocate the requested number of hblk8's and hblk1's.
 */
11933 void
11934 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
11935 {
11936 	struct hme_blk *hmeblkp;
11937 	size_t hme8blk_sz, hme1blk_sz;
11938 	size_t i;
11939 	size_t hblk8_bound;
11940 	ulong_t j = 0, k = 0;
11941 
11942 	ASSERT(addr != NULL && size != 0);
11943 
11944 	/* Need to use proper structure alignment */
11945 	hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
11946 	hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
11947 
11948 	nucleus_hblk8.list = (void *)addr;
11949 	nucleus_hblk8.index = 0;
11950 
11951 	/*
11952 	 * Use as much memory as possible for hblk8's since we
11953 	 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
11954 	 * We need to hold back enough space for the hblk1's which
11955 	 * we'll allocate next.
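	 * The bound below also subtracts one hme8blk_sz so that the
	 * "i <= hblk8_bound" loop can never carve an hblk8 that
	 * extends into the reserved region.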
11956 	 */
11957 	hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
11958 	for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
11959 		hmeblkp = (struct hme_blk *)addr;
11960 		addr += hme8blk_sz;
11961 		hmeblkp->hblk_nuc_bit = 1;
11962 		hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
11963 	}
11964 	nucleus_hblk8.len = j;
11965 	ASSERT(j >= nhblk8);
11966 	SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
11967 
11968 	nucleus_hblk1.list = (void *)addr;
11969 	nucleus_hblk1.index = 0;
11970 	for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
11971 		hmeblkp = (struct hme_blk *)addr;
11972 		addr += hme1blk_sz;
11973 		hmeblkp->hblk_nuc_bit = 1;
11974 		hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
11975 	}
11976 	ASSERT(k >= nhblk1);
11977 	nucleus_hblk1.len = k;
11978 	SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
11979 }
11980 
11981 /*
11982  * This function is currently not supported on this platform. For what
11983  * it's supposed to do, see hat.c and hat_srmmu.c
11984  */
11985 /* ARGSUSED */
11986 faultcode_t
11987 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
11988     uint_t flags)
11989 {
11990 	ASSERT(hat->sfmmu_xhat_provider == NULL);
11991 	return (FC_NOSUPPORT);
11992 }
11993 
/*
 * Searches the mapping list of the page for a mapping of the same size.
 * If none is found, the corresponding bit is cleared in the p_index
 * field.  When large pages are more prevalent in the system, we can
 * maintain the mapping list in order and we won't have to traverse the
 * list each time.  Just check the next and prev entries, and if both
 * are of a different size, we clear the bit.
 */
12001 static void
12002 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
12003 {
12004 	struct sf_hment *sfhmep;
12005 	struct hme_blk *hmeblkp;
12006 	int	index;
12007 	pgcnt_t	npgs;
12008 
12009 	ASSERT(ttesz > TTE8K);
12010 
12011 	ASSERT(sfmmu_mlist_held(pp));
12012 
12013 	ASSERT(PP_ISMAPPED_LARGE(pp));
12014 
	/*
	 * Traverse the mapping list looking for another mapping of the
	 * same size, since we only want to clear the index field if all
	 * mappings of that size are gone.
	 */
12020 
12021 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
12022 		hmeblkp = sfmmu_hmetohblk(sfhmep);
12023 		if (hmeblkp->hblk_xhat_bit)
12024 			continue;
12025 		if (hme_size(sfhmep) == ttesz) {
			/*
			 * Another mapping of the same size exists;
			 * don't clear the index.
			 */
12029 			return;
12030 		}
12031 	}
12032 
12033 	/*
12034 	 * Clear the p_index bit for large page.
12035 	 */
12036 	index = PAGESZ_TO_INDEX(ttesz);
12037 	npgs = TTEPAGES(ttesz);
12038 	while (npgs-- > 0) {
12039 		ASSERT(pp->p_index & index);
12040 		pp->p_index &= ~index;
12041 		pp = PP_PAGENEXT(pp);
12042 	}
12043 }
12044 
/*
 * Return the supported features.
 */
12048 /* ARGSUSED */
12049 int
12050 hat_supported(enum hat_features feature, void *arg)
12051 {
12052 	switch (feature) {
12053 	case    HAT_SHARED_PT:
12054 	case	HAT_DYNAMIC_ISM_UNMAP:
12055 	case	HAT_VMODSORT:
12056 		return (1);
12057 	default:
12058 		return (0);
12059 	}
12060 }
12061 
12062 void
12063 hat_enter(struct hat *hat)
12064 {
12065 	hatlock_t	*hatlockp;
12066 
12067 	if (hat != ksfmmup) {
12068 		hatlockp = TSB_HASH(hat);
12069 		mutex_enter(HATLOCK_MUTEXP(hatlockp));
12070 	}
12071 }
12072 
12073 void
12074 hat_exit(struct hat *hat)
12075 {
12076 	hatlock_t	*hatlockp;
12077 
12078 	if (hat != ksfmmup) {
12079 		hatlockp = TSB_HASH(hat);
12080 		mutex_exit(HATLOCK_MUTEXP(hatlockp));
12081 	}
12082 }
12083 
12084 /*ARGSUSED*/
12085 void
12086 hat_reserve(struct as *as, caddr_t addr, size_t len)
12087 {
12088 }
12089 
12090 static void
12091 hat_kstat_init(void)
12092 {
12093 	kstat_t *ksp;
12094 
12095 	ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
12096 		KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
12097 		KSTAT_FLAG_VIRTUAL);
12098 	if (ksp) {
12099 		ksp->ks_data = (void *) &sfmmu_global_stat;
12100 		kstat_install(ksp);
12101 	}
12102 	ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
12103 		KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
12104 		KSTAT_FLAG_VIRTUAL);
12105 	if (ksp) {
12106 		ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
12107 		kstat_install(ksp);
12108 	}
12109 	ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
12110 		KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
12111 		KSTAT_FLAG_WRITABLE);
12112 	if (ksp) {
12113 		ksp->ks_update = sfmmu_kstat_percpu_update;
12114 		kstat_install(ksp);
12115 	}
12116 }
12117 
12118 /* ARGSUSED */
12119 static int
12120 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
12121 {
12122 	struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
12123 	struct tsbmiss *tsbm = tsbmiss_area;
12124 	struct kpmtsbm *kpmtsbm = kpmtsbm_area;
12125 	int i;
12126 
12127 	ASSERT(cpu_kstat);
12128 	if (rw == KSTAT_READ) {
12129 		for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
12130 			cpu_kstat->sf_itlb_misses = tsbm->itlb_misses;
12131 			cpu_kstat->sf_dtlb_misses = tsbm->dtlb_misses;
12132 			cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
12133 				tsbm->uprot_traps;
12134 			cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
12135 				kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
12136 
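			/*
			 * Every TLB miss results in a TSB probe, so the
			 * number of TSB hits can be derived as the total
			 * TLB misses minus the misses that also missed
			 * in the TSBs.
			 */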
12137 			if (tsbm->itlb_misses > 0 && tsbm->dtlb_misses > 0) {
12138 				cpu_kstat->sf_tsb_hits =
12139 				(tsbm->itlb_misses + tsbm->dtlb_misses) -
12140 				(tsbm->utsb_misses + tsbm->ktsb_misses +
12141 				kpmtsbm->kpm_tsb_misses);
12142 			} else {
12143 				cpu_kstat->sf_tsb_hits = 0;
12144 			}
12145 			cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
12146 			cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
12147 		}
12148 	} else {
12149 		/* KSTAT_WRITE is used to clear stats */
12150 		for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
12151 			tsbm->itlb_misses = 0;
12152 			tsbm->dtlb_misses = 0;
12153 			tsbm->utsb_misses = 0;
12154 			tsbm->ktsb_misses = 0;
12155 			tsbm->uprot_traps = 0;
12156 			tsbm->kprot_traps = 0;
12157 			kpmtsbm->kpm_dtlb_misses = 0;
12158 			kpmtsbm->kpm_tsb_misses = 0;
12159 		}
12160 	}
12161 	return (0);
12162 }
12163 
12164 #ifdef	DEBUG
12165 
12166 tte_t  *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
12167 
12168 /*
12169  * A tte checker. *orig_old is the value we read before cas.
12170  *	*cur is the value returned by cas.
12171  *	*new is the desired value when we do the cas.
12172  *
12173  *	*hmeblkp is currently unused.
12174  */
12175 
12176 /* ARGSUSED */
12177 void
12178 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
12179 {
12180 	uint_t i, j, k;
12181 	int cpuid = CPU->cpu_id;
12182 
12183 	gorig[cpuid] = orig_old;
12184 	gcur[cpuid] = cur;
12185 	gnew[cpuid] = new;
12186 
12187 #ifdef lint
12188 	hmeblkp = hmeblkp;
12189 #endif
12190 
12191 	if (TTE_IS_VALID(orig_old)) {
12192 		if (TTE_IS_VALID(cur)) {
12193 			i = TTE_TO_TTEPFN(orig_old);
12194 			j = TTE_TO_TTEPFN(cur);
12195 			k = TTE_TO_TTEPFN(new);
12196 			if (i != j) {
12197 				/* remap error? */
12198 				panic("chk_tte: bad pfn, 0x%x, 0x%x",
12199 					i, j);
12200 			}
12201 
12202 			if (i != k) {
12203 				/* remap error? */
12204 				panic("chk_tte: bad pfn2, 0x%x, 0x%x",
12205 					i, k);
12206 			}
12207 		} else {
12208 			if (TTE_IS_VALID(new)) {
12209 				panic("chk_tte: invalid cur? ");
12210 			}
12211 
12212 			i = TTE_TO_TTEPFN(orig_old);
12213 			k = TTE_TO_TTEPFN(new);
12214 			if (i != k) {
12215 				panic("chk_tte: bad pfn3, 0x%x, 0x%x",
12216 					i, k);
12217 			}
12218 		}
12219 	} else {
12220 		if (TTE_IS_VALID(cur)) {
12221 			j = TTE_TO_TTEPFN(cur);
12222 			if (TTE_IS_VALID(new)) {
12223 				k = TTE_TO_TTEPFN(new);
12224 				if (j != k) {
12225 					panic("chk_tte: bad pfn4, 0x%x, 0x%x",
12226 						j, k);
12227 				}
12228 			} else {
12229 				panic("chk_tte: why here?");
12230 			}
12231 		} else {
12232 			if (!TTE_IS_VALID(new)) {
12233 				panic("chk_tte: why here2 ?");
12234 			}
12235 		}
12236 	}
12237 }
12238 
12239 #endif /* DEBUG */
12240 
12241 extern void prefetch_tsbe_read(struct tsbe *);
12242 extern void prefetch_tsbe_write(struct tsbe *);
12243 
12244 
/*
 * We want to prefetch 7 cache lines ahead for our read prefetch.  This gives
 * us optimal performance on Cheetah+.  You can only have 8 outstanding
 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
 * prefetch to make the best use of the prefetch capability.
 */
12251 #define	TSBE_PREFETCH_STRIDE (7)
12252 
12253 void
12254 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
12255 {
12256 	int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
12257 	int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
12258 	int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
12259 	int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
12260 	struct tsbe *old;
12261 	struct tsbe *new;
12262 	struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
12263 	uint64_t va;
12264 	int new_offset;
12265 	int i;
12266 	int vpshift;
12267 	int last_prefetch;
12268 
12269 	if (old_bytes == new_bytes) {
12270 		bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
12271 	} else {
12272 
		/*
		 * A TSBE is 16 bytes, which means there are four TSBEs per
		 * P$ line (64 bytes); thus we issue a prefetch every 4 TSBEs.
		 */
12277 		old = (struct tsbe *)old_tsbinfo->tsb_va;
12278 		last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
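		/*
		 * Assuming prefetch_tsbe_read() reaches
		 * TSBE_PREFETCH_STRIDE P$ lines ahead, we stop issuing
		 * read prefetches 8 lines (32 TSBEs) before the end of
		 * the old TSB so we never prefetch past the buffer.
		 */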
12279 		for (i = 0; i < old_entries; i++, old++) {
12280 			if (((i & (4-1)) == 0) && (i < last_prefetch))
12281 				prefetch_tsbe_read(old);
12282 			if (!old->tte_tag.tag_invalid) {
12283 				/*
12284 				 * We have a valid TTE to remap.  Check the
12285 				 * size.  We won't remap 64K or 512K TTEs
12286 				 * because they span more than one TSB entry
12287 				 * and are indexed using an 8K virt. page.
12288 				 * Ditto for 32M and 256M TTEs.
12289 				 */
12290 				if (TTE_CSZ(&old->tte_data) == TTE64K ||
12291 				    TTE_CSZ(&old->tte_data) == TTE512K)
12292 					continue;
				if (mmu_page_sizes == max_mmu_page_sizes) {
					if (TTE_CSZ(&old->tte_data) == TTE32M ||
					    TTE_CSZ(&old->tte_data) == TTE256M)
						continue;
				}
12298 
12299 				/* clear the lower 22 bits of the va */
12300 				va = *(uint64_t *)old << 22;
12301 				/* turn va into a virtual pfn */
12302 				va >>= 22 - TSB_START_SIZE;
12303 				/*
12304 				 * or in bits from the offset in the tsb
12305 				 * to get the real virtual pfn. These
12306 				 * correspond to bits [21:13] in the va
12307 				 */
12308 				vpshift =
12309 				    TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
12310 				    0x1ff;
12311 				va |= (i << vpshift);
12312 				va >>= vpshift;
12313 				new_offset = va & (new_entries - 1);
12314 				new = new_base + new_offset;
12315 				prefetch_tsbe_write(new);
12316 				*new = *old;
12317 			}
12318 		}
12319 	}
12320 }
12321 
12322 /*
12323  * Kernel Physical Mapping (kpm) facility
12324  */
12325 
12326 /* -- hat_kpm interface section -- */
12327 
/*
 * Map in a locked page and return the vaddr.
 * When a kpme is provided by the caller it is added to
 * the page's p_kpmelist.  The page to be mapped in must
 * be at least read locked (p_selock).
 */
12334 caddr_t
12335 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
12336 {
12337 	kmutex_t	*pml;
12338 	caddr_t		vaddr;
12339 
12340 	if (kpm_enable == 0) {
12341 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
12342 		return ((caddr_t)NULL);
12343 	}
12344 
12345 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
12346 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
12347 		return ((caddr_t)NULL);
12348 	}
12349 
12350 	pml = sfmmu_mlist_enter(pp);
12351 	ASSERT(pp->p_kpmref >= 0);
12352 
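	/*
	 * The first mapin of a page establishes the kpm mapping;
	 * subsequent mapins only recompute the existing kpm address.
	 */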
12353 	vaddr = (pp->p_kpmref == 0) ?
12354 		sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1);
12355 
12356 	if (kpme != NULL) {
12357 		/*
12358 		 * Tolerate multiple mapins for the same kpme to avoid
12359 		 * the need for an extra serialization.
12360 		 */
12361 		if ((sfmmu_kpme_lookup(kpme, pp)) == 0)
12362 			sfmmu_kpme_add(kpme, pp);
12363 
12364 		ASSERT(pp->p_kpmref > 0);
12365 
12366 	} else {
12367 		pp->p_kpmref++;
12368 	}
12369 
12370 	sfmmu_mlist_exit(pml);
12371 	return (vaddr);
12372 }
12373 
/*
 * Map out a locked page.
 * When a kpme is provided by the caller it is removed from
 * the page's p_kpmelist.  The page to be mapped out must be at
 * least read locked (p_selock).
 * Note: The seg_kpm layer provides a mapout interface for the
 * case that a kpme is used and the underlying page is unlocked.
 * This can be used instead of calling this function directly.
 */
12383 void
12384 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
12385 {
12386 	kmutex_t	*pml;
12387 
12388 	if (kpm_enable == 0) {
12389 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
12390 		return;
12391 	}
12392 
12393 	if (IS_KPM_ADDR(vaddr) == 0) {
12394 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
12395 		return;
12396 	}
12397 
12398 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
12399 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
12400 		return;
12401 	}
12402 
12403 	if (kpme != NULL) {
12404 		ASSERT(pp == kpme->kpe_page);
12405 		pp = kpme->kpe_page;
12406 		pml = sfmmu_mlist_enter(pp);
12407 
12408 		if (sfmmu_kpme_lookup(kpme, pp) == 0)
12409 			panic("hat_kpm_mapout: kpme not found pp=%p",
12410 				(void *)pp);
12411 
12412 		ASSERT(pp->p_kpmref > 0);
12413 		sfmmu_kpme_sub(kpme, pp);
12414 
12415 	} else {
12416 		pml = sfmmu_mlist_enter(pp);
12417 		pp->p_kpmref--;
12418 	}
12419 
12420 	ASSERT(pp->p_kpmref >= 0);
12421 	if (pp->p_kpmref == 0)
12422 		sfmmu_kpm_mapout(pp, vaddr);
12423 
12424 	sfmmu_mlist_exit(pml);
12425 }
12426 
/*
 * Return the kpm virtual address for the page at pp.
 * If checkswap is non-zero and the page is backed by a
 * swap vnode, the physical address is used rather than
 * p_offset to determine the kpm region.
 * Note: This function must be used with extreme care.  The
 * caller is responsible for the stability of the page identity.
 */
12436 caddr_t
12437 hat_kpm_page2va(struct page *pp, int checkswap)
12438 {
12439 	int		vcolor, vcolor_pa;
12440 	uintptr_t	paddr, vaddr;
12441 
12442 	ASSERT(kpm_enable);
12443 
12444 	paddr = ptob(pp->p_pagenum);
12445 	vcolor_pa = addr_to_vcolor(paddr);
12446 
12447 	if (checkswap && pp->p_vnode && IS_SWAPFSVP(pp->p_vnode))
12448 		vcolor = (PP_ISNC(pp)) ? vcolor_pa : PP_GET_VCOLOR(pp);
12449 	else
12450 		vcolor = addr_to_vcolor(pp->p_offset);
12451 
12452 	vaddr = (uintptr_t)kpm_vbase + paddr;
12453 
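	/*
	 * If the page's physical color differs from the virtual color
	 * required by the mapping, adjust vaddr into the kpm alias
	 * range that provides the required color.
	 */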
12454 	if (vcolor_pa != vcolor) {
12455 		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
12456 		vaddr += (vcolor_pa > vcolor) ?
12457 			((uintptr_t)vcolor_pa << kpm_size_shift) :
12458 			((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
12459 	}
12460 
12461 	return ((caddr_t)vaddr);
12462 }
12463 
12464 /*
12465  * Return the page for the kpm virtual address vaddr.
12466  * Caller is responsible for the kpm mapping and lock
12467  * state of the page.
12468  */
12469 page_t *
12470 hat_kpm_vaddr2page(caddr_t vaddr)
12471 {
12472 	uintptr_t	paddr;
12473 	pfn_t		pfn;
12474 
12475 	ASSERT(IS_KPM_ADDR(vaddr));
12476 
12477 	SFMMU_KPM_VTOP(vaddr, paddr);
12478 	pfn = (pfn_t)btop(paddr);
12479 
12480 	return (page_numtopp_nolock(pfn));
12481 }
12482 
12483 /* page to kpm_page */
12484 #define	PP2KPMPG(pp, kp) {						\
12485 	struct memseg	*mseg;						\
12486 	pgcnt_t		inx;						\
12487 	pfn_t		pfn;						\
12488 									\
12489 	pfn = pp->p_pagenum;						\
12490 	mseg = page_numtomemseg_nolock(pfn);				\
12491 	ASSERT(mseg);							\
12492 	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);		\
12493 	ASSERT(inx < mseg->kpm_nkpmpgs);				\
12494 	kp = &mseg->kpm_pages[inx];					\
12495 }
12496 
12497 /* page to kpm_spage */
12498 #define	PP2KPMSPG(pp, ksp) {						\
12499 	struct memseg	*mseg;						\
12500 	pgcnt_t		inx;						\
12501 	pfn_t		pfn;						\
12502 									\
12503 	pfn = pp->p_pagenum;						\
12504 	mseg = page_numtomemseg_nolock(pfn);				\
12505 	ASSERT(mseg);							\
12506 	inx = pfn - mseg->kpm_pbase;					\
12507 	ksp = &mseg->kpm_spages[inx];					\
12508 }
12509 
12510 /*
12511  * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurred
12512  * which could not be resolved by the trap level tsbmiss handler for the
12513  * following reasons:
12514  * . The vaddr is in VAC alias range (always PAGESIZE mapping size).
12515  * . The kpm (s)page range of vaddr is in a VAC alias prevention state.
12516  * . tsbmiss handling at trap level is not desired (DEBUG kernel only,
12517  *   kpm_tsbmtl == 0).
12518  */
12519 int
12520 hat_kpm_fault(struct hat *hat, caddr_t vaddr)
12521 {
12522 	int		error;
12523 	uintptr_t	paddr;
12524 	pfn_t		pfn;
12525 	struct memseg	*mseg;
12526 	page_t	*pp;
12527 
12528 	if (kpm_enable == 0) {
12529 		cmn_err(CE_WARN, "hat_kpm_fault: kpm_enable not set");
12530 		return (ENOTSUP);
12531 	}
12532 
12533 	ASSERT(hat == ksfmmup);
12534 	ASSERT(IS_KPM_ADDR(vaddr));
12535 
12536 	SFMMU_KPM_VTOP(vaddr, paddr);
12537 	pfn = (pfn_t)btop(paddr);
12538 	mseg = page_numtomemseg_nolock(pfn);
12539 	if (mseg == NULL)
12540 		return (EFAULT);
12541 
12542 	pp = &mseg->pages[(pgcnt_t)(pfn - mseg->pages_base)];
12543 	ASSERT((pfn_t)pp->p_pagenum == pfn);
12544 
12545 	if (!PAGE_LOCKED(pp))
12546 		return (EFAULT);
12547 
12548 	if (kpm_smallpages == 0)
12549 		error = sfmmu_kpm_fault(vaddr, mseg, pp);
12550 	else
12551 		error = sfmmu_kpm_fault_small(vaddr, mseg, pp);
12552 
12553 	return (error);
12554 }
12555 
12556 extern  krwlock_t memsegslock;
12557 
/*
 * memseg_hash[] has been cleared, so we need to clear memseg_phash[] too.
 */
12561 void
12562 hat_kpm_mseghash_clear(int nentries)
12563 {
12564 	pgcnt_t i;
12565 
12566 	if (kpm_enable == 0)
12567 		return;
12568 
12569 	for (i = 0; i < nentries; i++)
12570 		memseg_phash[i] = MSEG_NULLPTR_PA;
12571 }
12572 
12573 /*
12574  * Update memseg_phash[inx] when memseg_hash[inx] was changed.
12575  */
12576 void
12577 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
12578 {
12579 	if (kpm_enable == 0)
12580 		return;
12581 
12582 	memseg_phash[inx] = (msp) ? va_to_pa(msp) : MSEG_NULLPTR_PA;
12583 }
12584 
12585 /*
12586  * Update kpm memseg members from basic memseg info.
12587  */
12588 void
12589 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
12590 	offset_t kpm_pages_off)
12591 {
12592 	if (kpm_enable == 0)
12593 		return;
12594 
12595 	msp->kpm_pages = (kpm_page_t *)((caddr_t)msp->pages + kpm_pages_off);
12596 	msp->kpm_nkpmpgs = nkpmpgs;
12597 	msp->kpm_pbase = kpmptop(ptokpmp(msp->pages_base));
12598 	msp->pagespa = va_to_pa(msp->pages);
12599 	msp->epagespa = va_to_pa(msp->epages);
12600 	msp->kpm_pagespa = va_to_pa(msp->kpm_pages);
12601 }
12602 
/*
 * Set up nextpa when a memseg is inserted.
 * Assumes that the memsegslock is already held.
 */
12607 void
12608 hat_kpm_addmem_mseg_insert(struct memseg *msp)
12609 {
12610 	if (kpm_enable == 0)
12611 		return;
12612 
12613 	ASSERT(RW_LOCK_HELD(&memsegslock));
12614 	msp->nextpa = (memsegs) ? va_to_pa(memsegs) : MSEG_NULLPTR_PA;
12615 }
12616 
/*
 * Set up memsegspa when a memseg is (head) inserted.
 * Called before memsegs is updated to complete a
 * memseg insert operation.
 * Assumes that the memsegslock is already held.
 */
12623 void
12624 hat_kpm_addmem_memsegs_update(struct memseg *msp)
12625 {
12626 	if (kpm_enable == 0)
12627 		return;
12628 
12629 	ASSERT(RW_LOCK_HELD(&memsegslock));
12630 	ASSERT(memsegs);
12631 	memsegspa = va_to_pa(msp);
12632 }
12633 
12634 /*
12635  * Return end of metadata for an already setup memseg.
12636  *
12637  * Note: kpm_pages and kpm_spages are aliases and the underlying
12638  * member of struct memseg is a union, therefore they always have
12639  * the same address within a memseg. They must be differentiated
12640  * when pointer arithmetic is used with them.
12641  */
12642 caddr_t
12643 hat_kpm_mseg_reuse(struct memseg *msp)
12644 {
12645 	caddr_t end;
12646 
12647 	if (kpm_smallpages == 0)
12648 		end = (caddr_t)(msp->kpm_pages + msp->kpm_nkpmpgs);
12649 	else
12650 		end = (caddr_t)(msp->kpm_spages + msp->kpm_nkpmpgs);
12651 
12652 	return (end);
12653 }
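
/*
 * Sketch of the union aliasing noted above (illustrative): both
 * pointers share the same numeric base address but stride differently,
 * so
 *
 *	(caddr_t)(msp->kpm_pages + n)
 *	    == (caddr_t)msp->kpm_pages + n * sizeof (kpm_page_t)
 *	(caddr_t)(msp->kpm_spages + n)
 *	    == (caddr_t)msp->kpm_spages + n * sizeof (kpm_spage_t)
 *
 * which is why the kpm_smallpages cases above cannot be collapsed.
 */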
12654 
12655 /*
12656  * Update memsegspa (when the first memseg in the list
12657  * is deleted) or nextpa when a memseg is deleted.
12658  * Assumes that the memsegslock is already held.
12659  */
12660 void
12661 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
12662 {
12663 	struct memseg *lmsp;
12664 
12665 	if (kpm_enable == 0)
12666 		return;
12667 
12668 	ASSERT(RW_LOCK_HELD(&memsegslock));
12669 
12670 	if (mspp == &memsegs) {
12671 		memsegspa = (msp->next) ?
12672 				va_to_pa(msp->next) : MSEG_NULLPTR_PA;
12673 	} else {
12674 		lmsp = (struct memseg *)
12675 			((uint64_t)mspp - offsetof(struct memseg, next));
12676 		lmsp->nextpa = (msp->next) ?
12677 				va_to_pa(msp->next) : MSEG_NULLPTR_PA;
12678 	}
12679 }
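
/*
 * A note on the pointer arithmetic above (illustrative sketch): when
 * mspp != &memsegs it points at the `next' field of the predecessor
 * memseg, so subtracting offsetof(struct memseg, next) recovers the
 * containing memseg -- the classic container-of idiom:
 *
 *	struct memseg **mspp = &prev->next;
 *	ASSERT((struct memseg *)((uintptr_t)mspp -
 *	    offsetof(struct memseg, next)) == prev);
 */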
12680 
12681 /*
12682  * Update kpm members for all memseg's involved in a split operation
12683  * and do the atomic update of the physical memseg chain.
12684  *
12685  * Note: kpm_pages and kpm_spages are aliases and the underlying member
12686  * of struct memseg is a union, therefore they always have the same
12687  * address within a memseg. With that the direct assignments and
12688  * va_to_pa conversions below don't have to be distinguished wrt. to
12689  * kpm_smallpages. They must be differentiated when pointer arithmetic
12690  * is used with them.
12691  *
12692  * Assumes that the memsegslock is already held.
12693  */
12694 void
12695 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
12696 	struct memseg *lo, struct memseg *mid, struct memseg *hi)
12697 {
12698 	pgcnt_t start, end, kbase, kstart, num;
12699 	struct memseg *lmsp;
12700 
12701 	if (kpm_enable == 0)
12702 		return;
12703 
12704 	ASSERT(RW_LOCK_HELD(&memsegslock));
12705 	ASSERT(msp && mid && msp->kpm_pages);
12706 
12707 	kbase = ptokpmp(msp->kpm_pbase);
12708 
12709 	if (lo) {
12710 		num = lo->pages_end - lo->pages_base;
12711 		start = kpmptop(ptokpmp(lo->pages_base));
12712 		/* align end to kpm page size granularity */
12713 		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
12714 		lo->kpm_pbase = start;
12715 		lo->kpm_nkpmpgs = ptokpmp(end - start);
12716 		lo->kpm_pages = msp->kpm_pages;
12717 		lo->kpm_pagespa = va_to_pa(lo->kpm_pages);
12718 		lo->pagespa = va_to_pa(lo->pages);
12719 		lo->epagespa = va_to_pa(lo->epages);
12720 		lo->nextpa = va_to_pa(lo->next);
12721 	}
12722 
12723 	/* mid */
12724 	num = mid->pages_end - mid->pages_base;
12725 	kstart = ptokpmp(mid->pages_base);
12726 	start = kpmptop(kstart);
12727 	/* align end to kpm page size granularity */
12728 	end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
12729 	mid->kpm_pbase = start;
12730 	mid->kpm_nkpmpgs = ptokpmp(end - start);
12731 	if (kpm_smallpages == 0) {
12732 		mid->kpm_pages = msp->kpm_pages + (kstart - kbase);
12733 	} else {
12734 		mid->kpm_spages = msp->kpm_spages + (kstart - kbase);
12735 	}
12736 	mid->kpm_pagespa = va_to_pa(mid->kpm_pages);
12737 	mid->pagespa = va_to_pa(mid->pages);
12738 	mid->epagespa = va_to_pa(mid->epages);
12739 	mid->nextpa = (mid->next) ?  va_to_pa(mid->next) : MSEG_NULLPTR_PA;
12740 
12741 	if (hi) {
12742 		num = hi->pages_end - hi->pages_base;
12743 		kstart = ptokpmp(hi->pages_base);
12744 		start = kpmptop(kstart);
12745 		/* align end to kpm page size granularity */
12746 		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
12747 		hi->kpm_pbase = start;
12748 		hi->kpm_nkpmpgs = ptokpmp(end - start);
12749 		if (kpm_smallpages == 0) {
12750 			hi->kpm_pages = msp->kpm_pages + (kstart - kbase);
12751 		} else {
12752 			hi->kpm_spages = msp->kpm_spages + (kstart - kbase);
12753 		}
12754 		hi->kpm_pagespa = va_to_pa(hi->kpm_pages);
12755 		hi->pagespa = va_to_pa(hi->pages);
12756 		hi->epagespa = va_to_pa(hi->epages);
12757 		hi->nextpa = (hi->next) ? va_to_pa(hi->next) : MSEG_NULLPTR_PA;
12758 	}
12759 
12760 	/*
12761 	 * Atomic update of the physical memseg chain
12762 	 */
12763 	if (mspp == &memsegs) {
12764 		memsegspa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
12765 	} else {
12766 		lmsp = (struct memseg *)
12767 			((uint64_t)mspp - offsetof(struct memseg, next));
12768 		lmsp->nextpa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
12769 	}
12770 }
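
/*
 * Worked example for the alignment arithmetic above (illustrative;
 * assumes 8K base pages and 4M kpm pages, i.e. kpmpnpgs == 512): a
 * `mid' segment with pages_base == 1024 and pages_end == 2000 yields
 * kstart == ptokpmp(1024) == 2, start == kpmptop(2) == 1024,
 * num == 976, end == kpmptop(ptokpmp(1024 + 976 - 1)) + 512 == 2048,
 * so kpm_pbase == 1024 and kpm_nkpmpgs == ptokpmp(1024) == 2.
 */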
12771 
12772 /*
12773  * Walk the memsegs chain, applying func to each memseg span and vcolor.
12774  */
12775 void
12776 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
12777 {
12778 	pfn_t	pbase, pend;
12779 	int	vcolor;
12780 	void	*base;
12781 	size_t	size;
12782 	struct memseg *msp;
12783 	extern uint_t vac_colors;
12784 
12785 	for (msp = memsegs; msp; msp = msp->next) {
12786 		pbase = msp->pages_base;
12787 		pend = msp->pages_end;
12788 		for (vcolor = 0; vcolor < vac_colors; vcolor++) {
12789 			base = ptob(pbase) + kpm_vbase + kpm_size * vcolor;
12790 			size = ptob(pend - pbase);
12791 			func(arg, base, size);
12792 		}
12793 	}
12794 }
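
/*
 * Sketch of the address computation above (illustrative): on VAC
 * machines the kpm range is replicated per vcolor, each alias being a
 * kpm_size sized window, so a memseg span at color c starts at
 *
 *	kpm_vbase + c * kpm_size + ptob(pages_base)
 *
 * and func is applied once per (memseg, vcolor) pair.
 */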
12795 
12796 
12797 /* -- sfmmu_kpm internal section -- */
12798 
12799 /*
12800  * Return the page frame number if a valid segkpm mapping exists
12801  * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
12802  * Should only be used by other sfmmu routines.
12803  */
12804 pfn_t
12805 sfmmu_kpm_vatopfn(caddr_t vaddr)
12806 {
12807 	uintptr_t	paddr;
12808 	pfn_t		pfn;
12809 	page_t	*pp;
12810 
12811 	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));
12812 
12813 	SFMMU_KPM_VTOP(vaddr, paddr);
12814 	pfn = (pfn_t)btop(paddr);
12815 	pp = page_numtopp_nolock(pfn);
12816 	if (pp && pp->p_kpmref)
12817 		return (pfn);
12818 	else
12819 		return ((pfn_t)PFN_INVALID);
12820 }
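
/*
 * Usage sketch (illustrative): a caller already holding a kpm vaddr
 * can cheaply test for a live kpm mapping, e.g.
 *
 *	if (sfmmu_kpm_vatopfn(vaddr) != PFN_INVALID)
 *		... vaddr currently has a kpm user (p_kpmref != 0) ...
 */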
12821 
12822 /*
12823  * Lookup a kpme in the p_kpmelist.
12824  */
12825 static int
12826 sfmmu_kpme_lookup(struct kpme *kpme, page_t *pp)
12827 {
12828 	struct kpme	*p;
12829 
12830 	for (p = pp->p_kpmelist; p; p = p->kpe_next) {
12831 		if (p == kpme)
12832 			return (1);
12833 	}
12834 	return (0);
12835 }
12836 
12837 /*
12838  * Insert a kpme into the p_kpmelist and increment
12839  * the per page kpm reference count.
12840  */
12841 static void
12842 sfmmu_kpme_add(struct kpme *kpme, page_t *pp)
12843 {
12844 	ASSERT(pp->p_kpmref >= 0);
12845 
12846 	/* head insert */
12847 	kpme->kpe_prev = NULL;
12848 	kpme->kpe_next = pp->p_kpmelist;
12849 
12850 	if (pp->p_kpmelist)
12851 		pp->p_kpmelist->kpe_prev = kpme;
12852 
12853 	pp->p_kpmelist = kpme;
12854 	kpme->kpe_page = pp;
12855 	pp->p_kpmref++;
12856 }
12857 
12858 /*
12859  * Remove a kpme from the p_kpmelist and decrement
12860  * the per page kpm reference count.
12861  */
12862 static void
12863 sfmmu_kpme_sub(struct kpme *kpme, page_t *pp)
12864 {
12865 	ASSERT(pp->p_kpmref > 0);
12866 
12867 	if (kpme->kpe_prev) {
12868 		ASSERT(pp->p_kpmelist != kpme);
12869 		ASSERT(kpme->kpe_prev->kpe_page == pp);
12870 		kpme->kpe_prev->kpe_next = kpme->kpe_next;
12871 	} else {
12872 		ASSERT(pp->p_kpmelist == kpme);
12873 		pp->p_kpmelist = kpme->kpe_next;
12874 	}
12875 
12876 	if (kpme->kpe_next) {
12877 		ASSERT(kpme->kpe_next->kpe_page == pp);
12878 		kpme->kpe_next->kpe_prev = kpme->kpe_prev;
12879 	}
12880 
12881 	kpme->kpe_next = kpme->kpe_prev = NULL;
12882 	kpme->kpe_page = NULL;
12883 	pp->p_kpmref--;
12884 }
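
/*
 * Lifecycle sketch (illustrative, hypothetical caller): a segkpm
 * client using a kpme would typically pair these as
 *
 *	if (sfmmu_kpme_lookup(kpme, pp) == 0)
 *		sfmmu_kpme_add(kpme, pp);	-- p_kpmref++
 *	...
 *	sfmmu_kpme_sub(kpme, pp);		-- p_kpmref--
 *
 * with the mlist mutex held across each call.
 */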
12885 
12886 /*
12887  * Mapin a single page; it is called every time a page changes its state
12888  * from kpm-unmapped to kpm-mapped. It may not be called when only a new
12889  * kpm instance does a mapin and wants to share the mapping.
12890  * Assumes that the mlist mutex is already grabbed.
12891  */
12892 static caddr_t
12893 sfmmu_kpm_mapin(page_t *pp)
12894 {
12895 	kpm_page_t	*kp;
12896 	kpm_hlk_t	*kpmp;
12897 	caddr_t		vaddr;
12898 	int		kpm_vac_range;
12899 	pfn_t		pfn;
12900 	tte_t		tte;
12901 	kmutex_t	*pmtx;
12902 	int		uncached;
12903 	kpm_spage_t	*ksp;
12904 	kpm_shlk_t	*kpmsp;
12905 	int		oldval;
12906 
12907 	ASSERT(sfmmu_mlist_held(pp));
12908 	ASSERT(pp->p_kpmref == 0);
12909 
12910 	vaddr = sfmmu_kpm_getvaddr(pp, &kpm_vac_range);
12911 
12912 	ASSERT(IS_KPM_ADDR(vaddr));
12913 	uncached = PP_ISNC(pp);
12914 	pfn = pp->p_pagenum;
12915 
12916 	if (kpm_smallpages)
12917 		goto smallpages_mapin;
12918 
12919 	PP2KPMPG(pp, kp);
12920 
12921 	kpmp = KPMP_HASH(kp);
12922 	mutex_enter(&kpmp->khl_mutex);
12923 
12924 	ASSERT(PP_ISKPMC(pp) == 0);
12925 	ASSERT(PP_ISKPMS(pp) == 0);
12926 
12927 	if (uncached) {
12928 		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
12929 		if (kpm_vac_range == 0) {
12930 			if (kp->kp_refcnts == 0) {
12931 				/*
12932 				 * Must remove large page mapping if it exists.
12933 				 * Pages in uncached state can only be mapped
12934 				 * small (PAGESIZE) within the regular kpm
12935 				 * range.
12936 				 */
12937 				if (kp->kp_refcntc == -1) {
12938 					/* remove go indication */
12939 					sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
12940 						&kpmp->khl_lock, KPMTSBM_STOP);
12941 				}
12942 				if (kp->kp_refcnt > 0 && kp->kp_refcntc == 0)
12943 					sfmmu_kpm_demap_large(vaddr);
12944 			}
12945 			ASSERT(kp->kp_refcntc >= 0);
12946 			kp->kp_refcntc++;
12947 		}
12948 		pmtx = sfmmu_page_enter(pp);
12949 		PP_SETKPMC(pp);
12950 		sfmmu_page_exit(pmtx);
12951 	}
12952 
12953 	if ((kp->kp_refcntc > 0 || kp->kp_refcnts > 0) && kpm_vac_range == 0) {
12954 		/*
12955 		 * Have to do a small (PAGESIZE) mapin within this kpm_page
12956 		 * range since it is marked to be in VAC conflict mode or
12957 		 * when there are still other small mappings around.
12958 		 */
12959 
12960 		/* tte assembly */
12961 		if (uncached == 0)
12962 			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
12963 		else
12964 			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
12965 
12966 		/* tsb dropin */
12967 		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
12968 
12969 		pmtx = sfmmu_page_enter(pp);
12970 		PP_SETKPMS(pp);
12971 		sfmmu_page_exit(pmtx);
12972 
12973 		kp->kp_refcnts++;
12974 		ASSERT(kp->kp_refcnts > 0);
12975 		goto exit;
12976 	}
12977 
12978 	if (kpm_vac_range == 0) {
12979 		/*
12980 		 * Fast path / regular case, no VAC conflict handling
12981 		 * in progress within this kpm_page range.
12982 		 */
12983 		if (kp->kp_refcnt == 0) {
12984 
12985 			/* tte assembly */
12986 			KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);
12987 
12988 			/* tsb dropin */
12989 			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);
12990 
12991 			/* Set go flag for TL tsbmiss handler */
12992 			if (kp->kp_refcntc == 0)
12993 				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
12994 						&kpmp->khl_lock, KPMTSBM_START);
12995 
12996 			ASSERT(kp->kp_refcntc == -1);
12997 		}
12998 		kp->kp_refcnt++;
12999 		ASSERT(kp->kp_refcnt);
13000 
13001 	} else {
13002 		/*
13003 		 * The page is not set up according to the common VAC
13004 		 * prevention rules for the regular and kpm mapping layers,
13005 		 * e.g. the page layer was not able to deliver a correctly
13006 		 * vcolor'ed page for a given vaddr corresponding to
13007 		 * the wanted p_offset. It has to be mapped in small
13008 		 * within the corresponding kpm vac range in order to
13009 		 * prevent VAC alias conflicts.
13010 		 */
13011 
13012 		/* tte assembly */
13013 		if (uncached == 0) {
13014 			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
13015 		} else {
13016 			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
13017 		}
13018 
13019 		/* tsb dropin */
13020 		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13021 
13022 		kp->kp_refcnta++;
13023 		if (kp->kp_refcntc == -1) {
13024 			ASSERT(kp->kp_refcnt > 0);
13025 
13026 			/* remove go indication */
13027 			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
13028 					KPMTSBM_STOP);
13029 		}
13030 		ASSERT(kp->kp_refcntc >= 0);
13031 	}
13032 exit:
13033 	mutex_exit(&kpmp->khl_mutex);
13034 	return (vaddr);
13035 
13036 smallpages_mapin:
13037 	if (uncached == 0) {
13038 		/* tte assembly */
13039 		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
13040 	} else {
13041 		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
13042 		pmtx = sfmmu_page_enter(pp);
13043 		PP_SETKPMC(pp);
13044 		sfmmu_page_exit(pmtx);
13045 		/* tte assembly */
13046 		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
13047 	}
13048 
13049 	/* tsb dropin */
13050 	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13051 
13052 	PP2KPMSPG(pp, ksp);
13053 	kpmsp = KPMP_SHASH(ksp);
13054 
13055 	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, &kpmsp->kshl_lock,
13056 				(uncached) ? KPM_MAPPEDSC : KPM_MAPPEDS);
13057 
13058 	if (oldval != 0)
13059 		panic("sfmmu_kpm_mapin: stale smallpages mapping");
13060 
13061 	return (vaddr);
13062 }
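
/*
 * Decision summary for sfmmu_kpm_mapin above (paraphrase, not original
 * source text):
 *
 *	kpm_smallpages			-> 8K tte, kp_mapped state dropin
 *	conflicts/smalls in kpm_page	-> 8K tte, kp_refcnts++
 *	regular, no vac range		-> 4M tte, kp_refcnt++, set "go" flag
 *	vac alias range			-> 8K tte, kp_refcnta++, stop "go"
 *					   flag if set
 */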
13063 
13064 /*
13065  * Mapout a single page; it is called every time a page changes its state
13066  * from kpm-mapped to kpm-unmapped. It may not be called when only one kpm
13067  * instance calls mapout and there are still other instances mapping the
13068  * page. Assumes that the mlist mutex is already grabbed.
13069  *
13070  * Note: In normal mode (no VAC conflict prevention pending) TLB's are
13071  * not flushed. This is the core segkpm behavior to avoid xcalls. It is
13072  * no problem because a translation from a segkpm virtual address to a
13073  * physical address is always the same. The only downside is a slightly
13074  * increased window of vulnerability for misbehaving _kernel_ modules.
13075  */
13076 static void
13077 sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
13078 {
13079 	kpm_page_t	*kp;
13080 	kpm_hlk_t	*kpmp;
13081 	int		alias_range;
13082 	kmutex_t	*pmtx;
13083 	kpm_spage_t	*ksp;
13084 	kpm_shlk_t	*kpmsp;
13085 	int		oldval;
13086 
13087 	ASSERT(sfmmu_mlist_held(pp));
13088 	ASSERT(pp->p_kpmref == 0);
13089 
13090 	alias_range = IS_KPM_ALIAS_RANGE(vaddr);
13091 
13092 	if (kpm_smallpages)
13093 		goto smallpages_mapout;
13094 
13095 	PP2KPMPG(pp, kp);
13096 	kpmp = KPMP_HASH(kp);
13097 	mutex_enter(&kpmp->khl_mutex);
13098 
13099 	if (alias_range) {
13100 		ASSERT(PP_ISKPMS(pp) == 0);
13101 		if (kp->kp_refcnta <= 0) {
13102 			panic("sfmmu_kpm_mapout: bad refcnta kp=%p",
13103 				(void *)kp);
13104 		}
13105 
13106 		if (PP_ISTNC(pp))  {
13107 			if (PP_ISKPMC(pp) == 0) {
13108 				/*
13109 				 * Uncached kpm mappings must always have
13110 				 * forced "small page" mode.
13111 				 */
13112 				panic("sfmmu_kpm_mapout: uncached page not "
13113 					"kpm marked");
13114 			}
13115 			sfmmu_kpm_demap_small(vaddr);
13116 
13117 			pmtx = sfmmu_page_enter(pp);
13118 			PP_CLRKPMC(pp);
13119 			sfmmu_page_exit(pmtx);
13120 
13121 			/*
13122 			 * Check if we can resume cached mode. This might
13123 			 * be the case if the kpm mapping was the only
13124 			 * mapping in conflict with other non rule
13125 			 * compliant mappings. The page is no longer marked
13126 			 * as kpm mapped, so the conv_tnc path will not
13127 			 * change kpm state.
13128 			 */
13129 			conv_tnc(pp, TTE8K);
13130 
13131 		} else if (PP_ISKPMC(pp) == 0) {
13132 			/* remove TSB entry only */
13133 			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
13134 
13135 		} else {
13136 			/* already demapped */
13137 			pmtx = sfmmu_page_enter(pp);
13138 			PP_CLRKPMC(pp);
13139 			sfmmu_page_exit(pmtx);
13140 		}
13141 		kp->kp_refcnta--;
13142 		goto exit;
13143 	}
13144 
13145 	if (kp->kp_refcntc <= 0 && kp->kp_refcnts == 0) {
13146 		/*
13147 		 * Fast path / regular case.
13148 		 */
13149 		ASSERT(kp->kp_refcntc >= -1);
13150 		ASSERT(!(pp->p_nrm & (P_KPMC | P_KPMS | P_TNC | P_PNC)));
13151 
13152 		if (kp->kp_refcnt <= 0)
13153 			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);
13154 
13155 		if (--kp->kp_refcnt == 0) {
13156 			/* remove go indication */
13157 			if (kp->kp_refcntc == -1) {
13158 				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
13159 					&kpmp->khl_lock, KPMTSBM_STOP);
13160 			}
13161 			ASSERT(kp->kp_refcntc == 0);
13162 
13163 			/* remove TSB entry */
13164 			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
13165 #ifdef	DEBUG
13166 			if (kpm_tlb_flush)
13167 				sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
13168 #endif
13169 		}
13170 
13171 	} else {
13172 		/*
13173 		 * The VAC alias path.
13174 		 * We come here if the kpm vaddr is not in any alias_range
13175 		 * and we are unmapping a page within the regular kpm_page
13176 		 * range. The kpm_page either holds conflict pages and/or
13177 		 * is in "small page" mode. If the page is not marked
13178 		 * P_KPMS it couldn't have a valid PAGESIZE sized TSB
13179 		 * entry. Dcache flushing is done lazily and follows the
13180 		 * rules of the regular virtual page coloring scheme.
13181 		 *
13182 		 * Per page states and required actions:
13183 		 *   P_KPMC: remove a kpm mapping that is conflicting.
13184 		 *   P_KPMS: remove a small kpm mapping within a kpm_page.
13185 		 *   P_TNC:  check if we can re-cache the page.
13186 		 *   P_PNC:  we cannot re-cache, sorry.
13187 		 * Per kpm_page:
13188 		 *   kp_refcntc > 0: page is part of a kpm_page with conflicts.
13189 		 *   kp_refcnts > 0: rm a small mapped page within a kpm_page.
13190 		 */
13191 
13192 		if (PP_ISKPMS(pp)) {
13193 			if (kp->kp_refcnts < 1) {
13194 				panic("sfmmu_kpm_mapout: bad refcnts kp=%p",
13195 					(void *)kp);
13196 			}
13197 			sfmmu_kpm_demap_small(vaddr);
13198 
13199 			/*
13200 			 * Check if we can resume cached mode. This might
13201 			 * be the case if the kpm mapping was the only
13202 			 * mapping in conflict with other non rule
13203 			 * compliant mappings. The page is no longer marked
13204 			 * as kpm mapped, so the conv_tnc path will not
13205 			 * change kpm state.
13206 			 */
13207 			if (PP_ISTNC(pp))  {
13208 				if (!PP_ISKPMC(pp)) {
13209 					/*
13210 					 * Uncached kpm mappings must always
13211 					 * have forced "small page" mode.
13212 					 */
13213 					panic("sfmmu_kpm_mapout: uncached "
13214 						"page not kpm marked");
13215 				}
13216 				conv_tnc(pp, TTE8K);
13217 			}
13218 			kp->kp_refcnts--;
13219 			kp->kp_refcnt++;
13220 			pmtx = sfmmu_page_enter(pp);
13221 			PP_CLRKPMS(pp);
13222 			sfmmu_page_exit(pmtx);
13223 		}
13224 
13225 		if (PP_ISKPMC(pp)) {
13226 			if (kp->kp_refcntc < 1) {
13227 				panic("sfmmu_kpm_mapout: bad refcntc kp=%p",
13228 					(void *)kp);
13229 			}
13230 			pmtx = sfmmu_page_enter(pp);
13231 			PP_CLRKPMC(pp);
13232 			sfmmu_page_exit(pmtx);
13233 			kp->kp_refcntc--;
13234 		}
13235 
13236 		if (kp->kp_refcnt-- < 1)
13237 			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);
13238 	}
13239 exit:
13240 	mutex_exit(&kpmp->khl_mutex);
13241 	return;
13242 
13243 smallpages_mapout:
13244 	PP2KPMSPG(pp, ksp);
13245 	kpmsp = KPMP_SHASH(ksp);
13246 
13247 	if (PP_ISKPMC(pp) == 0) {
13248 		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
13249 					&kpmsp->kshl_lock, 0);
13250 
13251 		if (oldval != KPM_MAPPEDS) {
13252 			/*
13253 			 * When we're called after sfmmu_kpm_hme_unload,
13254 			 * KPM_MAPPEDSC is valid too.
13255 			 */
13256 			if (oldval != KPM_MAPPEDSC)
13257 				panic("sfmmu_kpm_mapout: incorrect mapping");
13258 		}
13259 
13260 		/* remove TSB entry */
13261 		sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
13262 #ifdef	DEBUG
13263 		if (kpm_tlb_flush)
13264 			sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
13265 #endif
13266 
13267 	} else if (PP_ISTNC(pp)) {
13268 		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
13269 					&kpmsp->kshl_lock, 0);
13270 
13271 		if (oldval != KPM_MAPPEDSC || PP_ISKPMC(pp) == 0)
13272 			panic("sfmmu_kpm_mapout: inconsistent TNC mapping");
13273 
13274 		sfmmu_kpm_demap_small(vaddr);
13275 
13276 		pmtx = sfmmu_page_enter(pp);
13277 		PP_CLRKPMC(pp);
13278 		sfmmu_page_exit(pmtx);
13279 
13280 		/*
13281 		 * Check if we can resume cached mode. This might be
13282 		 * the case if the kpm mapping was the only mapping
13283 		 * in conflict with other non rule compliant mappings.
13284 		 * The page is no longer marked as kpm mapped, so the
13285 		 * conv_tnc path will not change the kpm state.
13286 		 */
13287 		conv_tnc(pp, TTE8K);
13288 
13289 	} else {
13290 		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
13291 					&kpmsp->kshl_lock, 0);
13292 
13293 		if (oldval != KPM_MAPPEDSC)
13294 			panic("sfmmu_kpm_mapout: inconsistent mapping");
13295 
13296 		pmtx = sfmmu_page_enter(pp);
13297 		PP_CLRKPMC(pp);
13298 		sfmmu_page_exit(pmtx);
13299 	}
13300 }
13301 
13302 #define	abs(x)  ((x) < 0 ? -(x) : (x))
13303 
13304 /*
13305  * Determine appropriate kpm mapping address and handle any kpm/hme
13306  * conflicts. Page mapping list and its vcolor parts must be protected.
13307  */
13308 static caddr_t
13309 sfmmu_kpm_getvaddr(page_t *pp, int *kpm_vac_rangep)
13310 {
13311 	int		vcolor, vcolor_pa;
13312 	caddr_t		vaddr;
13313 	uintptr_t	paddr;
13314 
13315 
13316 	ASSERT(sfmmu_mlist_held(pp));
13317 
13318 	paddr = ptob(pp->p_pagenum);
13319 	vcolor_pa = addr_to_vcolor(paddr);
13320 
13321 	if (IS_SWAPFSVP(pp->p_vnode)) {
13322 		vcolor = (PP_NEWPAGE(pp) || PP_ISNC(pp)) ?
13323 		    vcolor_pa : PP_GET_VCOLOR(pp);
13324 	} else {
13325 		vcolor = addr_to_vcolor(pp->p_offset);
13326 	}
13327 
13328 	vaddr = kpm_vbase + paddr;
13329 	*kpm_vac_rangep = 0;
13330 
13331 	if (vcolor_pa != vcolor) {
13332 		*kpm_vac_rangep = abs(vcolor - vcolor_pa);
13333 		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
13334 		vaddr += (vcolor_pa > vcolor) ?
13335 			((uintptr_t)vcolor_pa << kpm_size_shift) :
13336 			((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
13337 
13338 		ASSERT(!PP_ISMAPPED_LARGE(pp));
13339 	}
13340 
13341 	if (PP_ISNC(pp))
13342 		return (vaddr);
13343 
13344 	if (PP_NEWPAGE(pp)) {
13345 		PP_SET_VCOLOR(pp, vcolor);
13346 		return (vaddr);
13347 	}
13348 
13349 	if (PP_GET_VCOLOR(pp) == vcolor)
13350 		return (vaddr);
13351 
13352 	ASSERT(!PP_ISMAPPED_KPM(pp));
13353 	sfmmu_kpm_vac_conflict(pp, vaddr);
13354 
13355 	return (vaddr);
13356 }
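
/*
 * Worked example for the alias displacement above (illustrative;
 * assumes two vac colors): for vcolor_pa == 1 and a wanted
 * vcolor == 0, vaddr is moved back one page (giving it the wanted
 * color) and forward by 1 << kpm_size_shift into an alias window
 * above the regular kpm range, so conflicting colorings never share
 * a window; *kpm_vac_rangep is set to abs(0 - 1) == 1.
 */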
13357 
13358 /*
13359  * VAC conflict state bit values.
13360  * The following defines are used to make the handling of the
13361  * various input states more concise. For that the kpm states
13362  * per kpm_page and per page are combined in a summary state.
13363  * Each single state has a corresponding bit value in the
13364  * summary state. These defines only apply for kpm large page
13365  * mappings. Within comments the abbreviations "kc, c, ks, s"
13366  * are used as short form of the actual state, e.g. "kc" for
13367  * "kp_refcntc > 0", etc.
13368  */
13369 #define	KPM_KC	0x00000008	/* kpm_page: kp_refcntc > 0 */
13370 #define	KPM_C	0x00000004	/* page: P_KPMC set */
13371 #define	KPM_KS	0x00000002	/* kpm_page: kp_refcnts > 0 */
13372 #define	KPM_S	0x00000001	/* page: P_KPMS set */
13373 
13374 /*
13375  * Summary states used in sfmmu_kpm_fault (KPM_TSBM_*).
13376  * See also the more detailed comments within the sfmmu_kpm_fault switch.
13377  * Abbreviations used:
13378  * CONFL: VAC conflict(s) within a kpm_page.
13379  * MAPS:  Mapped small: Page mapped in using a regular page size kpm mapping.
13380  * RASM:  Re-assembling of a large page mapping possible.
13381  * RPLS:  Replace: TSB miss due to TSB replacement only.
13382  * BRKO:  Breakup Other: A large kpm mapping has to be broken because another
13383  *        page within the kpm_page is already involved in a VAC conflict.
13384  * BRKT:  Breakup This: A large kpm mapping has to be broken because this
13385  *        page is involved in a VAC conflict.
13386  */
13387 #define	KPM_TSBM_CONFL_GONE	(0)
13388 #define	KPM_TSBM_MAPS_RASM	(KPM_KS)
13389 #define	KPM_TSBM_RPLS_RASM	(KPM_KS | KPM_S)
13390 #define	KPM_TSBM_MAPS_BRKO	(KPM_KC)
13391 #define	KPM_TSBM_MAPS		(KPM_KC | KPM_KS)
13392 #define	KPM_TSBM_RPLS		(KPM_KC | KPM_KS | KPM_S)
13393 #define	KPM_TSBM_MAPS_BRKT	(KPM_KC | KPM_C)
13394 #define	KPM_TSBM_MAPS_CONFL	(KPM_KC | KPM_C | KPM_KS)
13395 #define	KPM_TSBM_RPLS_CONFL	(KPM_KC | KPM_C | KPM_KS | KPM_S)
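
/*
 * Illustrative decoding (not original source text): e.g.
 * KPM_TSBM_MAPS_CONFL == (KPM_KC | KPM_C | KPM_KS) stands for
 * "kp_refcntc > 0, P_KPMC set, kp_refcnts > 0, P_KPMS clear";
 * sfmmu_kpm_fault() below assembles exactly this bit vector before
 * switching on it.
 */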
13396 
13397 /*
13398  * kpm fault handler for mappings with large page size.
13399  */
13400 int
13401 sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
13402 {
13403 	int		error;
13404 	pgcnt_t		inx;
13405 	kpm_page_t	*kp;
13406 	tte_t		tte;
13407 	pfn_t		pfn = pp->p_pagenum;
13408 	kpm_hlk_t	*kpmp;
13409 	kmutex_t	*pml;
13410 	int		alias_range;
13411 	int		uncached = 0;
13412 	kmutex_t	*pmtx;
13413 	int		badstate;
13414 	uint_t		tsbmcase;
13415 
13416 	alias_range = IS_KPM_ALIAS_RANGE(vaddr);
13417 
13418 	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);
13419 	if (inx >= mseg->kpm_nkpmpgs) {
13420 		cmn_err(CE_PANIC, "sfmmu_kpm_fault: kpm overflow in memseg "
13421 			"0x%p  pp 0x%p", (void *)mseg, (void *)pp);
13422 	}
13423 
13424 	kp = &mseg->kpm_pages[inx];
13425 	kpmp = KPMP_HASH(kp);
13426 
13427 	pml = sfmmu_mlist_enter(pp);
13428 
13429 	if (!PP_ISMAPPED_KPM(pp)) {
13430 		sfmmu_mlist_exit(pml);
13431 		return (EFAULT);
13432 	}
13433 
13434 	mutex_enter(&kpmp->khl_mutex);
13435 
13436 	if (alias_range) {
13437 		ASSERT(!PP_ISMAPPED_LARGE(pp));
13438 		if (kp->kp_refcnta > 0) {
13439 			if (PP_ISKPMC(pp)) {
13440 				pmtx = sfmmu_page_enter(pp);
13441 				PP_CLRKPMC(pp);
13442 				sfmmu_page_exit(pmtx);
13443 			}
13444 			/*
13445 			 * Check for vcolor conflicts. Return here
13446 			 * w/ either no conflict (fast path), removed hme
13447 			 * mapping chains (unload conflict) or uncached
13448 			 * (uncache conflict). VACaches are cleaned and
13449 			 * p_vcolor and PP_TNC are set accordingly for the
13450 			 * conflict cases.  Drop kpmp for uncache conflict
13451 			 * cases since it will be grabbed within
13452 			 * sfmmu_kpm_page_cache in case of an uncache
13453 			 * conflict.
13454 			 */
13455 			mutex_exit(&kpmp->khl_mutex);
13456 			sfmmu_kpm_vac_conflict(pp, vaddr);
13457 			mutex_enter(&kpmp->khl_mutex);
13458 
13459 			if (PP_ISNC(pp)) {
13460 				uncached = 1;
13461 				pmtx = sfmmu_page_enter(pp);
13462 				PP_SETKPMC(pp);
13463 				sfmmu_page_exit(pmtx);
13464 			}
13465 			goto smallexit;
13466 
13467 		} else {
13468 			/*
13469 			 * We got a tsbmiss on an inactive kpm_page range.
13470 			 * Let segkpm_fault decide how to panic.
13471 			 */
13472 			error = EFAULT;
13473 		}
13474 		goto exit;
13475 	}
13476 
13477 	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
13478 	if (kp->kp_refcntc == -1) {
13479 		/*
13480 		 * We should come here only if trap level tsb miss
13481 		 * handler is disabled.
13482 		 */
13483 		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
13484 			PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
13485 
13486 		if (badstate == 0)
13487 			goto largeexit;
13488 	}
13489 
13490 	if (badstate || kp->kp_refcntc < 0)
13491 		goto badstate_exit;
13492 
13493 	/*
13494 	 * Combine the per kpm_page and per page kpm VAC states to
13495 	 * a summary state in order to make the kpm fault handling
13496 	 * more concise.
13497 	 */
13498 	tsbmcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
13499 			((kp->kp_refcnts > 0) ? KPM_KS : 0) |
13500 			(PP_ISKPMC(pp) ? KPM_C : 0) |
13501 			(PP_ISKPMS(pp) ? KPM_S : 0));
13502 
13503 	switch (tsbmcase) {
13504 	case KPM_TSBM_CONFL_GONE:		/* - - - - */
13505 		/*
13506 		 * That's fine, we either have no more vac conflict in
13507 		 * this kpm page or someone raced in and has solved the
13508 		 * vac conflict for us -- call sfmmu_kpm_vac_conflict
13509 		 * to take care for correcting the vcolor and flushing
13510 		 * the dcache if required.
13511 		 */
13512 		mutex_exit(&kpmp->khl_mutex);
13513 		sfmmu_kpm_vac_conflict(pp, vaddr);
13514 		mutex_enter(&kpmp->khl_mutex);
13515 
13516 		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
13517 		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
13518 			panic("sfmmu_kpm_fault: inconsistent CONFL_GONE "
13519 				"state, pp=%p", (void *)pp);
13520 		}
13521 		goto largeexit;
13522 
13523 	case KPM_TSBM_MAPS_RASM:		/* - - ks - */
13524 		/*
13525 		 * All conflicts in this kpm page are gone but there are
13526 		 * already small mappings around, so we also map this
13527 		 * page small. This could be the trigger case for a
13528 		 * small mapping reaper, if this is really needed.
13529 		 * For now fall thru to the KPM_TSBM_MAPS handling.
13530 		 */
13531 
13532 	case KPM_TSBM_MAPS:			/* kc - ks - */
13533 		/*
13534 		 * Large page mapping is already broken, this page is not
13535 		 * conflicting, so map it small. Call sfmmu_kpm_vac_conflict
13536 		 * to take care for correcting the vcolor and flushing
13537 		 * the dcache if required.
13538 		 */
13539 		mutex_exit(&kpmp->khl_mutex);
13540 		sfmmu_kpm_vac_conflict(pp, vaddr);
13541 		mutex_enter(&kpmp->khl_mutex);
13542 
13543 		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
13544 		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
13545 			panic("sfmmu_kpm_fault:  inconsistent MAPS state, "
13546 				"pp=%p", (void *)pp);
13547 		}
13548 		kp->kp_refcnt--;
13549 		kp->kp_refcnts++;
13550 		pmtx = sfmmu_page_enter(pp);
13551 		PP_SETKPMS(pp);
13552 		sfmmu_page_exit(pmtx);
13553 		goto smallexit;
13554 
13555 	case KPM_TSBM_RPLS_RASM:		/* - - ks s */
13556 		/*
13557 		 * All conflicts in this kpm page are gone but this page
13558 		 * is mapped small. This could be the trigger case for a
13559 		 * small mapping reaper, if this is really needed.
13560 		 * For now we drop it in small again. Fall thru to the
13561 		 * KPM_TSBM_RPLS handling.
13562 		 */
13563 
13564 	case KPM_TSBM_RPLS:			/* kc - ks s */
13565 		/*
13566 		 * Large page mapping is already broken, this page is not
13567 		 * conflicting but already mapped small, so drop it in
13568 		 * small again.
13569 		 */
13570 		if (PP_ISNC(pp) ||
13571 		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
13572 			panic("sfmmu_kpm_fault:  inconsistent RPLS state, "
13573 				"pp=%p", (void *)pp);
13574 		}
13575 		goto smallexit;
13576 
13577 	case KPM_TSBM_MAPS_BRKO:		/* kc - - - */
13578 		/*
13579 		 * The kpm page where we live in is marked conflicting
13580 		 * but this page is not conflicting. So we have to map it
13581 		 * in small. Call sfmmu_kpm_vac_conflict to take care for
13582 		 * correcting the vcolor and flushing the dcache if required.
13583 		 */
13584 		mutex_exit(&kpmp->khl_mutex);
13585 		sfmmu_kpm_vac_conflict(pp, vaddr);
13586 		mutex_enter(&kpmp->khl_mutex);
13587 
13588 		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
13589 		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
13590 			panic("sfmmu_kpm_fault:  inconsistent MAPS_BRKO state, "
13591 				"pp=%p", (void *)pp);
13592 		}
13593 		kp->kp_refcnt--;
13594 		kp->kp_refcnts++;
13595 		pmtx = sfmmu_page_enter(pp);
13596 		PP_SETKPMS(pp);
13597 		sfmmu_page_exit(pmtx);
13598 		goto smallexit;
13599 
13600 	case KPM_TSBM_MAPS_BRKT:		/* kc c - - */
13601 	case KPM_TSBM_MAPS_CONFL:		/* kc c ks - */
13602 		if (!PP_ISMAPPED(pp)) {
13603 			/*
13604 			 * We got a tsbmiss on a kpm large page range that is
13605 			 * marked to contain vac conflicting pages introduced
13606 			 * by hme mappings. The hme mappings are all gone and
13607 			 * must have bypassed the kpm alias prevention logic.
13608 			 */
13609 			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
13610 				(void *)pp);
13611 		}
13612 
13613 		/*
13614 		 * Check for vcolor conflicts. Return here w/ either no
13615 		 * conflict (fast path), removed hme mapping chains
13616 		 * (unload conflict) or uncached (uncache conflict).
13617 		 * Dcache is cleaned and p_vcolor and P_TNC are set
13618 		 * accordingly. Drop kpmp for uncache conflict cases
13619 		 * since it will be grabbed within sfmmu_kpm_page_cache
13620 		 * in case of an uncache conflict.
13621 		 */
13622 		mutex_exit(&kpmp->khl_mutex);
13623 		sfmmu_kpm_vac_conflict(pp, vaddr);
13624 		mutex_enter(&kpmp->khl_mutex);
13625 
13626 		if (kp->kp_refcnt <= 0)
13627 			panic("sfmmu_kpm_fault: bad refcnt kp=%p", (void *)kp);
13628 
13629 		if (PP_ISNC(pp)) {
13630 			uncached = 1;
13631 		} else {
13632 			/*
13633 			 * When an unload conflict is solved and there are
13634 			 * no other small mappings around, we can resume
13635 			 * largepage mode. Otherwise we have to map or drop
13636 			 * in small. This could be a trigger for a small
13637 			 * mapping reaper when this was the last conflict
13638 			 * within the kpm page and when there are only
13639 			 * other small mappings around.
13640 			 */
13641 			ASSERT(addr_to_vcolor(vaddr) == PP_GET_VCOLOR(pp));
13642 			ASSERT(kp->kp_refcntc > 0);
13643 			kp->kp_refcntc--;
13644 			pmtx = sfmmu_page_enter(pp);
13645 			PP_CLRKPMC(pp);
13646 			sfmmu_page_exit(pmtx);
13647 			ASSERT(PP_ISKPMS(pp) == 0);
13648 			if (kp->kp_refcntc == 0 && kp->kp_refcnts == 0)
13649 				goto largeexit;
13650 		}
13651 
13652 		kp->kp_refcnt--;
13653 		kp->kp_refcnts++;
13654 		pmtx = sfmmu_page_enter(pp);
13655 		PP_SETKPMS(pp);
13656 		sfmmu_page_exit(pmtx);
13657 		goto smallexit;
13658 
13659 	case KPM_TSBM_RPLS_CONFL:		/* kc c ks s */
13660 		if (!PP_ISMAPPED(pp)) {
13661 			/*
13662 			 * We got a tsbmiss on a kpm large page range that is
13663 			 * marked to contain vac conflicting pages introduced
13664 			 * by hme mappings. They are all gone and must have
13665 			 * somehow bypassed the kpm alias prevention logic.
13666 			 */
13667 			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
13668 				(void *)pp);
13669 		}
13670 
13671 		/*
13672 		 * This state is only possible for an uncached mapping.
13673 		 */
13674 		if (!PP_ISNC(pp)) {
13675 			panic("sfmmu_kpm_fault: page not uncached, pp=%p",
13676 				(void *)pp);
13677 		}
13678 		uncached = 1;
13679 		goto smallexit;
13680 
13681 	default:
13682 badstate_exit:
13683 		panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p "
13684 			"pp=%p", (void *)vaddr, (void *)kp, (void *)pp);
13685 	}
13686 
13687 smallexit:
13688 	/* tte assembly */
13689 	if (uncached == 0)
13690 		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
13691 	else
13692 		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
13693 
13694 	/* tsb dropin */
13695 	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13696 
13697 	error = 0;
13698 	goto exit;
13699 
13700 largeexit:
13701 	if (kp->kp_refcnt > 0) {
13702 
13703 		/* tte assembly */
13704 		KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);
13705 
13706 		/* tsb dropin */
13707 		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);
13708 
13709 		if (kp->kp_refcntc == 0) {
13710 			/* Set "go" flag for TL tsbmiss handler */
13711 			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
13712 					KPMTSBM_START);
13713 		}
13714 		ASSERT(kp->kp_refcntc == -1);
13715 		error = 0;
13716 
13717 	} else
13718 		error = EFAULT;
13719 exit:
13720 	mutex_exit(&kpmp->khl_mutex);
13721 	sfmmu_mlist_exit(pml);
13722 	return (error);
13723 }
13724 
13725 /*
13726  * kpm fault handler for mappings with small page size.
13727  */
13728 int
13729 sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp)
13730 {
13731 	int		error = 0;
13732 	pgcnt_t		inx;
13733 	kpm_spage_t	*ksp;
13734 	kpm_shlk_t	*kpmsp;
13735 	kmutex_t	*pml;
13736 	pfn_t		pfn = pp->p_pagenum;
13737 	tte_t		tte;
13738 	kmutex_t	*pmtx;
13739 	int		oldval;
13740 
13741 	inx = pfn - mseg->kpm_pbase;
13742 	ksp = &mseg->kpm_spages[inx];
13743 	kpmsp = KPMP_SHASH(ksp);
13744 
13745 	pml = sfmmu_mlist_enter(pp);
13746 
13747 	if (!PP_ISMAPPED_KPM(pp)) {
13748 		sfmmu_mlist_exit(pml);
13749 		return (EFAULT);
13750 	}
13751 
13752 	/*
13753 	 * kp_mapped lookup protected by mlist mutex
13754 	 */
13755 	if (ksp->kp_mapped == KPM_MAPPEDS) {
13756 		/*
13757 		 * Fast path tsbmiss
13758 		 */
13759 		ASSERT(!PP_ISKPMC(pp));
13760 		ASSERT(!PP_ISNC(pp));
13761 
13762 		/* tte assembly */
13763 		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
13764 
13765 		/* tsb dropin */
13766 		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13767 
13768 	} else if (ksp->kp_mapped == KPM_MAPPEDSC) {
13769 		/*
13770 		 * Got here due to existing or gone kpm/hme VAC conflict.
13771 		 * Recheck for vcolor conflicts. Return here w/ either
13772 		 * no conflict, removed hme mapping chain (unload
13773 		 * conflict) or uncached (uncache conflict). VACaches
13774 		 * are cleaned and p_vcolor and PP_TNC are set accordingly
13775 		 * for the conflict cases.
13776 		 */
13777 		sfmmu_kpm_vac_conflict(pp, vaddr);
13778 
13779 		if (PP_ISNC(pp)) {
13780 			/* ASSERT(pp->p_share); XXX use hat_page_getshare */
13781 
13782 			/* tte assembly */
13783 			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
13784 
13785 			/* tsb dropin */
13786 			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13787 
13788 		} else {
13789 			if (PP_ISKPMC(pp)) {
13790 				pmtx = sfmmu_page_enter(pp);
13791 				PP_CLRKPMC(pp);
13792 				sfmmu_page_exit(pmtx);
13793 			}
13794 
13795 			/* tte assembly */
13796 			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
13797 
13798 			/* tsb dropin */
13799 			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13800 
13801 			oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
13802 					&kpmsp->kshl_lock, KPM_MAPPEDS);
13803 
13804 			if (oldval != KPM_MAPPEDSC)
13805 				panic("sfmmu_kpm_fault_small: "
13806 					"stale smallpages mapping");
13807 		}
13808 
13809 	} else {
13810 		/*
13811 		 * We got a tsbmiss on an inactive kpm_page range.
13812 		 * Let segkpm_fault decide how to panic.
13813 		 */
13814 		error = EFAULT;
13815 	}
13816 
13817 	sfmmu_mlist_exit(pml);
13818 	return (error);
13819 }
13820 
13821 /*
13822  * Check/handle potential hme/kpm mapping conflicts
13823  */
13824 static void
13825 sfmmu_kpm_vac_conflict(page_t *pp, caddr_t vaddr)
13826 {
13827 	int		vcolor;
13828 	struct sf_hment	*sfhmep;
13829 	struct hat	*tmphat;
13830 	struct sf_hment	*tmphme = NULL;
13831 	struct hme_blk	*hmeblkp;
13832 	tte_t		tte;
13833 
13834 	ASSERT(sfmmu_mlist_held(pp));
13835 
13836 	if (PP_ISNC(pp))
13837 		return;
13838 
13839 	vcolor = addr_to_vcolor(vaddr);
13840 	if (PP_GET_VCOLOR(pp) == vcolor)
13841 		return;
13842 
13843 	/*
13844 	 * There can be no vcolor conflict between a large cached
13845 	 * hme page and a non alias range kpm page (neither large nor
13846 	 * small mapped). So if a hme conflict already exists between
13847 	 * a constituent page of a large hme mapping and a shared small
13848 	 * conflicting hme mapping, both mappings must already be
13849 	 * uncached at this point.
13850 	 */
13851 	ASSERT(!PP_ISMAPPED_LARGE(pp));
13852 
13853 	if (!PP_ISMAPPED(pp)) {
13854 		/*
13855 		 * Previous hme user of page had a different color
13856 		 * but since there are no current users
13857 		 * we just flush the cache and change the color.
13858 		 */
13859 		SFMMU_STAT(sf_pgcolor_conflict);
13860 		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
13861 		PP_SET_VCOLOR(pp, vcolor);
13862 		return;
13863 	}
13864 
13865 	/*
13866 	 * If we get here we have a vac conflict with a current hme
13867 	 * mapping. This must have been established by forcing a wrong
13868 	 * colored mapping, e.g. by using mmap(2) with MAP_FIXED.
13869 	 */
13870 
13871 	/*
13872 	 * Check if any mapping is in same as or if it is locked
13873 	 * since in that case we need to uncache.
13874 	 */
13875 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
13876 		tmphme = sfhmep->hme_next;
13877 		hmeblkp = sfmmu_hmetohblk(sfhmep);
13878 		if (hmeblkp->hblk_xhat_bit)
13879 			continue;
13880 		tmphat = hblktosfmmu(hmeblkp);
13881 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
13882 		ASSERT(TTE_IS_VALID(&tte));
13883 		if ((tmphat == ksfmmup) || hmeblkp->hblk_lckcnt) {
13884 			/*
13885 			 * We have an uncache conflict
13886 			 */
13887 			SFMMU_STAT(sf_uncache_conflict);
13888 			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
13889 			return;
13890 		}
13891 	}
13892 
13893 	/*
13894 	 * We have an unload conflict
13895 	 */
13896 	SFMMU_STAT(sf_unload_conflict);
13897 
13898 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
13899 		tmphme = sfhmep->hme_next;
13900 		hmeblkp = sfmmu_hmetohblk(sfhmep);
13901 		if (hmeblkp->hblk_xhat_bit)
13902 			continue;
13903 		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
13904 	}
13905 
13906 	/*
13907 	 * Unloads only does tlb flushes so we need to flush the
13908 	 * dcache vcolor here.
13909 	 */
13910 	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
13911 	PP_SET_VCOLOR(pp, vcolor);
13912 }
13913 
13914 /*
13915  * Remove all kpm mappings using kpme's for pp and check that
13916  * all kpm mappings (w/ and w/o kpme's) are gone.
13917  */
13918 static void
13919 sfmmu_kpm_pageunload(page_t *pp)
13920 {
13921 	caddr_t		vaddr;
13922 	struct kpme	*kpme, *nkpme;
13923 
13924 	ASSERT(pp != NULL);
13925 	ASSERT(pp->p_kpmref);
13926 	ASSERT(sfmmu_mlist_held(pp));
13927 
13928 	vaddr = hat_kpm_page2va(pp, 1);
13929 
13930 	for (kpme = pp->p_kpmelist; kpme; kpme = nkpme) {
13931 		ASSERT(kpme->kpe_page == pp);
13932 
13933 		if (pp->p_kpmref == 0)
13934 			panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p "
13935 				"kpme=%p", (void *)pp, (void *)kpme);
13936 
13937 		nkpme = kpme->kpe_next;
13938 
13939 		/* Add instance callback here if needed later */
13940 		sfmmu_kpme_sub(kpme, pp);
13941 	}
13942 
13943 	/*
13944 	 * Also correct after mixed kpme/nonkpme mappings. If nonkpme
13945 	 * segkpm clients have unlocked the page and forgotten to mapout,
13946 	 * we panic here.
13947 	 */
13948 	if (pp->p_kpmref != 0)
13949 		panic("sfmmu_kpm_pageunload: bad refcnt pp=%p", (void *)pp);
13950 
13951 	sfmmu_kpm_mapout(pp, vaddr);
13952 }
13953 
13954 /*
13955  * Remove a large kpm mapping from kernel TSB and all TLB's.
13956  */
13957 static void
13958 sfmmu_kpm_demap_large(caddr_t vaddr)
13959 {
13960 	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
13961 	sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
13962 }
13963 
13964 /*
13965  * Remove a small kpm mapping from kernel TSB and all TLB's.
13966  */
13967 static void
13968 sfmmu_kpm_demap_small(caddr_t vaddr)
13969 {
13970 	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
13971 	sfmmu_kpm_demap_tlbs(vaddr, KCONTEXT);
13972 }
13973 
13974 /*
13975  * Demap a kpm mapping in all TLB's.
13976  */
13977 static void
13978 sfmmu_kpm_demap_tlbs(caddr_t vaddr, int ctxnum)
13979 {
13980 	cpuset_t cpuset;
13981 
13982 	kpreempt_disable();
13983 	cpuset = ksfmmup->sfmmu_cpusran;
13984 	CPUSET_AND(cpuset, cpu_ready_set);
13985 	CPUSET_DEL(cpuset, CPU->cpu_id);
13986 	SFMMU_XCALL_STATS(ctxnum);
13987 	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr, ctxnum);
13988 	vtag_flushpage(vaddr, ctxnum);
13989 	kpreempt_enable();
13990 }
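
/*
 * Usage note (illustrative): the routine above is the usual sfmmu
 * demap broadcast -- cross-call vtag_flushpage_tl1 to every other
 * ready CPU that may have run with ksfmmup, then flush the local TLB
 * directly, all under kpreempt_disable() so CPU->cpu_id stays stable.
 */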
13991 
13992 /*
13993  * Summary states used in sfmmu_kpm_vac_unload (KPM_VUL__*).
13994  * See also the more detailed comments within the sfmmu_kpm_vac_unload switch.
13995  * Abbreviations used:
13996  * BIG:   Large page kpm mapping in use.
13997  * CONFL: VAC conflict(s) within a kpm_page.
13998  * INCR:  Count of conflicts within a kpm_page is going to be incremented.
13999  * DECR:  Count of conflicts within a kpm_page is going to be decremented.
14000  * UNMAP_SMALL: A small (regular page size) mapping is going to be unmapped.
14001  * TNC:   Temporary non cached: a kpm mapped page is mapped in TNC state.
14002  */
14003 #define	KPM_VUL_BIG		(0)
14004 #define	KPM_VUL_CONFL_INCR1	(KPM_KS)
14005 #define	KPM_VUL_UNMAP_SMALL1	(KPM_KS | KPM_S)
14006 #define	KPM_VUL_CONFL_INCR2	(KPM_KC)
14007 #define	KPM_VUL_CONFL_INCR3	(KPM_KC | KPM_KS)
14008 #define	KPM_VUL_UNMAP_SMALL2	(KPM_KC | KPM_KS | KPM_S)
14009 #define	KPM_VUL_CONFL_DECR1	(KPM_KC | KPM_C)
14010 #define	KPM_VUL_CONFL_DECR2	(KPM_KC | KPM_C | KPM_KS)
14011 #define	KPM_VUL_TNC		(KPM_KC | KPM_C | KPM_KS | KPM_S)
14012 
14013 /*
14014  * Handle VAC unload conflicts introduced by hme mappings or vice
14015  * versa when a hme conflict mapping is replaced by a non conflict
14016  * one. Perform actions and state transitions according to the
14017  * various page and kpm_page entry states. VACache flushes are in
14018  * the responsibiliy of the caller. We still hold the mlist lock.
14019  */
14020 static void
14021 sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
14022 {
14023 	kpm_page_t	*kp;
14024 	kpm_hlk_t	*kpmp;
14025 	caddr_t		kpmvaddr = hat_kpm_page2va(pp, 1);
14026 	int		newcolor;
14027 	kmutex_t	*pmtx;
14028 	uint_t		vacunlcase;
14029 	int		badstate = 0;
14030 	kpm_spage_t	*ksp;
14031 	kpm_shlk_t	*kpmsp;
14032 
14033 	ASSERT(PAGE_LOCKED(pp));
14034 	ASSERT(sfmmu_mlist_held(pp));
14035 	ASSERT(!PP_ISNC(pp));
14036 
14037 	newcolor = addr_to_vcolor(kpmvaddr) != addr_to_vcolor(vaddr);
14038 	if (kpm_smallpages)
14039 		goto smallpages_vac_unload;
14040 
14041 	PP2KPMPG(pp, kp);
14042 	kpmp = KPMP_HASH(kp);
14043 	mutex_enter(&kpmp->khl_mutex);
14044 
14045 	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
14046 		if (kp->kp_refcnta < 1) {
14047 			panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n",
14048 				(void *)kp);
14049 		}
14050 
14051 		if (PP_ISKPMC(pp) == 0) {
14052 			if (newcolor == 0)
14053 				goto exit;
14054 			sfmmu_kpm_demap_small(kpmvaddr);
14055 			pmtx = sfmmu_page_enter(pp);
14056 			PP_SETKPMC(pp);
14057 			sfmmu_page_exit(pmtx);
14058 
14059 		} else if (newcolor == 0) {
14060 			pmtx = sfmmu_page_enter(pp);
14061 			PP_CLRKPMC(pp);
14062 			sfmmu_page_exit(pmtx);
14063 
14064 		} else {
14065 			badstate++;
14066 		}
14067 
14068 		goto exit;
14069 	}
14070 
14071 	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
14072 	if (kp->kp_refcntc == -1) {
14073 		/*
14074 		 * We should come here only if trap level tsb miss
14075 		 * handler is disabled.
14076 		 */
14077 		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
14078 			PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
14079 	} else {
14080 		badstate |= (kp->kp_refcntc < 0);
14081 	}
14082 
14083 	if (badstate)
14084 		goto exit;
14085 
14086 	if (PP_ISKPMC(pp) == 0 && newcolor == 0) {
14087 		ASSERT(PP_ISKPMS(pp) == 0);
14088 		goto exit;
14089 	}
14090 
14091 	/*
14092 	 * Combine the per kpm_page and per page kpm VAC states
14093 	 * to a summary state in order to make the vac unload
14094 	 * handling more concise.
14095 	 */
14096 	vacunlcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
14097 			((kp->kp_refcnts > 0) ? KPM_KS : 0) |
14098 			(PP_ISKPMC(pp) ? KPM_C : 0) |
14099 			(PP_ISKPMS(pp) ? KPM_S : 0));
14100 
14101 	switch (vacunlcase) {
14102 	case KPM_VUL_BIG:				/* - - - - */
14103 		/*
14104 		 * Have to break up the large page mapping to be
14105 		 * able to handle the conflicting hme vaddr.
14106 		 */
14107 		if (kp->kp_refcntc == -1) {
14108 			/* remove go indication */
14109 			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
14110 					&kpmp->khl_lock, KPMTSBM_STOP);
14111 		}
14112 		sfmmu_kpm_demap_large(kpmvaddr);
14113 
14114 		ASSERT(kp->kp_refcntc == 0);
14115 		kp->kp_refcntc++;
14116 		pmtx = sfmmu_page_enter(pp);
14117 		PP_SETKPMC(pp);
14118 		sfmmu_page_exit(pmtx);
14119 		break;
14120 
14121 	case KPM_VUL_UNMAP_SMALL1:			/* -  - ks s */
14122 	case KPM_VUL_UNMAP_SMALL2:			/* kc - ks s */
14123 		/*
14124 		 * New conflict w/ an active kpm page, actually mapped
14125 		 * in by small TSB/TLB entries. Remove the mapping and
14126 		 * update states.
14127 		 */
14128 		ASSERT(newcolor);
14129 		sfmmu_kpm_demap_small(kpmvaddr);
14130 		kp->kp_refcnts--;
14131 		kp->kp_refcnt++;
14132 		kp->kp_refcntc++;
14133 		pmtx = sfmmu_page_enter(pp);
14134 		PP_CLRKPMS(pp);
14135 		PP_SETKPMC(pp);
14136 		sfmmu_page_exit(pmtx);
14137 		break;
14138 
14139 	case KPM_VUL_CONFL_INCR1:			/* -  - ks - */
14140 	case KPM_VUL_CONFL_INCR2:			/* kc - -  - */
14141 	case KPM_VUL_CONFL_INCR3:			/* kc - ks - */
14142 		/*
14143 		 * New conflict on an active kpm mapped page not yet in
14144 		 * TSB/TLB. Mark page and increment the kpm_page conflict
14145 		 * count.
14146 		 */
14147 		ASSERT(newcolor);
14148 		kp->kp_refcntc++;
14149 		pmtx = sfmmu_page_enter(pp);
14150 		PP_SETKPMC(pp);
14151 		sfmmu_page_exit(pmtx);
14152 		break;
14153 
14154 	case KPM_VUL_CONFL_DECR1:			/* kc c -  - */
14155 	case KPM_VUL_CONFL_DECR2:			/* kc c ks - */
14156 		/*
14157 		 * A conflicting hme mapping is removed for an active
14158 		 * kpm page not yet in TSB/TLB. Unmark page and decrement
14159 		 * the kpm_page conflict count.
14160 		 */
14161 		ASSERT(newcolor == 0);
14162 		kp->kp_refcntc--;
14163 		pmtx = sfmmu_page_enter(pp);
14164 		PP_CLRKPMC(pp);
14165 		sfmmu_page_exit(pmtx);
14166 		break;
14167 
14168 	case KPM_VUL_TNC:				/* kc c ks s */
14169 		cmn_err(CE_NOTE, "sfmmu_kpm_vac_unload: "
14170 			"page not in NC state");
14171 		/* FALLTHRU */
14172 
14173 	default:
14174 		badstate++;
14175 	}
14176 exit:
14177 	if (badstate) {
14178 		panic("sfmmu_kpm_vac_unload: inconsistent VAC state, "
14179 			"kpmvaddr=%p kp=%p pp=%p",
14180 			(void *)kpmvaddr, (void *)kp, (void *)pp);
14181 	}
14182 	mutex_exit(&kpmp->khl_mutex);
14183 
14184 	return;
14185 
14186 smallpages_vac_unload:
14187 	if (newcolor == 0)
14188 		return;
14189 
14190 	PP2KPMSPG(pp, ksp);
14191 	kpmsp = KPMP_SHASH(ksp);
14192 
14193 	if (PP_ISKPMC(pp) == 0) {
14194 		if (ksp->kp_mapped == KPM_MAPPEDS) {
14195 			/*
14196 			 * Stop TL tsbmiss handling
14197 			 */
14198 			(void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
14199 					&kpmsp->kshl_lock, KPM_MAPPEDSC);
14200 
14201 			sfmmu_kpm_demap_small(kpmvaddr);
14202 
14203 		} else if (ksp->kp_mapped != KPM_MAPPEDSC) {
14204 			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
14205 		}
14206 
14207 		pmtx = sfmmu_page_enter(pp);
14208 		PP_SETKPMC(pp);
14209 		sfmmu_page_exit(pmtx);
14210 
14211 	} else {
14212 		if (ksp->kp_mapped != KPM_MAPPEDSC)
14213 			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
14214 	}
14215 }
14216 
14217 /*
14218  * Page is marked to be in VAC conflict to an existing kpm mapping
14219  * or is kpm mapped using only the regular pagesize. Called from
14220  * sfmmu_hblk_unload when a mlist is completely removed.
14221  */
14222 static void
14223 sfmmu_kpm_hme_unload(page_t *pp)
14224 {
14225 	/* tte assembly */
14226 	kpm_page_t	*kp;
14227 	kpm_hlk_t	*kpmp;
14228 	caddr_t		vaddr;
14229 	kmutex_t	*pmtx;
14230 	uint_t		flags;
14231 	kpm_spage_t	*ksp;
14232 
14233 	ASSERT(sfmmu_mlist_held(pp));
14234 	ASSERT(PP_ISMAPPED_KPM(pp));
14235 
14236 	flags = pp->p_nrm & (P_KPMC | P_KPMS);
14237 	if (kpm_smallpages)
14238 		goto smallpages_hme_unload;
14239 
14240 	if (flags == (P_KPMC | P_KPMS)) {
14241 		panic("sfmmu_kpm_hme_unload: page should be uncached");
14242 
14243 	} else if (flags == P_KPMS) {
14244 		/*
14245 		 * Page mapped small but not involved in VAC conflict
14246 		 */
14247 		return;
14248 	}
14249 
14250 	vaddr = hat_kpm_page2va(pp, 1);
14251 
14252 	PP2KPMPG(pp, kp);
14253 	kpmp = KPMP_HASH(kp);
14254 	mutex_enter(&kpmp->khl_mutex);
14255 
14256 	if (IS_KPM_ALIAS_RANGE(vaddr)) {
14257 		if (kp->kp_refcnta < 1) {
14258 			panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n",
14259 				(void *)kp);
14260 		}
14261 
14262 	} else {
14263 		if (kp->kp_refcntc < 1) {
14264 			panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n",
14265 				(void *)kp);
14266 		}
14267 		kp->kp_refcntc--;
14268 	}
14269 
14270 	pmtx = sfmmu_page_enter(pp);
14271 	PP_CLRKPMC(pp);
14272 	sfmmu_page_exit(pmtx);
14273 
14274 	mutex_exit(&kpmp->khl_mutex);
14275 	return;
14276 
14277 smallpages_hme_unload:
14278 	if (flags != P_KPMC)
14279 		panic("sfmmu_kpm_hme_unload: page should be uncached");
14280 
14281 	vaddr = hat_kpm_page2va(pp, 1);
14282 	PP2KPMSPG(pp, ksp);
14283 
14284 	if (ksp->kp_mapped != KPM_MAPPEDSC)
14285 		panic("sfmmu_kpm_hme_unload: inconsistent mapping");
14286 
14287 	/*
14288 	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
14289 	 * prevents TL tsbmiss handling and forces a hat_kpm_fault.
14290 	 * There we can start over again.
14291 	 */
14292 
14293 	pmtx = sfmmu_page_enter(pp);
14294 	PP_CLRKPMC(pp);
14295 	sfmmu_page_exit(pmtx);
14296 }
14297 
14298 /*
14299  * Special hooks for sfmmu_page_cache_array() when changing the
14300  * cacheability of a page. It is used to obey the hat_kpm lock
14301  * ordering (mlist -> kpmp -> spl, and back).
14302  */
14303 static kpm_hlk_t *
14304 sfmmu_kpm_kpmp_enter(page_t *pp, pgcnt_t npages)
14305 {
14306 	kpm_page_t	*kp;
14307 	kpm_hlk_t	*kpmp;
14308 
14309 	ASSERT(sfmmu_mlist_held(pp));
14310 
14311 	if (kpm_smallpages || PP_ISMAPPED_KPM(pp) == 0)
14312 		return (NULL);
14313 
14314 	ASSERT(npages <= kpmpnpgs);
14315 
14316 	PP2KPMPG(pp, kp);
14317 	kpmp = KPMP_HASH(kp);
14318 	mutex_enter(&kpmp->khl_mutex);
14319 
14320 	return (kpmp);
14321 }
14322 
14323 static void
14324 sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp)
14325 {
14326 	if (kpm_smallpages || kpmp == NULL)
14327 		return;
14328 
14329 	mutex_exit(&kpmp->khl_mutex);
14330 }
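
/*
 * Expected usage (sketch; assumes a caller shaped like
 * sfmmu_page_cache_array()):
 *
 *	kpm_hlk_t *kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
 *	... grab spl lock, change cacheability, possibly via
 *	    sfmmu_kpm_page_cache() ...
 *	sfmmu_kpm_kpmp_exit(kpmp);
 *
 * which preserves the mlist -> kpmp -> spl lock order noted above.
 */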
14331 
14332 /*
14333  * Summary states used in sfmmu_kpm_page_cache (KPM_*).
14334  * See also the more detailed comments within the sfmmu_kpm_page_cache switch.
14335  * Abbreviations used:
14336  * UNC:     Input state for an uncache request.
14337  *   BIG:     Large page kpm mapping in use.
14338  *   SMALL:   Page has a small kpm mapping within a kpm_page range.
14339  *   NODEMAP: No demap needed.
14340  *   NOP:     No operation needed on this input state.
14341  * CACHE:   Input state for a re-cache request.
14342  *   MAPS:    Page is in TNC and kpm VAC conflict state and kpm mapped small.
14343  *   NOMAP:   Page is in TNC and kpm VAC conflict state, but not small kpm
14344  *            mapped.
14345  *   NOMAPO:  Page is in TNC and kpm VAC conflict state, but not small kpm
14346  *            mapped. There are also other small kpm mappings within this
14347  *            kpm_page.
14348  */
14349 #define	KPM_UNC_BIG		(0)
14350 #define	KPM_UNC_NODEMAP1	(KPM_KS)
14351 #define	KPM_UNC_SMALL1		(KPM_KS | KPM_S)
14352 #define	KPM_UNC_NODEMAP2	(KPM_KC)
14353 #define	KPM_UNC_NODEMAP3	(KPM_KC | KPM_KS)
14354 #define	KPM_UNC_SMALL2		(KPM_KC | KPM_KS | KPM_S)
14355 #define	KPM_UNC_NOP1		(KPM_KC | KPM_C)
14356 #define	KPM_UNC_NOP2		(KPM_KC | KPM_C | KPM_KS)
14357 #define	KPM_CACHE_NOMAP		(KPM_KC | KPM_C)
14358 #define	KPM_CACHE_NOMAPO	(KPM_KC | KPM_C | KPM_KS)
14359 #define	KPM_CACHE_MAPS		(KPM_KC | KPM_C | KPM_KS | KPM_S)
14360 
14361 /*
14362  * This function is called when the virtual cacheability of a page
14363  * is changed and the page has an active kpm mapping. The mlist mutex,
14364  * the spl hash lock and the kpmp mutex (if needed) are already grabbed.
14365  */
14366 static void
14367 sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
14368 {
14369 	kpm_page_t	*kp;
14370 	kpm_hlk_t	*kpmp;
14371 	caddr_t		kpmvaddr;
14372 	int		badstate = 0;
14373 	uint_t		pgcacase;
14374 	kpm_spage_t	*ksp;
14375 	kpm_shlk_t	*kpmsp;
14376 	int		oldval;
14377 
14378 	ASSERT(PP_ISMAPPED_KPM(pp));
14379 	ASSERT(sfmmu_mlist_held(pp));
14380 	ASSERT(sfmmu_page_spl_held(pp));
14381 
14382 	if (flags != HAT_TMPNC && flags != HAT_CACHE)
14383 		panic("sfmmu_kpm_page_cache: bad flags");
14384 
14385 	kpmvaddr = hat_kpm_page2va(pp, 1);
14386 
14387 	if (flags == HAT_TMPNC && cache_flush_tag == CACHE_FLUSH) {
14388 		pfn_t pfn = pp->p_pagenum;
14389 		int vcolor = addr_to_vcolor(kpmvaddr);
14390 		cpuset_t cpuset = cpu_ready_set;
14391 
14392 		/* Flush vcolor in DCache */
14393 		CPUSET_DEL(cpuset, CPU->cpu_id);
14394 		SFMMU_XCALL_STATS(ksfmmup->sfmmu_cnum);
14395 		xt_some(cpuset, vac_flushpage_tl1, pfn, vcolor);
14396 		vac_flushpage(pfn, vcolor);
14397 	}
14398 
14399 	if (kpm_smallpages)
14400 		goto smallpages_page_cache;
14401 
14402 	PP2KPMPG(pp, kp);
14403 	kpmp = KPMP_HASH(kp);
14404 	ASSERT(MUTEX_HELD(&kpmp->khl_mutex));
14405 
14406 	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
14407 		if (kp->kp_refcnta < 1) {
14408 			panic("sfmmu_kpm_page_cache: bad refcnta "
14409 				"kpm_page=%p\n", (void *)kp);
14410 		}
14411 		sfmmu_kpm_demap_small(kpmvaddr);
14412 		if (flags == HAT_TMPNC) {
14413 			PP_SETKPMC(pp);
14414 			ASSERT(!PP_ISKPMS(pp));
14415 		} else {
14416 			ASSERT(PP_ISKPMC(pp));
14417 			PP_CLRKPMC(pp);
14418 		}
14419 		goto exit;
14420 	}
14421 
14422 	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
14423 	if (kp->kp_refcntc == -1) {
14424 		/*
14425 		 * We should come here only if trap level tsb miss
14426 		 * handler is disabled.
14427 		 */
14428 		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
14429 			PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
14430 	} else {
14431 		badstate |= (kp->kp_refcntc < 0);
14432 	}
14433 
14434 	if (badstate)
14435 		goto exit;
14436 
14437 	/*
14438 	 * Combine the per kpm_page and per page kpm VAC states to
14439 	 * a summary state in order to make the VAC cache/uncache
14440 	 * handling more concise.
14441 	 */
14442 	pgcacase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
14443 			((kp->kp_refcnts > 0) ? KPM_KS : 0) |
14444 			(PP_ISKPMC(pp) ? KPM_C : 0) |
14445 			(PP_ISKPMS(pp) ? KPM_S : 0));
14446 
14447 	if (flags == HAT_CACHE) {
14448 		switch (pgcacase) {
14449 		case KPM_CACHE_MAPS:			/* kc c ks s */
14450 			sfmmu_kpm_demap_small(kpmvaddr);
14451 			if (kp->kp_refcnts < 1) {
14452 				panic("sfmmu_kpm_page_cache: bad refcnts "
14453 				"kpm_page=%p\n", (void *)kp);
14454 			}
14455 			kp->kp_refcnts--;
14456 			kp->kp_refcnt++;
14457 			PP_CLRKPMS(pp);
14458 			/* FALLTHRU */
14459 
14460 		case KPM_CACHE_NOMAP:			/* kc c -  - */
14461 		case KPM_CACHE_NOMAPO:			/* kc c ks - */
14462 			kp->kp_refcntc--;
14463 			PP_CLRKPMC(pp);
14464 			break;
14465 
14466 		default:
14467 			badstate++;
14468 		}
14469 		goto exit;
14470 	}
14471 
14472 	switch (pgcacase) {
14473 	case KPM_UNC_BIG:				/* - - - - */
14474 		if (kp->kp_refcnt < 1) {
14475 			panic("sfmmu_kpm_page_cache: bad refcnt "
14476 				"kpm_page=%p\n", (void *)kp);
14477 		}
14478 
14479 		/*
14480 		 * We have to break up the large page mapping in
14481 		 * preparation for the upcoming TNC mode, which is handled
14482 		 * by small mappings. The demap may already have been done
14483 		 * due to another conflict within the kpm_page.
14484 		 */
14485 		if (kp->kp_refcntc == -1) {
14486 			/* remove the go indication (stop TL tsbmiss handling) */
14487 			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
14488 				&kpmp->khl_lock, KPMTSBM_STOP);
14489 		}
14490 		ASSERT(kp->kp_refcntc == 0);
14491 		sfmmu_kpm_demap_large(kpmvaddr);
14492 		kp->kp_refcntc++;
14493 		PP_SETKPMC(pp);
14494 		break;
14495 
14496 	case KPM_UNC_SMALL1:				/* -  - ks s */
14497 	case KPM_UNC_SMALL2:				/* kc - ks s */
14498 		/*
14499 		 * We have to demap an existing small kpm mapping in
14500 		 * preparation for the upcoming TNC mode. The demap may already
14501 		 * have been done due to another conflict within the kpm_page.
14502 		 */
14503 		sfmmu_kpm_demap_small(kpmvaddr);
14504 		kp->kp_refcntc++;
14505 		kp->kp_refcnts--;
14506 		kp->kp_refcnt++;
14507 		PP_CLRKPMS(pp);
14508 		PP_SETKPMC(pp);
14509 		break;
14510 
14511 	case KPM_UNC_NODEMAP1:				/* -  - ks - */
14512 		/* fallthru */
14513 
14514 	case KPM_UNC_NODEMAP2:				/* kc - -  - */
14515 	case KPM_UNC_NODEMAP3:				/* kc - ks - */
14516 		kp->kp_refcntc++;
14517 		PP_SETKPMC(pp);
14518 		break;
14519 
14520 	case KPM_UNC_NOP1:				/* kc c -  - */
14521 	case KPM_UNC_NOP2:				/* kc c ks - */
14522 		break;
14523 
14524 	default:
14525 		badstate++;
14526 	}
14527 exit:
14528 	if (badstate) {
14529 		panic("sfmmu_kpm_page_cache: inconsistent VAC state "
14530 			"kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
14531 			(void *)kp, (void *)pp);
14532 	}
14533 	return;
14534 
14535 smallpages_page_cache:
14536 	PP2KPMSPG(pp, ksp);
14537 	kpmsp = KPMP_SHASH(ksp);
14538 
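	/*
	 * Switch the small page to KPM_MAPPEDSC under the kshl_lock;
	 * the previous state returned is checked below to verify that
	 * the page was small mapped (or already in the conflict state).
	 */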
14539 	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
14540 				&kpmsp->kshl_lock, KPM_MAPPEDSC);
14541 
14542 	if (!(oldval == KPM_MAPPEDS || oldval == KPM_MAPPEDSC))
14543 		panic("smallpages_page_cache: inconsistent mapping");
14544 
14545 	sfmmu_kpm_demap_small(kpmvaddr);
14546 
14547 	if (flags == HAT_TMPNC) {
14548 		PP_SETKPMC(pp);
14549 		ASSERT(!PP_ISKPMS(pp));
14550 
14551 	} else {
14552 		ASSERT(PP_ISKPMC(pp));
14553 		PP_CLRKPMC(pp);
14554 	}
14555 
14556 	/*
14557 	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss, where it
14558 	 * prevents TL tsbmiss handling and forces a hat_kpm_fault,
14559 	 * so that we can start over again there.
14560 	 */
14561 }
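
/*
 * A hedged sketch of the calling convention for sfmmu_kpm_page_cache()
 * (this caller is hypothetical, for illustration only).  The mlist
 * mutex and the page's spl hash lock must already be held, e.g.:
 *
 *	kmutex_t *pml = sfmmu_mlist_enter(pp);
 *	kmutex_t *pmtx = sfmmu_page_enter(pp);
 *
 *	sfmmu_kpm_page_cache(pp, HAT_TMPNC, CACHE_FLUSH);
 *
 *	sfmmu_page_exit(pmtx);
 *	sfmmu_mlist_exit(pml);
 *
 * In the non kpm_smallpages case the corresponding kpmp mutex must be
 * held as well.
 */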
14562 
14563 /*
14564  * hat_dump() is unused in sfmmu.
14565  */
14566 void
14567 hat_dump(void)
14568 {
14569 }
14570 
14571 /*
14572  * Called when a thread is exiting and we have switched to the kernel address
14573  * space.  Perform the same VM initialization resume() uses when switching
14574  * processes.
14575  *
14576  * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
14577  * we call it anyway in case the semantics change in the future.
14578  */
14579 /*ARGSUSED*/
14580 void
14581 hat_thread_exit(kthread_t *thd)
14582 {
14583 	ASSERT(thd->t_procp->p_as == &kas);
14584 
14585 	sfmmu_setctx_sec(KCONTEXT);
14586 	sfmmu_load_mmustate(ksfmmup);
14587 }
14588