xref: /illumos-gate/usr/src/uts/sfmmu/vm/hat_sfmmu.c (revision 5d0bc3ededb82d77f7c33d8f58e517a837ba5140)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * VM - Hardware Address Translation management for Spitfire MMU.
30  *
31  * This file implements the machine specific hardware translation
32  * needed by the VM system.  The machine independent interface is
33  * described in <vm/hat.h> while the machine dependent interface
34  * and data structures are described in <vm/hat_sfmmu.h>.
35  *
36  * The hat layer manages the address translation hardware as a cache
37  * driven by calls from the higher levels in the VM system.
38  */
39 
40 #include <sys/types.h>
41 #include <sys/kstat.h>
42 #include <vm/hat.h>
43 #include <vm/hat_sfmmu.h>
44 #include <vm/page.h>
45 #include <sys/pte.h>
46 #include <sys/systm.h>
47 #include <sys/mman.h>
48 #include <sys/sysmacros.h>
49 #include <sys/machparam.h>
50 #include <sys/vtrace.h>
51 #include <sys/kmem.h>
52 #include <sys/mmu.h>
53 #include <sys/cmn_err.h>
54 #include <sys/cpu.h>
55 #include <sys/cpuvar.h>
56 #include <sys/debug.h>
57 #include <sys/lgrp.h>
58 #include <sys/archsystm.h>
59 #include <sys/machsystm.h>
60 #include <sys/vmsystm.h>
61 #include <vm/as.h>
62 #include <vm/seg.h>
63 #include <vm/seg_kp.h>
64 #include <vm/seg_kmem.h>
65 #include <vm/seg_kpm.h>
66 #include <vm/rm.h>
67 #include <sys/t_lock.h>
68 #include <sys/obpdefs.h>
69 #include <sys/vm_machparam.h>
70 #include <sys/var.h>
71 #include <sys/trap.h>
72 #include <sys/machtrap.h>
73 #include <sys/scb.h>
74 #include <sys/bitmap.h>
75 #include <sys/machlock.h>
76 #include <sys/membar.h>
77 #include <sys/atomic.h>
78 #include <sys/cpu_module.h>
79 #include <sys/prom_debug.h>
80 #include <sys/ksynch.h>
81 #include <sys/mem_config.h>
82 #include <sys/mem_cage.h>
83 #include <sys/dtrace.h>
84 #include <vm/vm_dep.h>
85 #include <vm/xhat_sfmmu.h>
86 #include <sys/fpu/fpusystm.h>
87 
88 #if defined(SF_ERRATA_57)
89 extern caddr_t errata57_limit;
90 #endif
91 
92 #define	HME8BLK_SZ_RND		((roundup(HME8BLK_SZ, sizeof (int64_t))) /  \
93 				(sizeof (int64_t)))
94 #define	HBLK_RESERVE		((struct hme_blk *)hblk_reserve)
95 
96 #define	HBLK_RESERVE_CNT	128
97 #define	HBLK_RESERVE_MIN	20
98 
99 static struct hme_blk		*freehblkp;
100 static kmutex_t			freehblkp_lock;
101 static int			freehblkcnt;
102 
103 static int64_t			hblk_reserve[HME8BLK_SZ_RND];
104 static kmutex_t			hblk_reserve_lock;
105 static kthread_t		*hblk_reserve_thread;
106 
107 static nucleus_hblk8_info_t	nucleus_hblk8;
108 static nucleus_hblk1_info_t	nucleus_hblk1;
109 
110 /*
111  * SFMMU specific hat functions
112  */
113 void	hat_pagecachectl(struct page *, int);
114 
115 /* flags for hat_pagecachectl */
116 #define	HAT_CACHE	0x1
117 #define	HAT_UNCACHE	0x2
118 #define	HAT_TMPNC	0x4
119 
120 /*
121  * Flag to allow the creation of non-cacheable translations
122  * to system memory. It is off by default. At the moment this
123  * flag is used by the ecache error injector. The error injector
124  * will turn it on when creating such a translation then shut it
125  * off when it's finished.
126  */
127 
128 int	sfmmu_allow_nc_trans = 0;
129 
130 /*
131  * Flag to disable large page support.
132  * 	value of 1 => disable all large pages.
133  *	bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
134  *
135  * For example, use the value 0x4 to disable 512K pages.
136  *
137  */
138 #define	LARGE_PAGES_OFF		0x1
139 
140 /*
141  * WARNING: 512K pages MUST be disabled for ISM/DISM. If not
142  * a process would page fault indefinitely if it tried to
143  * access a 512K page.
144  */
145 int	disable_ism_large_pages = (1 << TTE512K);
146 int	disable_large_pages = 0;
147 int	disable_auto_large_pages = 0;
148 
149 /*
150  * Private sfmmu data structures for hat management
151  */
152 static struct kmem_cache *sfmmuid_cache;
153 static struct kmem_cache *mmuctxdom_cache;
154 
155 /*
156  * Private sfmmu data structures for tsb management
157  */
158 static struct kmem_cache *sfmmu_tsbinfo_cache;
159 static struct kmem_cache *sfmmu_tsb8k_cache;
160 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
161 static vmem_t *kmem_tsb_arena;
162 
163 /*
164  * sfmmu static variables for hmeblk resource management.
165  */
166 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
167 static struct kmem_cache *sfmmu8_cache;
168 static struct kmem_cache *sfmmu1_cache;
169 static struct kmem_cache *pa_hment_cache;
170 
171 static kmutex_t 	ism_mlist_lock;	/* mutex for ism mapping list */
172 /*
173  * private data for ism
174  */
175 static struct kmem_cache *ism_blk_cache;
176 static struct kmem_cache *ism_ment_cache;
177 #define	ISMID_STARTADDR	NULL
178 
179 /*
180  * Whether to delay TLB flushes and use Cheetah's flush-all support
181  * when removing contexts from the dirty list.
182  */
183 int delay_tlb_flush;
184 int disable_delay_tlb_flush;
185 
186 /*
187  * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
188  * HAT flags, synchronizing TLB/TSB coherency, and context management.
189  * The lock is hashed on the sfmmup since the case where we need to lock
190  * all processes is rare but does occur (e.g. we need to unload a shared
191  * mapping from all processes using the mapping).  We have a lot of buckets,
192  * and each slab of sfmmu_t's can use about a quarter of them, giving us
193  * a fairly good distribution without wasting too much space and overhead
194  * when we have to grab them all.
195  */
196 #define	SFMMU_NUM_LOCK	128		/* must be power of two */
197 hatlock_t	hat_lock[SFMMU_NUM_LOCK];
198 
199 /*
200  * Hash algorithm optimized for a small number of slabs.
201  *  7 is (highbit((sizeof sfmmu_t)) - 1)
202  * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
203  * kmem_cache, and thus they will be sequential within that cache.  In
204  * addition, each new slab will have a different "color" up to cache_maxcolor
205  * which will skew the hashing for each successive slab which is allocated.
206  * If the size of sfmmu_t changed to a larger size, this algorithm may need
207  * to be revisited.
208  */
209 #define	TSB_HASH_SHIFT_BITS (7)
210 #define	PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
211 
212 #ifdef DEBUG
213 int tsb_hash_debug = 0;
214 #define	TSB_HASH(sfmmup)	\
215 	(tsb_hash_debug ? &hat_lock[0] : \
216 	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
217 #else	/* DEBUG */
218 #define	TSB_HASH(sfmmup)	&hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
219 #endif	/* DEBUG */
220 
221 
222 /* sfmmu_replace_tsb() return codes. */
223 typedef enum tsb_replace_rc {
224 	TSB_SUCCESS,
225 	TSB_ALLOCFAIL,
226 	TSB_LOSTRACE,
227 	TSB_ALREADY_SWAPPED,
228 	TSB_CANTGROW
229 } tsb_replace_rc_t;
230 
231 /*
232  * Flags for TSB allocation routines.
233  */
234 #define	TSB_ALLOC	0x01
235 #define	TSB_FORCEALLOC	0x02
236 #define	TSB_GROW	0x04
237 #define	TSB_SHRINK	0x08
238 #define	TSB_SWAPIN	0x10
239 
240 /*
241  * Support for HAT callbacks.
242  */
243 #define	SFMMU_MAX_RELOC_CALLBACKS	10
244 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
245 static id_t sfmmu_cb_nextid = 0;
246 static id_t sfmmu_tsb_cb_id;
247 struct sfmmu_callback *sfmmu_cb_table;
248 
249 /*
250  * Kernel page relocation is enabled by default for non-caged
251  * kernel pages.  This has little effect unless segkmem_reloc is
252  * set, since by default kernel memory comes from inside the
253  * kernel cage.
254  */
255 int hat_kpr_enabled = 1;
256 
257 kmutex_t	kpr_mutex;
258 kmutex_t	kpr_suspendlock;
259 kthread_t	*kreloc_thread;
260 
261 /*
262  * Enable VA->PA translation sanity checking on DEBUG kernels.
263  * Disabled by default.  This is incompatible with some
264  * drivers (error injector, RSM) so if it breaks you get
265  * to keep both pieces.
266  */
267 int hat_check_vtop = 0;
268 
269 /*
270  * Private sfmmu routines (prototypes)
271  */
272 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
273 static struct 	hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
274 			struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t);
275 static caddr_t	sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
276 			caddr_t, demap_range_t *, uint_t);
277 static caddr_t	sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
278 			caddr_t, int);
279 static void	sfmmu_hblk_free(struct hmehash_bucket *, struct hme_blk *,
280 			uint64_t, struct hme_blk **);
281 static void	sfmmu_hblks_list_purge(struct hme_blk **);
282 static uint_t	sfmmu_get_free_hblk(struct hme_blk **, uint_t);
283 static uint_t	sfmmu_put_free_hblk(struct hme_blk *, uint_t);
284 static struct hme_blk *sfmmu_hblk_steal(int);
285 static int	sfmmu_steal_this_hblk(struct hmehash_bucket *,
286 			struct hme_blk *, uint64_t, uint64_t,
287 			struct hme_blk *);
288 static caddr_t	sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
289 
290 static void	sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
291 		    uint_t, uint_t, pgcnt_t);
292 void		sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
293 			uint_t);
294 static int	sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
295 			uint_t);
296 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
297 					caddr_t, int);
298 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
299 			struct hmehash_bucket *, caddr_t, uint_t, uint_t);
300 static int	sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
301 			caddr_t, page_t **, uint_t);
302 static void	sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
303 
304 static int	sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
305 pfn_t		sfmmu_uvatopfn(caddr_t, sfmmu_t *);
306 void		sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
307 static void	sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
308 static int	sfmmu_vacconflict_array(caddr_t, page_t *, int *);
309 static int	tst_tnc(page_t *pp, pgcnt_t);
310 static void	conv_tnc(page_t *pp, int);
311 
312 static void	sfmmu_get_ctx(sfmmu_t *);
313 static void	sfmmu_free_sfmmu(sfmmu_t *);
314 
315 static void	sfmmu_gettte(struct hat *, caddr_t, tte_t *);
316 static void	sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
317 static void	sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
318 
319 static cpuset_t	sfmmu_pageunload(page_t *, struct sf_hment *, int);
320 static void	hat_pagereload(struct page *, struct page *);
321 static cpuset_t	sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
322 static void	sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
323 static void	sfmmu_page_cache(page_t *, int, int, int);
324 
325 static void	sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
326 			pfn_t, int, int, int, int);
327 static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
328 			pfn_t, int);
329 static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
330 static void	sfmmu_tlb_range_demap(demap_range_t *);
331 static void	sfmmu_invalidate_ctx(sfmmu_t *);
332 static void	sfmmu_sync_mmustate(sfmmu_t *);
333 
334 static void 	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
335 static int	sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
336 			sfmmu_t *);
337 static void	sfmmu_tsb_free(struct tsb_info *);
338 static void	sfmmu_tsbinfo_free(struct tsb_info *);
339 static int	sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
340 			sfmmu_t *);
341 
342 static void	sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
343 static int	sfmmu_select_tsb_szc(pgcnt_t);
344 static void	sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
345 #define		sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
346 	sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
347 #define		sfmmu_unload_tsb(sfmmup, vaddr, szc)    \
348 	sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
349 static void	sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
350 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
351     hatlock_t *, uint_t);
352 static void	sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
353 
354 static void	sfmmu_cache_flush(pfn_t, int);
355 void		sfmmu_cache_flushcolor(int, pfn_t);
356 static caddr_t	sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
357 			caddr_t, demap_range_t *, uint_t, int);
358 
359 static uint64_t	sfmmu_vtop_attr(uint_t, int mode, tte_t *);
360 static uint_t	sfmmu_ptov_attr(tte_t *);
361 static caddr_t	sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
362 			caddr_t, demap_range_t *, uint_t);
363 static uint_t	sfmmu_vtop_prot(uint_t, uint_t *);
364 static int	sfmmu_idcache_constructor(void *, void *, int);
365 static void	sfmmu_idcache_destructor(void *, void *);
366 static int	sfmmu_hblkcache_constructor(void *, void *, int);
367 static void	sfmmu_hblkcache_destructor(void *, void *);
368 static void	sfmmu_hblkcache_reclaim(void *);
369 static void	sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
370 			struct hmehash_bucket *);
371 static void	sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
372 static void	sfmmu_rm_large_mappings(page_t *, int);
373 
374 static void	hat_lock_init(void);
375 static void	hat_kstat_init(void);
376 static int	sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
377 static void	sfmmu_check_page_sizes(sfmmu_t *, int);
378 static int	fnd_mapping_sz(page_t *);
379 static void	iment_add(struct ism_ment *,  struct hat *);
380 static void	iment_sub(struct ism_ment *, struct hat *);
381 static pgcnt_t	ism_tsb_entries(sfmmu_t *, int szc);
382 extern void	sfmmu_setup_tsbinfo(sfmmu_t *);
383 extern void	sfmmu_clear_utsbinfo(void);
384 
385 /* kpm prototypes */
386 static caddr_t	sfmmu_kpm_mapin(page_t *);
387 static void	sfmmu_kpm_mapout(page_t *, caddr_t);
388 static int	sfmmu_kpme_lookup(struct kpme *, page_t *);
389 static void	sfmmu_kpme_add(struct kpme *, page_t *);
390 static void	sfmmu_kpme_sub(struct kpme *, page_t *);
391 static caddr_t	sfmmu_kpm_getvaddr(page_t *, int *);
392 static int	sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
393 static int	sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
394 static void	sfmmu_kpm_vac_conflict(page_t *, caddr_t);
395 static void	sfmmu_kpm_pageunload(page_t *);
396 static void	sfmmu_kpm_vac_unload(page_t *, caddr_t);
397 static void	sfmmu_kpm_demap_large(caddr_t);
398 static void	sfmmu_kpm_demap_small(caddr_t);
399 static void	sfmmu_kpm_demap_tlbs(caddr_t);
400 static void	sfmmu_kpm_hme_unload(page_t *);
401 static kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
402 static void	sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp);
403 static void	sfmmu_kpm_page_cache(page_t *, int, int);
404 
405 static void	sfmmu_ctx_wrap_around(mmu_ctx_t *);
406 
407 /* kpm globals */
408 #ifdef	DEBUG
409 /*
410  * Enable trap level tsbmiss handling
411  */
412 int	kpm_tsbmtl = 1;
413 
414 /*
415  * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
416  * required TLB shootdowns in this case, so handle w/ care. Off by default.
417  */
418 int	kpm_tlb_flush;
419 #endif	/* DEBUG */
420 
421 static void	*sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
422 
423 #ifdef DEBUG
424 static void	sfmmu_check_hblk_flist();
425 #endif
426 
427 /*
428  * Semi-private sfmmu data structures.  Some of them are initialize in
429  * startup or in hat_init. Some of them are private but accessed by
430  * assembly code or mach_sfmmu.c
431  */
432 struct hmehash_bucket *uhme_hash;	/* user hmeblk hash table */
433 struct hmehash_bucket *khme_hash;	/* kernel hmeblk hash table */
434 uint64_t	uhme_hash_pa;		/* PA of uhme_hash */
435 uint64_t	khme_hash_pa;		/* PA of khme_hash */
436 int 		uhmehash_num;		/* # of buckets in user hash table */
437 int 		khmehash_num;		/* # of buckets in kernel hash table */
438 
439 uint_t		max_mmu_ctxdoms = 0;	/* max context domains in the system */
440 mmu_ctx_t	**mmu_ctxs_tbl;		/* global array of context domains */
441 uint64_t	mmu_saved_gnum = 0;	/* to init incoming MMUs' gnums */
442 
443 #define	DEFAULT_NUM_CTXS_PER_MMU 8192
444 static uint_t	nctxs = DEFAULT_NUM_CTXS_PER_MMU;
445 
446 int		cache;			/* describes system cache */
447 
448 caddr_t		ktsb_base;		/* kernel 8k-indexed tsb base address */
449 uint64_t	ktsb_pbase;		/* kernel 8k-indexed tsb phys address */
450 int		ktsb_szcode;		/* kernel 8k-indexed tsb size code */
451 int		ktsb_sz;		/* kernel 8k-indexed tsb size */
452 
453 caddr_t		ktsb4m_base;		/* kernel 4m-indexed tsb base address */
454 uint64_t	ktsb4m_pbase;		/* kernel 4m-indexed tsb phys address */
455 int		ktsb4m_szcode;		/* kernel 4m-indexed tsb size code */
456 int		ktsb4m_sz;		/* kernel 4m-indexed tsb size */
457 
458 uint64_t	kpm_tsbbase;		/* kernel seg_kpm 4M TSB base address */
459 int		kpm_tsbsz;		/* kernel seg_kpm 4M TSB size code */
460 uint64_t	kpmsm_tsbbase;		/* kernel seg_kpm 8K TSB base address */
461 int		kpmsm_tsbsz;		/* kernel seg_kpm 8K TSB size code */
462 
463 #ifndef sun4v
464 int		utsb_dtlb_ttenum = -1;	/* index in TLB for utsb locked TTE */
465 int		utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
466 int		dtlb_resv_ttenum;	/* index in TLB of first reserved TTE */
467 caddr_t		utsb_vabase;		/* reserved kernel virtual memory */
468 caddr_t		utsb4m_vabase;		/* for trap handler TSB accesses */
469 #endif /* sun4v */
470 uint64_t	tsb_alloc_bytes = 0;	/* bytes allocated to TSBs */
471 vmem_t		*kmem_tsb_default_arena[NLGRPS_MAX];	/* For dynamic TSBs */
472 
473 /*
474  * Size to use for TSB slabs.  Future platforms that support page sizes
475  * larger than 4M may wish to change these values, and provide their own
476  * assembly macros for building and decoding the TSB base register contents.
477  * Note disable_large_pages will override the value set here.
478  */
479 uint_t	tsb_slab_ttesz = TTE4M;
480 uint_t	tsb_slab_size;
481 uint_t	tsb_slab_shift;
482 uint_t	tsb_slab_mask;	/* PFN mask for TTE */
483 
484 /* largest TSB size to grow to, will be smaller on smaller memory systems */
485 int	tsb_max_growsize = UTSB_MAX_SZCODE;
486 
487 /*
488  * Tunable parameters dealing with TSB policies.
489  */
490 
491 /*
492  * This undocumented tunable forces all 8K TSBs to be allocated from
493  * the kernel heap rather than from the kmem_tsb_default_arena arenas.
494  */
495 #ifdef	DEBUG
496 int	tsb_forceheap = 0;
497 #endif	/* DEBUG */
498 
499 /*
500  * Decide whether to use per-lgroup arenas, or one global set of
501  * TSB arenas.  The default is not to break up per-lgroup, since
502  * most platforms don't recognize any tangible benefit from it.
503  */
504 int	tsb_lgrp_affinity = 0;
505 
506 /*
507  * Used for growing the TSB based on the process RSS.
508  * tsb_rss_factor is based on the smallest TSB, and is
509  * shifted by the TSB size to determine if we need to grow.
510  * The default will grow the TSB if the number of TTEs for
511  * this page size exceeds 75% of the number of TSB entries,
512  * which should _almost_ eliminate all conflict misses
513  * (at the expense of using up lots and lots of memory).
514  */
515 #define	TSB_RSS_FACTOR		(TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
516 #define	SFMMU_RSS_TSBSIZE(tsbszc)	(tsb_rss_factor << tsbszc)
517 #define	SELECT_TSB_SIZECODE(pgcnt) ( \
518 	(enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
519 	default_tsb_size)
520 #define	TSB_OK_SHRINK()	\
521 	(tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
522 #define	TSB_OK_GROW()	\
523 	(tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
524 
525 int	enable_tsb_rss_sizing = 1;
526 int	tsb_rss_factor	= (int)TSB_RSS_FACTOR;
527 
528 /* which TSB size code to use for new address spaces or if rss sizing off */
529 int default_tsb_size = TSB_8K_SZCODE;
530 
531 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
532 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
533 #define	TSB_ALLOC_HIWATER_FACTOR_DEFAULT	32
534 
535 #ifdef DEBUG
536 static int tsb_random_size = 0;	/* set to 1 to test random tsb sizes on alloc */
537 static int tsb_grow_stress = 0;	/* if set to 1, keep replacing TSB w/ random */
538 static int tsb_alloc_mtbf = 0;	/* fail allocation every n attempts */
539 static int tsb_alloc_fail_mtbf = 0;
540 static int tsb_alloc_count = 0;
541 #endif /* DEBUG */
542 
543 /* if set to 1, will remap valid TTEs when growing TSB. */
544 int tsb_remap_ttes = 1;
545 
546 /*
547  * If we have more than this many mappings, allocate a second TSB.
548  * This default is chosen because the I/D fully associative TLBs are
549  * assumed to have at least 8 available entries. Platforms with a
550  * larger fully-associative TLB could probably override the default.
551  */
552 int tsb_sectsb_threshold = 8;
553 
554 /*
555  * kstat data
556  */
557 struct sfmmu_global_stat sfmmu_global_stat;
558 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
559 
560 /*
561  * Global data
562  */
563 sfmmu_t 	*ksfmmup;		/* kernel's hat id */
564 
565 #ifdef DEBUG
566 static void	chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
567 #endif
568 
569 /* sfmmu locking operations */
570 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
571 static int	sfmmu_mlspl_held(struct page *, int);
572 
573 static kmutex_t *sfmmu_page_enter(page_t *);
574 static void	sfmmu_page_exit(kmutex_t *);
575 static int	sfmmu_page_spl_held(struct page *);
576 
577 /* sfmmu internal locking operations - accessed directly */
578 static void	sfmmu_mlist_reloc_enter(page_t *, page_t *,
579 				kmutex_t **, kmutex_t **);
580 static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
581 static hatlock_t *
582 		sfmmu_hat_enter(sfmmu_t *);
583 static hatlock_t *
584 		sfmmu_hat_tryenter(sfmmu_t *);
585 static void	sfmmu_hat_exit(hatlock_t *);
586 static void	sfmmu_hat_lock_all(void);
587 static void	sfmmu_hat_unlock_all(void);
588 static void	sfmmu_ismhat_enter(sfmmu_t *, int);
589 static void	sfmmu_ismhat_exit(sfmmu_t *, int);
590 
591 /*
592  * Array of mutexes protecting a page's mapping list and p_nrm field.
593  *
594  * The hash function looks complicated, but is made up so that:
595  *
596  * "pp" not shifted, so adjacent pp values will hash to different cache lines
597  *  (8 byte alignment * 8 bytes/mutes == 64 byte coherency subblock)
598  *
599  * "pp" >> mml_shift, incorporates more source bits into the hash result
600  *
601  *  "& (mml_table_size - 1), should be faster than using remainder "%"
602  *
603  * Hopefully, mml_table, mml_table_size and mml_shift are all in the same
604  * cacheline, since they get declared next to each other below. We'll trust
605  * ld not to do something random.
606  */
607 #ifdef	DEBUG
608 int mlist_hash_debug = 0;
609 #define	MLIST_HASH(pp)	(mlist_hash_debug ? &mml_table[0] : \
610 	&mml_table[((uintptr_t)(pp) + \
611 	((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)])
612 #else	/* !DEBUG */
613 #define	MLIST_HASH(pp)   &mml_table[ \
614 	((uintptr_t)(pp) + ((uintptr_t)(pp) >> mml_shift)) & (mml_table_sz - 1)]
615 #endif	/* !DEBUG */
616 
617 kmutex_t		*mml_table;
618 uint_t			mml_table_sz;	/* must be a power of 2 */
619 uint_t			mml_shift;	/* log2(mml_table_sz) + 3 for align */
620 
621 /*
622  * kpm_page lock hash.
623  * All slots should be used equally and 2 adjacent kpm_page_t's
624  * shouldn't have their mutexes in the same cache line.
625  */
626 #ifdef	DEBUG
627 int kpmp_hash_debug = 0;
628 #define	KPMP_HASH(kpp)	(kpmp_hash_debug ? &kpmp_table[0] : &kpmp_table[ \
629 	((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
630 	& (kpmp_table_sz - 1)])
631 #else	/* !DEBUG */
632 #define	KPMP_HASH(kpp)	&kpmp_table[ \
633 	((uintptr_t)(kpp) + ((uintptr_t)(kpp) >> kpmp_shift)) \
634 	& (kpmp_table_sz - 1)]
635 #endif	/* DEBUG */
636 
637 kpm_hlk_t	*kpmp_table;
638 uint_t		kpmp_table_sz;	/* must be a power of 2 */
639 uchar_t		kpmp_shift;
640 
641 #ifdef	DEBUG
642 #define	KPMP_SHASH(kpp)	(kpmp_hash_debug ? &kpmp_stable[0] : &kpmp_stable[ \
643 	(((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
644 	& (kpmp_stable_sz - 1)])
645 #else	/* !DEBUG */
646 #define	KPMP_SHASH(kpp)	&kpmp_stable[ \
647 	(((uintptr_t)(kpp) << kpmp_shift) + (uintptr_t)(kpp)) \
648 	& (kpmp_stable_sz - 1)]
649 #endif	/* DEBUG */
650 
651 kpm_shlk_t	*kpmp_stable;
652 uint_t		kpmp_stable_sz;	/* must be a power of 2 */
653 
654 /*
655  * SPL_HASH was improved to avoid false cache line sharing
656  */
657 #define	SPL_TABLE_SIZE	128
658 #define	SPL_MASK	(SPL_TABLE_SIZE - 1)
659 #define	SPL_SHIFT	7		/* log2(SPL_TABLE_SIZE) */
660 
661 #define	SPL_INDEX(pp) \
662 	((((uintptr_t)(pp) >> SPL_SHIFT) ^ \
663 	((uintptr_t)(pp) >> (SPL_SHIFT << 1))) & \
664 	(SPL_TABLE_SIZE - 1))
665 
666 #define	SPL_HASH(pp)    \
667 	(&sfmmu_page_lock[SPL_INDEX(pp) & SPL_MASK].pad_mutex)
668 
669 static	pad_mutex_t	sfmmu_page_lock[SPL_TABLE_SIZE];
670 
671 
672 /*
673  * hat_unload_callback() will group together callbacks in order
674  * to avoid xt_sync() calls.  This is the maximum size of the group.
675  */
676 #define	MAX_CB_ADDR	32
677 
678 tte_t	hw_tte;
679 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
680 
681 static char	*mmu_ctx_kstat_names[] = {
682 	"mmu_ctx_tsb_exceptions",
683 	"mmu_ctx_tsb_raise_exception",
684 	"mmu_ctx_wrap_around",
685 };
686 
687 /*
688  * kpm virtual address to physical address
689  */
690 #define	SFMMU_KPM_VTOP(vaddr, paddr) {					\
691 	uintptr_t r, v;							\
692 									\
693 	r = ((vaddr) - kpm_vbase) >> (uintptr_t)kpm_size_shift;		\
694 	(paddr) = (vaddr) - kpm_vbase;					\
695 	if (r != 0) {							\
696 		v = ((uintptr_t)(vaddr) >> MMU_PAGESHIFT) &		\
697 		    vac_colors_mask;					\
698 		(paddr) -= r << kpm_size_shift;				\
699 		if (r > v)						\
700 			(paddr) += (r - v) << MMU_PAGESHIFT;		\
701 		else							\
702 			(paddr) -= r << MMU_PAGESHIFT;			\
703 	}								\
704 }
705 
706 /*
707  * Wrapper for vmem_xalloc since vmem_create only allows limited
708  * parameters for vm_source_alloc functions.  This function allows us
709  * to specify alignment consistent with the size of the object being
710  * allocated.
711  */
712 static void *
713 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
714 {
715 	return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
716 }
717 
718 /* Common code for setting tsb_alloc_hiwater. */
719 #define	SFMMU_SET_TSB_ALLOC_HIWATER(pages)	tsb_alloc_hiwater = \
720 		ptob(pages) / tsb_alloc_hiwater_factor
721 
722 /*
723  * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
724  * a single TSB.  physmem is the number of physical pages so we need physmem 8K
725  * TTEs to represent all those physical pages.  We round this up by using
726  * 1<<highbit().  To figure out which size code to use, remember that the size
727  * code is just an amount to shift the smallest TSB size to get the size of
728  * this TSB.  So we subtract that size, TSB_START_SIZE, from highbit() (or
729  * highbit() - 1) to get the size code for the smallest TSB that can represent
730  * all of physical memory, while erring on the side of too much.
731  *
732  * If the computed size code is less than the current tsb_max_growsize, we set
733  * tsb_max_growsize to the computed size code.  In the case where the computed
734  * size code is greater than tsb_max_growsize, we have these restrictions that
735  * apply to increasing tsb_max_growsize:
736  *	1) TSBs can't grow larger than the TSB slab size
737  *	2) TSBs can't grow larger than UTSB_MAX_SZCODE.
738  */
739 #define	SFMMU_SET_TSB_MAX_GROWSIZE(pages) {				\
740 	int	i, szc;							\
741 									\
742 	i = highbit(pages);						\
743 	if ((1 << (i - 1)) == (pages))					\
744 		i--;		/* 2^n case, round down */		\
745 	szc = i - TSB_START_SIZE;					\
746 	if (szc < tsb_max_growsize)					\
747 		tsb_max_growsize = szc;					\
748 	else if ((szc > tsb_max_growsize) &&				\
749 	    (szc <= tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT))) \
750 		tsb_max_growsize = MIN(szc, UTSB_MAX_SZCODE);		\
751 }
752 
753 /*
754  * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
755  * tsb_info which handles that TTE size.
756  */
757 #define	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc)			\
758 	(tsbinfop) = (sfmmup)->sfmmu_tsb;				\
759 	ASSERT(sfmmu_hat_lock_held(sfmmup));				\
760 	if ((tte_szc) >= TTE4M)						\
761 		(tsbinfop) = (tsbinfop)->tsb_next;
762 
763 /*
764  * Return the number of mappings present in the HAT
765  * for a particular process and page size.
766  */
767 #define	SFMMU_TTE_CNT(sfmmup, szc)					\
768 	(sfmmup)->sfmmu_iblk?						\
769 	    (sfmmup)->sfmmu_ismttecnt[(szc)] +				\
770 	    (sfmmup)->sfmmu_ttecnt[(szc)] :				\
771 	    (sfmmup)->sfmmu_ttecnt[(szc)];
772 
773 /*
774  * Macro to use to unload entries from the TSB.
775  * It has knowledge of which page sizes get replicated in the TSB
776  * and will call the appropriate unload routine for the appropriate size.
777  */
778 #define	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp)				\
779 {									\
780 	int ttesz = get_hblk_ttesz(hmeblkp);				\
781 	if (ttesz == TTE8K || ttesz == TTE4M) {				\
782 		sfmmu_unload_tsb(sfmmup, addr, ttesz);			\
783 	} else {							\
784 		caddr_t sva = (caddr_t)get_hblk_base(hmeblkp);		\
785 		caddr_t eva = sva + get_hblk_span(hmeblkp);		\
786 		ASSERT(addr >= sva && addr < eva);			\
787 		sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);	\
788 	}								\
789 }
790 
791 
792 /* Update tsb_alloc_hiwater after memory is configured. */
793 /*ARGSUSED*/
794 static void
795 sfmmu_update_tsb_post_add(void *arg, pgcnt_t delta_pages)
796 {
797 	/* Assumes physmem has already been updated. */
798 	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
799 	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
800 }
801 
802 /*
803  * Update tsb_alloc_hiwater before memory is deleted.  We'll do nothing here
804  * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
805  * deleted.
806  */
807 /*ARGSUSED*/
808 static int
809 sfmmu_update_tsb_pre_del(void *arg, pgcnt_t delta_pages)
810 {
811 	return (0);
812 }
813 
814 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
815 /*ARGSUSED*/
816 static void
817 sfmmu_update_tsb_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
818 {
819 	/*
820 	 * Whether the delete was cancelled or not, just go ahead and update
821 	 * tsb_alloc_hiwater and tsb_max_growsize.
822 	 */
823 	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
824 	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
825 }
826 
827 static kphysm_setup_vector_t sfmmu_update_tsb_vec = {
828 	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
829 	sfmmu_update_tsb_post_add,	/* post_add */
830 	sfmmu_update_tsb_pre_del,	/* pre_del */
831 	sfmmu_update_tsb_post_del	/* post_del */
832 };
833 
834 
835 /*
836  * HME_BLK HASH PRIMITIVES
837  */
838 
839 /*
840  * Enter a hme on the mapping list for page pp.
841  * When large pages are more prevalent in the system we might want to
842  * keep the mapping list in ascending order by the hment size. For now,
843  * small pages are more frequent, so don't slow it down.
844  */
845 #define	HME_ADD(hme, pp)					\
846 {								\
847 	ASSERT(sfmmu_mlist_held(pp));				\
848 								\
849 	hme->hme_prev = NULL;					\
850 	hme->hme_next = pp->p_mapping;				\
851 	hme->hme_page = pp;					\
852 	if (pp->p_mapping) {					\
853 		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
854 		ASSERT(pp->p_share > 0);			\
855 	} else  {						\
856 		/* EMPTY */					\
857 		ASSERT(pp->p_share == 0);			\
858 	}							\
859 	pp->p_mapping = hme;					\
860 	pp->p_share++;						\
861 }
862 
863 /*
864  * Enter a hme on the mapping list for page pp.
865  * If we are unmapping a large translation, we need to make sure that the
866  * change is reflect in the corresponding bit of the p_index field.
867  */
868 #define	HME_SUB(hme, pp)					\
869 {								\
870 	ASSERT(sfmmu_mlist_held(pp));				\
871 	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
872 								\
873 	if (pp->p_mapping == NULL) {				\
874 		panic("hme_remove - no mappings");		\
875 	}							\
876 								\
877 	membar_stst();	/* ensure previous stores finish */	\
878 								\
879 	ASSERT(pp->p_share > 0);				\
880 	pp->p_share--;						\
881 								\
882 	if (hme->hme_prev) {					\
883 		ASSERT(pp->p_mapping != hme);			\
884 		ASSERT(hme->hme_prev->hme_page == pp ||		\
885 			IS_PAHME(hme->hme_prev));		\
886 		hme->hme_prev->hme_next = hme->hme_next;	\
887 	} else {						\
888 		ASSERT(pp->p_mapping == hme);			\
889 		pp->p_mapping = hme->hme_next;			\
890 		ASSERT((pp->p_mapping == NULL) ?		\
891 			(pp->p_share == 0) : 1);		\
892 	}							\
893 								\
894 	if (hme->hme_next) {					\
895 		ASSERT(hme->hme_next->hme_page == pp ||		\
896 			IS_PAHME(hme->hme_next));		\
897 		hme->hme_next->hme_prev = hme->hme_prev;	\
898 	}							\
899 								\
900 	/* zero out the entry */				\
901 	hme->hme_next = NULL;					\
902 	hme->hme_prev = NULL;					\
903 	hme->hme_page = NULL;					\
904 								\
905 	if (hme_size(hme) > TTE8K) {				\
906 		/* remove mappings for remainder of large pg */	\
907 		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
908 	}							\
909 }
910 
911 /*
912  * This function returns the hment given the hme_blk and a vaddr.
913  * It assumes addr has already been checked to belong to hme_blk's
914  * range.
915  */
916 #define	HBLKTOHME(hment, hmeblkp, addr)					\
917 {									\
918 	int index;							\
919 	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
920 }
921 
922 /*
923  * Version of HBLKTOHME that also returns the index in hmeblkp
924  * of the hment.
925  */
926 #define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
927 {									\
928 	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
929 									\
930 	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
931 		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
932 	} else								\
933 		idx = 0;						\
934 									\
935 	(hment) = &(hmeblkp)->hblk_hme[idx];				\
936 }
937 
938 /*
939  * Disable any page sizes not supported by the CPU
940  */
941 void
942 hat_init_pagesizes()
943 {
944 	int 		i;
945 
946 	mmu_exported_page_sizes = 0;
947 	for (i = TTE8K; i < max_mmu_page_sizes; i++) {
948 		extern int	disable_text_largepages;
949 		extern int	disable_initdata_largepages;
950 
951 		szc_2_userszc[i] = (uint_t)-1;
952 		userszc_2_szc[i] = (uint_t)-1;
953 
954 		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
955 			disable_large_pages |= (1 << i);
956 			disable_ism_large_pages |= (1 << i);
957 			disable_text_largepages |= (1 << i);
958 			disable_initdata_largepages |= (1 << i);
959 		} else {
960 			szc_2_userszc[i] = mmu_exported_page_sizes;
961 			userszc_2_szc[mmu_exported_page_sizes] = i;
962 			mmu_exported_page_sizes++;
963 		}
964 	}
965 
966 	disable_auto_large_pages = disable_large_pages;
967 
968 	/*
969 	 * Initialize mmu-specific large page sizes.
970 	 */
971 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
972 	    (&mmu_large_pages_disabled)) {
973 		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
974 		disable_ism_large_pages |=
975 		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
976 		disable_auto_large_pages |=
977 		    mmu_large_pages_disabled(HAT_LOAD_AUTOLPG);
978 	}
979 
980 }
981 
982 /*
983  * Initialize the hardware address translation structures.
984  */
985 void
986 hat_init(void)
987 {
988 	int 		i;
989 	uint_t		sz;
990 	uint_t		maxtsb;
991 	size_t		size;
992 
993 	hat_lock_init();
994 	hat_kstat_init();
995 
996 	/*
997 	 * Hardware-only bits in a TTE
998 	 */
999 	MAKE_TTE_MASK(&hw_tte);
1000 
1001 	hat_init_pagesizes();
1002 
1003 	/* Initialize the hash locks */
1004 	for (i = 0; i < khmehash_num; i++) {
1005 		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1006 		    MUTEX_DEFAULT, NULL);
1007 	}
1008 	for (i = 0; i < uhmehash_num; i++) {
1009 		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1010 		    MUTEX_DEFAULT, NULL);
1011 	}
1012 	khmehash_num--;		/* make sure counter starts from 0 */
1013 	uhmehash_num--;		/* make sure counter starts from 0 */
1014 
1015 	/*
1016 	 * Allocate context domain structures.
1017 	 *
1018 	 * A platform may choose to modify max_mmu_ctxdoms in
1019 	 * set_platform_defaults(). If a platform does not define
1020 	 * a set_platform_defaults() or does not choose to modify
1021 	 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1022 	 *
1023 	 * For sun4v, there will be one global context domain, this is to
1024 	 * avoid the ldom cpu substitution problem.
1025 	 *
1026 	 * For all platforms that have CPUs sharing MMUs, this
1027 	 * value must be defined.
1028 	 */
1029 	if (max_mmu_ctxdoms == 0) {
1030 #ifndef sun4v
1031 		max_mmu_ctxdoms = max_ncpus;
1032 #else /* sun4v */
1033 		max_mmu_ctxdoms = 1;
1034 #endif /* sun4v */
1035 	}
1036 
1037 	size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1038 	mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1039 
1040 	/* mmu_ctx_t is 64 bytes aligned */
1041 	mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1042 	    sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1043 	/*
1044 	 * MMU context domain initialization for the Boot CPU.
1045 	 * This needs the context domains array allocated above.
1046 	 */
1047 	mutex_enter(&cpu_lock);
1048 	sfmmu_cpu_init(CPU);
1049 	mutex_exit(&cpu_lock);
1050 
1051 	/*
1052 	 * Intialize ism mapping list lock.
1053 	 */
1054 
1055 	mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1056 
1057 	/*
1058 	 * Each sfmmu structure carries an array of MMU context info
1059 	 * structures, one per context domain. The size of this array depends
1060 	 * on the maximum number of context domains. So, the size of the
1061 	 * sfmmu structure varies per platform.
1062 	 *
1063 	 * sfmmu is allocated from static arena, because trap
1064 	 * handler at TL > 0 is not allowed to touch kernel relocatable
1065 	 * memory. sfmmu's alignment is changed to 64 bytes from
1066 	 * default 8 bytes, as the lower 6 bits will be used to pass
1067 	 * pgcnt to vtag_flush_pgcnt_tl1.
1068 	 */
1069 	size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1070 
1071 	sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1072 	    64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1073 	    NULL, NULL, static_arena, 0);
1074 
1075 	sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1076 	    sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1077 
1078 	/*
1079 	 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1080 	 * from the heap when low on memory or when TSB_FORCEALLOC is
1081 	 * specified, don't use magazines to cache them--we want to return
1082 	 * them to the system as quickly as possible.
1083 	 */
1084 	sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1085 	    MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1086 	    static_arena, KMC_NOMAGAZINE);
1087 
1088 	/*
1089 	 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1090 	 * memory, which corresponds to the old static reserve for TSBs.
1091 	 * tsb_alloc_hiwater_factor defaults to 32.  This caps the amount of
1092 	 * memory we'll allocate for TSB slabs; beyond this point TSB
1093 	 * allocations will be taken from the kernel heap (via
1094 	 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1095 	 * consumer.
1096 	 */
1097 	if (tsb_alloc_hiwater_factor == 0) {
1098 		tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1099 	}
1100 	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1101 
1102 	/* Set tsb_max_growsize. */
1103 	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1104 
1105 	/*
1106 	 * On smaller memory systems, allocate TSB memory in smaller chunks
1107 	 * than the default 4M slab size. We also honor disable_large_pages
1108 	 * here.
1109 	 *
1110 	 * The trap handlers need to be patched with the final slab shift,
1111 	 * since they need to be able to construct the TSB pointer at runtime.
1112 	 */
1113 	if (tsb_max_growsize <= TSB_512K_SZCODE)
1114 		tsb_slab_ttesz = TTE512K;
1115 
1116 	for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1117 		if (!(disable_large_pages & (1 << sz)))
1118 			break;
1119 	}
1120 
1121 	tsb_slab_ttesz = sz;
1122 	tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1123 	tsb_slab_size = 1 << tsb_slab_shift;
1124 	tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1125 
1126 	maxtsb = tsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT);
1127 	if (tsb_max_growsize > maxtsb)
1128 		tsb_max_growsize = maxtsb;
1129 
1130 	/*
1131 	 * Set up memory callback to update tsb_alloc_hiwater and
1132 	 * tsb_max_growsize.
1133 	 */
1134 	i = kphysm_setup_func_register(&sfmmu_update_tsb_vec, (void *) 0);
1135 	ASSERT(i == 0);
1136 
1137 	/*
1138 	 * kmem_tsb_arena is the source from which large TSB slabs are
1139 	 * drawn.  The quantum of this arena corresponds to the largest
1140 	 * TSB size we can dynamically allocate for user processes.
1141 	 * Currently it must also be a supported page size since we
1142 	 * use exactly one translation entry to map each slab page.
1143 	 *
1144 	 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1145 	 * which most TSBs are allocated.  Since most TSB allocations are
1146 	 * typically 8K we have a kmem cache we stack on top of each
1147 	 * kmem_tsb_default_arena to speed up those allocations.
1148 	 *
1149 	 * Note the two-level scheme of arenas is required only
1150 	 * because vmem_create doesn't allow us to specify alignment
1151 	 * requirements.  If this ever changes the code could be
1152 	 * simplified to use only one level of arenas.
1153 	 */
1154 	kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1155 	    sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree, heap_arena,
1156 	    0, VM_SLEEP);
1157 
1158 	if (tsb_lgrp_affinity) {
1159 		char s[50];
1160 		for (i = 0; i < NLGRPS_MAX; i++) {
1161 			(void) sprintf(s, "kmem_tsb_lgrp%d", i);
1162 			kmem_tsb_default_arena[i] =
1163 			    vmem_create(s, NULL, 0, PAGESIZE,
1164 			    sfmmu_tsb_segkmem_alloc, sfmmu_tsb_segkmem_free,
1165 			    kmem_tsb_arena, 0, VM_SLEEP | VM_BESTFIT);
1166 			(void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1167 			sfmmu_tsb_cache[i] = kmem_cache_create(s, PAGESIZE,
1168 			    PAGESIZE, NULL, NULL, NULL, NULL,
1169 			    kmem_tsb_default_arena[i], 0);
1170 		}
1171 	} else {
1172 		kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1173 		    NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1174 		    sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1175 		    VM_SLEEP | VM_BESTFIT);
1176 
1177 		sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1178 		    PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1179 		    kmem_tsb_default_arena[0], 0);
1180 	}
1181 
1182 	sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1183 		HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1184 		sfmmu_hblkcache_destructor,
1185 		sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1186 		hat_memload_arena, KMC_NOHASH);
1187 
1188 	hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1189 	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
1190 
1191 	sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1192 		HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1193 		sfmmu_hblkcache_destructor,
1194 		NULL, (void *)HME1BLK_SZ,
1195 		hat_memload1_arena, KMC_NOHASH);
1196 
1197 	pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1198 		0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1199 
1200 	ism_blk_cache = kmem_cache_create("ism_blk_cache",
1201 		sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1202 		NULL, NULL, static_arena, KMC_NOHASH);
1203 
1204 	ism_ment_cache = kmem_cache_create("ism_ment_cache",
1205 		sizeof (ism_ment_t), 0, NULL, NULL,
1206 		NULL, NULL, NULL, 0);
1207 
1208 	/*
1209 	 * We grab the first hat for the kernel,
1210 	 */
1211 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
1212 	kas.a_hat = hat_alloc(&kas);
1213 	AS_LOCK_EXIT(&kas, &kas.a_lock);
1214 
1215 	/*
1216 	 * Initialize hblk_reserve.
1217 	 */
1218 	((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1219 				va_to_pa((caddr_t)hblk_reserve);
1220 
1221 #ifndef UTSB_PHYS
1222 	/*
1223 	 * Reserve some kernel virtual address space for the locked TTEs
1224 	 * that allow us to probe the TSB from TL>0.
1225 	 */
1226 	utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1227 		0, 0, NULL, NULL, VM_SLEEP);
1228 	utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1229 		0, 0, NULL, NULL, VM_SLEEP);
1230 #endif
1231 
1232 	/*
1233 	 * The big page VAC handling code assumes VAC
1234 	 * will not be bigger than the smallest big
1235 	 * page- which is 64K.
1236 	 */
1237 	if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1238 		cmn_err(CE_PANIC, "VAC too big!");
1239 	}
1240 
1241 	(void) xhat_init();
1242 
1243 	uhme_hash_pa = va_to_pa(uhme_hash);
1244 	khme_hash_pa = va_to_pa(khme_hash);
1245 
1246 	/*
1247 	 * Initialize relocation locks. kpr_suspendlock is held
1248 	 * at PIL_MAX to prevent interrupts from pinning the holder
1249 	 * of a suspended TTE which may access it leading to a
1250 	 * deadlock condition.
1251 	 */
1252 	mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1253 	mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1254 }
1255 
1256 /*
1257  * Initialize locking for the hat layer, called early during boot.
1258  */
1259 static void
1260 hat_lock_init()
1261 {
1262 	int i;
1263 
1264 	/*
1265 	 * initialize the array of mutexes protecting a page's mapping
1266 	 * list and p_nrm field.
1267 	 */
1268 	for (i = 0; i < mml_table_sz; i++)
1269 		mutex_init(&mml_table[i], NULL, MUTEX_DEFAULT, NULL);
1270 
1271 	if (kpm_enable) {
1272 		for (i = 0; i < kpmp_table_sz; i++) {
1273 			mutex_init(&kpmp_table[i].khl_mutex, NULL,
1274 			    MUTEX_DEFAULT, NULL);
1275 		}
1276 	}
1277 
1278 	/*
1279 	 * Initialize array of mutex locks that protects sfmmu fields and
1280 	 * TSB lists.
1281 	 */
1282 	for (i = 0; i < SFMMU_NUM_LOCK; i++)
1283 		mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1284 		    NULL);
1285 }
1286 
1287 extern caddr_t kmem64_base, kmem64_end;
1288 
1289 #define	SFMMU_KERNEL_MAXVA \
1290 	(kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1291 
1292 /*
1293  * Allocate a hat structure.
1294  * Called when an address space first uses a hat.
1295  */
1296 struct hat *
1297 hat_alloc(struct as *as)
1298 {
1299 	sfmmu_t *sfmmup;
1300 	int i;
1301 	uint64_t cnum;
1302 	extern uint_t get_color_start(struct as *);
1303 
1304 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
1305 	sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1306 	sfmmup->sfmmu_as = as;
1307 	sfmmup->sfmmu_flags = 0;
1308 	LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1309 
1310 	if (as == &kas) {
1311 		ksfmmup = sfmmup;
1312 		sfmmup->sfmmu_cext = 0;
1313 		cnum = KCONTEXT;
1314 
1315 		sfmmup->sfmmu_clrstart = 0;
1316 		sfmmup->sfmmu_tsb = NULL;
1317 		/*
1318 		 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1319 		 * to setup tsb_info for ksfmmup.
1320 		 */
1321 	} else {
1322 
1323 		/*
1324 		 * Just set to invalid ctx. When it faults, it will
1325 		 * get a valid ctx. This would avoid the situation
1326 		 * where we get a ctx, but it gets stolen and then
1327 		 * we fault when we try to run and so have to get
1328 		 * another ctx.
1329 		 */
1330 		sfmmup->sfmmu_cext = 0;
1331 		cnum = INVALID_CONTEXT;
1332 
1333 		/* initialize original physical page coloring bin */
1334 		sfmmup->sfmmu_clrstart = get_color_start(as);
1335 #ifdef DEBUG
1336 		if (tsb_random_size) {
1337 			uint32_t randval = (uint32_t)gettick() >> 4;
1338 			int size = randval % (tsb_max_growsize + 1);
1339 
1340 			/* chose a random tsb size for stress testing */
1341 			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1342 			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
1343 		} else
1344 #endif /* DEBUG */
1345 			(void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1346 			    default_tsb_size,
1347 			    TSB8K|TSB64K|TSB512K, 0, sfmmup);
1348 		sfmmup->sfmmu_flags = HAT_SWAPPED;
1349 		ASSERT(sfmmup->sfmmu_tsb != NULL);
1350 	}
1351 
1352 	ASSERT(max_mmu_ctxdoms > 0);
1353 	for (i = 0; i < max_mmu_ctxdoms; i++) {
1354 		sfmmup->sfmmu_ctxs[i].cnum = cnum;
1355 		sfmmup->sfmmu_ctxs[i].gnum = 0;
1356 	}
1357 
1358 	sfmmu_setup_tsbinfo(sfmmup);
1359 	for (i = 0; i < max_mmu_page_sizes; i++) {
1360 		sfmmup->sfmmu_ttecnt[i] = 0;
1361 		sfmmup->sfmmu_ismttecnt[i] = 0;
1362 		sfmmup->sfmmu_pgsz[i] = TTE8K;
1363 	}
1364 
1365 	sfmmup->sfmmu_iblk = NULL;
1366 	sfmmup->sfmmu_ismhat = 0;
1367 	sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1368 	if (sfmmup == ksfmmup) {
1369 		CPUSET_ALL(sfmmup->sfmmu_cpusran);
1370 	} else {
1371 		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1372 	}
1373 	sfmmup->sfmmu_free = 0;
1374 	sfmmup->sfmmu_rmstat = 0;
1375 	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1376 	sfmmup->sfmmu_xhat_provider = NULL;
1377 	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1378 	return (sfmmup);
1379 }
1380 
1381 /*
1382  * Create per-MMU context domain kstats for a given MMU ctx.
1383  */
1384 static void
1385 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1386 {
1387 	mmu_ctx_stat_t	stat;
1388 	kstat_t		*mmu_kstat;
1389 
1390 	ASSERT(MUTEX_HELD(&cpu_lock));
1391 	ASSERT(mmu_ctxp->mmu_kstat == NULL);
1392 
1393 	mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1394 	    "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1395 
1396 	if (mmu_kstat == NULL) {
1397 		cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1398 		    mmu_ctxp->mmu_idx);
1399 	} else {
1400 		mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1401 		for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1402 			kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1403 			    mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1404 		mmu_ctxp->mmu_kstat = mmu_kstat;
1405 		kstat_install(mmu_kstat);
1406 	}
1407 }
1408 
1409 /*
1410  * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1411  * context domain information for a given CPU. If a platform does not
1412  * specify that interface, then the function below is used instead to return
1413  * default information. The defaults are as follows:
1414  *
1415  *	- For sun4u systems there's one MMU context domain per CPU.
1416  *	  This default is used by all sun4u systems except OPL. OPL systems
1417  *	  provide platform specific interface to map CPU ids to MMU ids
1418  *	  because on OPL more than 1 CPU shares a single MMU.
1419  *        Note that on sun4v, there is one global context domain for
1420  *	  the entire system. This is to avoid running into potential problem
1421  *	  with ldom physical cpu substitution feature.
1422  *	- The number of MMU context IDs supported on any CPU in the
1423  *	  system is 8K.
1424  */
1425 /*ARGSUSED*/
1426 static void
1427 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1428 {
1429 	infop->mmu_nctxs = nctxs;
1430 #ifndef sun4v
1431 	infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1432 #else /* sun4v */
1433 	infop->mmu_idx = 0;
1434 #endif /* sun4v */
1435 }
1436 
1437 /*
1438  * Called during CPU initialization to set the MMU context-related information
1439  * for a CPU.
1440  *
1441  * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1442  */
1443 void
1444 sfmmu_cpu_init(cpu_t *cp)
1445 {
1446 	mmu_ctx_info_t	info;
1447 	mmu_ctx_t	*mmu_ctxp;
1448 
1449 	ASSERT(MUTEX_HELD(&cpu_lock));
1450 
1451 	if (&plat_cpuid_to_mmu_ctx_info == NULL)
1452 		sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1453 	else
1454 		plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1455 
1456 	ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1457 
1458 	if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1459 		/* Each mmu_ctx is cacheline aligned. */
1460 		mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1461 		bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1462 
1463 		mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1464 		    (void *)ipltospl(DISP_LEVEL));
1465 		mmu_ctxp->mmu_idx = info.mmu_idx;
1466 		mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1467 		/*
1468 		 * Globally for lifetime of a system,
1469 		 * gnum must always increase.
1470 		 * mmu_saved_gnum is protected by the cpu_lock.
1471 		 */
1472 		mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1473 		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1474 
1475 		sfmmu_mmu_kstat_create(mmu_ctxp);
1476 
1477 		mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1478 	} else {
1479 		ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1480 	}
1481 
1482 	/*
1483 	 * The mmu_lock is acquired here to prevent races with
1484 	 * the wrap-around code.
1485 	 */
1486 	mutex_enter(&mmu_ctxp->mmu_lock);
1487 
1488 
1489 	mmu_ctxp->mmu_ncpus++;
1490 	CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1491 	CPU_MMU_IDX(cp) = info.mmu_idx;
1492 	CPU_MMU_CTXP(cp) = mmu_ctxp;
1493 
1494 	mutex_exit(&mmu_ctxp->mmu_lock);
1495 }
1496 
1497 /*
1498  * Called to perform MMU context-related cleanup for a CPU.
1499  */
1500 void
1501 sfmmu_cpu_cleanup(cpu_t *cp)
1502 {
1503 	mmu_ctx_t	*mmu_ctxp;
1504 
1505 	ASSERT(MUTEX_HELD(&cpu_lock));
1506 
1507 	mmu_ctxp = CPU_MMU_CTXP(cp);
1508 	ASSERT(mmu_ctxp != NULL);
1509 
1510 	/*
1511 	 * The mmu_lock is acquired here to prevent races with
1512 	 * the wrap-around code.
1513 	 */
1514 	mutex_enter(&mmu_ctxp->mmu_lock);
1515 
1516 	CPU_MMU_CTXP(cp) = NULL;
1517 
1518 	CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1519 	if (--mmu_ctxp->mmu_ncpus == 0) {
1520 		mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1521 		mutex_exit(&mmu_ctxp->mmu_lock);
1522 		mutex_destroy(&mmu_ctxp->mmu_lock);
1523 
1524 		if (mmu_ctxp->mmu_kstat)
1525 			kstat_delete(mmu_ctxp->mmu_kstat);
1526 
1527 		/* mmu_saved_gnum is protected by the cpu_lock. */
1528 		if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1529 			mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1530 
1531 		kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1532 
1533 		return;
1534 	}
1535 
1536 	mutex_exit(&mmu_ctxp->mmu_lock);
1537 }
1538 
1539 /*
1540  * Hat_setup, makes an address space context the current active one.
1541  * In sfmmu this translates to setting the secondary context with the
1542  * corresponding context.
1543  */
1544 void
1545 hat_setup(struct hat *sfmmup, int allocflag)
1546 {
1547 	hatlock_t *hatlockp;
1548 
1549 	/* Init needs some special treatment. */
1550 	if (allocflag == HAT_INIT) {
1551 		/*
1552 		 * Make sure that we have
1553 		 * 1. a TSB
1554 		 * 2. a valid ctx that doesn't get stolen after this point.
1555 		 */
1556 		hatlockp = sfmmu_hat_enter(sfmmup);
1557 
1558 		/*
1559 		 * Swap in the TSB.  hat_init() allocates tsbinfos without
1560 		 * TSBs, but we need one for init, since the kernel does some
1561 		 * special things to set up its stack and needs the TSB to
1562 		 * resolve page faults.
1563 		 */
1564 		sfmmu_tsb_swapin(sfmmup, hatlockp);
1565 
1566 		sfmmu_get_ctx(sfmmup);
1567 
1568 		sfmmu_hat_exit(hatlockp);
1569 	} else {
1570 		ASSERT(allocflag == HAT_ALLOC);
1571 
1572 		hatlockp = sfmmu_hat_enter(sfmmup);
1573 		kpreempt_disable();
1574 
1575 		CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1576 
1577 		/*
1578 		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter,
1579 		 * pagesize bits don't matter in this case since we are passing
1580 		 * INVALID_CONTEXT to it.
1581 		 */
1582 		sfmmu_setctx_sec(INVALID_CONTEXT);
1583 		sfmmu_clear_utsbinfo();
1584 
1585 		kpreempt_enable();
1586 		sfmmu_hat_exit(hatlockp);
1587 	}
1588 }
1589 
1590 /*
1591  * Free all the translation resources for the specified address space.
1592  * Called from as_free when an address space is being destroyed.
1593  */
1594 void
1595 hat_free_start(struct hat *sfmmup)
1596 {
1597 	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
1598 	ASSERT(sfmmup != ksfmmup);
1599 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1600 
1601 	sfmmup->sfmmu_free = 1;
1602 }
1603 
1604 void
1605 hat_free_end(struct hat *sfmmup)
1606 {
1607 	int i;
1608 
1609 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1610 	if (sfmmup->sfmmu_ismhat) {
1611 		for (i = 0; i < mmu_page_sizes; i++) {
1612 			sfmmup->sfmmu_ttecnt[i] = 0;
1613 			sfmmup->sfmmu_ismttecnt[i] = 0;
1614 		}
1615 	} else {
1616 		/* EMPTY */
1617 		ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1618 		ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1619 		ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1620 		ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1621 		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1622 		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1623 	}
1624 
1625 	if (sfmmup->sfmmu_rmstat) {
1626 		hat_freestat(sfmmup->sfmmu_as, NULL);
1627 	}
1628 
1629 	while (sfmmup->sfmmu_tsb != NULL) {
1630 		struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1631 		sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1632 		sfmmup->sfmmu_tsb = next;
1633 	}
1634 	sfmmu_free_sfmmu(sfmmup);
1635 
1636 	kmem_cache_free(sfmmuid_cache, sfmmup);
1637 }
1638 
1639 /*
1640  * Set up any translation structures, for the specified address space,
1641  * that are needed or preferred when the process is being swapped in.
1642  */
1643 /* ARGSUSED */
1644 void
1645 hat_swapin(struct hat *hat)
1646 {
1647 	ASSERT(hat->sfmmu_xhat_provider == NULL);
1648 }
1649 
1650 /*
1651  * Free all of the translation resources, for the specified address space,
1652  * that can be freed while the process is swapped out. Called from as_swapout.
1653  * Also, free up the ctx that this process was using.
1654  */
1655 void
1656 hat_swapout(struct hat *sfmmup)
1657 {
1658 	struct hmehash_bucket *hmebp;
1659 	struct hme_blk *hmeblkp;
1660 	struct hme_blk *pr_hblk = NULL;
1661 	struct hme_blk *nx_hblk;
1662 	int i;
1663 	uint64_t hblkpa, prevpa, nx_pa;
1664 	struct hme_blk *list = NULL;
1665 	hatlock_t *hatlockp;
1666 	struct tsb_info *tsbinfop;
1667 	struct free_tsb {
1668 		struct free_tsb *next;
1669 		struct tsb_info *tsbinfop;
1670 	};			/* free list of TSBs */
1671 	struct free_tsb *freelist, *last, *next;
1672 
1673 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1674 	SFMMU_STAT(sf_swapout);
1675 
1676 	/*
1677 	 * There is no way to go from an as to all its translations in sfmmu.
1678 	 * Here is one of the times when we take the big hit and traverse
1679 	 * the hash looking for hme_blks to free up.  Not only do we free up
1680 	 * this as hme_blks but all those that are free.  We are obviously
1681 	 * swapping because we need memory so let's free up as much
1682 	 * as we can.
1683 	 *
1684 	 * Note that we don't flush TLB/TSB here -- it's not necessary
1685 	 * because:
1686 	 *  1) we free the ctx we're using and throw away the TSB(s);
1687 	 *  2) processes aren't runnable while being swapped out.
1688 	 */
1689 	ASSERT(sfmmup != KHATID);
1690 	for (i = 0; i <= UHMEHASH_SZ; i++) {
1691 		hmebp = &uhme_hash[i];
1692 		SFMMU_HASH_LOCK(hmebp);
1693 		hmeblkp = hmebp->hmeblkp;
1694 		hblkpa = hmebp->hmeh_nextpa;
1695 		prevpa = 0;
1696 		pr_hblk = NULL;
1697 		while (hmeblkp) {
1698 
1699 			ASSERT(!hmeblkp->hblk_xhat_bit);
1700 
1701 			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
1702 			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
1703 				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
1704 					(caddr_t)get_hblk_base(hmeblkp),
1705 					get_hblk_endaddr(hmeblkp),
1706 					NULL, HAT_UNLOAD);
1707 			}
1708 			nx_hblk = hmeblkp->hblk_next;
1709 			nx_pa = hmeblkp->hblk_nextpa;
1710 			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
1711 				ASSERT(!hmeblkp->hblk_lckcnt);
1712 				sfmmu_hblk_hash_rm(hmebp, hmeblkp,
1713 					prevpa, pr_hblk);
1714 				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
1715 			} else {
1716 				pr_hblk = hmeblkp;
1717 				prevpa = hblkpa;
1718 			}
1719 			hmeblkp = nx_hblk;
1720 			hblkpa = nx_pa;
1721 		}
1722 		SFMMU_HASH_UNLOCK(hmebp);
1723 	}
1724 
1725 	sfmmu_hblks_list_purge(&list);
1726 
1727 	/*
1728 	 * Now free up the ctx so that others can reuse it.
1729 	 */
1730 	hatlockp = sfmmu_hat_enter(sfmmup);
1731 
1732 	sfmmu_invalidate_ctx(sfmmup);
1733 
1734 	/*
1735 	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
1736 	 * If TSBs were never swapped in, just return.
1737 	 * This implies that we don't support partial swapping
1738 	 * of TSBs -- either all are swapped out, or none are.
1739 	 *
1740 	 * We must hold the HAT lock here to prevent racing with another
1741 	 * thread trying to unmap TTEs from the TSB or running the post-
1742 	 * relocator after relocating the TSB's memory.  Unfortunately, we
1743 	 * can't free memory while holding the HAT lock or we could
1744 	 * deadlock, so we build a list of TSBs to be freed after marking
1745 	 * the tsbinfos as swapped out and free them after dropping the
1746 	 * lock.
1747 	 */
1748 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
1749 		sfmmu_hat_exit(hatlockp);
1750 		return;
1751 	}
1752 
1753 	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
1754 	last = freelist = NULL;
1755 	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
1756 	    tsbinfop = tsbinfop->tsb_next) {
1757 		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
1758 
1759 		/*
1760 		 * Cast the TSB into a struct free_tsb and put it on the free
1761 		 * list.
1762 		 */
1763 		if (freelist == NULL) {
1764 			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
1765 		} else {
1766 			last->next = (struct free_tsb *)tsbinfop->tsb_va;
1767 			last = last->next;
1768 		}
1769 		last->next = NULL;
1770 		last->tsbinfop = tsbinfop;
1771 		tsbinfop->tsb_flags |= TSB_SWAPPED;
1772 		/*
1773 		 * Zero out the TTE to clear the valid bit.
1774 		 * Note we can't use a value like 0xbad because we want to
1775 		 * ensure diagnostic bits are NEVER set on TTEs that might
1776 		 * be loaded.  The intent is to catch any invalid access
1777 		 * to the swapped TSB, such as a thread running with a valid
1778 		 * context without first calling sfmmu_tsb_swapin() to
1779 		 * allocate TSB memory.
1780 		 */
1781 		tsbinfop->tsb_tte.ll = 0;
1782 	}
1783 
1784 	/* Now we can drop the lock and free the TSB memory. */
1785 	sfmmu_hat_exit(hatlockp);
1786 	for (; freelist != NULL; freelist = next) {
1787 		next = freelist->next;
1788 		sfmmu_tsb_free(freelist->tsbinfop);
1789 	}
1790 }
1791 
1792 /*
1793  * Duplicate the translations of an as into another newas
1794  */
1795 /* ARGSUSED */
1796 int
1797 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
1798 	uint_t flag)
1799 {
1800 	ASSERT(hat->sfmmu_xhat_provider == NULL);
1801 	ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW));
1802 
1803 	if (flag == HAT_DUP_COW) {
1804 		panic("hat_dup: HAT_DUP_COW not supported");
1805 	}
1806 	return (0);
1807 }
1808 
1809 /*
1810  * Set up addr to map to page pp with protection prot.
1811  * As an optimization we also load the TSB with the
1812  * corresponding tte but it is no big deal if  the tte gets kicked out.
1813  */
1814 void
1815 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
1816 	uint_t attr, uint_t flags)
1817 {
1818 	tte_t tte;
1819 
1820 
1821 	ASSERT(hat != NULL);
1822 	ASSERT(PAGE_LOCKED(pp));
1823 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
1824 	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
1825 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
1826 
1827 	if (PP_ISFREE(pp)) {
1828 		panic("hat_memload: loading a mapping to free page %p",
1829 		    (void *)pp);
1830 	}
1831 
1832 	if (hat->sfmmu_xhat_provider) {
1833 		XHAT_MEMLOAD(hat, addr, pp, attr, flags);
1834 		return;
1835 	}
1836 
1837 	ASSERT((hat == ksfmmup) ||
1838 		AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
1839 
1840 	if (flags & ~SFMMU_LOAD_ALLFLAG)
1841 		cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
1842 		    flags & ~SFMMU_LOAD_ALLFLAG);
1843 
1844 	if (hat->sfmmu_rmstat)
1845 		hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
1846 
1847 #if defined(SF_ERRATA_57)
1848 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
1849 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
1850 	    !(flags & HAT_LOAD_SHARE)) {
1851 		cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
1852 		    " page executable");
1853 		attr &= ~PROT_EXEC;
1854 	}
1855 #endif
1856 
1857 	sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
1858 	(void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags);
1859 
1860 	/*
1861 	 * Check TSB and TLB page sizes.
1862 	 */
1863 	if ((flags & HAT_LOAD_SHARE) == 0) {
1864 		sfmmu_check_page_sizes(hat, 1);
1865 	}
1866 }
1867 
1868 /*
1869  * hat_devload can be called to map real memory (e.g.
1870  * /dev/kmem) and even though hat_devload will determine pf is
1871  * for memory, it will be unable to get a shared lock on the
1872  * page (because someone else has it exclusively) and will
1873  * pass dp = NULL.  If tteload doesn't get a non-NULL
1874  * page pointer it can't cache memory.
1875  */
1876 void
1877 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
1878 	uint_t attr, int flags)
1879 {
1880 	tte_t tte;
1881 	struct page *pp = NULL;
1882 	int use_lgpg = 0;
1883 
1884 	ASSERT(hat != NULL);
1885 
1886 	if (hat->sfmmu_xhat_provider) {
1887 		XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
1888 		return;
1889 	}
1890 
1891 	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
1892 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
1893 	ASSERT((hat == ksfmmup) ||
1894 		AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
1895 	if (len == 0)
1896 		panic("hat_devload: zero len");
1897 	if (flags & ~SFMMU_LOAD_ALLFLAG)
1898 		cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
1899 		    flags & ~SFMMU_LOAD_ALLFLAG);
1900 
1901 #if defined(SF_ERRATA_57)
1902 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
1903 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
1904 	    !(flags & HAT_LOAD_SHARE)) {
1905 		cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
1906 		    " page executable");
1907 		attr &= ~PROT_EXEC;
1908 	}
1909 #endif
1910 
1911 	/*
1912 	 * If it's a memory page find its pp
1913 	 */
1914 	if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
1915 		pp = page_numtopp_nolock(pfn);
1916 		if (pp == NULL) {
1917 			flags |= HAT_LOAD_NOCONSIST;
1918 		} else {
1919 			if (PP_ISFREE(pp)) {
1920 				panic("hat_memload: loading "
1921 				    "a mapping to free page %p",
1922 				    (void *)pp);
1923 			}
1924 			if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1925 				panic("hat_memload: loading a mapping "
1926 				    "to unlocked relocatable page %p",
1927 				    (void *)pp);
1928 			}
1929 			ASSERT(len == MMU_PAGESIZE);
1930 		}
1931 	}
1932 
1933 	if (hat->sfmmu_rmstat)
1934 		hat_resvstat(len, hat->sfmmu_as, addr);
1935 
1936 	if (flags & HAT_LOAD_NOCONSIST) {
1937 		attr |= SFMMU_UNCACHEVTTE;
1938 		use_lgpg = 1;
1939 	}
1940 	if (!pf_is_memory(pfn)) {
1941 		attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
1942 		use_lgpg = 1;
1943 		switch (attr & HAT_ORDER_MASK) {
1944 			case HAT_STRICTORDER:
1945 			case HAT_UNORDERED_OK:
1946 				/*
1947 				 * we set the side effect bit for all non
1948 				 * memory mappings unless merging is ok
1949 				 */
1950 				attr |= SFMMU_SIDEFFECT;
1951 				break;
1952 			case HAT_MERGING_OK:
1953 			case HAT_LOADCACHING_OK:
1954 			case HAT_STORECACHING_OK:
1955 				break;
1956 			default:
1957 				panic("hat_devload: bad attr");
1958 				break;
1959 		}
1960 	}
1961 	while (len) {
1962 		if (!use_lgpg) {
1963 			sfmmu_memtte(&tte, pfn, attr, TTE8K);
1964 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1965 			    flags);
1966 			len -= MMU_PAGESIZE;
1967 			addr += MMU_PAGESIZE;
1968 			pfn++;
1969 			continue;
1970 		}
1971 		/*
1972 		 *  try to use large pages, check va/pa alignments
1973 		 *  Note that 32M/256M page sizes are not (yet) supported.
1974 		 */
1975 		if ((len >= MMU_PAGESIZE4M) &&
1976 		    !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
1977 		    !(disable_large_pages & (1 << TTE4M)) &&
1978 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
1979 			sfmmu_memtte(&tte, pfn, attr, TTE4M);
1980 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1981 			    flags);
1982 			len -= MMU_PAGESIZE4M;
1983 			addr += MMU_PAGESIZE4M;
1984 			pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
1985 		} else if ((len >= MMU_PAGESIZE512K) &&
1986 		    !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
1987 		    !(disable_large_pages & (1 << TTE512K)) &&
1988 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
1989 			sfmmu_memtte(&tte, pfn, attr, TTE512K);
1990 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
1991 			    flags);
1992 			len -= MMU_PAGESIZE512K;
1993 			addr += MMU_PAGESIZE512K;
1994 			pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
1995 		} else if ((len >= MMU_PAGESIZE64K) &&
1996 		    !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
1997 		    !(disable_large_pages & (1 << TTE64K)) &&
1998 		    !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
1999 			sfmmu_memtte(&tte, pfn, attr, TTE64K);
2000 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2001 			    flags);
2002 			len -= MMU_PAGESIZE64K;
2003 			addr += MMU_PAGESIZE64K;
2004 			pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2005 		} else {
2006 			sfmmu_memtte(&tte, pfn, attr, TTE8K);
2007 			(void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2008 			    flags);
2009 			len -= MMU_PAGESIZE;
2010 			addr += MMU_PAGESIZE;
2011 			pfn++;
2012 		}
2013 	}
2014 
2015 	/*
2016 	 * Check TSB and TLB page sizes.
2017 	 */
2018 	if ((flags & HAT_LOAD_SHARE) == 0) {
2019 		sfmmu_check_page_sizes(hat, 1);
2020 	}
2021 }
2022 
2023 /*
2024  * Map the largest extend possible out of the page array. The array may NOT
2025  * be in order.  The largest possible mapping a page can have
2026  * is specified in the p_szc field.  The p_szc field
2027  * cannot change as long as there any mappings (large or small)
2028  * to any of the pages that make up the large page. (ie. any
2029  * promotion/demotion of page size is not up to the hat but up to
2030  * the page free list manager).  The array
2031  * should consist of properly aligned contigous pages that are
2032  * part of a big page for a large mapping to be created.
2033  */
2034 void
2035 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2036 	struct page **pps, uint_t attr, uint_t flags)
2037 {
2038 	int  ttesz;
2039 	size_t mapsz;
2040 	pgcnt_t	numpg, npgs;
2041 	tte_t tte;
2042 	page_t *pp;
2043 	int large_pages_disable;
2044 
2045 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2046 
2047 	if (hat->sfmmu_xhat_provider) {
2048 		XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
2049 		return;
2050 	}
2051 
2052 	if (hat->sfmmu_rmstat)
2053 		hat_resvstat(len, hat->sfmmu_as, addr);
2054 
2055 #if defined(SF_ERRATA_57)
2056 	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2057 	    (addr < errata57_limit) && (attr & PROT_EXEC) &&
2058 	    !(flags & HAT_LOAD_SHARE)) {
2059 		cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2060 		    "user page executable");
2061 		attr &= ~PROT_EXEC;
2062 	}
2063 #endif
2064 
2065 	/* Get number of pages */
2066 	npgs = len >> MMU_PAGESHIFT;
2067 
2068 	if (flags & HAT_LOAD_SHARE) {
2069 		large_pages_disable = disable_ism_large_pages;
2070 	} else {
2071 		large_pages_disable = disable_large_pages;
2072 	}
2073 
2074 	if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2075 		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs);
2076 		return;
2077 	}
2078 
2079 	while (npgs >= NHMENTS) {
2080 		pp = *pps;
2081 		for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2082 			/*
2083 			 * Check if this page size is disabled.
2084 			 */
2085 			if (large_pages_disable & (1 << ttesz))
2086 				continue;
2087 
2088 			numpg = TTEPAGES(ttesz);
2089 			mapsz = numpg << MMU_PAGESHIFT;
2090 			if ((npgs >= numpg) &&
2091 			    IS_P2ALIGNED(addr, mapsz) &&
2092 			    IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2093 				/*
2094 				 * At this point we have enough pages and
2095 				 * we know the virtual address and the pfn
2096 				 * are properly aligned.  We still need
2097 				 * to check for physical contiguity but since
2098 				 * it is very likely that this is the case
2099 				 * we will assume they are so and undo
2100 				 * the request if necessary.  It would
2101 				 * be great if we could get a hint flag
2102 				 * like HAT_CONTIG which would tell us
2103 				 * the pages are contigous for sure.
2104 				 */
2105 				sfmmu_memtte(&tte, (*pps)->p_pagenum,
2106 					attr, ttesz);
2107 				if (!sfmmu_tteload_array(hat, &tte, addr,
2108 				    pps, flags)) {
2109 					break;
2110 				}
2111 			}
2112 		}
2113 		if (ttesz == TTE8K) {
2114 			/*
2115 			 * We were not able to map array using a large page
2116 			 * batch a hmeblk or fraction at a time.
2117 			 */
2118 			numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2119 				& (NHMENTS-1);
2120 			numpg = NHMENTS - numpg;
2121 			ASSERT(numpg <= npgs);
2122 			mapsz = numpg * MMU_PAGESIZE;
2123 			sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2124 							numpg);
2125 		}
2126 		addr += mapsz;
2127 		npgs -= numpg;
2128 		pps += numpg;
2129 	}
2130 
2131 	if (npgs) {
2132 		sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs);
2133 	}
2134 
2135 	/*
2136 	 * Check TSB and TLB page sizes.
2137 	 */
2138 	if ((flags & HAT_LOAD_SHARE) == 0) {
2139 		sfmmu_check_page_sizes(hat, 1);
2140 	}
2141 }
2142 
2143 /*
2144  * Function tries to batch 8K pages into the same hme blk.
2145  */
2146 static void
2147 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2148 		    uint_t attr, uint_t flags, pgcnt_t npgs)
2149 {
2150 	tte_t	tte;
2151 	page_t *pp;
2152 	struct hmehash_bucket *hmebp;
2153 	struct hme_blk *hmeblkp;
2154 	int	index;
2155 
2156 	while (npgs) {
2157 		/*
2158 		 * Acquire the hash bucket.
2159 		 */
2160 		hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K);
2161 		ASSERT(hmebp);
2162 
2163 		/*
2164 		 * Find the hment block.
2165 		 */
2166 		hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2167 				TTE8K, flags);
2168 		ASSERT(hmeblkp);
2169 
2170 		do {
2171 			/*
2172 			 * Make the tte.
2173 			 */
2174 			pp = *pps;
2175 			sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2176 
2177 			/*
2178 			 * Add the translation.
2179 			 */
2180 			(void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2181 					vaddr, pps, flags);
2182 
2183 			/*
2184 			 * Goto next page.
2185 			 */
2186 			pps++;
2187 			npgs--;
2188 
2189 			/*
2190 			 * Goto next address.
2191 			 */
2192 			vaddr += MMU_PAGESIZE;
2193 
2194 			/*
2195 			 * Don't crossover into a different hmentblk.
2196 			 */
2197 			index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2198 			    (NHMENTS-1));
2199 
2200 		} while (index != 0 && npgs != 0);
2201 
2202 		/*
2203 		 * Release the hash bucket.
2204 		 */
2205 
2206 		sfmmu_tteload_release_hashbucket(hmebp);
2207 	}
2208 }
2209 
2210 /*
2211  * Construct a tte for a page:
2212  *
2213  * tte_valid = 1
2214  * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2215  * tte_size = size
2216  * tte_nfo = attr & HAT_NOFAULT
2217  * tte_ie = attr & HAT_STRUCTURE_LE
2218  * tte_hmenum = hmenum
2219  * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2220  * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2221  * tte_ref = 1 (optimization)
2222  * tte_wr_perm = attr & PROT_WRITE;
2223  * tte_no_sync = attr & HAT_NOSYNC
2224  * tte_lock = attr & SFMMU_LOCKTTE
2225  * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2226  * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2227  * tte_e = attr & SFMMU_SIDEFFECT
2228  * tte_priv = !(attr & PROT_USER)
2229  * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2230  * tte_glb = 0
2231  */
2232 void
2233 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2234 {
2235 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2236 
2237 	ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2238 	ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2239 
2240 	if (TTE_IS_NOSYNC(ttep)) {
2241 		TTE_SET_REF(ttep);
2242 		if (TTE_IS_WRITABLE(ttep)) {
2243 			TTE_SET_MOD(ttep);
2244 		}
2245 	}
2246 	if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2247 		panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2248 	}
2249 }
2250 
2251 /*
2252  * This function will add a translation to the hme_blk and allocate the
2253  * hme_blk if one does not exist.
2254  * If a page structure is specified then it will add the
2255  * corresponding hment to the mapping list.
2256  * It will also update the hmenum field for the tte.
2257  */
2258 void
2259 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2260 	uint_t flags)
2261 {
2262 	(void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags);
2263 }
2264 
2265 /*
2266  * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2267  * Assumes that a particular page size may only be resident in one TSB.
2268  */
2269 static void
2270 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2271 {
2272 	struct tsb_info *tsbinfop = NULL;
2273 	uint64_t tag;
2274 	struct tsbe *tsbe_addr;
2275 	uint64_t tsb_base;
2276 	uint_t tsb_size;
2277 	int vpshift = MMU_PAGESHIFT;
2278 	int phys = 0;
2279 
2280 	if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2281 		phys = ktsb_phys;
2282 		if (ttesz >= TTE4M) {
2283 #ifndef sun4v
2284 			ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2285 #endif
2286 			tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2287 			tsb_size = ktsb4m_szcode;
2288 		} else {
2289 			tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2290 			tsb_size = ktsb_szcode;
2291 		}
2292 	} else {
2293 		SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2294 
2295 		/*
2296 		 * If there isn't a TSB for this page size, or the TSB is
2297 		 * swapped out, there is nothing to do.  Note that the latter
2298 		 * case seems impossible but can occur if hat_pageunload()
2299 		 * is called on an ISM mapping while the process is swapped
2300 		 * out.
2301 		 */
2302 		if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2303 			return;
2304 
2305 		/*
2306 		 * If another thread is in the middle of relocating a TSB
2307 		 * we can't unload the entry so set a flag so that the
2308 		 * TSB will be flushed before it can be accessed by the
2309 		 * process.
2310 		 */
2311 		if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2312 			if (ttep == NULL)
2313 				tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2314 			return;
2315 		}
2316 #if defined(UTSB_PHYS)
2317 		phys = 1;
2318 		tsb_base = (uint64_t)tsbinfop->tsb_pa;
2319 #else
2320 		tsb_base = (uint64_t)tsbinfop->tsb_va;
2321 #endif
2322 		tsb_size = tsbinfop->tsb_szc;
2323 	}
2324 	if (ttesz >= TTE4M)
2325 		vpshift = MMU_PAGESHIFT4M;
2326 
2327 	tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2328 	tag = sfmmu_make_tsbtag(vaddr);
2329 
2330 	if (ttep == NULL) {
2331 		sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2332 	} else {
2333 		if (ttesz >= TTE4M) {
2334 			SFMMU_STAT(sf_tsb_load4m);
2335 		} else {
2336 			SFMMU_STAT(sf_tsb_load8k);
2337 		}
2338 
2339 		sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2340 	}
2341 }
2342 
2343 /*
2344  * Unmap all entries from [start, end) matching the given page size.
2345  *
2346  * This function is used primarily to unmap replicated 64K or 512K entries
2347  * from the TSB that are inserted using the base page size TSB pointer, but
2348  * it may also be called to unmap a range of addresses from the TSB.
2349  */
2350 void
2351 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2352 {
2353 	struct tsb_info *tsbinfop;
2354 	uint64_t tag;
2355 	struct tsbe *tsbe_addr;
2356 	caddr_t vaddr;
2357 	uint64_t tsb_base;
2358 	int vpshift, vpgsz;
2359 	uint_t tsb_size;
2360 	int phys = 0;
2361 
2362 	/*
2363 	 * Assumptions:
2364 	 *  If ttesz == 8K, 64K or 512K, we walk through the range 8K
2365 	 *  at a time shooting down any valid entries we encounter.
2366 	 *
2367 	 *  If ttesz >= 4M we walk the range 4M at a time shooting
2368 	 *  down any valid mappings we find.
2369 	 */
2370 	if (sfmmup == ksfmmup) {
2371 		phys = ktsb_phys;
2372 		if (ttesz >= TTE4M) {
2373 #ifndef sun4v
2374 			ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2375 #endif
2376 			tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2377 			tsb_size = ktsb4m_szcode;
2378 		} else {
2379 			tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2380 			tsb_size = ktsb_szcode;
2381 		}
2382 	} else {
2383 		SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2384 
2385 		/*
2386 		 * If there isn't a TSB for this page size, or the TSB is
2387 		 * swapped out, there is nothing to do.  Note that the latter
2388 		 * case seems impossible but can occur if hat_pageunload()
2389 		 * is called on an ISM mapping while the process is swapped
2390 		 * out.
2391 		 */
2392 		if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2393 			return;
2394 
2395 		/*
2396 		 * If another thread is in the middle of relocating a TSB
2397 		 * we can't unload the entry so set a flag so that the
2398 		 * TSB will be flushed before it can be accessed by the
2399 		 * process.
2400 		 */
2401 		if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2402 			tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2403 			return;
2404 		}
2405 #if defined(UTSB_PHYS)
2406 		phys = 1;
2407 		tsb_base = (uint64_t)tsbinfop->tsb_pa;
2408 #else
2409 		tsb_base = (uint64_t)tsbinfop->tsb_va;
2410 #endif
2411 		tsb_size = tsbinfop->tsb_szc;
2412 	}
2413 	if (ttesz >= TTE4M) {
2414 		vpshift = MMU_PAGESHIFT4M;
2415 		vpgsz = MMU_PAGESIZE4M;
2416 	} else {
2417 		vpshift = MMU_PAGESHIFT;
2418 		vpgsz = MMU_PAGESIZE;
2419 	}
2420 
2421 	for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2422 		tag = sfmmu_make_tsbtag(vaddr);
2423 		tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2424 		sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2425 	}
2426 }
2427 
2428 /*
2429  * Select the optimum TSB size given the number of mappings
2430  * that need to be cached.
2431  */
2432 static int
2433 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2434 {
2435 	int szc = 0;
2436 
2437 #ifdef DEBUG
2438 	if (tsb_grow_stress) {
2439 		uint32_t randval = (uint32_t)gettick() >> 4;
2440 		return (randval % (tsb_max_growsize + 1));
2441 	}
2442 #endif	/* DEBUG */
2443 
2444 	while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2445 		szc++;
2446 	return (szc);
2447 }
2448 
2449 /*
2450  * This function will add a translation to the hme_blk and allocate the
2451  * hme_blk if one does not exist.
2452  * If a page structure is specified then it will add the
2453  * corresponding hment to the mapping list.
2454  * It will also update the hmenum field for the tte.
2455  * Furthermore, it attempts to create a large page translation
2456  * for <addr,hat> at page array pps.  It assumes addr and first
2457  * pp is correctly aligned.  It returns 0 if successful and 1 otherwise.
2458  */
2459 static int
2460 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2461 	page_t **pps, uint_t flags)
2462 {
2463 	struct hmehash_bucket *hmebp;
2464 	struct hme_blk *hmeblkp;
2465 	int 	ret;
2466 	uint_t	size;
2467 
2468 	/*
2469 	 * Get mapping size.
2470 	 */
2471 	size = TTE_CSZ(ttep);
2472 	ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2473 
2474 	/*
2475 	 * Acquire the hash bucket.
2476 	 */
2477 	hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size);
2478 	ASSERT(hmebp);
2479 
2480 	/*
2481 	 * Find the hment block.
2482 	 */
2483 	hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags);
2484 	ASSERT(hmeblkp);
2485 
2486 	/*
2487 	 * Add the translation.
2488 	 */
2489 	ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags);
2490 
2491 	/*
2492 	 * Release the hash bucket.
2493 	 */
2494 	sfmmu_tteload_release_hashbucket(hmebp);
2495 
2496 	return (ret);
2497 }
2498 
2499 /*
2500  * Function locks and returns a pointer to the hash bucket for vaddr and size.
2501  */
2502 static struct hmehash_bucket *
2503 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size)
2504 {
2505 	struct hmehash_bucket *hmebp;
2506 	int hmeshift;
2507 
2508 	hmeshift = HME_HASH_SHIFT(size);
2509 
2510 	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
2511 
2512 	SFMMU_HASH_LOCK(hmebp);
2513 
2514 	return (hmebp);
2515 }
2516 
2517 /*
2518  * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2519  * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is
2520  * allocated.
2521  */
2522 static struct hme_blk *
2523 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2524 	caddr_t vaddr, uint_t size, uint_t flags)
2525 {
2526 	hmeblk_tag hblktag;
2527 	int hmeshift;
2528 	struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2529 	uint64_t hblkpa, prevpa;
2530 	struct kmem_cache *sfmmu_cache;
2531 	uint_t forcefree;
2532 
2533 	hblktag.htag_id = sfmmup;
2534 	hmeshift = HME_HASH_SHIFT(size);
2535 	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2536 	hblktag.htag_rehash = HME_HASH_REHASH(size);
2537 
2538 ttearray_realloc:
2539 
2540 	HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa,
2541 	    pr_hblk, prevpa, &list);
2542 
2543 	/*
2544 	 * We block until hblk_reserve_lock is released; it's held by
2545 	 * the thread, temporarily using hblk_reserve, until hblk_reserve is
2546 	 * replaced by a hblk from sfmmu8_cache.
2547 	 */
2548 	if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2549 	    hblk_reserve_thread != curthread) {
2550 		SFMMU_HASH_UNLOCK(hmebp);
2551 		mutex_enter(&hblk_reserve_lock);
2552 		mutex_exit(&hblk_reserve_lock);
2553 		SFMMU_STAT(sf_hblk_reserve_hit);
2554 		SFMMU_HASH_LOCK(hmebp);
2555 		goto ttearray_realloc;
2556 	}
2557 
2558 	if (hmeblkp == NULL) {
2559 		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2560 		    hblktag, flags);
2561 	} else {
2562 		/*
2563 		 * It is possible for 8k and 64k hblks to collide since they
2564 		 * have the same rehash value. This is because we
2565 		 * lazily free hblks and 8K/64K blks could be lingering.
2566 		 * If we find size mismatch we free the block and & try again.
2567 		 */
2568 		if (get_hblk_ttesz(hmeblkp) != size) {
2569 			ASSERT(!hmeblkp->hblk_vcnt);
2570 			ASSERT(!hmeblkp->hblk_hmecnt);
2571 			sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk);
2572 			sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
2573 			goto ttearray_realloc;
2574 		}
2575 		if (hmeblkp->hblk_shw_bit) {
2576 			/*
2577 			 * if the hblk was previously used as a shadow hblk then
2578 			 * we will change it to a normal hblk
2579 			 */
2580 			if (hmeblkp->hblk_shw_mask) {
2581 				sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
2582 				ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
2583 				goto ttearray_realloc;
2584 			} else {
2585 				hmeblkp->hblk_shw_bit = 0;
2586 			}
2587 		}
2588 		SFMMU_STAT(sf_hblk_hit);
2589 	}
2590 
2591 	/*
2592 	 * hat_memload() should never call kmem_cache_free(); see block
2593 	 * comment showing the stacktrace in sfmmu_hblk_alloc();
2594 	 * enqueue each hblk in the list to reserve list if it's created
2595 	 * from sfmmu8_cache *and* sfmmup == KHATID.
2596 	 */
2597 	forcefree = (sfmmup == KHATID) ? 1 : 0;
2598 	while ((pr_hblk = list) != NULL) {
2599 		list = pr_hblk->hblk_next;
2600 		sfmmu_cache = get_hblk_cache(pr_hblk);
2601 		if ((sfmmu_cache == sfmmu8_cache) &&
2602 		    sfmmu_put_free_hblk(pr_hblk, forcefree))
2603 			continue;
2604 
2605 		ASSERT(sfmmup != KHATID);
2606 		kmem_cache_free(sfmmu_cache, pr_hblk);
2607 	}
2608 
2609 	ASSERT(get_hblk_ttesz(hmeblkp) == size);
2610 	ASSERT(!hmeblkp->hblk_shw_bit);
2611 
2612 	return (hmeblkp);
2613 }
2614 
2615 /*
2616  * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
2617  * otherwise.
2618  */
2619 static int
2620 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
2621 	caddr_t vaddr, page_t **pps, uint_t flags)
2622 {
2623 	page_t *pp = *pps;
2624 	int hmenum, size, remap;
2625 	tte_t tteold, flush_tte;
2626 #ifdef DEBUG
2627 	tte_t orig_old;
2628 #endif /* DEBUG */
2629 	struct sf_hment *sfhme;
2630 	kmutex_t *pml, *pmtx;
2631 	hatlock_t *hatlockp;
2632 
2633 	/*
2634 	 * remove this panic when we decide to let user virtual address
2635 	 * space be >= USERLIMIT.
2636 	 */
2637 	if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
2638 		panic("user addr %p in kernel space", vaddr);
2639 #if defined(TTE_IS_GLOBAL)
2640 	if (TTE_IS_GLOBAL(ttep))
2641 		panic("sfmmu_tteload: creating global tte");
2642 #endif
2643 
2644 #ifdef DEBUG
2645 	if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
2646 	    !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
2647 		panic("sfmmu_tteload: non cacheable memory tte");
2648 #endif /* DEBUG */
2649 
2650 	if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
2651 	    !TTE_IS_MOD(ttep)) {
2652 		/*
2653 		 * Don't load TSB for dummy as in ISM.  Also don't preload
2654 		 * the TSB if the TTE isn't writable since we're likely to
2655 		 * fault on it again -- preloading can be fairly expensive.
2656 		 */
2657 		flags |= SFMMU_NO_TSBLOAD;
2658 	}
2659 
2660 	size = TTE_CSZ(ttep);
2661 	switch (size) {
2662 	case TTE8K:
2663 		SFMMU_STAT(sf_tteload8k);
2664 		break;
2665 	case TTE64K:
2666 		SFMMU_STAT(sf_tteload64k);
2667 		break;
2668 	case TTE512K:
2669 		SFMMU_STAT(sf_tteload512k);
2670 		break;
2671 	case TTE4M:
2672 		SFMMU_STAT(sf_tteload4m);
2673 		break;
2674 	case (TTE32M):
2675 		SFMMU_STAT(sf_tteload32m);
2676 		ASSERT(mmu_page_sizes == max_mmu_page_sizes);
2677 		break;
2678 	case (TTE256M):
2679 		SFMMU_STAT(sf_tteload256m);
2680 		ASSERT(mmu_page_sizes == max_mmu_page_sizes);
2681 		break;
2682 	}
2683 
2684 	ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2685 
2686 	HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
2687 
2688 	/*
2689 	 * Need to grab mlist lock here so that pageunload
2690 	 * will not change tte behind us.
2691 	 */
2692 	if (pp) {
2693 		pml = sfmmu_mlist_enter(pp);
2694 	}
2695 
2696 	sfmmu_copytte(&sfhme->hme_tte, &tteold);
2697 	/*
2698 	 * Look for corresponding hment and if valid verify
2699 	 * pfns are equal.
2700 	 */
2701 	remap = TTE_IS_VALID(&tteold);
2702 	if (remap) {
2703 		pfn_t	new_pfn, old_pfn;
2704 
2705 		old_pfn = TTE_TO_PFN(vaddr, &tteold);
2706 		new_pfn = TTE_TO_PFN(vaddr, ttep);
2707 
2708 		if (flags & HAT_LOAD_REMAP) {
2709 			/* make sure we are remapping same type of pages */
2710 			if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
2711 				panic("sfmmu_tteload - tte remap io<->memory");
2712 			}
2713 			if (old_pfn != new_pfn &&
2714 			    (pp != NULL || sfhme->hme_page != NULL)) {
2715 				panic("sfmmu_tteload - tte remap pp != NULL");
2716 			}
2717 		} else if (old_pfn != new_pfn) {
2718 			panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
2719 			    (void *)hmeblkp);
2720 		}
2721 		ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
2722 	}
2723 
2724 	if (pp) {
2725 		if (size == TTE8K) {
2726 			/*
2727 			 * Handle VAC consistency
2728 			 */
2729 			if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
2730 				sfmmu_vac_conflict(sfmmup, vaddr, pp);
2731 			}
2732 
2733 			if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
2734 				pmtx = sfmmu_page_enter(pp);
2735 				PP_CLRRO(pp);
2736 				sfmmu_page_exit(pmtx);
2737 			} else if (!PP_ISMAPPED(pp) &&
2738 			    (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
2739 				pmtx = sfmmu_page_enter(pp);
2740 				if (!(PP_ISMOD(pp))) {
2741 					PP_SETRO(pp);
2742 				}
2743 				sfmmu_page_exit(pmtx);
2744 			}
2745 
2746 		} else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
2747 			/*
2748 			 * sfmmu_pagearray_setup failed so return
2749 			 */
2750 			sfmmu_mlist_exit(pml);
2751 			return (1);
2752 		}
2753 	}
2754 
2755 	/*
2756 	 * Make sure hment is not on a mapping list.
2757 	 */
2758 	ASSERT(remap || (sfhme->hme_page == NULL));
2759 
2760 	/* if it is not a remap then hme->next better be NULL */
2761 	ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
2762 
2763 	if (flags & HAT_LOAD_LOCK) {
2764 		if (((int)hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
2765 			panic("too high lckcnt-hmeblk %p",
2766 			    (void *)hmeblkp);
2767 		}
2768 		atomic_add_16(&hmeblkp->hblk_lckcnt, 1);
2769 
2770 		HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
2771 	}
2772 
2773 	if (pp && PP_ISNC(pp)) {
2774 		/*
2775 		 * If the physical page is marked to be uncacheable, like
2776 		 * by a vac conflict, make sure the new mapping is also
2777 		 * uncacheable.
2778 		 */
2779 		TTE_CLR_VCACHEABLE(ttep);
2780 		ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
2781 	}
2782 	ttep->tte_hmenum = hmenum;
2783 
2784 #ifdef DEBUG
2785 	orig_old = tteold;
2786 #endif /* DEBUG */
2787 
2788 	while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
2789 		if ((sfmmup == KHATID) &&
2790 		    (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
2791 			sfmmu_copytte(&sfhme->hme_tte, &tteold);
2792 		}
2793 #ifdef DEBUG
2794 		chk_tte(&orig_old, &tteold, ttep, hmeblkp);
2795 #endif /* DEBUG */
2796 	}
2797 
2798 	if (!TTE_IS_VALID(&tteold)) {
2799 
2800 		atomic_add_16(&hmeblkp->hblk_vcnt, 1);
2801 		atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1);
2802 
2803 		/*
2804 		 * HAT_RELOAD_SHARE has been deprecated with lpg DISM.
2805 		 */
2806 
2807 		if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
2808 		    sfmmup != ksfmmup) {
2809 			/*
2810 			 * If this is the first large mapping for the process
2811 			 * we must force any CPUs running this process to TL=0
2812 			 * where they will reload the HAT flags from the
2813 			 * tsbmiss area.  This is necessary to make the large
2814 			 * mappings we are about to load visible to those CPUs;
2815 			 * otherwise they'll loop forever calling pagefault()
2816 			 * since we don't search large hash chains by default.
2817 			 */
2818 			hatlockp = sfmmu_hat_enter(sfmmup);
2819 			if (size == TTE512K &&
2820 			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_512K_FLAG)) {
2821 				SFMMU_FLAGS_SET(sfmmup, HAT_512K_FLAG);
2822 				sfmmu_sync_mmustate(sfmmup);
2823 			} else if (size == TTE4M &&
2824 			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) {
2825 				SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG);
2826 				sfmmu_sync_mmustate(sfmmup);
2827 			} else if (size == TTE64K &&
2828 			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG)) {
2829 				SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG);
2830 				/* no sync mmustate; 64K shares 8K hashes */
2831 			} else if (mmu_page_sizes == max_mmu_page_sizes) {
2832 			    if (size == TTE32M &&
2833 				!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) {
2834 				SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG);
2835 				sfmmu_sync_mmustate(sfmmup);
2836 			    } else if (size == TTE256M &&
2837 				!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) {
2838 				SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG);
2839 				sfmmu_sync_mmustate(sfmmup);
2840 			    }
2841 			}
2842 			if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
2843 			    !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
2844 				SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
2845 			}
2846 			sfmmu_hat_exit(hatlockp);
2847 		}
2848 	}
2849 	ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
2850 
2851 	flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
2852 	    hw_tte.tte_intlo;
2853 	flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
2854 	    hw_tte.tte_inthi;
2855 
2856 	if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
2857 		/*
2858 		 * If remap and new tte differs from old tte we need
2859 		 * to sync the mod bit and flush TLB/TSB.  We don't
2860 		 * need to sync ref bit because we currently always set
2861 		 * ref bit in tteload.
2862 		 */
2863 		ASSERT(TTE_IS_REF(ttep));
2864 		if (TTE_IS_MOD(&tteold)) {
2865 			sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
2866 		}
2867 		sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
2868 		xt_sync(sfmmup->sfmmu_cpusran);
2869 	}
2870 
2871 	if ((flags & SFMMU_NO_TSBLOAD) == 0) {
2872 		/*
2873 		 * We only preload 8K and 4M mappings into the TSB, since
2874 		 * 64K and 512K mappings are replicated and hence don't
2875 		 * have a single, unique TSB entry. Ditto for 32M/256M.
2876 		 */
2877 		if (size == TTE8K || size == TTE4M) {
2878 			hatlockp = sfmmu_hat_enter(sfmmup);
2879 			sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, size);
2880 			sfmmu_hat_exit(hatlockp);
2881 		}
2882 	}
2883 	if (pp) {
2884 		if (!remap) {
2885 			HME_ADD(sfhme, pp);
2886 			atomic_add_16(&hmeblkp->hblk_hmecnt, 1);
2887 			ASSERT(hmeblkp->hblk_hmecnt > 0);
2888 
2889 			/*
2890 			 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
2891 			 * see pageunload() for comment.
2892 			 */
2893 		}
2894 		sfmmu_mlist_exit(pml);
2895 	}
2896 
2897 	return (0);
2898 }
2899 /*
2900  * Function unlocks hash bucket.
2901  */
2902 static void
2903 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
2904 {
2905 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
2906 	SFMMU_HASH_UNLOCK(hmebp);
2907 }
2908 
2909 /*
2910  * function which checks and sets up page array for a large
2911  * translation.  Will set p_vcolor, p_index, p_ro fields.
2912  * Assumes addr and pfnum of first page are properly aligned.
2913  * Will check for physical contiguity. If check fails it return
2914  * non null.
2915  */
2916 static int
2917 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
2918 {
2919 	int 	i, index, ttesz, osz;
2920 	pfn_t	pfnum;
2921 	pgcnt_t	npgs;
2922 	int cflags = 0;
2923 	page_t *pp, *pp1;
2924 	kmutex_t *pmtx;
2925 	int vac_err = 0;
2926 	int newidx = 0;
2927 
2928 	ttesz = TTE_CSZ(ttep);
2929 
2930 	ASSERT(ttesz > TTE8K);
2931 
2932 	npgs = TTEPAGES(ttesz);
2933 	index = PAGESZ_TO_INDEX(ttesz);
2934 
2935 	pfnum = (*pps)->p_pagenum;
2936 	ASSERT(IS_P2ALIGNED(pfnum, npgs));
2937 
2938 	/*
2939 	 * Save the first pp so we can do HAT_TMPNC at the end.
2940 	 */
2941 	pp1 = *pps;
2942 	osz = fnd_mapping_sz(pp1);
2943 
2944 	for (i = 0; i < npgs; i++, pps++) {
2945 		pp = *pps;
2946 		ASSERT(PAGE_LOCKED(pp));
2947 		ASSERT(pp->p_szc >= ttesz);
2948 		ASSERT(pp->p_szc == pp1->p_szc);
2949 		ASSERT(sfmmu_mlist_held(pp));
2950 
2951 		/*
2952 		 * XXX is it possible to maintain P_RO on the root only?
2953 		 */
2954 		if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
2955 			pmtx = sfmmu_page_enter(pp);
2956 			PP_CLRRO(pp);
2957 			sfmmu_page_exit(pmtx);
2958 		} else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
2959 		    !PP_ISMOD(pp)) {
2960 			pmtx = sfmmu_page_enter(pp);
2961 			if (!(PP_ISMOD(pp))) {
2962 				PP_SETRO(pp);
2963 			}
2964 			sfmmu_page_exit(pmtx);
2965 		}
2966 
2967 		/*
2968 		 * If this is a remap we skip vac & contiguity checks.
2969 		 */
2970 		if (remap)
2971 			continue;
2972 
2973 		/*
2974 		 * set p_vcolor and detect any vac conflicts.
2975 		 */
2976 		if (vac_err == 0) {
2977 			vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
2978 
2979 		}
2980 
2981 		/*
2982 		 * Save current index in case we need to undo it.
2983 		 * Note: "PAGESZ_TO_INDEX(sz)	(1 << (sz))"
2984 		 *	"SFMMU_INDEX_SHIFT	6"
2985 		 *	 "SFMMU_INDEX_MASK	((1 << SFMMU_INDEX_SHIFT) - 1)"
2986 		 *	 "PP_MAPINDEX(p_index)	(p_index & SFMMU_INDEX_MASK)"
2987 		 *
2988 		 * So:	index = PAGESZ_TO_INDEX(ttesz);
2989 		 *	if ttesz == 1 then index = 0x2
2990 		 *		    2 then index = 0x4
2991 		 *		    3 then index = 0x8
2992 		 *		    4 then index = 0x10
2993 		 *		    5 then index = 0x20
2994 		 * The code below checks if it's a new pagesize (ie, newidx)
2995 		 * in case we need to take it back out of p_index,
2996 		 * and then or's the new index into the existing index.
2997 		 */
2998 		if ((PP_MAPINDEX(pp) & index) == 0)
2999 			newidx = 1;
3000 		pp->p_index = (PP_MAPINDEX(pp) | index);
3001 
3002 		/*
3003 		 * contiguity check
3004 		 */
3005 		if (pp->p_pagenum != pfnum) {
3006 			/*
3007 			 * If we fail the contiguity test then
3008 			 * the only thing we need to fix is the p_index field.
3009 			 * We might get a few extra flushes but since this
3010 			 * path is rare that is ok.  The p_ro field will
3011 			 * get automatically fixed on the next tteload to
3012 			 * the page.  NO TNC bit is set yet.
3013 			 */
3014 			while (i >= 0) {
3015 				pp = *pps;
3016 				if (newidx)
3017 					pp->p_index = (PP_MAPINDEX(pp) &
3018 					    ~index);
3019 				pps--;
3020 				i--;
3021 			}
3022 			return (1);
3023 		}
3024 		pfnum++;
3025 		addr += MMU_PAGESIZE;
3026 	}
3027 
3028 	if (vac_err) {
3029 		if (ttesz > osz) {
3030 			/*
3031 			 * There are some smaller mappings that causes vac
3032 			 * conflicts. Convert all existing small mappings to
3033 			 * TNC.
3034 			 */
3035 			SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3036 			sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3037 				npgs);
3038 		} else {
3039 			/* EMPTY */
3040 			/*
3041 			 * If there exists an big page mapping,
3042 			 * that means the whole existing big page
3043 			 * has TNC setting already. No need to covert to
3044 			 * TNC again.
3045 			 */
3046 			ASSERT(PP_ISTNC(pp1));
3047 		}
3048 	}
3049 
3050 	return (0);
3051 }
3052 
3053 /*
3054  * Routine that detects vac consistency for a large page. It also
3055  * sets virtual color for all pp's for this big mapping.
3056  */
3057 static int
3058 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3059 {
3060 	int vcolor, ocolor;
3061 
3062 	ASSERT(sfmmu_mlist_held(pp));
3063 
3064 	if (PP_ISNC(pp)) {
3065 		return (HAT_TMPNC);
3066 	}
3067 
3068 	vcolor = addr_to_vcolor(addr);
3069 	if (PP_NEWPAGE(pp)) {
3070 		PP_SET_VCOLOR(pp, vcolor);
3071 		return (0);
3072 	}
3073 
3074 	ocolor = PP_GET_VCOLOR(pp);
3075 	if (ocolor == vcolor) {
3076 		return (0);
3077 	}
3078 
3079 	if (!PP_ISMAPPED(pp)) {
3080 		/*
3081 		 * Previous user of page had a differnet color
3082 		 * but since there are no current users
3083 		 * we just flush the cache and change the color.
3084 		 * As an optimization for large pages we flush the
3085 		 * entire cache of that color and set a flag.
3086 		 */
3087 		SFMMU_STAT(sf_pgcolor_conflict);
3088 		if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3089 			CacheColor_SetFlushed(*cflags, ocolor);
3090 			sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3091 		}
3092 		PP_SET_VCOLOR(pp, vcolor);
3093 		return (0);
3094 	}
3095 
3096 	/*
3097 	 * We got a real conflict with a current mapping.
3098 	 * set flags to start unencaching all mappings
3099 	 * and return failure so we restart looping
3100 	 * the pp array from the beginning.
3101 	 */
3102 	return (HAT_TMPNC);
3103 }
3104 
3105 /*
3106  * creates a large page shadow hmeblk for a tte.
3107  * The purpose of this routine is to allow us to do quick unloads because
3108  * the vm layer can easily pass a very large but sparsely populated range.
3109  */
3110 static struct hme_blk *
3111 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3112 {
3113 	struct hmehash_bucket *hmebp;
3114 	hmeblk_tag hblktag;
3115 	int hmeshift, size, vshift;
3116 	uint_t shw_mask, newshw_mask;
3117 	struct hme_blk *hmeblkp;
3118 
3119 	ASSERT(sfmmup != KHATID);
3120 	if (mmu_page_sizes == max_mmu_page_sizes) {
3121 		ASSERT(ttesz < TTE256M);
3122 	} else {
3123 		ASSERT(ttesz < TTE4M);
3124 		ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3125 		ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3126 	}
3127 
3128 	if (ttesz == TTE8K) {
3129 		size = TTE512K;
3130 	} else {
3131 		size = ++ttesz;
3132 	}
3133 
3134 	hblktag.htag_id = sfmmup;
3135 	hmeshift = HME_HASH_SHIFT(size);
3136 	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3137 	hblktag.htag_rehash = HME_HASH_REHASH(size);
3138 	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3139 
3140 	SFMMU_HASH_LOCK(hmebp);
3141 
3142 	HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3143 	ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3144 	if (hmeblkp == NULL) {
3145 		hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3146 			hblktag, flags);
3147 	}
3148 	ASSERT(hmeblkp);
3149 	if (!hmeblkp->hblk_shw_mask) {
3150 		/*
3151 		 * if this is a unused hblk it was just allocated or could
3152 		 * potentially be a previous large page hblk so we need to
3153 		 * set the shadow bit.
3154 		 */
3155 		hmeblkp->hblk_shw_bit = 1;
3156 	}
3157 	ASSERT(hmeblkp->hblk_shw_bit == 1);
3158 	vshift = vaddr_to_vshift(hblktag, vaddr, size);
3159 	ASSERT(vshift < 8);
3160 	/*
3161 	 * Atomically set shw mask bit
3162 	 */
3163 	do {
3164 		shw_mask = hmeblkp->hblk_shw_mask;
3165 		newshw_mask = shw_mask | (1 << vshift);
3166 		newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask,
3167 		    newshw_mask);
3168 	} while (newshw_mask != shw_mask);
3169 
3170 	SFMMU_HASH_UNLOCK(hmebp);
3171 
3172 	return (hmeblkp);
3173 }
3174 
3175 /*
3176  * This routine cleanup a previous shadow hmeblk and changes it to
3177  * a regular hblk.  This happens rarely but it is possible
3178  * when a process wants to use large pages and there are hblks still
3179  * lying around from the previous as that used these hmeblks.
3180  * The alternative was to cleanup the shadow hblks at unload time
3181  * but since so few user processes actually use large pages, it is
3182  * better to be lazy and cleanup at this time.
3183  */
3184 static void
3185 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3186 	struct hmehash_bucket *hmebp)
3187 {
3188 	caddr_t addr, endaddr;
3189 	int hashno, size;
3190 
3191 	ASSERT(hmeblkp->hblk_shw_bit);
3192 
3193 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3194 
3195 	if (!hmeblkp->hblk_shw_mask) {
3196 		hmeblkp->hblk_shw_bit = 0;
3197 		return;
3198 	}
3199 	addr = (caddr_t)get_hblk_base(hmeblkp);
3200 	endaddr = get_hblk_endaddr(hmeblkp);
3201 	size = get_hblk_ttesz(hmeblkp);
3202 	hashno = size - 1;
3203 	ASSERT(hashno > 0);
3204 	SFMMU_HASH_UNLOCK(hmebp);
3205 
3206 	sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3207 
3208 	SFMMU_HASH_LOCK(hmebp);
3209 }
3210 
3211 static void
3212 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3213 	int hashno)
3214 {
3215 	int hmeshift, shadow = 0;
3216 	hmeblk_tag hblktag;
3217 	struct hmehash_bucket *hmebp;
3218 	struct hme_blk *hmeblkp;
3219 	struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3220 	uint64_t hblkpa, prevpa, nx_pa;
3221 
3222 	ASSERT(hashno > 0);
3223 	hblktag.htag_id = sfmmup;
3224 	hblktag.htag_rehash = hashno;
3225 
3226 	hmeshift = HME_HASH_SHIFT(hashno);
3227 
3228 	while (addr < endaddr) {
3229 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3230 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3231 		SFMMU_HASH_LOCK(hmebp);
3232 		/* inline HME_HASH_SEARCH */
3233 		hmeblkp = hmebp->hmeblkp;
3234 		hblkpa = hmebp->hmeh_nextpa;
3235 		prevpa = 0;
3236 		pr_hblk = NULL;
3237 		while (hmeblkp) {
3238 			ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
3239 			if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3240 				/* found hme_blk */
3241 				if (hmeblkp->hblk_shw_bit) {
3242 					if (hmeblkp->hblk_shw_mask) {
3243 						shadow = 1;
3244 						sfmmu_shadow_hcleanup(sfmmup,
3245 						    hmeblkp, hmebp);
3246 						break;
3247 					} else {
3248 						hmeblkp->hblk_shw_bit = 0;
3249 					}
3250 				}
3251 
3252 				/*
3253 				 * Hblk_hmecnt and hblk_vcnt could be non zero
3254 				 * since hblk_unload() does not gurantee that.
3255 				 *
3256 				 * XXX - this could cause tteload() to spin
3257 				 * where sfmmu_shadow_hcleanup() is called.
3258 				 */
3259 			}
3260 
3261 			nx_hblk = hmeblkp->hblk_next;
3262 			nx_pa = hmeblkp->hblk_nextpa;
3263 			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3264 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa,
3265 					pr_hblk);
3266 				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
3267 			} else {
3268 				pr_hblk = hmeblkp;
3269 				prevpa = hblkpa;
3270 			}
3271 			hmeblkp = nx_hblk;
3272 			hblkpa = nx_pa;
3273 		}
3274 
3275 		SFMMU_HASH_UNLOCK(hmebp);
3276 
3277 		if (shadow) {
3278 			/*
3279 			 * We found another shadow hblk so cleaned its
3280 			 * children.  We need to go back and cleanup
3281 			 * the original hblk so we don't change the
3282 			 * addr.
3283 			 */
3284 			shadow = 0;
3285 		} else {
3286 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
3287 				(1 << hmeshift));
3288 		}
3289 	}
3290 	sfmmu_hblks_list_purge(&list);
3291 }
3292 
3293 /*
3294  * Release one hardware address translation lock on the given address range.
3295  */
3296 void
3297 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3298 {
3299 	struct hmehash_bucket *hmebp;
3300 	hmeblk_tag hblktag;
3301 	int hmeshift, hashno = 1;
3302 	struct hme_blk *hmeblkp, *list = NULL;
3303 	caddr_t endaddr;
3304 
3305 	ASSERT(sfmmup != NULL);
3306 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3307 
3308 	ASSERT((sfmmup == ksfmmup) ||
3309 		AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3310 	ASSERT((len & MMU_PAGEOFFSET) == 0);
3311 	endaddr = addr + len;
3312 	hblktag.htag_id = sfmmup;
3313 
3314 	/*
3315 	 * Spitfire supports 4 page sizes.
3316 	 * Most pages are expected to be of the smallest page size (8K) and
3317 	 * these will not need to be rehashed. 64K pages also don't need to be
3318 	 * rehashed because an hmeblk spans 64K of address space. 512K pages
3319 	 * might need 1 rehash and and 4M pages might need 2 rehashes.
3320 	 */
3321 	while (addr < endaddr) {
3322 		hmeshift = HME_HASH_SHIFT(hashno);
3323 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3324 		hblktag.htag_rehash = hashno;
3325 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3326 
3327 		SFMMU_HASH_LOCK(hmebp);
3328 
3329 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3330 		if (hmeblkp != NULL) {
3331 			/*
3332 			 * If we encounter a shadow hmeblk then
3333 			 * we know there are no valid hmeblks mapping
3334 			 * this address at this size or larger.
3335 			 * Just increment address by the smallest
3336 			 * page size.
3337 			 */
3338 			if (hmeblkp->hblk_shw_bit) {
3339 				addr += MMU_PAGESIZE;
3340 			} else {
3341 				addr = sfmmu_hblk_unlock(hmeblkp, addr,
3342 				    endaddr);
3343 			}
3344 			SFMMU_HASH_UNLOCK(hmebp);
3345 			hashno = 1;
3346 			continue;
3347 		}
3348 		SFMMU_HASH_UNLOCK(hmebp);
3349 
3350 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3351 			/*
3352 			 * We have traversed the whole list and rehashed
3353 			 * if necessary without finding the address to unlock
3354 			 * which should never happen.
3355 			 */
3356 			panic("sfmmu_unlock: addr not found. "
3357 			    "addr %p hat %p", (void *)addr, (void *)sfmmup);
3358 		} else {
3359 			hashno++;
3360 		}
3361 	}
3362 
3363 	sfmmu_hblks_list_purge(&list);
3364 }
3365 
3366 /*
3367  * Function to unlock a range of addresses in an hmeblk.  It returns the
3368  * next address that needs to be unlocked.
3369  * Should be called with the hash lock held.
3370  */
3371 static caddr_t
3372 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
3373 {
3374 	struct sf_hment *sfhme;
3375 	tte_t tteold, ttemod;
3376 	int ttesz, ret;
3377 
3378 	ASSERT(in_hblk_range(hmeblkp, addr));
3379 	ASSERT(hmeblkp->hblk_shw_bit == 0);
3380 
3381 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
3382 	ttesz = get_hblk_ttesz(hmeblkp);
3383 
3384 	HBLKTOHME(sfhme, hmeblkp, addr);
3385 	while (addr < endaddr) {
3386 readtte:
3387 		sfmmu_copytte(&sfhme->hme_tte, &tteold);
3388 		if (TTE_IS_VALID(&tteold)) {
3389 
3390 			ttemod = tteold;
3391 
3392 			ret = sfmmu_modifytte_try(&tteold, &ttemod,
3393 			    &sfhme->hme_tte);
3394 
3395 			if (ret < 0)
3396 				goto readtte;
3397 
3398 			if (hmeblkp->hblk_lckcnt == 0)
3399 				panic("zero hblk lckcnt");
3400 
3401 			if (((uintptr_t)addr + TTEBYTES(ttesz)) >
3402 			    (uintptr_t)endaddr)
3403 				panic("can't unlock large tte");
3404 
3405 			ASSERT(hmeblkp->hblk_lckcnt > 0);
3406 			atomic_add_16(&hmeblkp->hblk_lckcnt, -1);
3407 			HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
3408 		} else {
3409 			panic("sfmmu_hblk_unlock: invalid tte");
3410 		}
3411 		addr += TTEBYTES(ttesz);
3412 		sfhme++;
3413 	}
3414 	return (addr);
3415 }
3416 
3417 /*
3418  * Physical Address Mapping Framework
3419  *
3420  * General rules:
3421  *
3422  * (1) Applies only to seg_kmem memory pages. To make things easier,
3423  *     seg_kpm addresses are also accepted by the routines, but nothing
3424  *     is done with them since by definition their PA mappings are static.
3425  * (2) hat_add_callback() may only be called while holding the page lock
3426  *     SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
3427  *     or passing HAC_PAGELOCK flag.
3428  * (3) prehandler() and posthandler() may not call hat_add_callback() or
3429  *     hat_delete_callback(), nor should they allocate memory. Post quiesce
3430  *     callbacks may not sleep or acquire adaptive mutex locks.
3431  * (4) Either prehandler() or posthandler() (but not both) may be specified
3432  *     as being NULL.  Specifying an errhandler() is optional.
3433  *
3434  * Details of using the framework:
3435  *
3436  * registering a callback (hat_register_callback())
3437  *
3438  *	Pass prehandler, posthandler, errhandler addresses
3439  *	as described below. If capture_cpus argument is nonzero,
3440  *	suspend callback to the prehandler will occur with CPUs
3441  *	captured and executing xc_loop() and CPUs will remain
3442  *	captured until after the posthandler suspend callback
3443  *	occurs.
3444  *
3445  * adding a callback (hat_add_callback())
3446  *
3447  *      as_pagelock();
3448  *	hat_add_callback();
3449  *      save returned pfn in private data structures or program registers;
3450  *      as_pageunlock();
3451  *
3452  * prehandler()
3453  *
3454  *	Stop all accesses by physical address to this memory page.
3455  *	Called twice: the first, PRESUSPEND, is a context safe to acquire
3456  *	adaptive locks. The second, SUSPEND, is called at high PIL with
3457  *	CPUs captured so adaptive locks may NOT be acquired (and all spin
3458  *	locks must be XCALL_PIL or higher locks).
3459  *
3460  *	May return the following errors:
3461  *		EIO:	A fatal error has occurred. This will result in panic.
3462  *		EAGAIN:	The page cannot be suspended. This will fail the
3463  *			relocation.
3464  *		0:	Success.
3465  *
3466  * posthandler()
3467  *
3468  *      Save new pfn in private data structures or program registers;
3469  *	not allowed to fail (non-zero return values will result in panic).
3470  *
3471  * errhandler()
3472  *
3473  *	called when an error occurs related to the callback.  Currently
3474  *	the only such error is HAT_CB_ERR_LEAKED which indicates that
3475  *	a page is being freed, but there are still outstanding callback(s)
3476  *	registered on the page.
3477  *
3478  * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
3479  *
3480  *	stop using physical address
3481  *	hat_delete_callback();
3482  *
3483  */
3484 
3485 /*
3486  * Register a callback class.  Each subsystem should do this once and
3487  * cache the id_t returned for use in setting up and tearing down callbacks.
3488  *
3489  * There is no facility for removing callback IDs once they are created;
3490  * the "key" should be unique for each module, so in case a module is unloaded
3491  * and subsequently re-loaded, we can recycle the module's previous entry.
3492  */
3493 id_t
3494 hat_register_callback(int key,
3495 	int (*prehandler)(caddr_t, uint_t, uint_t, void *),
3496 	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
3497 	int (*errhandler)(caddr_t, uint_t, uint_t, void *),
3498 	int capture_cpus)
3499 {
3500 	id_t id;
3501 
3502 	/*
3503 	 * Search the table for a pre-existing callback associated with
3504 	 * the identifier "key".  If one exists, we re-use that entry in
3505 	 * the table for this instance, otherwise we assign the next
3506 	 * available table slot.
3507 	 */
3508 	for (id = 0; id < sfmmu_max_cb_id; id++) {
3509 		if (sfmmu_cb_table[id].key == key)
3510 			break;
3511 	}
3512 
3513 	if (id == sfmmu_max_cb_id) {
3514 		id = sfmmu_cb_nextid++;
3515 		if (id >= sfmmu_max_cb_id)
3516 			panic("hat_register_callback: out of callback IDs");
3517 	}
3518 
3519 	ASSERT(prehandler != NULL || posthandler != NULL);
3520 
3521 	sfmmu_cb_table[id].key = key;
3522 	sfmmu_cb_table[id].prehandler = prehandler;
3523 	sfmmu_cb_table[id].posthandler = posthandler;
3524 	sfmmu_cb_table[id].errhandler = errhandler;
3525 	sfmmu_cb_table[id].capture_cpus = capture_cpus;
3526 
3527 	return (id);
3528 }
3529 
3530 #define	HAC_COOKIE_NONE	(void *)-1
3531 
3532 /*
3533  * Add relocation callbacks to the specified addr/len which will be called
3534  * when relocating the associated page. See the description of pre and
3535  * posthandler above for more details.
3536  *
3537  * If HAC_PAGELOCK is included in flags, the underlying memory page is
3538  * locked internally so the caller must be able to deal with the callback
3539  * running even before this function has returned.  If HAC_PAGELOCK is not
3540  * set, it is assumed that the underlying memory pages are locked.
3541  *
3542  * Since the caller must track the individual page boundaries anyway,
3543  * we only allow a callback to be added to a single page (large
3544  * or small).  Thus [addr, addr + len) MUST be contained within a single
3545  * page.
3546  *
3547  * Registering multiple callbacks on the same [addr, addr+len) is supported,
3548  * _provided_that_ a unique parameter is specified for each callback.
3549  * If multiple callbacks are registered on the same range the callback will
3550  * be invoked with each unique parameter. Registering the same callback with
3551  * the same argument more than once will result in corrupted kernel state.
3552  *
3553  * Returns the pfn of the underlying kernel page in *rpfn
3554  * on success, or PFN_INVALID on failure.
3555  *
3556  * cookiep (if passed) provides storage space for an opaque cookie
3557  * to return later to hat_delete_callback(). This cookie makes the callback
3558  * deletion significantly quicker by avoiding a potentially lengthy hash
3559  * search.
3560  *
3561  * Returns values:
3562  *    0:      success
3563  *    ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
3564  *    EINVAL: callback ID is not valid
3565  *    ENXIO:  ["vaddr", "vaddr" + len) is not mapped in the kernel's address
3566  *            space
3567  *    ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
3568  */
3569 int
3570 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
3571 	void *pvt, pfn_t *rpfn, void **cookiep)
3572 {
3573 	struct 		hmehash_bucket *hmebp;
3574 	hmeblk_tag 	hblktag;
3575 	struct hme_blk	*hmeblkp;
3576 	int 		hmeshift, hashno;
3577 	caddr_t 	saddr, eaddr, baseaddr;
3578 	struct pa_hment *pahmep;
3579 	struct sf_hment *sfhmep, *osfhmep;
3580 	kmutex_t	*pml;
3581 	tte_t   	tte;
3582 	page_t		*pp;
3583 	vnode_t		*vp;
3584 	u_offset_t	off;
3585 	pfn_t		pfn;
3586 	int		kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
3587 	int		locked = 0;
3588 
3589 	/*
3590 	 * For KPM mappings, just return the physical address since we
3591 	 * don't need to register any callbacks.
3592 	 */
3593 	if (IS_KPM_ADDR(vaddr)) {
3594 		uint64_t paddr;
3595 		SFMMU_KPM_VTOP(vaddr, paddr);
3596 		*rpfn = btop(paddr);
3597 		if (cookiep != NULL)
3598 			*cookiep = HAC_COOKIE_NONE;
3599 		return (0);
3600 	}
3601 
3602 	if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
3603 		*rpfn = PFN_INVALID;
3604 		return (EINVAL);
3605 	}
3606 
3607 	if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
3608 		*rpfn = PFN_INVALID;
3609 		return (ENOMEM);
3610 	}
3611 
3612 	sfhmep = &pahmep->sfment;
3613 
3614 	saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
3615 	eaddr = saddr + len;
3616 
3617 rehash:
3618 	/* Find the mapping(s) for this page */
3619 	for (hashno = TTE64K, hmeblkp = NULL;
3620 	    hmeblkp == NULL && hashno <= mmu_hashcnt;
3621 	    hashno++) {
3622 		hmeshift = HME_HASH_SHIFT(hashno);
3623 		hblktag.htag_id = ksfmmup;
3624 		hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
3625 		hblktag.htag_rehash = hashno;
3626 		hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
3627 
3628 		SFMMU_HASH_LOCK(hmebp);
3629 
3630 		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3631 
3632 		if (hmeblkp == NULL)
3633 			SFMMU_HASH_UNLOCK(hmebp);
3634 	}
3635 
3636 	if (hmeblkp == NULL) {
3637 		kmem_cache_free(pa_hment_cache, pahmep);
3638 		*rpfn = PFN_INVALID;
3639 		return (ENXIO);
3640 	}
3641 
3642 	HBLKTOHME(osfhmep, hmeblkp, saddr);
3643 	sfmmu_copytte(&osfhmep->hme_tte, &tte);
3644 
3645 	if (!TTE_IS_VALID(&tte)) {
3646 		SFMMU_HASH_UNLOCK(hmebp);
3647 		kmem_cache_free(pa_hment_cache, pahmep);
3648 		*rpfn = PFN_INVALID;
3649 		return (ENXIO);
3650 	}
3651 
3652 	/*
3653 	 * Make sure the boundaries for the callback fall within this
3654 	 * single mapping.
3655 	 */
3656 	baseaddr = (caddr_t)get_hblk_base(hmeblkp);
3657 	ASSERT(saddr >= baseaddr);
3658 	if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
3659 		SFMMU_HASH_UNLOCK(hmebp);
3660 		kmem_cache_free(pa_hment_cache, pahmep);
3661 		*rpfn = PFN_INVALID;
3662 		return (ERANGE);
3663 	}
3664 
3665 	pfn = sfmmu_ttetopfn(&tte, vaddr);
3666 
3667 	/*
3668 	 * The pfn may not have a page_t underneath in which case we
3669 	 * just return it. This can happen if we are doing I/O to a
3670 	 * static portion of the kernel's address space, for instance.
3671 	 */
3672 	pp = osfhmep->hme_page;
3673 	if (pp == NULL) {
3674 		SFMMU_HASH_UNLOCK(hmebp);
3675 		kmem_cache_free(pa_hment_cache, pahmep);
3676 		*rpfn = pfn;
3677 		if (cookiep)
3678 			*cookiep = HAC_COOKIE_NONE;
3679 		return (0);
3680 	}
3681 	ASSERT(pp == PP_PAGEROOT(pp));
3682 
3683 	vp = pp->p_vnode;
3684 	off = pp->p_offset;
3685 
3686 	pml = sfmmu_mlist_enter(pp);
3687 
3688 	if (flags & HAC_PAGELOCK) {
3689 		if (!page_trylock(pp, SE_SHARED)) {
3690 			/*
3691 			 * Somebody is holding SE_EXCL lock. Might
3692 			 * even be hat_page_relocate(). Drop all
3693 			 * our locks, lookup the page in &kvp, and
3694 			 * retry. If it doesn't exist in &kvp, then
3695 			 * we must be dealing with a kernel mapped
3696 			 * page which doesn't actually belong to
3697 			 * segkmem so we punt.
3698 			 */
3699 			sfmmu_mlist_exit(pml);
3700 			SFMMU_HASH_UNLOCK(hmebp);
3701 			pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
3702 			if (pp == NULL) {
3703 				kmem_cache_free(pa_hment_cache, pahmep);
3704 				*rpfn = pfn;
3705 				if (cookiep)
3706 					*cookiep = HAC_COOKIE_NONE;
3707 				return (0);
3708 			}
3709 			page_unlock(pp);
3710 			goto rehash;
3711 		}
3712 		locked = 1;
3713 	}
3714 
3715 	if (!PAGE_LOCKED(pp) && !panicstr)
3716 		panic("hat_add_callback: page 0x%p not locked", pp);
3717 
3718 	if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
3719 	    pp->p_offset != off) {
3720 		/*
3721 		 * The page moved before we got our hands on it.  Drop
3722 		 * all the locks and try again.
3723 		 */
3724 		ASSERT((flags & HAC_PAGELOCK) != 0);
3725 		sfmmu_mlist_exit(pml);
3726 		SFMMU_HASH_UNLOCK(hmebp);
3727 		page_unlock(pp);
3728 		locked = 0;
3729 		goto rehash;
3730 	}
3731 
3732 	if (vp != &kvp) {
3733 		/*
3734 		 * This is not a segkmem page but another page which
3735 		 * has been kernel mapped. It had better have at least
3736 		 * a share lock on it. Return the pfn.
3737 		 */
3738 		sfmmu_mlist_exit(pml);
3739 		SFMMU_HASH_UNLOCK(hmebp);
3740 		if (locked)
3741 			page_unlock(pp);
3742 		kmem_cache_free(pa_hment_cache, pahmep);
3743 		ASSERT(PAGE_LOCKED(pp));
3744 		*rpfn = pfn;
3745 		if (cookiep)
3746 			*cookiep = HAC_COOKIE_NONE;
3747 		return (0);
3748 	}
3749 
3750 	/*
3751 	 * Setup this pa_hment and link its embedded dummy sf_hment into
3752 	 * the mapping list.
3753 	 */
3754 	pp->p_share++;
3755 	pahmep->cb_id = callback_id;
3756 	pahmep->addr = vaddr;
3757 	pahmep->len = len;
3758 	pahmep->refcnt = 1;
3759 	pahmep->flags = 0;
3760 	pahmep->pvt = pvt;
3761 
3762 	sfhmep->hme_tte.ll = 0;
3763 	sfhmep->hme_data = pahmep;
3764 	sfhmep->hme_prev = osfhmep;
3765 	sfhmep->hme_next = osfhmep->hme_next;
3766 
3767 	if (osfhmep->hme_next)
3768 		osfhmep->hme_next->hme_prev = sfhmep;
3769 
3770 	osfhmep->hme_next = sfhmep;
3771 
3772 	sfmmu_mlist_exit(pml);
3773 	SFMMU_HASH_UNLOCK(hmebp);
3774 
3775 	if (locked)
3776 		page_unlock(pp);
3777 
3778 	*rpfn = pfn;
3779 	if (cookiep)
3780 		*cookiep = (void *)pahmep;
3781 
3782 	return (0);
3783 }
3784 
3785 /*
3786  * Remove the relocation callbacks from the specified addr/len.
3787  */
3788 void
3789 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
3790 	void *cookie)
3791 {
3792 	struct		hmehash_bucket *hmebp;
3793 	hmeblk_tag	hblktag;
3794 	struct hme_blk	*hmeblkp;
3795 	int		hmeshift, hashno;
3796 	caddr_t		saddr;
3797 	struct pa_hment	*pahmep;
3798 	struct sf_hment	*sfhmep, *osfhmep;
3799 	kmutex_t	*pml;
3800 	tte_t		tte;
3801 	page_t		*pp;
3802 	vnode_t		*vp;
3803 	u_offset_t	off;
3804 	int		locked = 0;
3805 
3806 	/*
3807 	 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
3808 	 * remove so just return.
3809 	 */
3810 	if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
3811 		return;
3812 
3813 	saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
3814 
3815 rehash:
3816 	/* Find the mapping(s) for this page */
3817 	for (hashno = TTE64K, hmeblkp = NULL;
3818 	    hmeblkp == NULL && hashno <= mmu_hashcnt;
3819 	    hashno++) {
3820 		hmeshift = HME_HASH_SHIFT(hashno);
3821 		hblktag.htag_id = ksfmmup;
3822 		hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
3823 		hblktag.htag_rehash = hashno;
3824 		hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
3825 
3826 		SFMMU_HASH_LOCK(hmebp);
3827 
3828 		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3829 
3830 		if (hmeblkp == NULL)
3831 			SFMMU_HASH_UNLOCK(hmebp);
3832 	}
3833 
3834 	if (hmeblkp == NULL)
3835 		return;
3836 
3837 	HBLKTOHME(osfhmep, hmeblkp, saddr);
3838 
3839 	sfmmu_copytte(&osfhmep->hme_tte, &tte);
3840 	if (!TTE_IS_VALID(&tte)) {
3841 		SFMMU_HASH_UNLOCK(hmebp);
3842 		return;
3843 	}
3844 
3845 	pp = osfhmep->hme_page;
3846 	if (pp == NULL) {
3847 		SFMMU_HASH_UNLOCK(hmebp);
3848 		ASSERT(cookie == NULL);
3849 		return;
3850 	}
3851 
3852 	vp = pp->p_vnode;
3853 	off = pp->p_offset;
3854 
3855 	pml = sfmmu_mlist_enter(pp);
3856 
3857 	if (flags & HAC_PAGELOCK) {
3858 		if (!page_trylock(pp, SE_SHARED)) {
3859 			/*
3860 			 * Somebody is holding SE_EXCL lock. Might
3861 			 * even be hat_page_relocate(). Drop all
3862 			 * our locks, lookup the page in &kvp, and
3863 			 * retry. If it doesn't exist in &kvp, then
3864 			 * we must be dealing with a kernel mapped
3865 			 * page which doesn't actually belong to
3866 			 * segkmem so we punt.
3867 			 */
3868 			sfmmu_mlist_exit(pml);
3869 			SFMMU_HASH_UNLOCK(hmebp);
3870 			pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
3871 			if (pp == NULL) {
3872 				ASSERT(cookie == NULL);
3873 				return;
3874 			}
3875 			page_unlock(pp);
3876 			goto rehash;
3877 		}
3878 		locked = 1;
3879 	}
3880 
3881 	ASSERT(PAGE_LOCKED(pp));
3882 
3883 	if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
3884 	    pp->p_offset != off) {
3885 		/*
3886 		 * The page moved before we got our hands on it.  Drop
3887 		 * all the locks and try again.
3888 		 */
3889 		ASSERT((flags & HAC_PAGELOCK) != 0);
3890 		sfmmu_mlist_exit(pml);
3891 		SFMMU_HASH_UNLOCK(hmebp);
3892 		page_unlock(pp);
3893 		locked = 0;
3894 		goto rehash;
3895 	}
3896 
3897 	if (vp != &kvp) {
3898 		/*
3899 		 * This is not a segkmem page but another page which
3900 		 * has been kernel mapped.
3901 		 */
3902 		sfmmu_mlist_exit(pml);
3903 		SFMMU_HASH_UNLOCK(hmebp);
3904 		if (locked)
3905 			page_unlock(pp);
3906 		ASSERT(cookie == NULL);
3907 		return;
3908 	}
3909 
3910 	if (cookie != NULL) {
3911 		pahmep = (struct pa_hment *)cookie;
3912 		sfhmep = &pahmep->sfment;
3913 	} else {
3914 		for (sfhmep = pp->p_mapping; sfhmep != NULL;
3915 		    sfhmep = sfhmep->hme_next) {
3916 
3917 			/*
3918 			 * skip va<->pa mappings
3919 			 */
3920 			if (!IS_PAHME(sfhmep))
3921 				continue;
3922 
3923 			pahmep = sfhmep->hme_data;
3924 			ASSERT(pahmep != NULL);
3925 
3926 			/*
3927 			 * if pa_hment matches, remove it
3928 			 */
3929 			if ((pahmep->pvt == pvt) &&
3930 			    (pahmep->addr == vaddr) &&
3931 			    (pahmep->len == len)) {
3932 				break;
3933 			}
3934 		}
3935 	}
3936 
3937 	if (sfhmep == NULL) {
3938 		if (!panicstr) {
3939 			panic("hat_delete_callback: pa_hment not found, pp %p",
3940 			    (void *)pp);
3941 		}
3942 		return;
3943 	}
3944 
3945 	/*
3946 	 * Note: at this point a valid kernel mapping must still be
3947 	 * present on this page.
3948 	 */
3949 	pp->p_share--;
3950 	if (pp->p_share <= 0)
3951 		panic("hat_delete_callback: zero p_share");
3952 
3953 	if (--pahmep->refcnt == 0) {
3954 		if (pahmep->flags != 0)
3955 			panic("hat_delete_callback: pa_hment is busy");
3956 
3957 		/*
3958 		 * Remove sfhmep from the mapping list for the page.
3959 		 */
3960 		if (sfhmep->hme_prev) {
3961 			sfhmep->hme_prev->hme_next = sfhmep->hme_next;
3962 		} else {
3963 			pp->p_mapping = sfhmep->hme_next;
3964 		}
3965 
3966 		if (sfhmep->hme_next)
3967 			sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
3968 
3969 		sfmmu_mlist_exit(pml);
3970 		SFMMU_HASH_UNLOCK(hmebp);
3971 
3972 		if (locked)
3973 			page_unlock(pp);
3974 
3975 		kmem_cache_free(pa_hment_cache, pahmep);
3976 		return;
3977 	}
3978 
3979 	sfmmu_mlist_exit(pml);
3980 	SFMMU_HASH_UNLOCK(hmebp);
3981 	if (locked)
3982 		page_unlock(pp);
3983 }
3984 
3985 /*
3986  * hat_probe returns 1 if the translation for the address 'addr' is
3987  * loaded, zero otherwise.
3988  *
3989  * hat_probe should be used only for advisorary purposes because it may
3990  * occasionally return the wrong value. The implementation must guarantee that
3991  * returning the wrong value is a very rare event. hat_probe is used
3992  * to implement optimizations in the segment drivers.
3993  *
3994  */
3995 int
3996 hat_probe(struct hat *sfmmup, caddr_t addr)
3997 {
3998 	pfn_t pfn;
3999 	tte_t tte;
4000 
4001 	ASSERT(sfmmup != NULL);
4002 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4003 
4004 	ASSERT((sfmmup == ksfmmup) ||
4005 		AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4006 
4007 	if (sfmmup == ksfmmup) {
4008 		while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4009 		    == PFN_SUSPENDED) {
4010 			sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4011 		}
4012 	} else {
4013 		pfn = sfmmu_uvatopfn(addr, sfmmup);
4014 	}
4015 
4016 	if (pfn != PFN_INVALID)
4017 		return (1);
4018 	else
4019 		return (0);
4020 }
4021 
4022 ssize_t
4023 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4024 {
4025 	tte_t tte;
4026 
4027 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4028 
4029 	sfmmu_gettte(sfmmup, addr, &tte);
4030 	if (TTE_IS_VALID(&tte)) {
4031 		return (TTEBYTES(TTE_CSZ(&tte)));
4032 	}
4033 	return (-1);
4034 }
4035 
4036 static void
4037 sfmmu_gettte(struct hat *sfmmup, caddr_t addr, tte_t *ttep)
4038 {
4039 	struct hmehash_bucket *hmebp;
4040 	hmeblk_tag hblktag;
4041 	int hmeshift, hashno = 1;
4042 	struct hme_blk *hmeblkp, *list = NULL;
4043 	struct sf_hment *sfhmep;
4044 
4045 	/* support for ISM */
4046 	ism_map_t	*ism_map;
4047 	ism_blk_t	*ism_blkp;
4048 	int		i;
4049 	sfmmu_t		*ism_hatid = NULL;
4050 	sfmmu_t		*locked_hatid = NULL;
4051 
4052 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
4053 
4054 	ism_blkp = sfmmup->sfmmu_iblk;
4055 	if (ism_blkp) {
4056 		sfmmu_ismhat_enter(sfmmup, 0);
4057 		locked_hatid = sfmmup;
4058 	}
4059 	while (ism_blkp && ism_hatid == NULL) {
4060 		ism_map = ism_blkp->iblk_maps;
4061 		for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
4062 			if (addr >= ism_start(ism_map[i]) &&
4063 			    addr < ism_end(ism_map[i])) {
4064 				sfmmup = ism_hatid = ism_map[i].imap_ismhat;
4065 				addr = (caddr_t)(addr -
4066 					ism_start(ism_map[i]));
4067 				break;
4068 			}
4069 		}
4070 		ism_blkp = ism_blkp->iblk_next;
4071 	}
4072 	if (locked_hatid) {
4073 		sfmmu_ismhat_exit(locked_hatid, 0);
4074 	}
4075 
4076 	hblktag.htag_id = sfmmup;
4077 	ttep->ll = 0;
4078 
4079 	do {
4080 		hmeshift = HME_HASH_SHIFT(hashno);
4081 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4082 		hblktag.htag_rehash = hashno;
4083 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4084 
4085 		SFMMU_HASH_LOCK(hmebp);
4086 
4087 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4088 		if (hmeblkp != NULL) {
4089 			HBLKTOHME(sfhmep, hmeblkp, addr);
4090 			sfmmu_copytte(&sfhmep->hme_tte, ttep);
4091 			SFMMU_HASH_UNLOCK(hmebp);
4092 			break;
4093 		}
4094 		SFMMU_HASH_UNLOCK(hmebp);
4095 		hashno++;
4096 	} while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
4097 
4098 	sfmmu_hblks_list_purge(&list);
4099 }
4100 
4101 uint_t
4102 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4103 {
4104 	tte_t tte;
4105 
4106 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4107 
4108 	sfmmu_gettte(sfmmup, addr, &tte);
4109 	if (TTE_IS_VALID(&tte)) {
4110 		*attr = sfmmu_ptov_attr(&tte);
4111 		return (0);
4112 	}
4113 	*attr = 0;
4114 	return ((uint_t)0xffffffff);
4115 }
4116 
4117 /*
4118  * Enables more attributes on specified address range (ie. logical OR)
4119  */
4120 void
4121 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4122 {
4123 	if (hat->sfmmu_xhat_provider) {
4124 		XHAT_SETATTR(hat, addr, len, attr);
4125 		return;
4126 	} else {
4127 		/*
4128 		 * This must be a CPU HAT. If the address space has
4129 		 * XHATs attached, change attributes for all of them,
4130 		 * just in case
4131 		 */
4132 		ASSERT(hat->sfmmu_as != NULL);
4133 		if (hat->sfmmu_as->a_xhat != NULL)
4134 			xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
4135 	}
4136 
4137 	sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4138 }
4139 
4140 /*
4141  * Assigns attributes to the specified address range.  All the attributes
4142  * are specified.
4143  */
4144 void
4145 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4146 {
4147 	if (hat->sfmmu_xhat_provider) {
4148 		XHAT_CHGATTR(hat, addr, len, attr);
4149 		return;
4150 	} else {
4151 		/*
4152 		 * This must be a CPU HAT. If the address space has
4153 		 * XHATs attached, change attributes for all of them,
4154 		 * just in case
4155 		 */
4156 		ASSERT(hat->sfmmu_as != NULL);
4157 		if (hat->sfmmu_as->a_xhat != NULL)
4158 			xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
4159 	}
4160 
4161 	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4162 }
4163 
4164 /*
4165  * Remove attributes on the specified address range (ie. loginal NAND)
4166  */
4167 void
4168 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4169 {
4170 	if (hat->sfmmu_xhat_provider) {
4171 		XHAT_CLRATTR(hat, addr, len, attr);
4172 		return;
4173 	} else {
4174 		/*
4175 		 * This must be a CPU HAT. If the address space has
4176 		 * XHATs attached, change attributes for all of them,
4177 		 * just in case
4178 		 */
4179 		ASSERT(hat->sfmmu_as != NULL);
4180 		if (hat->sfmmu_as->a_xhat != NULL)
4181 			xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
4182 	}
4183 
4184 	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4185 }
4186 
4187 /*
4188  * Change attributes on an address range to that specified by attr and mode.
4189  */
4190 static void
4191 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4192 	int mode)
4193 {
4194 	struct hmehash_bucket *hmebp;
4195 	hmeblk_tag hblktag;
4196 	int hmeshift, hashno = 1;
4197 	struct hme_blk *hmeblkp, *list = NULL;
4198 	caddr_t endaddr;
4199 	cpuset_t cpuset;
4200 	demap_range_t dmr;
4201 
4202 	CPUSET_ZERO(cpuset);
4203 
4204 	ASSERT((sfmmup == ksfmmup) ||
4205 		AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4206 	ASSERT((len & MMU_PAGEOFFSET) == 0);
4207 	ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4208 
4209 	if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4210 	    ((addr + len) > (caddr_t)USERLIMIT)) {
4211 		panic("user addr %p in kernel space",
4212 		    (void *)addr);
4213 	}
4214 
4215 	endaddr = addr + len;
4216 	hblktag.htag_id = sfmmup;
4217 	DEMAP_RANGE_INIT(sfmmup, &dmr);
4218 
4219 	while (addr < endaddr) {
4220 		hmeshift = HME_HASH_SHIFT(hashno);
4221 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4222 		hblktag.htag_rehash = hashno;
4223 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4224 
4225 		SFMMU_HASH_LOCK(hmebp);
4226 
4227 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4228 		if (hmeblkp != NULL) {
4229 			/*
4230 			 * We've encountered a shadow hmeblk so skip the range
4231 			 * of the next smaller mapping size.
4232 			 */
4233 			if (hmeblkp->hblk_shw_bit) {
4234 				ASSERT(sfmmup != ksfmmup);
4235 				ASSERT(hashno > 1);
4236 				addr = (caddr_t)P2END((uintptr_t)addr,
4237 					    TTEBYTES(hashno - 1));
4238 			} else {
4239 				addr = sfmmu_hblk_chgattr(sfmmup,
4240 				    hmeblkp, addr, endaddr, &dmr, attr, mode);
4241 			}
4242 			SFMMU_HASH_UNLOCK(hmebp);
4243 			hashno = 1;
4244 			continue;
4245 		}
4246 		SFMMU_HASH_UNLOCK(hmebp);
4247 
4248 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4249 			/*
4250 			 * We have traversed the whole list and rehashed
4251 			 * if necessary without finding the address to chgattr.
4252 			 * This is ok, so we increment the address by the
4253 			 * smallest hmeblk range for kernel mappings or for
4254 			 * user mappings with no large pages, and the largest
4255 			 * hmeblk range, to account for shadow hmeblks, for
4256 			 * user mappings with large pages and continue.
4257 			 */
4258 			if (sfmmup == ksfmmup)
4259 				addr = (caddr_t)P2END((uintptr_t)addr,
4260 					    TTEBYTES(1));
4261 			else
4262 				addr = (caddr_t)P2END((uintptr_t)addr,
4263 					    TTEBYTES(hashno));
4264 			hashno = 1;
4265 		} else {
4266 			hashno++;
4267 		}
4268 	}
4269 
4270 	sfmmu_hblks_list_purge(&list);
4271 	DEMAP_RANGE_FLUSH(&dmr);
4272 	cpuset = sfmmup->sfmmu_cpusran;
4273 	xt_sync(cpuset);
4274 }
4275 
4276 /*
4277  * This function chgattr on a range of addresses in an hmeblk.  It returns the
4278  * next addres that needs to be chgattr.
4279  * It should be called with the hash lock held.
4280  * XXX It should be possible to optimize chgattr by not flushing every time but
4281  * on the other hand:
4282  * 1. do one flush crosscall.
4283  * 2. only flush if we are increasing permissions (make sure this will work)
4284  */
4285 static caddr_t
4286 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4287 	caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4288 {
4289 	tte_t tte, tteattr, tteflags, ttemod;
4290 	struct sf_hment *sfhmep;
4291 	int ttesz;
4292 	struct page *pp = NULL;
4293 	kmutex_t *pml, *pmtx;
4294 	int ret;
4295 	int use_demap_range;
4296 #if defined(SF_ERRATA_57)
4297 	int check_exec;
4298 #endif
4299 
4300 	ASSERT(in_hblk_range(hmeblkp, addr));
4301 	ASSERT(hmeblkp->hblk_shw_bit == 0);
4302 
4303 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4304 	ttesz = get_hblk_ttesz(hmeblkp);
4305 
4306 	/*
4307 	 * Flush the current demap region if addresses have been
4308 	 * skipped or the page size doesn't match.
4309 	 */
4310 	use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
4311 	if (use_demap_range) {
4312 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4313 	} else {
4314 		DEMAP_RANGE_FLUSH(dmrp);
4315 	}
4316 
4317 	tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
4318 #if defined(SF_ERRATA_57)
4319 	check_exec = (sfmmup != ksfmmup) &&
4320 	    AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4321 	    TTE_IS_EXECUTABLE(&tteattr);
4322 #endif
4323 	HBLKTOHME(sfhmep, hmeblkp, addr);
4324 	while (addr < endaddr) {
4325 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
4326 		if (TTE_IS_VALID(&tte)) {
4327 			if ((tte.ll & tteflags.ll) == tteattr.ll) {
4328 				/*
4329 				 * if the new attr is the same as old
4330 				 * continue
4331 				 */
4332 				goto next_addr;
4333 			}
4334 			if (!TTE_IS_WRITABLE(&tteattr)) {
4335 				/*
4336 				 * make sure we clear hw modify bit if we
4337 				 * removing write protections
4338 				 */
4339 				tteflags.tte_intlo |= TTE_HWWR_INT;
4340 			}
4341 
4342 			pml = NULL;
4343 			pp = sfhmep->hme_page;
4344 			if (pp) {
4345 				pml = sfmmu_mlist_enter(pp);
4346 			}
4347 
4348 			if (pp != sfhmep->hme_page) {
4349 				/*
4350 				 * tte must have been unloaded.
4351 				 */
4352 				ASSERT(pml);
4353 				sfmmu_mlist_exit(pml);
4354 				continue;
4355 			}
4356 
4357 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
4358 
4359 			ttemod = tte;
4360 			ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
4361 			ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
4362 
4363 #if defined(SF_ERRATA_57)
4364 			if (check_exec && addr < errata57_limit)
4365 				ttemod.tte_exec_perm = 0;
4366 #endif
4367 			ret = sfmmu_modifytte_try(&tte, &ttemod,
4368 			    &sfhmep->hme_tte);
4369 
4370 			if (ret < 0) {
4371 				/* tte changed underneath us */
4372 				if (pml) {
4373 					sfmmu_mlist_exit(pml);
4374 				}
4375 				continue;
4376 			}
4377 
4378 			if (tteflags.tte_intlo & TTE_HWWR_INT) {
4379 				/*
4380 				 * need to sync if we are clearing modify bit.
4381 				 */
4382 				sfmmu_ttesync(sfmmup, addr, &tte, pp);
4383 			}
4384 
4385 			if (pp && PP_ISRO(pp)) {
4386 				if (tteattr.tte_intlo & TTE_WRPRM_INT) {
4387 					pmtx = sfmmu_page_enter(pp);
4388 					PP_CLRRO(pp);
4389 					sfmmu_page_exit(pmtx);
4390 				}
4391 			}
4392 
4393 			if (ret > 0 && use_demap_range) {
4394 				DEMAP_RANGE_MARKPG(dmrp, addr);
4395 			} else if (ret > 0) {
4396 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
4397 			}
4398 
4399 			if (pml) {
4400 				sfmmu_mlist_exit(pml);
4401 			}
4402 		}
4403 next_addr:
4404 		addr += TTEBYTES(ttesz);
4405 		sfhmep++;
4406 		DEMAP_RANGE_NEXTPG(dmrp);
4407 	}
4408 	return (addr);
4409 }
4410 
4411 /*
4412  * This routine converts virtual attributes to physical ones.  It will
4413  * update the tteflags field with the tte mask corresponding to the attributes
4414  * affected and it returns the new attributes.  It will also clear the modify
4415  * bit if we are taking away write permission.  This is necessary since the
4416  * modify bit is the hardware permission bit and we need to clear it in order
4417  * to detect write faults.
4418  */
4419 static uint64_t
4420 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
4421 {
4422 	tte_t ttevalue;
4423 
4424 	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
4425 
4426 	switch (mode) {
4427 	case SFMMU_CHGATTR:
4428 		/* all attributes specified */
4429 		ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
4430 		ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
4431 		ttemaskp->tte_inthi = TTEINTHI_ATTR;
4432 		ttemaskp->tte_intlo = TTEINTLO_ATTR;
4433 		break;
4434 	case SFMMU_SETATTR:
4435 		ASSERT(!(attr & ~HAT_PROT_MASK));
4436 		ttemaskp->ll = 0;
4437 		ttevalue.ll = 0;
4438 		/*
4439 		 * a valid tte implies exec and read for sfmmu
4440 		 * so no need to do anything about them.
4441 		 * since priviledged access implies user access
4442 		 * PROT_USER doesn't make sense either.
4443 		 */
4444 		if (attr & PROT_WRITE) {
4445 			ttemaskp->tte_intlo |= TTE_WRPRM_INT;
4446 			ttevalue.tte_intlo |= TTE_WRPRM_INT;
4447 		}
4448 		break;
4449 	case SFMMU_CLRATTR:
4450 		/* attributes will be nand with current ones */
4451 		if (attr & ~(PROT_WRITE | PROT_USER)) {
4452 			panic("sfmmu: attr %x not supported", attr);
4453 		}
4454 		ttemaskp->ll = 0;
4455 		ttevalue.ll = 0;
4456 		if (attr & PROT_WRITE) {
4457 			/* clear both writable and modify bit */
4458 			ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
4459 		}
4460 		if (attr & PROT_USER) {
4461 			ttemaskp->tte_intlo |= TTE_PRIV_INT;
4462 			ttevalue.tte_intlo |= TTE_PRIV_INT;
4463 		}
4464 		break;
4465 	default:
4466 		panic("sfmmu_vtop_attr: bad mode %x", mode);
4467 	}
4468 	ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
4469 	return (ttevalue.ll);
4470 }
4471 
4472 static uint_t
4473 sfmmu_ptov_attr(tte_t *ttep)
4474 {
4475 	uint_t attr;
4476 
4477 	ASSERT(TTE_IS_VALID(ttep));
4478 
4479 	attr = PROT_READ;
4480 
4481 	if (TTE_IS_WRITABLE(ttep)) {
4482 		attr |= PROT_WRITE;
4483 	}
4484 	if (TTE_IS_EXECUTABLE(ttep)) {
4485 		attr |= PROT_EXEC;
4486 	}
4487 	if (!TTE_IS_PRIVILEGED(ttep)) {
4488 		attr |= PROT_USER;
4489 	}
4490 	if (TTE_IS_NFO(ttep)) {
4491 		attr |= HAT_NOFAULT;
4492 	}
4493 	if (TTE_IS_NOSYNC(ttep)) {
4494 		attr |= HAT_NOSYNC;
4495 	}
4496 	if (TTE_IS_SIDEFFECT(ttep)) {
4497 		attr |= SFMMU_SIDEFFECT;
4498 	}
4499 	if (!TTE_IS_VCACHEABLE(ttep)) {
4500 		attr |= SFMMU_UNCACHEVTTE;
4501 	}
4502 	if (!TTE_IS_PCACHEABLE(ttep)) {
4503 		attr |= SFMMU_UNCACHEPTTE;
4504 	}
4505 	return (attr);
4506 }
4507 
4508 /*
4509  * hat_chgprot is a deprecated hat call.  New segment drivers
4510  * should store all attributes and use hat_*attr calls.
4511  *
4512  * Change the protections in the virtual address range
4513  * given to the specified virtual protection.  If vprot is ~PROT_WRITE,
4514  * then remove write permission, leaving the other
4515  * permissions unchanged.  If vprot is ~PROT_USER, remove user permissions.
4516  *
4517  */
4518 void
4519 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
4520 {
4521 	struct hmehash_bucket *hmebp;
4522 	hmeblk_tag hblktag;
4523 	int hmeshift, hashno = 1;
4524 	struct hme_blk *hmeblkp, *list = NULL;
4525 	caddr_t endaddr;
4526 	cpuset_t cpuset;
4527 	demap_range_t dmr;
4528 
4529 	ASSERT((len & MMU_PAGEOFFSET) == 0);
4530 	ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4531 
4532 	if (sfmmup->sfmmu_xhat_provider) {
4533 		XHAT_CHGPROT(sfmmup, addr, len, vprot);
4534 		return;
4535 	} else {
4536 		/*
4537 		 * This must be a CPU HAT. If the address space has
4538 		 * XHATs attached, change attributes for all of them,
4539 		 * just in case
4540 		 */
4541 		ASSERT(sfmmup->sfmmu_as != NULL);
4542 		if (sfmmup->sfmmu_as->a_xhat != NULL)
4543 			xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
4544 	}
4545 
4546 	CPUSET_ZERO(cpuset);
4547 
4548 	if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
4549 	    ((addr + len) > (caddr_t)USERLIMIT)) {
4550 		panic("user addr %p vprot %x in kernel space",
4551 		    (void *)addr, vprot);
4552 	}
4553 	endaddr = addr + len;
4554 	hblktag.htag_id = sfmmup;
4555 	DEMAP_RANGE_INIT(sfmmup, &dmr);
4556 
4557 	while (addr < endaddr) {
4558 		hmeshift = HME_HASH_SHIFT(hashno);
4559 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4560 		hblktag.htag_rehash = hashno;
4561 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4562 
4563 		SFMMU_HASH_LOCK(hmebp);
4564 
4565 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4566 		if (hmeblkp != NULL) {
4567 			/*
4568 			 * We've encountered a shadow hmeblk so skip the range
4569 			 * of the next smaller mapping size.
4570 			 */
4571 			if (hmeblkp->hblk_shw_bit) {
4572 				ASSERT(sfmmup != ksfmmup);
4573 				ASSERT(hashno > 1);
4574 				addr = (caddr_t)P2END((uintptr_t)addr,
4575 					    TTEBYTES(hashno - 1));
4576 			} else {
4577 				addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
4578 					addr, endaddr, &dmr, vprot);
4579 			}
4580 			SFMMU_HASH_UNLOCK(hmebp);
4581 			hashno = 1;
4582 			continue;
4583 		}
4584 		SFMMU_HASH_UNLOCK(hmebp);
4585 
4586 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4587 			/*
4588 			 * We have traversed the whole list and rehashed
4589 			 * if necessary without finding the address to chgprot.
4590 			 * This is ok so we increment the address by the
4591 			 * smallest hmeblk range for kernel mappings and the
4592 			 * largest hmeblk range, to account for shadow hmeblks,
4593 			 * for user mappings and continue.
4594 			 */
4595 			if (sfmmup == ksfmmup)
4596 				addr = (caddr_t)P2END((uintptr_t)addr,
4597 					    TTEBYTES(1));
4598 			else
4599 				addr = (caddr_t)P2END((uintptr_t)addr,
4600 					    TTEBYTES(hashno));
4601 			hashno = 1;
4602 		} else {
4603 			hashno++;
4604 		}
4605 	}
4606 
4607 	sfmmu_hblks_list_purge(&list);
4608 	DEMAP_RANGE_FLUSH(&dmr);
4609 	cpuset = sfmmup->sfmmu_cpusran;
4610 	xt_sync(cpuset);
4611 }
4612 
4613 /*
4614  * This function chgprots a range of addresses in an hmeblk.  It returns the
4615  * next addres that needs to be chgprot.
4616  * It should be called with the hash lock held.
4617  * XXX It shold be possible to optimize chgprot by not flushing every time but
4618  * on the other hand:
4619  * 1. do one flush crosscall.
4620  * 2. only flush if we are increasing permissions (make sure this will work)
4621  */
4622 static caddr_t
4623 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4624 	caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
4625 {
4626 	uint_t pprot;
4627 	tte_t tte, ttemod;
4628 	struct sf_hment *sfhmep;
4629 	uint_t tteflags;
4630 	int ttesz;
4631 	struct page *pp = NULL;
4632 	kmutex_t *pml, *pmtx;
4633 	int ret;
4634 	int use_demap_range;
4635 #if defined(SF_ERRATA_57)
4636 	int check_exec;
4637 #endif
4638 
4639 	ASSERT(in_hblk_range(hmeblkp, addr));
4640 	ASSERT(hmeblkp->hblk_shw_bit == 0);
4641 
4642 #ifdef DEBUG
4643 	if (get_hblk_ttesz(hmeblkp) != TTE8K &&
4644 	    (endaddr < get_hblk_endaddr(hmeblkp))) {
4645 		panic("sfmmu_hblk_chgprot: partial chgprot of large page");
4646 	}
4647 #endif /* DEBUG */
4648 
4649 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4650 	ttesz = get_hblk_ttesz(hmeblkp);
4651 
4652 	pprot = sfmmu_vtop_prot(vprot, &tteflags);
4653 #if defined(SF_ERRATA_57)
4654 	check_exec = (sfmmup != ksfmmup) &&
4655 	    AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4656 	    ((vprot & PROT_EXEC) == PROT_EXEC);
4657 #endif
4658 	HBLKTOHME(sfhmep, hmeblkp, addr);
4659 
4660 	/*
4661 	 * Flush the current demap region if addresses have been
4662 	 * skipped or the page size doesn't match.
4663 	 */
4664 	use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
4665 	if (use_demap_range) {
4666 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4667 	} else {
4668 		DEMAP_RANGE_FLUSH(dmrp);
4669 	}
4670 
4671 	while (addr < endaddr) {
4672 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
4673 		if (TTE_IS_VALID(&tte)) {
4674 			if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
4675 				/*
4676 				 * if the new protection is the same as old
4677 				 * continue
4678 				 */
4679 				goto next_addr;
4680 			}
4681 			pml = NULL;
4682 			pp = sfhmep->hme_page;
4683 			if (pp) {
4684 				pml = sfmmu_mlist_enter(pp);
4685 			}
4686 			if (pp != sfhmep->hme_page) {
4687 				/*
4688 				 * tte most have been unloaded
4689 				 * underneath us.  Recheck
4690 				 */
4691 				ASSERT(pml);
4692 				sfmmu_mlist_exit(pml);
4693 				continue;
4694 			}
4695 
4696 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
4697 
4698 			ttemod = tte;
4699 			TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
4700 #if defined(SF_ERRATA_57)
4701 			if (check_exec && addr < errata57_limit)
4702 				ttemod.tte_exec_perm = 0;
4703 #endif
4704 			ret = sfmmu_modifytte_try(&tte, &ttemod,
4705 			    &sfhmep->hme_tte);
4706 
4707 			if (ret < 0) {
4708 				/* tte changed underneath us */
4709 				if (pml) {
4710 					sfmmu_mlist_exit(pml);
4711 				}
4712 				continue;
4713 			}
4714 
4715 			if (tteflags & TTE_HWWR_INT) {
4716 				/*
4717 				 * need to sync if we are clearing modify bit.
4718 				 */
4719 				sfmmu_ttesync(sfmmup, addr, &tte, pp);
4720 			}
4721 
4722 			if (pp && PP_ISRO(pp)) {
4723 				if (pprot & TTE_WRPRM_INT) {
4724 					pmtx = sfmmu_page_enter(pp);
4725 					PP_CLRRO(pp);
4726 					sfmmu_page_exit(pmtx);
4727 				}
4728 			}
4729 
4730 			if (ret > 0 && use_demap_range) {
4731 				DEMAP_RANGE_MARKPG(dmrp, addr);
4732 			} else if (ret > 0) {
4733 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
4734 			}
4735 
4736 			if (pml) {
4737 				sfmmu_mlist_exit(pml);
4738 			}
4739 		}
4740 next_addr:
4741 		addr += TTEBYTES(ttesz);
4742 		sfhmep++;
4743 		DEMAP_RANGE_NEXTPG(dmrp);
4744 	}
4745 	return (addr);
4746 }
4747 
4748 /*
4749  * This routine is deprecated and should only be used by hat_chgprot.
4750  * The correct routine is sfmmu_vtop_attr.
4751  * This routine converts virtual page protections to physical ones.  It will
4752  * update the tteflags field with the tte mask corresponding to the protections
4753  * affected and it returns the new protections.  It will also clear the modify
4754  * bit if we are taking away write permission.  This is necessary since the
4755  * modify bit is the hardware permission bit and we need to clear it in order
4756  * to detect write faults.
4757  * It accepts the following special protections:
4758  * ~PROT_WRITE = remove write permissions.
4759  * ~PROT_USER = remove user permissions.
4760  */
4761 static uint_t
4762 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
4763 {
4764 	if (vprot == (uint_t)~PROT_WRITE) {
4765 		*tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
4766 		return (0);		/* will cause wrprm to be cleared */
4767 	}
4768 	if (vprot == (uint_t)~PROT_USER) {
4769 		*tteflagsp = TTE_PRIV_INT;
4770 		return (0);		/* will cause privprm to be cleared */
4771 	}
4772 	if ((vprot == 0) || (vprot == PROT_USER) ||
4773 		((vprot & PROT_ALL) != vprot)) {
4774 		panic("sfmmu_vtop_prot -- bad prot %x", vprot);
4775 	}
4776 
4777 	switch (vprot) {
4778 	case (PROT_READ):
4779 	case (PROT_EXEC):
4780 	case (PROT_EXEC | PROT_READ):
4781 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
4782 		return (TTE_PRIV_INT); 		/* set prv and clr wrt */
4783 	case (PROT_WRITE):
4784 	case (PROT_WRITE | PROT_READ):
4785 	case (PROT_EXEC | PROT_WRITE):
4786 	case (PROT_EXEC | PROT_WRITE | PROT_READ):
4787 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
4788 		return (TTE_PRIV_INT | TTE_WRPRM_INT); 	/* set prv and wrt */
4789 	case (PROT_USER | PROT_READ):
4790 	case (PROT_USER | PROT_EXEC):
4791 	case (PROT_USER | PROT_EXEC | PROT_READ):
4792 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
4793 		return (0); 			/* clr prv and wrt */
4794 	case (PROT_USER | PROT_WRITE):
4795 	case (PROT_USER | PROT_WRITE | PROT_READ):
4796 	case (PROT_USER | PROT_EXEC | PROT_WRITE):
4797 	case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
4798 		*tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
4799 		return (TTE_WRPRM_INT); 	/* clr prv and set wrt */
4800 	default:
4801 		panic("sfmmu_vtop_prot -- bad prot %x", vprot);
4802 	}
4803 	return (0);
4804 }
4805 
4806 /*
4807  * Alternate unload for very large virtual ranges. With a true 64 bit VA,
4808  * the normal algorithm would take too long for a very large VA range with
4809  * few real mappings. This routine just walks thru all HMEs in the global
4810  * hash table to find and remove mappings.
4811  */
4812 static void
4813 hat_unload_large_virtual(
4814 	struct hat		*sfmmup,
4815 	caddr_t			startaddr,
4816 	size_t			len,
4817 	uint_t			flags,
4818 	hat_callback_t		*callback)
4819 {
4820 	struct hmehash_bucket *hmebp;
4821 	struct hme_blk *hmeblkp;
4822 	struct hme_blk *pr_hblk = NULL;
4823 	struct hme_blk *nx_hblk;
4824 	struct hme_blk *list = NULL;
4825 	int i;
4826 	uint64_t hblkpa, prevpa, nx_pa;
4827 	demap_range_t dmr, *dmrp;
4828 	cpuset_t cpuset;
4829 	caddr_t	endaddr = startaddr + len;
4830 	caddr_t	sa;
4831 	caddr_t	ea;
4832 	caddr_t	cb_sa[MAX_CB_ADDR];
4833 	caddr_t	cb_ea[MAX_CB_ADDR];
4834 	int	addr_cnt = 0;
4835 	int	a = 0;
4836 
4837 	if (sfmmup->sfmmu_free) {
4838 		dmrp = NULL;
4839 	} else {
4840 		dmrp = &dmr;
4841 		DEMAP_RANGE_INIT(sfmmup, dmrp);
4842 	}
4843 
4844 	/*
4845 	 * Loop through all the hash buckets of HME blocks looking for matches.
4846 	 */
4847 	for (i = 0; i <= UHMEHASH_SZ; i++) {
4848 		hmebp = &uhme_hash[i];
4849 		SFMMU_HASH_LOCK(hmebp);
4850 		hmeblkp = hmebp->hmeblkp;
4851 		hblkpa = hmebp->hmeh_nextpa;
4852 		prevpa = 0;
4853 		pr_hblk = NULL;
4854 		while (hmeblkp) {
4855 			nx_hblk = hmeblkp->hblk_next;
4856 			nx_pa = hmeblkp->hblk_nextpa;
4857 
4858 			/*
4859 			 * skip if not this context, if a shadow block or
4860 			 * if the mapping is not in the requested range
4861 			 */
4862 			if (hmeblkp->hblk_tag.htag_id != sfmmup ||
4863 			    hmeblkp->hblk_shw_bit ||
4864 			    (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
4865 			    (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
4866 				pr_hblk = hmeblkp;
4867 				prevpa = hblkpa;
4868 				goto next_block;
4869 			}
4870 
4871 			/*
4872 			 * unload if there are any current valid mappings
4873 			 */
4874 			if (hmeblkp->hblk_vcnt != 0 ||
4875 			    hmeblkp->hblk_hmecnt != 0)
4876 				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
4877 				    sa, ea, dmrp, flags);
4878 
4879 			/*
4880 			 * on unmap we also release the HME block itself, once
4881 			 * all mappings are gone.
4882 			 */
4883 			if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
4884 			    !hmeblkp->hblk_vcnt &&
4885 			    !hmeblkp->hblk_hmecnt) {
4886 				ASSERT(!hmeblkp->hblk_lckcnt);
4887 				sfmmu_hblk_hash_rm(hmebp, hmeblkp,
4888 					prevpa, pr_hblk);
4889 				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
4890 			} else {
4891 				pr_hblk = hmeblkp;
4892 				prevpa = hblkpa;
4893 			}
4894 
4895 			if (callback == NULL)
4896 				goto next_block;
4897 
4898 			/*
4899 			 * HME blocks may span more than one page, but we may be
4900 			 * unmapping only one page, so check for a smaller range
4901 			 * for the callback
4902 			 */
4903 			if (sa < startaddr)
4904 				sa = startaddr;
4905 			if (--ea > endaddr)
4906 				ea = endaddr - 1;
4907 
4908 			cb_sa[addr_cnt] = sa;
4909 			cb_ea[addr_cnt] = ea;
4910 			if (++addr_cnt == MAX_CB_ADDR) {
4911 				if (dmrp != NULL) {
4912 					DEMAP_RANGE_FLUSH(dmrp);
4913 					cpuset = sfmmup->sfmmu_cpusran;
4914 					xt_sync(cpuset);
4915 				}
4916 
4917 				for (a = 0; a < MAX_CB_ADDR; ++a) {
4918 					callback->hcb_start_addr = cb_sa[a];
4919 					callback->hcb_end_addr = cb_ea[a];
4920 					callback->hcb_function(callback);
4921 				}
4922 				addr_cnt = 0;
4923 			}
4924 
4925 next_block:
4926 			hmeblkp = nx_hblk;
4927 			hblkpa = nx_pa;
4928 		}
4929 		SFMMU_HASH_UNLOCK(hmebp);
4930 	}
4931 
4932 	sfmmu_hblks_list_purge(&list);
4933 	if (dmrp != NULL) {
4934 		DEMAP_RANGE_FLUSH(dmrp);
4935 		cpuset = sfmmup->sfmmu_cpusran;
4936 		xt_sync(cpuset);
4937 	}
4938 
4939 	for (a = 0; a < addr_cnt; ++a) {
4940 		callback->hcb_start_addr = cb_sa[a];
4941 		callback->hcb_end_addr = cb_ea[a];
4942 		callback->hcb_function(callback);
4943 	}
4944 
4945 	/*
4946 	 * Check TSB and TLB page sizes if the process isn't exiting.
4947 	 */
4948 	if (!sfmmup->sfmmu_free)
4949 		sfmmu_check_page_sizes(sfmmup, 0);
4950 }
4951 
4952 /*
4953  * Unload all the mappings in the range [addr..addr+len). addr and len must
4954  * be MMU_PAGESIZE aligned.
4955  */
4956 
4957 extern struct seg *segkmap;
4958 #define	ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
4959 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
4960 
4961 
4962 void
4963 hat_unload_callback(
4964 	struct hat *sfmmup,
4965 	caddr_t addr,
4966 	size_t len,
4967 	uint_t flags,
4968 	hat_callback_t *callback)
4969 {
4970 	struct hmehash_bucket *hmebp;
4971 	hmeblk_tag hblktag;
4972 	int hmeshift, hashno, iskernel;
4973 	struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
4974 	caddr_t endaddr;
4975 	cpuset_t cpuset;
4976 	uint64_t hblkpa, prevpa;
4977 	int addr_count = 0;
4978 	int a;
4979 	caddr_t cb_start_addr[MAX_CB_ADDR];
4980 	caddr_t cb_end_addr[MAX_CB_ADDR];
4981 	int issegkmap = ISSEGKMAP(sfmmup, addr);
4982 	demap_range_t dmr, *dmrp;
4983 
4984 	if (sfmmup->sfmmu_xhat_provider) {
4985 		XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
4986 		return;
4987 	} else {
4988 		/*
4989 		 * This must be a CPU HAT. If the address space has
4990 		 * XHATs attached, unload the mappings for all of them,
4991 		 * just in case
4992 		 */
4993 		ASSERT(sfmmup->sfmmu_as != NULL);
4994 		if (sfmmup->sfmmu_as->a_xhat != NULL)
4995 			xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
4996 			    len, flags, callback);
4997 	}
4998 
4999 	ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5000 	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5001 
5002 	ASSERT(sfmmup != NULL);
5003 	ASSERT((len & MMU_PAGEOFFSET) == 0);
5004 	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5005 
5006 	/*
5007 	 * Probing through a large VA range (say 63 bits) will be slow, even
5008 	 * at 4 Meg steps between the probes. So, when the virtual address range
5009 	 * is very large, search the HME entries for what to unload.
5010 	 *
5011 	 *	len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5012 	 *
5013 	 *	UHMEHASH_SZ is number of hash buckets to examine
5014 	 *
5015 	 */
5016 	if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5017 		hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5018 		return;
5019 	}
5020 
5021 	CPUSET_ZERO(cpuset);
5022 
5023 	/*
5024 	 * If the process is exiting, we can save a lot of fuss since
5025 	 * we'll flush the TLB when we free the ctx anyway.
5026 	 */
5027 	if (sfmmup->sfmmu_free)
5028 		dmrp = NULL;
5029 	else
5030 		dmrp = &dmr;
5031 
5032 	DEMAP_RANGE_INIT(sfmmup, dmrp);
5033 	endaddr = addr + len;
5034 	hblktag.htag_id = sfmmup;
5035 
5036 	/*
5037 	 * It is likely for the vm to call unload over a wide range of
5038 	 * addresses that are actually very sparsely populated by
5039 	 * translations.  In order to speed this up the sfmmu hat supports
5040 	 * the concept of shadow hmeblks. Dummy large page hmeblks that
5041 	 * correspond to actual small translations are allocated at tteload
5042 	 * time and are referred to as shadow hmeblks.  Now, during unload
5043 	 * time, we first check if we have a shadow hmeblk for that
5044 	 * translation.  The absence of one means the corresponding address
5045 	 * range is empty and can be skipped.
5046 	 *
5047 	 * The kernel is an exception to above statement and that is why
5048 	 * we don't use shadow hmeblks and hash starting from the smallest
5049 	 * page size.
5050 	 */
5051 	if (sfmmup == KHATID) {
5052 		iskernel = 1;
5053 		hashno = TTE64K;
5054 	} else {
5055 		iskernel = 0;
5056 		if (mmu_page_sizes == max_mmu_page_sizes) {
5057 			hashno = TTE256M;
5058 		} else {
5059 			hashno = TTE4M;
5060 		}
5061 	}
5062 	while (addr < endaddr) {
5063 		hmeshift = HME_HASH_SHIFT(hashno);
5064 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5065 		hblktag.htag_rehash = hashno;
5066 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5067 
5068 		SFMMU_HASH_LOCK(hmebp);
5069 
5070 		HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, hblkpa, pr_hblk,
5071 			prevpa, &list);
5072 		if (hmeblkp == NULL) {
5073 			/*
5074 			 * didn't find an hmeblk. skip the appropiate
5075 			 * address range.
5076 			 */
5077 			SFMMU_HASH_UNLOCK(hmebp);
5078 			if (iskernel) {
5079 				if (hashno < mmu_hashcnt) {
5080 					hashno++;
5081 					continue;
5082 				} else {
5083 					hashno = TTE64K;
5084 					addr = (caddr_t)roundup((uintptr_t)addr
5085 						+ 1, MMU_PAGESIZE64K);
5086 					continue;
5087 				}
5088 			}
5089 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
5090 				(1 << hmeshift));
5091 			if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5092 				ASSERT(hashno == TTE64K);
5093 				continue;
5094 			}
5095 			if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5096 				hashno = TTE512K;
5097 				continue;
5098 			}
5099 			if (mmu_page_sizes == max_mmu_page_sizes) {
5100 				if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5101 					hashno = TTE4M;
5102 					continue;
5103 				}
5104 				if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5105 					hashno = TTE32M;
5106 					continue;
5107 				}
5108 				hashno = TTE256M;
5109 				continue;
5110 			} else {
5111 				hashno = TTE4M;
5112 				continue;
5113 			}
5114 		}
5115 		ASSERT(hmeblkp);
5116 		if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5117 			/*
5118 			 * If the valid count is zero we can skip the range
5119 			 * mapped by this hmeblk.
5120 			 * We free hblks in the case of HAT_UNMAP.  HAT_UNMAP
5121 			 * is used by segment drivers as a hint
5122 			 * that the mapping resource won't be used any longer.
5123 			 * The best example of this is during exit().
5124 			 */
5125 			addr = (caddr_t)roundup((uintptr_t)addr + 1,
5126 				get_hblk_span(hmeblkp));
5127 			if ((flags & HAT_UNLOAD_UNMAP) ||
5128 			    (iskernel && !issegkmap)) {
5129 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa,
5130 				    pr_hblk);
5131 				sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
5132 			}
5133 			SFMMU_HASH_UNLOCK(hmebp);
5134 
5135 			if (iskernel) {
5136 				hashno = TTE64K;
5137 				continue;
5138 			}
5139 			if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5140 				ASSERT(hashno == TTE64K);
5141 				continue;
5142 			}
5143 			if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5144 				hashno = TTE512K;
5145 				continue;
5146 			}
5147 			if (mmu_page_sizes == max_mmu_page_sizes) {
5148 				if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5149 					hashno = TTE4M;
5150 					continue;
5151 				}
5152 				if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5153 					hashno = TTE32M;
5154 					continue;
5155 				}
5156 				hashno = TTE256M;
5157 				continue;
5158 			} else {
5159 				hashno = TTE4M;
5160 				continue;
5161 			}
5162 		}
5163 		if (hmeblkp->hblk_shw_bit) {
5164 			/*
5165 			 * If we encounter a shadow hmeblk we know there is
5166 			 * smaller sized hmeblks mapping the same address space.
5167 			 * Decrement the hash size and rehash.
5168 			 */
5169 			ASSERT(sfmmup != KHATID);
5170 			hashno--;
5171 			SFMMU_HASH_UNLOCK(hmebp);
5172 			continue;
5173 		}
5174 
5175 		/*
5176 		 * track callback address ranges.
5177 		 * only start a new range when it's not contiguous
5178 		 */
5179 		if (callback != NULL) {
5180 			if (addr_count > 0 &&
5181 			    addr == cb_end_addr[addr_count - 1])
5182 				--addr_count;
5183 			else
5184 				cb_start_addr[addr_count] = addr;
5185 		}
5186 
5187 		addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5188 				dmrp, flags);
5189 
5190 		if (callback != NULL)
5191 			cb_end_addr[addr_count++] = addr;
5192 
5193 		if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5194 		    !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5195 			sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa,
5196 			    pr_hblk);
5197 			sfmmu_hblk_free(hmebp, hmeblkp, hblkpa, &list);
5198 		}
5199 		SFMMU_HASH_UNLOCK(hmebp);
5200 
5201 		/*
5202 		 * Notify our caller as to exactly which pages
5203 		 * have been unloaded. We do these in clumps,
5204 		 * to minimize the number of xt_sync()s that need to occur.
5205 		 */
5206 		if (callback != NULL && addr_count == MAX_CB_ADDR) {
5207 			DEMAP_RANGE_FLUSH(dmrp);
5208 			if (dmrp != NULL) {
5209 				cpuset = sfmmup->sfmmu_cpusran;
5210 				xt_sync(cpuset);
5211 			}
5212 
5213 			for (a = 0; a < MAX_CB_ADDR; ++a) {
5214 				callback->hcb_start_addr = cb_start_addr[a];
5215 				callback->hcb_end_addr = cb_end_addr[a];
5216 				callback->hcb_function(callback);
5217 			}
5218 			addr_count = 0;
5219 		}
5220 		if (iskernel) {
5221 			hashno = TTE64K;
5222 			continue;
5223 		}
5224 		if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5225 			ASSERT(hashno == TTE64K);
5226 			continue;
5227 		}
5228 		if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5229 			hashno = TTE512K;
5230 			continue;
5231 		}
5232 		if (mmu_page_sizes == max_mmu_page_sizes) {
5233 			if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5234 				hashno = TTE4M;
5235 				continue;
5236 			}
5237 			if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5238 				hashno = TTE32M;
5239 				continue;
5240 			}
5241 			hashno = TTE256M;
5242 		} else {
5243 			hashno = TTE4M;
5244 		}
5245 	}
5246 
5247 	sfmmu_hblks_list_purge(&list);
5248 	DEMAP_RANGE_FLUSH(dmrp);
5249 	if (dmrp != NULL) {
5250 		cpuset = sfmmup->sfmmu_cpusran;
5251 		xt_sync(cpuset);
5252 	}
5253 	if (callback && addr_count != 0) {
5254 		for (a = 0; a < addr_count; ++a) {
5255 			callback->hcb_start_addr = cb_start_addr[a];
5256 			callback->hcb_end_addr = cb_end_addr[a];
5257 			callback->hcb_function(callback);
5258 		}
5259 	}
5260 
5261 	/*
5262 	 * Check TSB and TLB page sizes if the process isn't exiting.
5263 	 */
5264 	if (!sfmmup->sfmmu_free)
5265 		sfmmu_check_page_sizes(sfmmup, 0);
5266 }
5267 
5268 /*
5269  * Unload all the mappings in the range [addr..addr+len). addr and len must
5270  * be MMU_PAGESIZE aligned.
5271  */
5272 void
5273 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5274 {
5275 	if (sfmmup->sfmmu_xhat_provider) {
5276 		XHAT_UNLOAD(sfmmup, addr, len, flags);
5277 		return;
5278 	}
5279 	hat_unload_callback(sfmmup, addr, len, flags, NULL);
5280 }
5281 
5282 
5283 /*
5284  * Find the largest mapping size for this page.
5285  */
5286 static int
5287 fnd_mapping_sz(page_t *pp)
5288 {
5289 	int sz;
5290 	int p_index;
5291 
5292 	p_index = PP_MAPINDEX(pp);
5293 
5294 	sz = 0;
5295 	p_index >>= 1;	/* don't care about 8K bit */
5296 	for (; p_index; p_index >>= 1) {
5297 		sz++;
5298 	}
5299 
5300 	return (sz);
5301 }
5302 
5303 /*
5304  * This function unloads a range of addresses for an hmeblk.
5305  * It returns the next address to be unloaded.
5306  * It should be called with the hash lock held.
5307  */
5308 static caddr_t
5309 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5310 	caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5311 {
5312 	tte_t	tte, ttemod;
5313 	struct	sf_hment *sfhmep;
5314 	int	ttesz;
5315 	long	ttecnt;
5316 	page_t *pp;
5317 	kmutex_t *pml;
5318 	int ret;
5319 	int use_demap_range;
5320 
5321 	ASSERT(in_hblk_range(hmeblkp, addr));
5322 	ASSERT(!hmeblkp->hblk_shw_bit);
5323 #ifdef DEBUG
5324 	if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5325 	    (endaddr < get_hblk_endaddr(hmeblkp))) {
5326 		panic("sfmmu_hblk_unload: partial unload of large page");
5327 	}
5328 #endif /* DEBUG */
5329 
5330 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5331 	ttesz = get_hblk_ttesz(hmeblkp);
5332 
5333 	use_demap_range = (do_virtual_coloring &&
5334 	    ((dmrp == NULL) || TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
5335 	if (use_demap_range) {
5336 		DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5337 	} else {
5338 		DEMAP_RANGE_FLUSH(dmrp);
5339 	}
5340 	ttecnt = 0;
5341 	HBLKTOHME(sfhmep, hmeblkp, addr);
5342 
5343 	while (addr < endaddr) {
5344 		pml = NULL;
5345 again:
5346 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
5347 		if (TTE_IS_VALID(&tte)) {
5348 			pp = sfhmep->hme_page;
5349 			if (pp && pml == NULL) {
5350 				pml = sfmmu_mlist_enter(pp);
5351 			}
5352 
5353 			/*
5354 			 * Verify if hme still points to 'pp' now that
5355 			 * we have p_mapping lock.
5356 			 */
5357 			if (sfhmep->hme_page != pp) {
5358 				if (pp != NULL && sfhmep->hme_page != NULL) {
5359 					if (pml) {
5360 						sfmmu_mlist_exit(pml);
5361 					}
5362 					/* Re-start this iteration. */
5363 					continue;
5364 				}
5365 				ASSERT((pp != NULL) &&
5366 				    (sfhmep->hme_page == NULL));
5367 				goto tte_unloaded;
5368 			}
5369 
5370 			/*
5371 			 * This point on we have both HASH and p_mapping
5372 			 * lock.
5373 			 */
5374 			ASSERT(pp == sfhmep->hme_page);
5375 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5376 
5377 			/*
5378 			 * We need to loop on modify tte because it is
5379 			 * possible for pagesync to come along and
5380 			 * change the software bits beneath us.
5381 			 *
5382 			 * Page_unload can also invalidate the tte after
5383 			 * we read tte outside of p_mapping lock.
5384 			 */
5385 			ttemod = tte;
5386 
5387 			TTE_SET_INVALID(&ttemod);
5388 			ret = sfmmu_modifytte_try(&tte, &ttemod,
5389 			    &sfhmep->hme_tte);
5390 
5391 			if (ret <= 0) {
5392 				if (TTE_IS_VALID(&tte)) {
5393 					goto again;
5394 				} else {
5395 					/*
5396 					 * We read in a valid pte, but it
5397 					 * is unloaded by page_unload.
5398 					 * hme_page has become NULL and
5399 					 * we hold no p_mapping lock.
5400 					 */
5401 					ASSERT(pp == NULL && pml == NULL);
5402 					goto tte_unloaded;
5403 				}
5404 			}
5405 
5406 			if (!(flags & HAT_UNLOAD_NOSYNC)) {
5407 				sfmmu_ttesync(sfmmup, addr, &tte, pp);
5408 			}
5409 
5410 			/*
5411 			 * Ok- we invalidated the tte. Do the rest of the job.
5412 			 */
5413 			ttecnt++;
5414 
5415 			if (flags & HAT_UNLOAD_UNLOCK) {
5416 				ASSERT(hmeblkp->hblk_lckcnt > 0);
5417 				atomic_add_16(&hmeblkp->hblk_lckcnt, -1);
5418 				HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
5419 			}
5420 
5421 			/*
5422 			 * Normally we would need to flush the page
5423 			 * from the virtual cache at this point in
5424 			 * order to prevent a potential cache alias
5425 			 * inconsistency.
5426 			 * The particular scenario we need to worry
5427 			 * about is:
5428 			 * Given:  va1 and va2 are two virtual address
5429 			 * that alias and map the same physical
5430 			 * address.
5431 			 * 1.	mapping exists from va1 to pa and data
5432 			 * has been read into the cache.
5433 			 * 2.	unload va1.
5434 			 * 3.	load va2 and modify data using va2.
5435 			 * 4	unload va2.
5436 			 * 5.	load va1 and reference data.  Unless we
5437 			 * flush the data cache when we unload we will
5438 			 * get stale data.
5439 			 * Fortunately, page coloring eliminates the
5440 			 * above scenario by remembering the color a
5441 			 * physical page was last or is currently
5442 			 * mapped to.  Now, we delay the flush until
5443 			 * the loading of translations.  Only when the
5444 			 * new translation is of a different color
5445 			 * are we forced to flush.
5446 			 */
5447 			if (use_demap_range) {
5448 				/*
5449 				 * Mark this page as needing a demap.
5450 				 */
5451 				DEMAP_RANGE_MARKPG(dmrp, addr);
5452 			} else {
5453 				if (do_virtual_coloring) {
5454 					sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
5455 					    sfmmup->sfmmu_free, 0);
5456 				} else {
5457 					pfn_t pfnum;
5458 
5459 					pfnum = TTE_TO_PFN(addr, &tte);
5460 					sfmmu_tlbcache_demap(addr, sfmmup,
5461 					    hmeblkp, pfnum, sfmmup->sfmmu_free,
5462 					    FLUSH_NECESSARY_CPUS,
5463 					    CACHE_FLUSH, 0);
5464 				}
5465 			}
5466 
5467 			if (pp) {
5468 				/*
5469 				 * Remove the hment from the mapping list
5470 				 */
5471 				ASSERT(hmeblkp->hblk_hmecnt > 0);
5472 
5473 				/*
5474 				 * Again, we cannot
5475 				 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
5476 				 */
5477 				HME_SUB(sfhmep, pp);
5478 				membar_stst();
5479 				atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
5480 			}
5481 
5482 			ASSERT(hmeblkp->hblk_vcnt > 0);
5483 			atomic_add_16(&hmeblkp->hblk_vcnt, -1);
5484 
5485 			ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
5486 			    !hmeblkp->hblk_lckcnt);
5487 
5488 			if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
5489 				if (PP_ISTNC(pp)) {
5490 					/*
5491 					 * If page was temporary
5492 					 * uncached, try to recache
5493 					 * it. Note that HME_SUB() was
5494 					 * called above so p_index and
5495 					 * mlist had been updated.
5496 					 */
5497 					conv_tnc(pp, ttesz);
5498 				} else if (pp->p_mapping == NULL) {
5499 					ASSERT(kpm_enable);
5500 					/*
5501 					 * Page is marked to be in VAC conflict
5502 					 * to an existing kpm mapping and/or is
5503 					 * kpm mapped using only the regular
5504 					 * pagesize.
5505 					 */
5506 					sfmmu_kpm_hme_unload(pp);
5507 				}
5508 			}
5509 		} else if ((pp = sfhmep->hme_page) != NULL) {
5510 				/*
5511 				 * TTE is invalid but the hme
5512 				 * still exists. let pageunload
5513 				 * complete its job.
5514 				 */
5515 				ASSERT(pml == NULL);
5516 				pml = sfmmu_mlist_enter(pp);
5517 				if (sfhmep->hme_page != NULL) {
5518 					sfmmu_mlist_exit(pml);
5519 					pml = NULL;
5520 					goto again;
5521 				}
5522 				ASSERT(sfhmep->hme_page == NULL);
5523 		} else if (hmeblkp->hblk_hmecnt != 0) {
5524 			/*
5525 			 * pageunload may have not finished decrementing
5526 			 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
5527 			 * wait for pageunload to finish. Rely on pageunload
5528 			 * to decrement hblk_hmecnt after hblk_vcnt.
5529 			 */
5530 			pfn_t pfn = TTE_TO_TTEPFN(&tte);
5531 			ASSERT(pml == NULL);
5532 			if (pf_is_memory(pfn)) {
5533 				pp = page_numtopp_nolock(pfn);
5534 				if (pp != NULL) {
5535 					pml = sfmmu_mlist_enter(pp);
5536 					sfmmu_mlist_exit(pml);
5537 					pml = NULL;
5538 				}
5539 			}
5540 		}
5541 
5542 tte_unloaded:
5543 		/*
5544 		 * At this point, the tte we are looking at
5545 		 * should be unloaded, and hme has been unlinked
5546 		 * from page too. This is important because in
5547 		 * pageunload, it does ttesync() then HME_SUB.
5548 		 * We need to make sure HME_SUB has been completed
5549 		 * so we know ttesync() has been completed. Otherwise,
5550 		 * at exit time, after return from hat layer, VM will
5551 		 * release as structure which hat_setstat() (called
5552 		 * by ttesync()) needs.
5553 		 */
5554 #ifdef DEBUG
5555 		{
5556 			tte_t	dtte;
5557 
5558 			ASSERT(sfhmep->hme_page == NULL);
5559 
5560 			sfmmu_copytte(&sfhmep->hme_tte, &dtte);
5561 			ASSERT(!TTE_IS_VALID(&dtte));
5562 		}
5563 #endif
5564 
5565 		if (pml) {
5566 			sfmmu_mlist_exit(pml);
5567 		}
5568 
5569 		addr += TTEBYTES(ttesz);
5570 		sfhmep++;
5571 		DEMAP_RANGE_NEXTPG(dmrp);
5572 	}
5573 	if (ttecnt > 0)
5574 		atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
5575 	return (addr);
5576 }
5577 
5578 /*
5579  * Synchronize all the mappings in the range [addr..addr+len).
5580  * Can be called with clearflag having two states:
5581  * HAT_SYNC_DONTZERO means just return the rm stats
5582  * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
5583  */
5584 void
5585 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
5586 {
5587 	struct hmehash_bucket *hmebp;
5588 	hmeblk_tag hblktag;
5589 	int hmeshift, hashno = 1;
5590 	struct hme_blk *hmeblkp, *list = NULL;
5591 	caddr_t endaddr;
5592 	cpuset_t cpuset;
5593 
5594 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
5595 	ASSERT((sfmmup == ksfmmup) ||
5596 		AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5597 	ASSERT((len & MMU_PAGEOFFSET) == 0);
5598 	ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
5599 		(clearflag == HAT_SYNC_ZERORM));
5600 
5601 	CPUSET_ZERO(cpuset);
5602 
5603 	endaddr = addr + len;
5604 	hblktag.htag_id = sfmmup;
5605 	/*
5606 	 * Spitfire supports 4 page sizes.
5607 	 * Most pages are expected to be of the smallest page
5608 	 * size (8K) and these will not need to be rehashed. 64K
5609 	 * pages also don't need to be rehashed because the an hmeblk
5610 	 * spans 64K of address space. 512K pages might need 1 rehash and
5611 	 * and 4M pages 2 rehashes.
5612 	 */
5613 	while (addr < endaddr) {
5614 		hmeshift = HME_HASH_SHIFT(hashno);
5615 		hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5616 		hblktag.htag_rehash = hashno;
5617 		hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5618 
5619 		SFMMU_HASH_LOCK(hmebp);
5620 
5621 		HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5622 		if (hmeblkp != NULL) {
5623 			/*
5624 			 * We've encountered a shadow hmeblk so skip the range
5625 			 * of the next smaller mapping size.
5626 			 */
5627 			if (hmeblkp->hblk_shw_bit) {
5628 				ASSERT(sfmmup != ksfmmup);
5629 				ASSERT(hashno > 1);
5630 				addr = (caddr_t)P2END((uintptr_t)addr,
5631 					    TTEBYTES(hashno - 1));
5632 			} else {
5633 				addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
5634 				    addr, endaddr, clearflag);
5635 			}
5636 			SFMMU_HASH_UNLOCK(hmebp);
5637 			hashno = 1;
5638 			continue;
5639 		}
5640 		SFMMU_HASH_UNLOCK(hmebp);
5641 
5642 		if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5643 			/*
5644 			 * We have traversed the whole list and rehashed
5645 			 * if necessary without finding the address to sync.
5646 			 * This is ok so we increment the address by the
5647 			 * smallest hmeblk range for kernel mappings and the
5648 			 * largest hmeblk range, to account for shadow hmeblks,
5649 			 * for user mappings and continue.
5650 			 */
5651 			if (sfmmup == ksfmmup)
5652 				addr = (caddr_t)P2END((uintptr_t)addr,
5653 					    TTEBYTES(1));
5654 			else
5655 				addr = (caddr_t)P2END((uintptr_t)addr,
5656 					    TTEBYTES(hashno));
5657 			hashno = 1;
5658 		} else {
5659 			hashno++;
5660 		}
5661 	}
5662 	sfmmu_hblks_list_purge(&list);
5663 	cpuset = sfmmup->sfmmu_cpusran;
5664 	xt_sync(cpuset);
5665 }
5666 
5667 static caddr_t
5668 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5669 	caddr_t endaddr, int clearflag)
5670 {
5671 	tte_t	tte, ttemod;
5672 	struct sf_hment *sfhmep;
5673 	int ttesz;
5674 	struct page *pp;
5675 	kmutex_t *pml;
5676 	int ret;
5677 
5678 	ASSERT(hmeblkp->hblk_shw_bit == 0);
5679 
5680 	endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5681 
5682 	ttesz = get_hblk_ttesz(hmeblkp);
5683 	HBLKTOHME(sfhmep, hmeblkp, addr);
5684 
5685 	while (addr < endaddr) {
5686 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
5687 		if (TTE_IS_VALID(&tte)) {
5688 			pml = NULL;
5689 			pp = sfhmep->hme_page;
5690 			if (pp) {
5691 				pml = sfmmu_mlist_enter(pp);
5692 			}
5693 			if (pp != sfhmep->hme_page) {
5694 				/*
5695 				 * tte most have been unloaded
5696 				 * underneath us.  Recheck
5697 				 */
5698 				ASSERT(pml);
5699 				sfmmu_mlist_exit(pml);
5700 				continue;
5701 			}
5702 
5703 			ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5704 
5705 			if (clearflag == HAT_SYNC_ZERORM) {
5706 				ttemod = tte;
5707 				TTE_CLR_RM(&ttemod);
5708 				ret = sfmmu_modifytte_try(&tte, &ttemod,
5709 				    &sfhmep->hme_tte);
5710 				if (ret < 0) {
5711 					if (pml) {
5712 						sfmmu_mlist_exit(pml);
5713 					}
5714 					continue;
5715 				}
5716 
5717 				if (ret > 0) {
5718 					sfmmu_tlb_demap(addr, sfmmup,
5719 						hmeblkp, 0, 0);
5720 				}
5721 			}
5722 			sfmmu_ttesync(sfmmup, addr, &tte, pp);
5723 			if (pml) {
5724 				sfmmu_mlist_exit(pml);
5725 			}
5726 		}
5727 		addr += TTEBYTES(ttesz);
5728 		sfhmep++;
5729 	}
5730 	return (addr);
5731 }
5732 
5733 /*
5734  * This function will sync a tte to the page struct and it will
5735  * update the hat stats. Currently it allows us to pass a NULL pp
5736  * and we will simply update the stats.  We may want to change this
5737  * so we only keep stats for pages backed by pp's.
5738  */
5739 static void
5740 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
5741 {
5742 	uint_t rm = 0;
5743 	int   	sz;
5744 	pgcnt_t	npgs;
5745 
5746 	ASSERT(TTE_IS_VALID(ttep));
5747 
5748 	if (TTE_IS_NOSYNC(ttep)) {
5749 		return;
5750 	}
5751 
5752 	if (TTE_IS_REF(ttep))  {
5753 		rm = P_REF;
5754 	}
5755 	if (TTE_IS_MOD(ttep))  {
5756 		rm |= P_MOD;
5757 	}
5758 
5759 	if (rm == 0) {
5760 		return;
5761 	}
5762 
5763 	sz = TTE_CSZ(ttep);
5764 	if (sfmmup->sfmmu_rmstat) {
5765 		int i;
5766 		caddr_t	vaddr = addr;
5767 
5768 		for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
5769 			hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
5770 		}
5771 
5772 	}
5773 
5774 	/*
5775 	 * XXX I want to use cas to update nrm bits but they
5776 	 * currently belong in common/vm and not in hat where
5777 	 * they should be.
5778 	 * The nrm bits are protected by the same mutex as
5779 	 * the one that protects the page's mapping list.
5780 	 */
5781 	if (!pp)
5782 		return;
5783 	ASSERT(sfmmu_mlist_held(pp));
5784 	/*
5785 	 * If the tte is for a large page, we need to sync all the
5786 	 * pages covered by the tte.
5787 	 */
5788 	if (sz != TTE8K) {
5789 		ASSERT(pp->p_szc != 0);
5790 		pp = PP_GROUPLEADER(pp, sz);
5791 		ASSERT(sfmmu_mlist_held(pp));
5792 	}
5793 
5794 	/* Get number of pages from tte size. */
5795 	npgs = TTEPAGES(sz);
5796 
5797 	do {
5798 		ASSERT(pp);
5799 		ASSERT(sfmmu_mlist_held(pp));
5800 		if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
5801 		    ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
5802 			hat_page_setattr(pp, rm);
5803 
5804 		/*
5805 		 * Are we done? If not, we must have a large mapping.
5806 		 * For large mappings we need to sync the rest of the pages
5807 		 * covered by this tte; goto the next page.
5808 		 */
5809 	} while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
5810 }
5811 
5812 /*
5813  * Execute pre-callback handler of each pa_hment linked to pp
5814  *
5815  * Inputs:
5816  *   flag: either HAT_PRESUSPEND or HAT_SUSPEND.
5817  *   capture_cpus: pointer to return value (below)
5818  *
5819  * Returns:
5820  *   Propagates the subsystem callback return values back to the caller;
5821  *   returns 0 on success.  If capture_cpus is non-NULL, the value returned
5822  *   is zero if all of the pa_hments are of a type that do not require
5823  *   capturing CPUs prior to suspending the mapping, else it is 1.
5824  */
5825 static int
5826 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
5827 {
5828 	struct sf_hment	*sfhmep;
5829 	struct pa_hment *pahmep;
5830 	int (*f)(caddr_t, uint_t, uint_t, void *);
5831 	int		ret;
5832 	id_t		id;
5833 	int		locked = 0;
5834 	kmutex_t	*pml;
5835 
5836 	ASSERT(PAGE_EXCL(pp));
5837 	if (!sfmmu_mlist_held(pp)) {
5838 		pml = sfmmu_mlist_enter(pp);
5839 		locked = 1;
5840 	}
5841 
5842 	if (capture_cpus)
5843 		*capture_cpus = 0;
5844 
5845 top:
5846 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
5847 		/*
5848 		 * skip sf_hments corresponding to VA<->PA mappings;
5849 		 * for pa_hment's, hme_tte.ll is zero
5850 		 */
5851 		if (!IS_PAHME(sfhmep))
5852 			continue;
5853 
5854 		pahmep = sfhmep->hme_data;
5855 		ASSERT(pahmep != NULL);
5856 
5857 		/*
5858 		 * skip if pre-handler has been called earlier in this loop
5859 		 */
5860 		if (pahmep->flags & flag)
5861 			continue;
5862 
5863 		id = pahmep->cb_id;
5864 		ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
5865 		if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
5866 			*capture_cpus = 1;
5867 		if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
5868 			pahmep->flags |= flag;
5869 			continue;
5870 		}
5871 
5872 		/*
5873 		 * Drop the mapping list lock to avoid locking order issues.
5874 		 */
5875 		if (locked)
5876 			sfmmu_mlist_exit(pml);
5877 
5878 		ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
5879 		if (ret != 0)
5880 			return (ret);	/* caller must do the cleanup */
5881 
5882 		if (locked) {
5883 			pml = sfmmu_mlist_enter(pp);
5884 			pahmep->flags |= flag;
5885 			goto top;
5886 		}
5887 
5888 		pahmep->flags |= flag;
5889 	}
5890 
5891 	if (locked)
5892 		sfmmu_mlist_exit(pml);
5893 
5894 	return (0);
5895 }
5896 
5897 /*
5898  * Execute post-callback handler of each pa_hment linked to pp
5899  *
5900  * Same overall assumptions and restrictions apply as for
5901  * hat_pageprocess_precallbacks().
5902  */
5903 static void
5904 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
5905 {
5906 	pfn_t pgpfn = pp->p_pagenum;
5907 	pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
5908 	pfn_t newpfn;
5909 	struct sf_hment *sfhmep;
5910 	struct pa_hment *pahmep;
5911 	int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
5912 	id_t	id;
5913 	int	locked = 0;
5914 	kmutex_t *pml;
5915 
5916 	ASSERT(PAGE_EXCL(pp));
5917 	if (!sfmmu_mlist_held(pp)) {
5918 		pml = sfmmu_mlist_enter(pp);
5919 		locked = 1;
5920 	}
5921 
5922 top:
5923 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
5924 		/*
5925 		 * skip sf_hments corresponding to VA<->PA mappings;
5926 		 * for pa_hment's, hme_tte.ll is zero
5927 		 */
5928 		if (!IS_PAHME(sfhmep))
5929 			continue;
5930 
5931 		pahmep = sfhmep->hme_data;
5932 		ASSERT(pahmep != NULL);
5933 
5934 		if ((pahmep->flags & flag) == 0)
5935 			continue;
5936 
5937 		pahmep->flags &= ~flag;
5938 
5939 		id = pahmep->cb_id;
5940 		ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
5941 		if ((f = sfmmu_cb_table[id].posthandler) == NULL)
5942 			continue;
5943 
5944 		/*
5945 		 * Convert the base page PFN into the constituent PFN
5946 		 * which is needed by the callback handler.
5947 		 */
5948 		newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
5949 
5950 		/*
5951 		 * Drop the mapping list lock to avoid locking order issues.
5952 		 */
5953 		if (locked)
5954 			sfmmu_mlist_exit(pml);
5955 
5956 		if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
5957 		    != 0)
5958 			panic("sfmmu: posthandler failed");
5959 
5960 		if (locked) {
5961 			pml = sfmmu_mlist_enter(pp);
5962 			goto top;
5963 		}
5964 	}
5965 
5966 	if (locked)
5967 		sfmmu_mlist_exit(pml);
5968 }
5969 
5970 /*
5971  * Suspend locked kernel mapping
5972  */
5973 void
5974 hat_pagesuspend(struct page *pp)
5975 {
5976 	struct sf_hment *sfhmep;
5977 	sfmmu_t *sfmmup;
5978 	tte_t tte, ttemod;
5979 	struct hme_blk *hmeblkp;
5980 	caddr_t addr;
5981 	int index, cons;
5982 	cpuset_t cpuset;
5983 
5984 	ASSERT(PAGE_EXCL(pp));
5985 	ASSERT(sfmmu_mlist_held(pp));
5986 
5987 	mutex_enter(&kpr_suspendlock);
5988 
5989 	/*
5990 	 * Call into dtrace to tell it we're about to suspend a
5991 	 * kernel mapping. This prevents us from running into issues
5992 	 * with probe context trying to touch a suspended page
5993 	 * in the relocation codepath itself.
5994 	 */
5995 	if (dtrace_kreloc_init)
5996 		(*dtrace_kreloc_init)();
5997 
5998 	index = PP_MAPINDEX(pp);
5999 	cons = TTE8K;
6000 
6001 retry:
6002 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6003 
6004 		if (IS_PAHME(sfhmep))
6005 			continue;
6006 
6007 		if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6008 			continue;
6009 
6010 		/*
6011 		 * Loop until we successfully set the suspend bit in
6012 		 * the TTE.
6013 		 */
6014 again:
6015 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
6016 		ASSERT(TTE_IS_VALID(&tte));
6017 
6018 		ttemod = tte;
6019 		TTE_SET_SUSPEND(&ttemod);
6020 		if (sfmmu_modifytte_try(&tte, &ttemod,
6021 		    &sfhmep->hme_tte) < 0)
6022 			goto again;
6023 
6024 		/*
6025 		 * Invalidate TSB entry
6026 		 */
6027 		hmeblkp = sfmmu_hmetohblk(sfhmep);
6028 
6029 		sfmmup = hblktosfmmu(hmeblkp);
6030 		ASSERT(sfmmup == ksfmmup);
6031 
6032 		addr = tte_to_vaddr(hmeblkp, tte);
6033 
6034 		/*
6035 		 * No need to make sure that the TSB for this sfmmu is
6036 		 * not being relocated since it is ksfmmup and thus it
6037 		 * will never be relocated.
6038 		 */
6039 		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp);
6040 
6041 		/*
6042 		 * Update xcall stats
6043 		 */
6044 		cpuset = cpu_ready_set;
6045 		CPUSET_DEL(cpuset, CPU->cpu_id);
6046 
6047 		/* LINTED: constant in conditional context */
6048 		SFMMU_XCALL_STATS(ksfmmup);
6049 
6050 		/*
6051 		 * Flush TLB entry on remote CPU's
6052 		 */
6053 		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6054 		    (uint64_t)ksfmmup);
6055 		xt_sync(cpuset);
6056 
6057 		/*
6058 		 * Flush TLB entry on local CPU
6059 		 */
6060 		vtag_flushpage(addr, (uint64_t)ksfmmup);
6061 	}
6062 
6063 	while (index != 0) {
6064 		index = index >> 1;
6065 		if (index != 0)
6066 			cons++;
6067 		if (index & 0x1) {
6068 			pp = PP_GROUPLEADER(pp, cons);
6069 			goto retry;
6070 		}
6071 	}
6072 }
6073 
6074 #ifdef	DEBUG
6075 
6076 #define	N_PRLE	1024
6077 struct prle {
6078 	page_t *targ;
6079 	page_t *repl;
6080 	int status;
6081 	int pausecpus;
6082 	hrtime_t whence;
6083 };
6084 
6085 static struct prle page_relocate_log[N_PRLE];
6086 static int prl_entry;
6087 static kmutex_t prl_mutex;
6088 
6089 #define	PAGE_RELOCATE_LOG(t, r, s, p)					\
6090 	mutex_enter(&prl_mutex);					\
6091 	page_relocate_log[prl_entry].targ = *(t);			\
6092 	page_relocate_log[prl_entry].repl = *(r);			\
6093 	page_relocate_log[prl_entry].status = (s);			\
6094 	page_relocate_log[prl_entry].pausecpus = (p);			\
6095 	page_relocate_log[prl_entry].whence = gethrtime();		\
6096 	prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1;	\
6097 	mutex_exit(&prl_mutex);
6098 
6099 #else	/* !DEBUG */
6100 #define	PAGE_RELOCATE_LOG(t, r, s, p)
6101 #endif
6102 
6103 /*
6104  * Core Kernel Page Relocation Algorithm
6105  *
6106  * Input:
6107  *
6108  * target : 	constituent pages are SE_EXCL locked.
6109  * replacement:	constituent pages are SE_EXCL locked.
6110  *
6111  * Output:
6112  *
6113  * nrelocp:	number of pages relocated
6114  */
6115 int
6116 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6117 {
6118 	page_t		*targ, *repl;
6119 	page_t		*tpp, *rpp;
6120 	kmutex_t	*low, *high;
6121 	spgcnt_t	npages, i;
6122 	page_t		*pl = NULL;
6123 	int		old_pil;
6124 	cpuset_t	cpuset;
6125 	int		cap_cpus;
6126 	int		ret;
6127 
6128 	if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) {
6129 		PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6130 		return (EAGAIN);
6131 	}
6132 
6133 	mutex_enter(&kpr_mutex);
6134 	kreloc_thread = curthread;
6135 
6136 	targ = *target;
6137 	repl = *replacement;
6138 	ASSERT(repl != NULL);
6139 	ASSERT(targ->p_szc == repl->p_szc);
6140 
6141 	npages = page_get_pagecnt(targ->p_szc);
6142 
6143 	/*
6144 	 * unload VA<->PA mappings that are not locked
6145 	 */
6146 	tpp = targ;
6147 	for (i = 0; i < npages; i++) {
6148 		(void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6149 		tpp++;
6150 	}
6151 
6152 	/*
6153 	 * Do "presuspend" callbacks, in a context from which we can still
6154 	 * block as needed. Note that we don't hold the mapping list lock
6155 	 * of "targ" at this point due to potential locking order issues;
6156 	 * we assume that between the hat_pageunload() above and holding
6157 	 * the SE_EXCL lock that the mapping list *cannot* change at this
6158 	 * point.
6159 	 */
6160 	ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6161 	if (ret != 0) {
6162 		/*
6163 		 * EIO translates to fatal error, for all others cleanup
6164 		 * and return EAGAIN.
6165 		 */
6166 		ASSERT(ret != EIO);
6167 		hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6168 		PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6169 		kreloc_thread = NULL;
6170 		mutex_exit(&kpr_mutex);
6171 		return (EAGAIN);
6172 	}
6173 
6174 	/*
6175 	 * acquire p_mapping list lock for both the target and replacement
6176 	 * root pages.
6177 	 *
6178 	 * low and high refer to the need to grab the mlist locks in a
6179 	 * specific order in order to prevent race conditions.  Thus the
6180 	 * lower lock must be grabbed before the higher lock.
6181 	 *
6182 	 * This will block hat_unload's accessing p_mapping list.  Since
6183 	 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6184 	 * blocked.  Thus, no one else will be accessing the p_mapping list
6185 	 * while we suspend and reload the locked mapping below.
6186 	 */
6187 	tpp = targ;
6188 	rpp = repl;
6189 	sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6190 
6191 	kpreempt_disable();
6192 
6193 	/*
6194 	 * If the replacement page is of a different virtual color
6195 	 * than the page it is replacing, we need to handle the VAC
6196 	 * consistency for it just as we would if we were setting up
6197 	 * a new mapping to a page.
6198 	 */
6199 	if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) {
6200 		if (tpp->p_vcolor != rpp->p_vcolor) {
6201 			sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
6202 			    rpp->p_pagenum);
6203 		}
6204 	}
6205 
6206 	/*
6207 	 * We raise our PIL to 13 so that we don't get captured by
6208 	 * another CPU or pinned by an interrupt thread.  We can't go to
6209 	 * PIL 14 since the nexus driver(s) may need to interrupt at
6210 	 * that level in the case of IOMMU pseudo mappings.
6211 	 */
6212 	cpuset = cpu_ready_set;
6213 	CPUSET_DEL(cpuset, CPU->cpu_id);
6214 	if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6215 		old_pil = splr(XCALL_PIL);
6216 	} else {
6217 		old_pil = -1;
6218 		xc_attention(cpuset);
6219 	}
6220 	ASSERT(getpil() == XCALL_PIL);
6221 
6222 	/*
6223 	 * Now do suspend callbacks. In the case of an IOMMU mapping
6224 	 * this will suspend all DMA activity to the page while it is
6225 	 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6226 	 * may be captured at this point we should have acquired any needed
6227 	 * locks in the presuspend callback.
6228 	 */
6229 	ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6230 	if (ret != 0) {
6231 		repl = targ;
6232 		goto suspend_fail;
6233 	}
6234 
6235 	/*
6236 	 * Raise the PIL yet again, this time to block all high-level
6237 	 * interrupts on this CPU. This is necessary to prevent an
6238 	 * interrupt routine from pinning the thread which holds the
6239 	 * mapping suspended and then touching the suspended page.
6240 	 *
6241 	 * Once the page is suspended we also need to be careful to
6242 	 * avoid calling any functions which touch any seg_kmem memory
6243 	 * since that memory may be backed by the very page we are
6244 	 * relocating in here!
6245 	 */
6246 	hat_pagesuspend(targ);
6247 
6248 	/*
6249 	 * Now that we are confident everybody has stopped using this page,
6250 	 * copy the page contents.  Note we use a physical copy to prevent
6251 	 * locking issues and to avoid fpRAS because we can't handle it in
6252 	 * this context.
6253 	 */
6254 	for (i = 0; i < npages; i++, tpp++, rpp++) {
6255 		/*
6256 		 * Copy the contents of the page.
6257 		 */
6258 		ppcopy_kernel(tpp, rpp);
6259 	}
6260 
6261 	tpp = targ;
6262 	rpp = repl;
6263 	for (i = 0; i < npages; i++, tpp++, rpp++) {
6264 		/*
6265 		 * Copy attributes.  VAC consistency was handled above,
6266 		 * if required.
6267 		 */
6268 		rpp->p_nrm = tpp->p_nrm;
6269 		tpp->p_nrm = 0;
6270 		rpp->p_index = tpp->p_index;
6271 		tpp->p_index = 0;
6272 		rpp->p_vcolor = tpp->p_vcolor;
6273 	}
6274 
6275 	/*
6276 	 * First, unsuspend the page, if we set the suspend bit, and transfer
6277 	 * the mapping list from the target page to the replacement page.
6278 	 * Next process postcallbacks; since pa_hment's are linked only to the
6279 	 * p_mapping list of root page, we don't iterate over the constituent
6280 	 * pages.
6281 	 */
6282 	hat_pagereload(targ, repl);
6283 
6284 suspend_fail:
6285 	hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
6286 
6287 	/*
6288 	 * Now lower our PIL and release any captured CPUs since we
6289 	 * are out of the "danger zone".  After this it will again be
6290 	 * safe to acquire adaptive mutex locks, or to drop them...
6291 	 */
6292 	if (old_pil != -1) {
6293 		splx(old_pil);
6294 	} else {
6295 		xc_dismissed(cpuset);
6296 	}
6297 
6298 	kpreempt_enable();
6299 
6300 	sfmmu_mlist_reloc_exit(low, high);
6301 
6302 	/*
6303 	 * Postsuspend callbacks should drop any locks held across
6304 	 * the suspend callbacks.  As before, we don't hold the mapping
6305 	 * list lock at this point.. our assumption is that the mapping
6306 	 * list still can't change due to our holding SE_EXCL lock and
6307 	 * there being no unlocked mappings left. Hence the restriction
6308 	 * on calling context to hat_delete_callback()
6309 	 */
6310 	hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
6311 	if (ret != 0) {
6312 		/*
6313 		 * The second presuspend call failed: we got here through
6314 		 * the suspend_fail label above.
6315 		 */
6316 		ASSERT(ret != EIO);
6317 		PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
6318 		kreloc_thread = NULL;
6319 		mutex_exit(&kpr_mutex);
6320 		return (EAGAIN);
6321 	}
6322 
6323 	/*
6324 	 * Now that we're out of the performance critical section we can
6325 	 * take care of updating the hash table, since we still
6326 	 * hold all the pages locked SE_EXCL at this point we
6327 	 * needn't worry about things changing out from under us.
6328 	 */
6329 	tpp = targ;
6330 	rpp = repl;
6331 	for (i = 0; i < npages; i++, tpp++, rpp++) {
6332 
6333 		/*
6334 		 * replace targ with replacement in page_hash table
6335 		 */
6336 		targ = tpp;
6337 		page_relocate_hash(rpp, targ);
6338 
6339 		/*
6340 		 * concatenate target; caller of platform_page_relocate()
6341 		 * expects target to be concatenated after returning.
6342 		 */
6343 		ASSERT(targ->p_next == targ);
6344 		ASSERT(targ->p_prev == targ);
6345 		page_list_concat(&pl, &targ);
6346 	}
6347 
6348 	ASSERT(*target == pl);
6349 	*nrelocp = npages;
6350 	PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
6351 	kreloc_thread = NULL;
6352 	mutex_exit(&kpr_mutex);
6353 	return (0);
6354 }
6355 
6356 /*
6357  * Called when stray pa_hments are found attached to a page which is
6358  * being freed.  Notify the subsystem which attached the pa_hment of
6359  * the error if it registered a suitable handler, else panic.
6360  */
6361 static void
6362 sfmmu_pahment_leaked(struct pa_hment *pahmep)
6363 {
6364 	id_t cb_id = pahmep->cb_id;
6365 
6366 	ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
6367 	if (sfmmu_cb_table[cb_id].errhandler != NULL) {
6368 		if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
6369 		    HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
6370 			return;		/* non-fatal */
6371 	}
6372 	panic("pa_hment leaked: 0x%p", pahmep);
6373 }
6374 
6375 /*
6376  * Remove all mappings to page 'pp'.
6377  */
6378 int
6379 hat_pageunload(struct page *pp, uint_t forceflag)
6380 {
6381 	struct page *origpp = pp;
6382 	struct sf_hment *sfhme, *tmphme;
6383 	struct hme_blk *hmeblkp;
6384 	kmutex_t *pml, *pmtx;
6385 	cpuset_t cpuset, tset;
6386 	int index, cons;
6387 	int xhme_blks;
6388 	int pa_hments;
6389 
6390 	ASSERT(PAGE_EXCL(pp));
6391 
6392 retry_xhat:
6393 	tmphme = NULL;
6394 	xhme_blks = 0;
6395 	pa_hments = 0;
6396 	CPUSET_ZERO(cpuset);
6397 
6398 	pml = sfmmu_mlist_enter(pp);
6399 
6400 	if (pp->p_kpmref)
6401 		sfmmu_kpm_pageunload(pp);
6402 	ASSERT(!PP_ISMAPPED_KPM(pp));
6403 
6404 	index = PP_MAPINDEX(pp);
6405 	cons = TTE8K;
6406 retry:
6407 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6408 		tmphme = sfhme->hme_next;
6409 
6410 		if (IS_PAHME(sfhme)) {
6411 			ASSERT(sfhme->hme_data != NULL);
6412 			pa_hments++;
6413 			continue;
6414 		}
6415 
6416 		hmeblkp = sfmmu_hmetohblk(sfhme);
6417 		if (hmeblkp->hblk_xhat_bit) {
6418 			struct xhat_hme_blk *xblk =
6419 			    (struct xhat_hme_blk *)hmeblkp;
6420 
6421 			(void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
6422 			    pp, forceflag, XBLK2PROVBLK(xblk));
6423 
6424 			xhme_blks = 1;
6425 			continue;
6426 		}
6427 
6428 		/*
6429 		 * If there are kernel mappings don't unload them, they will
6430 		 * be suspended.
6431 		 */
6432 		if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
6433 		    hmeblkp->hblk_tag.htag_id == ksfmmup)
6434 			continue;
6435 
6436 		tset = sfmmu_pageunload(pp, sfhme, cons);
6437 		CPUSET_OR(cpuset, tset);
6438 	}
6439 
6440 	while (index != 0) {
6441 		index = index >> 1;
6442 		if (index != 0)
6443 			cons++;
6444 		if (index & 0x1) {
6445 			/* Go to leading page */
6446 			pp = PP_GROUPLEADER(pp, cons);
6447 			ASSERT(sfmmu_mlist_held(pp));
6448 			goto retry;
6449 		}
6450 	}
6451 
6452 	/*
6453 	 * cpuset may be empty if the page was only mapped by segkpm,
6454 	 * in which case we won't actually cross-trap.
6455 	 */
6456 	xt_sync(cpuset);
6457 
6458 	/*
6459 	 * The page should have no mappings at this point, unless
6460 	 * we were called from hat_page_relocate() in which case we
6461 	 * leave the locked mappings which will be suspended later.
6462 	 */
6463 	ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
6464 	    (forceflag == SFMMU_KERNEL_RELOC));
6465 
6466 	if (PP_ISTNC(pp)) {
6467 		if (cons == TTE8K) {
6468 			pmtx = sfmmu_page_enter(pp);
6469 			PP_CLRTNC(pp);
6470 			sfmmu_page_exit(pmtx);
6471 		} else {
6472 			conv_tnc(pp, cons);
6473 		}
6474 	}
6475 
6476 	if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
6477 		/*
6478 		 * Unlink any pa_hments and free them, calling back
6479 		 * the responsible subsystem to notify it of the error.
6480 		 * This can occur in situations such as drivers leaking
6481 		 * DMA handles: naughty, but common enough that we'd like
6482 		 * to keep the system running rather than bringing it
6483 		 * down with an obscure error like "pa_hment leaked"
6484 		 * which doesn't aid the user in debugging their driver.
6485 		 */
6486 		for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6487 			tmphme = sfhme->hme_next;
6488 			if (IS_PAHME(sfhme)) {
6489 				struct pa_hment *pahmep = sfhme->hme_data;
6490 				sfmmu_pahment_leaked(pahmep);
6491 				HME_SUB(sfhme, pp);
6492 				kmem_cache_free(pa_hment_cache, pahmep);
6493 			}
6494 		}
6495 
6496 		ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
6497 	}
6498 
6499 	sfmmu_mlist_exit(pml);
6500 
6501 	/*
6502 	 * XHAT may not have finished unloading pages
6503 	 * because some other thread was waiting for
6504 	 * mlist lock and XHAT_PAGEUNLOAD let it do
6505 	 * the job.
6506 	 */
6507 	if (xhme_blks) {
6508 		pp = origpp;
6509 		goto retry_xhat;
6510 	}
6511 
6512 	return (0);
6513 }
6514 
6515 static cpuset_t
6516 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
6517 {
6518 	struct hme_blk *hmeblkp;
6519 	sfmmu_t *sfmmup;
6520 	tte_t tte, ttemod;
6521 #ifdef DEBUG
6522 	tte_t orig_old;
6523 #endif /* DEBUG */
6524 	caddr_t addr;
6525 	int ttesz;
6526 	int ret;
6527 	cpuset_t cpuset;
6528 
6529 	ASSERT(pp != NULL);
6530 	ASSERT(sfmmu_mlist_held(pp));
6531 	ASSERT(pp->p_vnode != &kvp);
6532 
6533 	CPUSET_ZERO(cpuset);
6534 
6535 	hmeblkp = sfmmu_hmetohblk(sfhme);
6536 
6537 readtte:
6538 	sfmmu_copytte(&sfhme->hme_tte, &tte);
6539 	if (TTE_IS_VALID(&tte)) {
6540 		sfmmup = hblktosfmmu(hmeblkp);
6541 		ttesz = get_hblk_ttesz(hmeblkp);
6542 		/*
6543 		 * Only unload mappings of 'cons' size.
6544 		 */
6545 		if (ttesz != cons)
6546 			return (cpuset);
6547 
6548 		/*
6549 		 * Note that we have p_mapping lock, but no hash lock here.
6550 		 * hblk_unload() has to have both hash lock AND p_mapping
6551 		 * lock before it tries to modify tte. So, the tte could
6552 		 * not become invalid in the sfmmu_modifytte_try() below.
6553 		 */
6554 		ttemod = tte;
6555 #ifdef DEBUG
6556 		orig_old = tte;
6557 #endif /* DEBUG */
6558 
6559 		TTE_SET_INVALID(&ttemod);
6560 		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
6561 		if (ret < 0) {
6562 #ifdef DEBUG
6563 			/* only R/M bits can change. */
6564 			chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
6565 #endif /* DEBUG */
6566 			goto readtte;
6567 		}
6568 
6569 		if (ret == 0) {
6570 			panic("pageunload: cas failed?");
6571 		}
6572 
6573 		addr = tte_to_vaddr(hmeblkp, tte);
6574 
6575 		sfmmu_ttesync(sfmmup, addr, &tte, pp);
6576 
6577 		atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1);
6578 
6579 		/*
6580 		 * We need to flush the page from the virtual cache
6581 		 * in order to prevent a virtual cache alias
6582 		 * inconsistency. The particular scenario we need
6583 		 * to worry about is:
6584 		 * Given:  va1 and va2 are two virtual address that
6585 		 * alias and will map the same physical address.
6586 		 * 1.	mapping exists from va1 to pa and data has
6587 		 *	been read into the cache.
6588 		 * 2.	unload va1.
6589 		 * 3.	load va2 and modify data using va2.
6590 		 * 4	unload va2.
6591 		 * 5.	load va1 and reference data.  Unless we flush
6592 		 *	the data cache when we unload we will get
6593 		 *	stale data.
6594 		 * This scenario is taken care of by using virtual
6595 		 * page coloring.
6596 		 */
6597 		if (sfmmup->sfmmu_ismhat) {
6598 			/*
6599 			 * Flush TSBs, TLBs and caches
6600 			 * of every process
6601 			 * sharing this ism segment.
6602 			 */
6603 			sfmmu_hat_lock_all();
6604 			mutex_enter(&ism_mlist_lock);
6605 			kpreempt_disable();
6606 			if (do_virtual_coloring)
6607 				sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
6608 					pp->p_pagenum, CACHE_NO_FLUSH);
6609 			else
6610 				sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
6611 					pp->p_pagenum, CACHE_FLUSH);
6612 			kpreempt_enable();
6613 			mutex_exit(&ism_mlist_lock);
6614 			sfmmu_hat_unlock_all();
6615 			cpuset = cpu_ready_set;
6616 		} else if (do_virtual_coloring) {
6617 			sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
6618 			cpuset = sfmmup->sfmmu_cpusran;
6619 		} else {
6620 			sfmmu_tlbcache_demap(addr, sfmmup, hmeblkp,
6621 				pp->p_pagenum, 0, FLUSH_NECESSARY_CPUS,
6622 				CACHE_FLUSH, 0);
6623 			cpuset = sfmmup->sfmmu_cpusran;
6624 		}
6625 
6626 		/*
6627 		 * Hme_sub has to run after ttesync() and a_rss update.
6628 		 * See hblk_unload().
6629 		 */
6630 		HME_SUB(sfhme, pp);
6631 		membar_stst();
6632 
6633 		/*
6634 		 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
6635 		 * since pteload may have done a HME_ADD() right after
6636 		 * we did the HME_SUB() above. Hmecnt is now maintained
6637 		 * by cas only. no lock guranteed its value. The only
6638 		 * gurantee we have is the hmecnt should not be less than
6639 		 * what it should be so the hblk will not be taken away.
6640 		 * It's also important that we decremented the hmecnt after
6641 		 * we are done with hmeblkp so that this hmeblk won't be
6642 		 * stolen.
6643 		 */
6644 		ASSERT(hmeblkp->hblk_hmecnt > 0);
6645 		ASSERT(hmeblkp->hblk_vcnt > 0);
6646 		atomic_add_16(&hmeblkp->hblk_vcnt, -1);
6647 		atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
6648 		/*
6649 		 * This is bug 4063182.
6650 		 * XXX: fixme
6651 		 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6652 		 *	!hmeblkp->hblk_lckcnt);
6653 		 */
6654 	} else {
6655 		panic("invalid tte? pp %p &tte %p",
6656 		    (void *)pp, (void *)&tte);
6657 	}
6658 
6659 	return (cpuset);
6660 }
6661 
6662 /*
6663  * While relocating a kernel page, this function will move the mappings
6664  * from tpp to dpp and modify any associated data with these mappings.
6665  * It also unsuspends the suspended kernel mapping.
6666  */
6667 static void
6668 hat_pagereload(struct page *tpp, struct page *dpp)
6669 {
6670 	struct sf_hment *sfhme;
6671 	tte_t tte, ttemod;
6672 	int index, cons;
6673 
6674 	ASSERT(getpil() == PIL_MAX);
6675 	ASSERT(sfmmu_mlist_held(tpp));
6676 	ASSERT(sfmmu_mlist_held(dpp));
6677 
6678 	index = PP_MAPINDEX(tpp);
6679 	cons = TTE8K;
6680 
6681 	/* Update real mappings to the page */
6682 retry:
6683 	for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
6684 		if (IS_PAHME(sfhme))
6685 			continue;
6686 		sfmmu_copytte(&sfhme->hme_tte, &tte);
6687 		ttemod = tte;
6688 
6689 		/*
6690 		 * replace old pfn with new pfn in TTE
6691 		 */
6692 		PFN_TO_TTE(ttemod, dpp->p_pagenum);
6693 
6694 		/*
6695 		 * clear suspend bit
6696 		 */
6697 		ASSERT(TTE_IS_SUSPEND(&ttemod));
6698 		TTE_CLR_SUSPEND(&ttemod);
6699 
6700 		if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
6701 			panic("hat_pagereload(): sfmmu_modifytte_try() failed");
6702 
6703 		/*
6704 		 * set hme_page point to new page
6705 		 */
6706 		sfhme->hme_page = dpp;
6707 	}
6708 
6709 	/*
6710 	 * move p_mapping list from old page to new page
6711 	 */
6712 	dpp->p_mapping = tpp->p_mapping;
6713 	tpp->p_mapping = NULL;
6714 	dpp->p_share = tpp->p_share;
6715 	tpp->p_share = 0;
6716 
6717 	while (index != 0) {
6718 		index = index >> 1;
6719 		if (index != 0)
6720 			cons++;
6721 		if (index & 0x1) {
6722 			tpp = PP_GROUPLEADER(tpp, cons);
6723 			dpp = PP_GROUPLEADER(dpp, cons);
6724 			goto retry;
6725 		}
6726 	}
6727 
6728 	if (dtrace_kreloc_fini)
6729 		(*dtrace_kreloc_fini)();
6730 	mutex_exit(&kpr_suspendlock);
6731 }
6732 
6733 uint_t
6734 hat_pagesync(struct page *pp, uint_t clearflag)
6735 {
6736 	struct sf_hment *sfhme, *tmphme = NULL;
6737 	struct hme_blk *hmeblkp;
6738 	kmutex_t *pml;
6739 	cpuset_t cpuset, tset;
6740 	int	index, cons;
6741 	extern	ulong_t po_share;
6742 	page_t	*save_pp = pp;
6743 
6744 	CPUSET_ZERO(cpuset);
6745 
6746 	if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
6747 		return (PP_GENERIC_ATTR(pp));
6748 	}
6749 
6750 	if ((clearflag == (HAT_SYNC_STOPON_REF | HAT_SYNC_DONTZERO)) &&
6751 	    PP_ISREF(pp)) {
6752 		return (PP_GENERIC_ATTR(pp));
6753 	}
6754 
6755 	if ((clearflag == (HAT_SYNC_STOPON_MOD | HAT_SYNC_DONTZERO)) &&
6756 	    PP_ISMOD(pp)) {
6757 		return (PP_GENERIC_ATTR(pp));
6758 	}
6759 
6760 	if ((clearflag & HAT_SYNC_STOPON_SHARED) != 0 &&
6761 	    (pp->p_share > po_share) &&
6762 	    !(clearflag & HAT_SYNC_ZERORM)) {
6763 		if (PP_ISRO(pp))
6764 			hat_page_setattr(pp, P_REF);
6765 		return (PP_GENERIC_ATTR(pp));
6766 	}
6767 
6768 	clearflag &= ~HAT_SYNC_STOPON_SHARED;
6769 	pml = sfmmu_mlist_enter(pp);
6770 	index = PP_MAPINDEX(pp);
6771 	cons = TTE8K;
6772 retry:
6773 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6774 		/*
6775 		 * We need to save the next hment on the list since
6776 		 * it is possible for pagesync to remove an invalid hment
6777 		 * from the list.
6778 		 */
6779 		tmphme = sfhme->hme_next;
6780 		/*
6781 		 * If we are looking for large mappings and this hme doesn't
6782 		 * reach the range we are seeking, just ignore its.
6783 		 */
6784 		hmeblkp = sfmmu_hmetohblk(sfhme);
6785 		if (hmeblkp->hblk_xhat_bit)
6786 			continue;
6787 
6788 		if (hme_size(sfhme) < cons)
6789 			continue;
6790 		tset = sfmmu_pagesync(pp, sfhme,
6791 			clearflag & ~HAT_SYNC_STOPON_RM);
6792 		CPUSET_OR(cpuset, tset);
6793 		/*
6794 		 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
6795 		 * as the "ref" or "mod" is set.
6796 		 */
6797 		if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
6798 		    ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
6799 		    ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) {
6800 			index = 0;
6801 			break;
6802 		}
6803 	}
6804 
6805 	while (index) {
6806 		index = index >> 1;
6807 		cons++;
6808 		if (index & 0x1) {
6809 			/* Go to leading page */
6810 			pp = PP_GROUPLEADER(pp, cons);
6811 			goto retry;
6812 		}
6813 	}
6814 
6815 	xt_sync(cpuset);
6816 	sfmmu_mlist_exit(pml);
6817 	return (PP_GENERIC_ATTR(save_pp));
6818 }
6819 
6820 /*
6821  * Get all the hardware dependent attributes for a page struct
6822  */
6823 static cpuset_t
6824 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
6825 	uint_t clearflag)
6826 {
6827 	caddr_t addr;
6828 	tte_t tte, ttemod;
6829 	struct hme_blk *hmeblkp;
6830 	int ret;
6831 	sfmmu_t *sfmmup;
6832 	cpuset_t cpuset;
6833 
6834 	ASSERT(pp != NULL);
6835 	ASSERT(sfmmu_mlist_held(pp));
6836 	ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6837 		(clearflag == HAT_SYNC_ZERORM));
6838 
6839 	SFMMU_STAT(sf_pagesync);
6840 
6841 	CPUSET_ZERO(cpuset);
6842 
6843 sfmmu_pagesync_retry:
6844 
6845 	sfmmu_copytte(&sfhme->hme_tte, &tte);
6846 	if (TTE_IS_VALID(&tte)) {
6847 		hmeblkp = sfmmu_hmetohblk(sfhme);
6848 		sfmmup = hblktosfmmu(hmeblkp);
6849 		addr = tte_to_vaddr(hmeblkp, tte);
6850 		if (clearflag == HAT_SYNC_ZERORM) {
6851 			ttemod = tte;
6852 			TTE_CLR_RM(&ttemod);
6853 			ret = sfmmu_modifytte_try(&tte, &ttemod,
6854 				&sfhme->hme_tte);
6855 			if (ret < 0) {
6856 				/*
6857 				 * cas failed and the new value is not what
6858 				 * we want.
6859 				 */
6860 				goto sfmmu_pagesync_retry;
6861 			}
6862 
6863 			if (ret > 0) {
6864 				/* we win the cas */
6865 				sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
6866 				cpuset = sfmmup->sfmmu_cpusran;
6867 			}
6868 		}
6869 
6870 		sfmmu_ttesync(sfmmup, addr, &tte, pp);
6871 	}
6872 	return (cpuset);
6873 }
6874 
6875 /*
6876  * Remove write permission from a mappings to a page, so that
6877  * we can detect the next modification of it. This requires modifying
6878  * the TTE then invalidating (demap) any TLB entry using that TTE.
6879  * This code is similar to sfmmu_pagesync().
6880  */
6881 static cpuset_t
6882 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
6883 {
6884 	caddr_t addr;
6885 	tte_t tte;
6886 	tte_t ttemod;
6887 	struct hme_blk *hmeblkp;
6888 	int ret;
6889 	sfmmu_t *sfmmup;
6890 	cpuset_t cpuset;
6891 
6892 	ASSERT(pp != NULL);
6893 	ASSERT(sfmmu_mlist_held(pp));
6894 
6895 	CPUSET_ZERO(cpuset);
6896 	SFMMU_STAT(sf_clrwrt);
6897 
6898 retry:
6899 
6900 	sfmmu_copytte(&sfhme->hme_tte, &tte);
6901 	if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
6902 		hmeblkp = sfmmu_hmetohblk(sfhme);
6903 
6904 		/*
6905 		 * xhat mappings should never be to a VMODSORT page.
6906 		 */
6907 		ASSERT(hmeblkp->hblk_xhat_bit == 0);
6908 
6909 		sfmmup = hblktosfmmu(hmeblkp);
6910 		addr = tte_to_vaddr(hmeblkp, tte);
6911 
6912 		ttemod = tte;
6913 		TTE_CLR_WRT(&ttemod);
6914 		TTE_CLR_MOD(&ttemod);
6915 		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
6916 
6917 		/*
6918 		 * if cas failed and the new value is not what
6919 		 * we want retry
6920 		 */
6921 		if (ret < 0)
6922 			goto retry;
6923 
6924 		/* we win the cas */
6925 		if (ret > 0) {
6926 			sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
6927 			cpuset = sfmmup->sfmmu_cpusran;
6928 		}
6929 	}
6930 
6931 	return (cpuset);
6932 }
6933 
6934 /*
6935  * Walk all mappings of a page, removing write permission and clearing the
6936  * ref/mod bits. This code is similar to hat_pagesync()
6937  */
6938 static void
6939 hat_page_clrwrt(page_t *pp)
6940 {
6941 	struct sf_hment *sfhme;
6942 	struct sf_hment *tmphme = NULL;
6943 	kmutex_t *pml;
6944 	cpuset_t cpuset;
6945 	cpuset_t tset;
6946 	int	index;
6947 	int	 cons;
6948 
6949 	CPUSET_ZERO(cpuset);
6950 
6951 	pml = sfmmu_mlist_enter(pp);
6952 	index = PP_MAPINDEX(pp);
6953 	cons = TTE8K;
6954 retry:
6955 	for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6956 		tmphme = sfhme->hme_next;
6957 
6958 		/*
6959 		 * If we are looking for large mappings and this hme doesn't
6960 		 * reach the range we are seeking, just ignore its.
6961 		 */
6962 
6963 		if (hme_size(sfhme) < cons)
6964 			continue;
6965 
6966 		tset = sfmmu_pageclrwrt(pp, sfhme);
6967 		CPUSET_OR(cpuset, tset);
6968 	}
6969 
6970 	while (index) {
6971 		index = index >> 1;
6972 		cons++;
6973 		if (index & 0x1) {
6974 			/* Go to leading page */
6975 			pp = PP_GROUPLEADER(pp, cons);
6976 			goto retry;
6977 		}
6978 	}
6979 
6980 	xt_sync(cpuset);
6981 	sfmmu_mlist_exit(pml);
6982 }
6983 
6984 /*
6985  * Set the given REF/MOD/RO bits for the given page.
6986  * For a vnode with a sorted v_pages list, we need to change
6987  * the attributes and the v_pages list together under page_vnode_mutex.
6988  */
6989 void
6990 hat_page_setattr(page_t *pp, uint_t flag)
6991 {
6992 	vnode_t		*vp = pp->p_vnode;
6993 	page_t		**listp;
6994 	kmutex_t	*pmtx;
6995 	kmutex_t	*vphm = NULL;
6996 
6997 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
6998 
6999 	/*
7000 	 * nothing to do if attribute already set
7001 	 */
7002 	if ((pp->p_nrm & flag) == flag)
7003 		return;
7004 
7005 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7006 		vphm = page_vnode_mutex(vp);
7007 		mutex_enter(vphm);
7008 	}
7009 
7010 	pmtx = sfmmu_page_enter(pp);
7011 	pp->p_nrm |= flag;
7012 	sfmmu_page_exit(pmtx);
7013 
7014 	if (vphm != NULL) {
7015 		/*
7016 		 * Some File Systems examine v_pages for NULL w/o
7017 		 * grabbing the vphm mutex. Must not let it become NULL when
7018 		 * pp is the only page on the list.
7019 		 */
7020 		if (pp->p_vpnext != pp) {
7021 			page_vpsub(&vp->v_pages, pp);
7022 			if (vp->v_pages != NULL)
7023 				listp = &vp->v_pages->p_vpprev->p_vpnext;
7024 			else
7025 				listp = &vp->v_pages;
7026 			page_vpadd(listp, pp);
7027 		}
7028 		mutex_exit(vphm);
7029 	}
7030 }
7031 
7032 void
7033 hat_page_clrattr(page_t *pp, uint_t flag)
7034 {
7035 	vnode_t		*vp = pp->p_vnode;
7036 	kmutex_t	*vphm = NULL;
7037 	kmutex_t	*pmtx;
7038 
7039 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7040 
7041 	/*
7042 	 * For vnode with a sorted v_pages list, we need to change
7043 	 * the attributes and the v_pages list together under page_vnode_mutex.
7044 	 */
7045 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7046 		vphm = page_vnode_mutex(vp);
7047 		mutex_enter(vphm);
7048 	}
7049 
7050 	pmtx = sfmmu_page_enter(pp);
7051 	pp->p_nrm &= ~flag;
7052 	sfmmu_page_exit(pmtx);
7053 
7054 	if (vphm != NULL) {
7055 		/*
7056 		 * Some File Systems examine v_pages for NULL w/o
7057 		 * grabbing the vphm mutex. Must not let it become NULL when
7058 		 * pp is the only page on the list.
7059 		 */
7060 		if (pp->p_vpnext != pp) {
7061 			page_vpsub(&vp->v_pages, pp);
7062 			page_vpadd(&vp->v_pages, pp);
7063 		}
7064 		mutex_exit(vphm);
7065 
7066 		/*
7067 		 * VMODSORT works by removing write permissions and getting
7068 		 * a fault when a page is made dirty. At this point
7069 		 * we need to remove write permission from all mappings
7070 		 * to this page.
7071 		 */
7072 		hat_page_clrwrt(pp);
7073 	}
7074 }
7075 
7076 
7077 uint_t
7078 hat_page_getattr(page_t *pp, uint_t flag)
7079 {
7080 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7081 	return ((uint_t)(pp->p_nrm & flag));
7082 }
7083 
7084 /*
7085  * DEBUG kernels: verify that a kernel va<->pa translation
7086  * is safe by checking the underlying page_t is in a page
7087  * relocation-safe state.
7088  */
7089 #ifdef	DEBUG
7090 void
7091 sfmmu_check_kpfn(pfn_t pfn)
7092 {
7093 	page_t *pp;
7094 	int index, cons;
7095 
7096 	if (hat_check_vtop == 0)
7097 		return;
7098 
7099 	if (hat_kpr_enabled == 0 || kvseg.s_base == NULL || panicstr)
7100 		return;
7101 
7102 	pp = page_numtopp_nolock(pfn);
7103 	if (!pp)
7104 		return;
7105 
7106 	if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7107 		return;
7108 
7109 	/*
7110 	 * Handed a large kernel page, we dig up the root page since we
7111 	 * know the root page might have the lock also.
7112 	 */
7113 	if (pp->p_szc != 0) {
7114 		index = PP_MAPINDEX(pp);
7115 		cons = TTE8K;
7116 again:
7117 		while (index != 0) {
7118 			index >>= 1;
7119 			if (index != 0)
7120 				cons++;
7121 			if (index & 0x1) {
7122 				pp = PP_GROUPLEADER(pp, cons);
7123 				goto again;
7124 			}
7125 		}
7126 	}
7127 
7128 	if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7129 		return;
7130 
7131 	/*
7132 	 * Pages need to be locked or allocated "permanent" (either from
7133 	 * static_arena arena or explicitly setting PG_NORELOC when calling
7134 	 * page_create_va()) for VA->PA translations to be valid.
7135 	 */
7136 	if (!PP_ISNORELOC(pp))
7137 		panic("Illegal VA->PA translation, pp 0x%p not permanent", pp);
7138 	else
7139 		panic("Illegal VA->PA translation, pp 0x%p not locked", pp);
7140 }
7141 #endif	/* DEBUG */
7142 
7143 /*
7144  * Returns a page frame number for a given virtual address.
7145  * Returns PFN_INVALID to indicate an invalid mapping
7146  */
7147 pfn_t
7148 hat_getpfnum(struct hat *hat, caddr_t addr)
7149 {
7150 	pfn_t pfn;
7151 	tte_t tte;
7152 
7153 	/*
7154 	 * We would like to
7155 	 * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
7156 	 * but we can't because the iommu driver will call this
7157 	 * routine at interrupt time and it can't grab the as lock
7158 	 * or it will deadlock: A thread could have the as lock
7159 	 * and be waiting for io.  The io can't complete
7160 	 * because the interrupt thread is blocked trying to grab
7161 	 * the as lock.
7162 	 */
7163 
7164 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7165 
7166 	if (hat == ksfmmup) {
7167 		if (segkpm && IS_KPM_ADDR(addr))
7168 			return (sfmmu_kpm_vatopfn(addr));
7169 		while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7170 		    == PFN_SUSPENDED) {
7171 			sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7172 		}
7173 		sfmmu_check_kpfn(pfn);
7174 		return (pfn);
7175 	} else {
7176 		return (sfmmu_uvatopfn(addr, hat));
7177 	}
7178 }
7179 
7180 /*
7181  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
7182  * Use hat_getpfnum(kas.a_hat, ...) instead.
7183  *
7184  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
7185  * but can't right now due to the fact that some software has grown to use
7186  * this interface incorrectly. So for now when the interface is misused,
7187  * return a warning to the user that in the future it won't work in the
7188  * way they're abusing it, and carry on (after disabling page relocation).
7189  */
7190 pfn_t
7191 hat_getkpfnum(caddr_t addr)
7192 {
7193 	pfn_t pfn;
7194 	tte_t tte;
7195 	int badcaller = 0;
7196 	extern int segkmem_reloc;
7197 
7198 	if (segkpm && IS_KPM_ADDR(addr)) {
7199 		badcaller = 1;
7200 		pfn = sfmmu_kpm_vatopfn(addr);
7201 	} else {
7202 		while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7203 		    == PFN_SUSPENDED) {
7204 			sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7205 		}
7206 		badcaller = pf_is_memory(pfn);
7207 	}
7208 
7209 	if (badcaller) {
7210 		/*
7211 		 * We can't return PFN_INVALID or the caller may panic
7212 		 * or corrupt the system.  The only alternative is to
7213 		 * disable page relocation at this point for all kernel
7214 		 * memory.  This will impact any callers of page_relocate()
7215 		 * such as FMA or DR.
7216 		 *
7217 		 * RFE: Add junk here to spit out an ereport so the sysadmin
7218 		 * can be advised that he should upgrade his device driver
7219 		 * so that this doesn't happen.
7220 		 */
7221 		hat_getkpfnum_badcall(caller());
7222 		if (hat_kpr_enabled && segkmem_reloc) {
7223 			hat_kpr_enabled = 0;
7224 			segkmem_reloc = 0;
7225 			cmn_err(CE_WARN, "Kernel Page Relocation is DISABLED");
7226 		}
7227 	}
7228 	return (pfn);
7229 }
7230 
7231 pfn_t
7232 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup)
7233 {
7234 	struct hmehash_bucket *hmebp;
7235 	hmeblk_tag hblktag;
7236 	int hmeshift, hashno = 1;
7237 	struct hme_blk *hmeblkp = NULL;
7238 
7239 	struct sf_hment *sfhmep;
7240 	tte_t tte;
7241 	pfn_t pfn;
7242 
7243 	/* support for ISM */
7244 	ism_map_t	*ism_map;
7245 	ism_blk_t	*ism_blkp;
7246 	int		i;
7247 	sfmmu_t *ism_hatid = NULL;
7248 	sfmmu_t *locked_hatid = NULL;
7249 
7250 
7251 	ASSERT(sfmmup != ksfmmup);
7252 	SFMMU_STAT(sf_user_vtop);
7253 	/*
7254 	 * Set ism_hatid if vaddr falls in a ISM segment.
7255 	 */
7256 	ism_blkp = sfmmup->sfmmu_iblk;
7257 	if (ism_blkp) {
7258 		sfmmu_ismhat_enter(sfmmup, 0);
7259 		locked_hatid = sfmmup;
7260 	}
7261 	while (ism_blkp && ism_hatid == NULL) {
7262 		ism_map = ism_blkp->iblk_maps;
7263 		for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
7264 			if (vaddr >= ism_start(ism_map[i]) &&
7265 			    vaddr < ism_end(ism_map[i])) {
7266 				sfmmup = ism_hatid = ism_map[i].imap_ismhat;
7267 				vaddr = (caddr_t)(vaddr -
7268 					ism_start(ism_map[i]));
7269 				break;
7270 			}
7271 		}
7272 		ism_blkp = ism_blkp->iblk_next;
7273 	}
7274 	if (locked_hatid) {
7275 		sfmmu_ismhat_exit(locked_hatid, 0);
7276 	}
7277 
7278 	hblktag.htag_id = sfmmup;
7279 	do {
7280 		hmeshift = HME_HASH_SHIFT(hashno);
7281 		hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
7282 		hblktag.htag_rehash = hashno;
7283 		hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
7284 
7285 		SFMMU_HASH_LOCK(hmebp);
7286 
7287 		HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7288 		if (hmeblkp != NULL) {
7289 			HBLKTOHME(sfhmep, hmeblkp, vaddr);
7290 			sfmmu_copytte(&sfhmep->hme_tte, &tte);
7291 			if (TTE_IS_VALID(&tte)) {
7292 				pfn = TTE_TO_PFN(vaddr, &tte);
7293 			} else {
7294 				pfn = PFN_INVALID;
7295 			}
7296 			SFMMU_HASH_UNLOCK(hmebp);
7297 			return (pfn);
7298 		}
7299 		SFMMU_HASH_UNLOCK(hmebp);
7300 		hashno++;
7301 	} while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
7302 	return (PFN_INVALID);
7303 }
7304 
7305 
7306 /*
7307  * For compatability with AT&T and later optimizations
7308  */
7309 /* ARGSUSED */
7310 void
7311 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
7312 {
7313 	ASSERT(hat != NULL);
7314 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7315 }
7316 
7317 /*
7318  * Return the number of mappings to a particular page.
7319  * This number is an approximation of the number of
7320  * number of people sharing the page.
7321  */
7322 ulong_t
7323 hat_page_getshare(page_t *pp)
7324 {
7325 	page_t *spp = pp;	/* start page */
7326 	kmutex_t *pml;
7327 	ulong_t	cnt;
7328 	int index, sz = TTE64K;
7329 
7330 	/*
7331 	 * We need to grab the mlist lock to make sure any outstanding
7332 	 * load/unloads complete.  Otherwise we could return zero
7333 	 * even though the unload(s) hasn't finished yet.
7334 	 */
7335 	pml = sfmmu_mlist_enter(spp);
7336 	cnt = spp->p_share;
7337 
7338 	if (kpm_enable)
7339 		cnt += spp->p_kpmref;
7340 
7341 	/*
7342 	 * If we have any large mappings, we count the number of
7343 	 * mappings that this large page is part of.
7344 	 */
7345 	index = PP_MAPINDEX(spp);
7346 	index >>= 1;
7347 	while (index) {
7348 		pp = PP_GROUPLEADER(spp, sz);
7349 		if ((index & 0x1) && pp != spp) {
7350 			cnt += pp->p_share;
7351 			spp = pp;
7352 		}
7353 		index >>= 1;
7354 		sz++;
7355 	}
7356 	sfmmu_mlist_exit(pml);
7357 	return (cnt);
7358 }
7359 
7360 /*
7361  * Unload all large mappings to the pp and reset the p_szc field of every
7362  * constituent page according to the remaining mappings.
7363  *
7364  * pp must be locked SE_EXCL. Even though no other constituent pages are
7365  * locked it's legal to unload the large mappings to the pp because all
7366  * constituent pages of large locked mappings have to be locked SE_SHARED.
7367  * This means if we have SE_EXCL lock on one of constituent pages none of the
7368  * large mappings to pp are locked.
7369  *
7370  * Decrease p_szc field starting from the last constituent page and ending
7371  * with the root page. This method is used because other threads rely on the
7372  * root's p_szc to find the lock to syncronize on. After a root page_t's p_szc
7373  * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This
7374  * ensures that p_szc changes of the constituent pages appears atomic for all
7375  * threads that use sfmmu_mlspl_enter() to examine p_szc field.
7376  *
7377  * This mechanism is only used for file system pages where it's not always
7378  * possible to get SE_EXCL locks on all constituent pages to demote the size
7379  * code (as is done for anonymous or kernel large pages).
7380  *
7381  * See more comments in front of sfmmu_mlspl_enter().
7382  */
7383 void
7384 hat_page_demote(page_t *pp)
7385 {
7386 	int index;
7387 	int sz;
7388 	cpuset_t cpuset;
7389 	int sync = 0;
7390 	page_t *rootpp;
7391 	struct sf_hment *sfhme;
7392 	struct sf_hment *tmphme = NULL;
7393 	struct hme_blk *hmeblkp;
7394 	uint_t pszc;
7395 	page_t *lastpp;
7396 	cpuset_t tset;
7397 	pgcnt_t npgs;
7398 	kmutex_t *pml;
7399 	kmutex_t *pmtx = NULL;
7400 
7401 	ASSERT(PAGE_EXCL(pp));
7402 	ASSERT(!PP_ISFREE(pp));
7403 	ASSERT(page_szc_lock_assert(pp));
7404 	pml = sfmmu_mlist_enter(pp);
7405 
7406 	pszc = pp->p_szc;
7407 	if (pszc == 0) {
7408 		goto out;
7409 	}
7410 
7411 	index = PP_MAPINDEX(pp) >> 1;
7412 
7413 	if (index) {
7414 		CPUSET_ZERO(cpuset);
7415 		sz = TTE64K;
7416 		sync = 1;
7417 	}
7418 
7419 	while (index) {
7420 		if (!(index & 0x1)) {
7421 			index >>= 1;
7422 			sz++;
7423 			continue;
7424 		}
7425 		ASSERT(sz <= pszc);
7426 		rootpp = PP_GROUPLEADER(pp, sz);
7427 		for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
7428 			tmphme = sfhme->hme_next;
7429 			hmeblkp = sfmmu_hmetohblk(sfhme);
7430 			if (hme_size(sfhme) != sz) {
7431 				continue;
7432 			}
7433 			if (hmeblkp->hblk_xhat_bit) {
7434 				cmn_err(CE_PANIC,
7435 				    "hat_page_demote: xhat hmeblk");
7436 			}
7437 			tset = sfmmu_pageunload(rootpp, sfhme, sz);
7438 			CPUSET_OR(cpuset, tset);
7439 		}
7440 		if (index >>= 1) {
7441 			sz++;
7442 		}
7443 	}
7444 
7445 	ASSERT(!PP_ISMAPPED_LARGE(pp));
7446 
7447 	if (sync) {
7448 		xt_sync(cpuset);
7449 		if (PP_ISTNC(pp)) {
7450 			conv_tnc(rootpp, sz);
7451 		}
7452 	}
7453 
7454 	pmtx = sfmmu_page_enter(pp);
7455 
7456 	ASSERT(pp->p_szc == pszc);
7457 	rootpp = PP_PAGEROOT(pp);
7458 	ASSERT(rootpp->p_szc == pszc);
7459 	lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
7460 
7461 	while (lastpp != rootpp) {
7462 		sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
7463 		ASSERT(sz < pszc);
7464 		npgs = (sz == 0) ? 1 : TTEPAGES(sz);
7465 		ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
7466 		while (--npgs > 0) {
7467 			lastpp->p_szc = (uchar_t)sz;
7468 			lastpp = PP_PAGEPREV(lastpp);
7469 		}
7470 		if (sz) {
7471 			/*
7472 			 * make sure before current root's pszc
7473 			 * is updated all updates to constituent pages pszc
7474 			 * fields are globally visible.
7475 			 */
7476 			membar_producer();
7477 		}
7478 		lastpp->p_szc = sz;
7479 		ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
7480 		if (lastpp != rootpp) {
7481 			lastpp = PP_PAGEPREV(lastpp);
7482 		}
7483 	}
7484 	if (sz == 0) {
7485 		/* the loop above doesn't cover this case */
7486 		rootpp->p_szc = 0;
7487 	}
7488 out:
7489 	ASSERT(pp->p_szc == 0);
7490 	if (pmtx != NULL) {
7491 		sfmmu_page_exit(pmtx);
7492 	}
7493 	sfmmu_mlist_exit(pml);
7494 }
7495 
7496 /*
7497  * Refresh the HAT ismttecnt[] element for size szc.
7498  * Caller must have set ISM busy flag to prevent mapping
7499  * lists from changing while we're traversing them.
7500  */
7501 pgcnt_t
7502 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
7503 {
7504 	ism_blk_t	*ism_blkp = sfmmup->sfmmu_iblk;
7505 	ism_map_t	*ism_map;
7506 	pgcnt_t		npgs = 0;
7507 	int		j;
7508 
7509 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
7510 	for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
7511 		ism_map = ism_blkp->iblk_maps;
7512 		for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++)
7513 			npgs += ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
7514 	}
7515 	sfmmup->sfmmu_ismttecnt[szc] = npgs;
7516 	return (npgs);
7517 }
7518 
7519 /*
7520  * Yield the memory claim requirement for an address space.
7521  *
7522  * This is currently implemented as the number of bytes that have active
7523  * hardware translations that have page structures.  Therefore, it can
7524  * underestimate the traditional resident set size, eg, if the
7525  * physical page is present and the hardware translation is missing;
7526  * and it can overestimate the rss, eg, if there are active
7527  * translations to a frame buffer with page structs.
7528  * Also, it does not take sharing into account.
7529  *
7530  * Note that we don't acquire locks here since this function is most often
7531  * called from the clock thread.
7532  */
7533 size_t
7534 hat_get_mapped_size(struct hat *hat)
7535 {
7536 	size_t		assize = 0;
7537 	int 		i;
7538 
7539 	if (hat == NULL)
7540 		return (0);
7541 
7542 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7543 
7544 	for (i = 0; i < mmu_page_sizes; i++)
7545 		assize += (pgcnt_t)hat->sfmmu_ttecnt[i] * TTEBYTES(i);
7546 
7547 	if (hat->sfmmu_iblk == NULL)
7548 		return (assize);
7549 
7550 	for (i = 0; i < mmu_page_sizes; i++)
7551 		assize += (pgcnt_t)hat->sfmmu_ismttecnt[i] * TTEBYTES(i);
7552 
7553 	return (assize);
7554 }
7555 
7556 int
7557 hat_stats_enable(struct hat *hat)
7558 {
7559 	hatlock_t	*hatlockp;
7560 
7561 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7562 
7563 	hatlockp = sfmmu_hat_enter(hat);
7564 	hat->sfmmu_rmstat++;
7565 	sfmmu_hat_exit(hatlockp);
7566 	return (1);
7567 }
7568 
7569 void
7570 hat_stats_disable(struct hat *hat)
7571 {
7572 	hatlock_t	*hatlockp;
7573 
7574 	ASSERT(hat->sfmmu_xhat_provider == NULL);
7575 
7576 	hatlockp = sfmmu_hat_enter(hat);
7577 	hat->sfmmu_rmstat--;
7578 	sfmmu_hat_exit(hatlockp);
7579 }
7580 
7581 /*
7582  * Routines for entering or removing  ourselves from the
7583  * ism_hat's mapping list.
7584  */
7585 static void
7586 iment_add(struct ism_ment *iment,  struct hat *ism_hat)
7587 {
7588 	ASSERT(MUTEX_HELD(&ism_mlist_lock));
7589 
7590 	iment->iment_prev = NULL;
7591 	iment->iment_next = ism_hat->sfmmu_iment;
7592 	if (ism_hat->sfmmu_iment) {
7593 		ism_hat->sfmmu_iment->iment_prev = iment;
7594 	}
7595 	ism_hat->sfmmu_iment = iment;
7596 }
7597 
7598 static void
7599 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
7600 {
7601 	ASSERT(MUTEX_HELD(&ism_mlist_lock));
7602 
7603 	if (ism_hat->sfmmu_iment == NULL) {
7604 		panic("ism map entry remove - no entries");
7605 	}
7606 
7607 	if (iment->iment_prev) {
7608 		ASSERT(ism_hat->sfmmu_iment != iment);
7609 		iment->iment_prev->iment_next = iment->iment_next;
7610 	} else {
7611 		ASSERT(ism_hat->sfmmu_iment == iment);
7612 		ism_hat->sfmmu_iment = iment->iment_next;
7613 	}
7614 
7615 	if (iment->iment_next) {
7616 		iment->iment_next->iment_prev = iment->iment_prev;
7617 	}
7618 
7619 	/*
7620 	 * zero out the entry
7621 	 */
7622 	iment->iment_next = NULL;
7623 	iment->iment_prev = NULL;
7624 	iment->iment_hat =  NULL;
7625 }
7626 
7627 /*
7628  * Hat_share()/unshare() return an (non-zero) error
7629  * when saddr and daddr are not properly aligned.
7630  *
7631  * The top level mapping element determines the alignment
7632  * requirement for saddr and daddr, depending on different
7633  * architectures.
7634  *
7635  * When hat_share()/unshare() are not supported,
7636  * HATOP_SHARE()/UNSHARE() return 0
7637  */
7638 int
7639 hat_share(struct hat *sfmmup, caddr_t addr,
7640 	struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
7641 {
7642 	ism_blk_t	*ism_blkp;
7643 	ism_blk_t	*new_iblk;
7644 	ism_map_t 	*ism_map;
7645 	ism_ment_t	*ism_ment;
7646 	int		i, added;
7647 	hatlock_t	*hatlockp;
7648 	int		reload_mmu = 0;
7649 	uint_t		ismshift = page_get_shift(ismszc);
7650 	size_t		ismpgsz = page_get_pagesize(ismszc);
7651 	uint_t		ismmask = (uint_t)ismpgsz - 1;
7652 	size_t		sh_size = ISM_SHIFT(ismshift, len);
7653 	ushort_t	ismhatflag;
7654 
7655 #ifdef DEBUG
7656 	caddr_t		eaddr = addr + len;
7657 #endif /* DEBUG */
7658 
7659 	ASSERT(ism_hatid != NULL && sfmmup != NULL);
7660 	ASSERT(sptaddr == ISMID_STARTADDR);
7661 	/*
7662 	 * Check the alignment.
7663 	 */
7664 	if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
7665 		return (EINVAL);
7666 
7667 	/*
7668 	 * Check size alignment.
7669 	 */
7670 	if (!ISM_ALIGNED(ismshift, len))
7671 		return (EINVAL);
7672 
7673 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
7674 
7675 	/*
7676 	 * Allocate ism_ment for the ism_hat's mapping list, and an
7677 	 * ism map blk in case we need one.  We must do our
7678 	 * allocations before acquiring locks to prevent a deadlock
7679 	 * in the kmem allocator on the mapping list lock.
7680 	 */
7681 	new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
7682 	ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
7683 
7684 	/*
7685 	 * Serialize ISM mappings with the ISM busy flag, and also the
7686 	 * trap handlers.
7687 	 */
7688 	sfmmu_ismhat_enter(sfmmup, 0);
7689 
7690 	/*
7691 	 * Allocate an ism map blk if necessary.
7692 	 */
7693 	if (sfmmup->sfmmu_iblk == NULL) {
7694 		sfmmup->sfmmu_iblk = new_iblk;
7695 		bzero(new_iblk, sizeof (*new_iblk));
7696 		new_iblk->iblk_nextpa = (uint64_t)-1;
7697 		membar_stst();	/* make sure next ptr visible to all CPUs */
7698 		sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
7699 		reload_mmu = 1;
7700 		new_iblk = NULL;
7701 	}
7702 
7703 #ifdef DEBUG
7704 	/*
7705 	 * Make sure mapping does not already exist.
7706 	 */
7707 	ism_blkp = sfmmup->sfmmu_iblk;
7708 	while (ism_blkp) {
7709 		ism_map = ism_blkp->iblk_maps;
7710 		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
7711 			if ((addr >= ism_start(ism_map[i]) &&
7712 			    addr < ism_end(ism_map[i])) ||
7713 			    eaddr > ism_start(ism_map[i]) &&
7714 			    eaddr <= ism_end(ism_map[i])) {
7715 				panic("sfmmu_share: Already mapped!");
7716 			}
7717 		}
7718 		ism_blkp = ism_blkp->iblk_next;
7719 	}
7720 #endif /* DEBUG */
7721 
7722 	ASSERT(ismszc >= TTE4M);
7723 	if (ismszc == TTE4M) {
7724 		ismhatflag = HAT_4M_FLAG;
7725 	} else if (ismszc == TTE32M) {
7726 		ismhatflag = HAT_32M_FLAG;
7727 	} else if (ismszc == TTE256M) {
7728 		ismhatflag = HAT_256M_FLAG;
7729 	}
7730 	/*
7731 	 * Add mapping to first available mapping slot.
7732 	 */
7733 	ism_blkp = sfmmup->sfmmu_iblk;
7734 	added = 0;
7735 	while (!added) {
7736 		ism_map = ism_blkp->iblk_maps;
7737 		for (i = 0; i < ISM_MAP_SLOTS; i++)  {
7738 			if (ism_map[i].imap_ismhat == NULL) {
7739 
7740 				ism_map[i].imap_ismhat = ism_hatid;
7741 				ism_map[i].imap_vb_shift = (ushort_t)ismshift;
7742 				ism_map[i].imap_hatflags = ismhatflag;
7743 				ism_map[i].imap_sz_mask = ismmask;
7744 				/*
7745 				 * imap_seg is checked in ISM_CHECK to see if
7746 				 * non-NULL, then other info assumed valid.
7747 				 */
7748 				membar_stst();
7749 				ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
7750 				ism_map[i].imap_ment = ism_ment;
7751 
7752 				/*
7753 				 * Now add ourselves to the ism_hat's
7754 				 * mapping list.
7755 				 */
7756 				ism_ment->iment_hat = sfmmup;
7757 				ism_ment->iment_base_va = addr;
7758 				ism_hatid->sfmmu_ismhat = 1;
7759 				ism_hatid->sfmmu_flags = 0;
7760 				mutex_enter(&ism_mlist_lock);
7761 				iment_add(ism_ment, ism_hatid);
7762 				mutex_exit(&ism_mlist_lock);
7763 				added = 1;
7764 				break;
7765 			}
7766 		}
7767 		if (!added && ism_blkp->iblk_next == NULL) {
7768 			ism_blkp->iblk_next = new_iblk;
7769 			new_iblk = NULL;
7770 			bzero(ism_blkp->iblk_next,
7771 			    sizeof (*ism_blkp->iblk_next));
7772 			ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
7773 			membar_stst();
7774 			ism_blkp->iblk_nextpa =
7775 				va_to_pa((caddr_t)ism_blkp->iblk_next);
7776 		}
7777 		ism_blkp = ism_blkp->iblk_next;
7778 	}
7779 
7780 	/*
7781 	 * Update our counters for this sfmmup's ism mappings.
7782 	 */
7783 	for (i = 0; i <= ismszc; i++) {
7784 		if (!(disable_ism_large_pages & (1 << i)))
7785 			(void) ism_tsb_entries(sfmmup, i);
7786 	}
7787 
7788 	hatlockp = sfmmu_hat_enter(sfmmup);
7789 
7790 	/*
7791 	 * For ISM and DISM we do not support 512K pages, so we only
7792 	 * only search the 4M and 8K/64K hashes for 4 pagesize cpus, and search
7793 	 * the 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
7794 	 */
7795 	ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
7796 
7797 	if (ismszc > TTE4M && !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG))
7798 		SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG);
7799 
7800 	if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_64K_FLAG))
7801 		SFMMU_FLAGS_SET(sfmmup, HAT_64K_FLAG);
7802 
7803 	/*
7804 	 * If we updated the ismblkpa for this HAT or we need
7805 	 * to start searching the 256M or 32M or 4M hash, we must
7806 	 * make sure all CPUs running this process reload their
7807 	 * tsbmiss area.  Otherwise they will fail to load the mappings
7808 	 * in the tsbmiss handler and will loop calling pagefault().
7809 	 */
7810 	switch (ismszc) {
7811 	case TTE256M:
7812 		if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_FLAG)) {
7813 			SFMMU_FLAGS_SET(sfmmup, HAT_256M_FLAG);
7814 			sfmmu_sync_mmustate(sfmmup);
7815 		}
7816 		break;
7817 	case TTE32M:
7818 		if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_FLAG)) {
7819 			SFMMU_FLAGS_SET(sfmmup, HAT_32M_FLAG);
7820 			sfmmu_sync_mmustate(sfmmup);
7821 		}
7822 		break;
7823 	case TTE4M:
7824 		if (reload_mmu || !SFMMU_FLAGS_ISSET(sfmmup, HAT_4M_FLAG)) {
7825 			SFMMU_FLAGS_SET(sfmmup, HAT_4M_FLAG);
7826 			sfmmu_sync_mmustate(sfmmup);
7827 		}
7828 		break;
7829 	default:
7830 		break;
7831 	}
7832 
7833 	/*
7834 	 * Now we can drop the locks.
7835 	 */
7836 	sfmmu_ismhat_exit(sfmmup, 1);
7837 	sfmmu_hat_exit(hatlockp);
7838 
7839 	/*
7840 	 * Free up ismblk if we didn't use it.
7841 	 */
7842 	if (new_iblk != NULL)
7843 		kmem_cache_free(ism_blk_cache, new_iblk);
7844 
7845 	/*
7846 	 * Check TSB and TLB page sizes.
7847 	 */
7848 	sfmmu_check_page_sizes(sfmmup, 1);
7849 
7850 	return (0);
7851 }
7852 
7853 /*
7854  * hat_unshare removes exactly one ism_map from
7855  * this process's as.  It expects multiple calls
7856  * to hat_unshare for multiple shm segments.
7857  */
7858 void
7859 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
7860 {
7861 	ism_map_t 	*ism_map;
7862 	ism_ment_t	*free_ment = NULL;
7863 	ism_blk_t	*ism_blkp;
7864 	struct hat	*ism_hatid;
7865 	int 		found, i;
7866 	hatlock_t	*hatlockp;
7867 	struct tsb_info	*tsbinfo;
7868 	uint_t		ismshift = page_get_shift(ismszc);
7869 	size_t		sh_size = ISM_SHIFT(ismshift, len);
7870 
7871 	ASSERT(ISM_ALIGNED(ismshift, addr));
7872 	ASSERT(ISM_ALIGNED(ismshift, len));
7873 	ASSERT(sfmmup != NULL);
7874 	ASSERT(sfmmup != ksfmmup);
7875 
7876 	if (sfmmup->sfmmu_xhat_provider) {
7877 		XHAT_UNSHARE(sfmmup, addr, len);
7878 		return;
7879 	} else {
7880 		/*
7881 		 * This must be a CPU HAT. If the address space has
7882 		 * XHATs attached, inform all XHATs that ISM segment
7883 		 * is going away
7884 		 */
7885 		ASSERT(sfmmup->sfmmu_as != NULL);
7886 		if (sfmmup->sfmmu_as->a_xhat != NULL)
7887 			xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
7888 	}
7889 
7890 	/*
7891 	 * Make sure that during the entire time ISM mappings are removed,
7892 	 * the trap handlers serialize behind us, and that no one else
7893 	 * can be mucking with ISM mappings.  This also lets us get away
7894 	 * with not doing expensive cross calls to flush the TLB -- we
7895 	 * just discard the context, flush the entire TSB, and call it
7896 	 * a day.
7897 	 */
7898 	sfmmu_ismhat_enter(sfmmup, 0);
7899 
7900 	/*
7901 	 * Remove the mapping.
7902 	 *
7903 	 * We can't have any holes in the ism map.
7904 	 * The tsb miss code while searching the ism map will
7905 	 * stop on an empty map slot.  So we must move
7906 	 * everyone past the hole up 1 if any.
7907 	 *
7908 	 * Also empty ism map blks are not freed until the
7909 	 * process exits. This is to prevent a MT race condition
7910 	 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
7911 	 */
7912 	found = 0;
7913 	ism_blkp = sfmmup->sfmmu_iblk;
7914 	while (!found && ism_blkp) {
7915 		ism_map = ism_blkp->iblk_maps;
7916 		for (i = 0; i < ISM_MAP_SLOTS; i++) {
7917 			if (addr == ism_start(ism_map[i]) &&
7918 			    sh_size == (size_t)(ism_size(ism_map[i]))) {
7919 				found = 1;
7920 				break;
7921 			}
7922 		}
7923 		if (!found)
7924 			ism_blkp = ism_blkp->iblk_next;
7925 	}
7926 
7927 	if (found) {
7928 		ism_hatid = ism_map[i].imap_ismhat;
7929 		ASSERT(ism_hatid != NULL);
7930 		ASSERT(ism_hatid->sfmmu_ismhat == 1);
7931 
7932 		/*
7933 		 * First remove ourselves from the ism mapping list.
7934 		 */
7935 		mutex_enter(&ism_mlist_lock);
7936 		iment_sub(ism_map[i].imap_ment, ism_hatid);
7937 		mutex_exit(&ism_mlist_lock);
7938 		free_ment = ism_map[i].imap_ment;
7939 
7940 		/*
7941 		 * Now gurantee that any other cpu
7942 		 * that tries to process an ISM miss
7943 		 * will go to tl=0.
7944 		 */
7945 		hatlockp = sfmmu_hat_enter(sfmmup);
7946 
7947 		sfmmu_invalidate_ctx(sfmmup);
7948 
7949 		sfmmu_hat_exit(hatlockp);
7950 
7951 		/*
7952 		 * We delete the ism map by copying
7953 		 * the next map over the current one.
7954 		 * We will take the next one in the maps
7955 		 * array or from the next ism_blk.
7956 		 */
7957 		while (ism_blkp) {
7958 			ism_map = ism_blkp->iblk_maps;
7959 			while (i < (ISM_MAP_SLOTS - 1)) {
7960 				ism_map[i] = ism_map[i + 1];
7961 				i++;
7962 			}
7963 			/* i == (ISM_MAP_SLOTS - 1) */
7964 			ism_blkp = ism_blkp->iblk_next;
7965 			if (ism_blkp) {
7966 				ism_map[i] = ism_blkp->iblk_maps[0];
7967 				i = 0;
7968 			} else {
7969 				ism_map[i].imap_seg = 0;
7970 				ism_map[i].imap_vb_shift = 0;
7971 				ism_map[i].imap_hatflags = 0;
7972 				ism_map[i].imap_sz_mask = 0;
7973 				ism_map[i].imap_ismhat = NULL;
7974 				ism_map[i].imap_ment = NULL;
7975 			}
7976 		}
7977 
7978 		/*
7979 		 * Now flush entire TSB for the process, since
7980 		 * demapping page by page can be too expensive.
7981 		 * We don't have to flush the TLB here anymore
7982 		 * since we switch to a new TLB ctx instead.
7983 		 * Also, there is no need to flush if the process
7984 		 * is exiting since the TSB will be freed later.
7985 		 */
7986 		if (!sfmmup->sfmmu_free) {
7987 			hatlockp = sfmmu_hat_enter(sfmmup);
7988 			for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
7989 			    tsbinfo = tsbinfo->tsb_next) {
7990 				if (tsbinfo->tsb_flags & TSB_SWAPPED)
7991 					continue;
7992 				sfmmu_inv_tsb(tsbinfo->tsb_va,
7993 				    TSB_BYTES(tsbinfo->tsb_szc));
7994 			}
7995 			sfmmu_hat_exit(hatlockp);
7996 		}
7997 	}
7998 
7999 	/*
8000 	 * Update our counters for this sfmmup's ism mappings.
8001 	 */
8002 	for (i = 0; i <= ismszc; i++) {
8003 		if (!(disable_ism_large_pages & (1 << i)))
8004 			(void) ism_tsb_entries(sfmmup, i);
8005 	}
8006 
8007 	sfmmu_ismhat_exit(sfmmup, 0);
8008 
8009 	/*
8010 	 * We must do our freeing here after dropping locks
8011 	 * to prevent a deadlock in the kmem allocator on the
8012 	 * mapping list lock.
8013 	 */
8014 	if (free_ment != NULL)
8015 		kmem_cache_free(ism_ment_cache, free_ment);
8016 
8017 	/*
8018 	 * Check TSB and TLB page sizes if the process isn't exiting.
8019 	 */
8020 	if (!sfmmup->sfmmu_free)
8021 		sfmmu_check_page_sizes(sfmmup, 0);
8022 }
8023 
8024 /* ARGSUSED */
8025 static int
8026 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
8027 {
8028 	/* void *buf is sfmmu_t pointer */
8029 	return (0);
8030 }
8031 
8032 /* ARGSUSED */
8033 static void
8034 sfmmu_idcache_destructor(void *buf, void *cdrarg)
8035 {
8036 	/* void *buf is sfmmu_t pointer */
8037 }
8038 
8039 /*
8040  * setup kmem hmeblks by bzeroing all members and initializing the nextpa
8041  * field to be the pa of this hmeblk
8042  */
8043 /* ARGSUSED */
8044 static int
8045 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
8046 {
8047 	struct hme_blk *hmeblkp;
8048 
8049 	bzero(buf, (size_t)cdrarg);
8050 	hmeblkp = (struct hme_blk *)buf;
8051 	hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
8052 
8053 #ifdef	HBLK_TRACE
8054 	mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
8055 #endif	/* HBLK_TRACE */
8056 
8057 	return (0);
8058 }
8059 
8060 /* ARGSUSED */
8061 static void
8062 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
8063 {
8064 
8065 #ifdef	HBLK_TRACE
8066 
8067 	struct hme_blk *hmeblkp;
8068 
8069 	hmeblkp = (struct hme_blk *)buf;
8070 	mutex_destroy(&hmeblkp->hblk_audit_lock);
8071 
8072 #endif	/* HBLK_TRACE */
8073 }
8074 
8075 #define	SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
8076 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
8077 /*
8078  * The kmem allocator will callback into our reclaim routine when the system
8079  * is running low in memory.  We traverse the hash and free up all unused but
8080  * still cached hme_blks.  We also traverse the free list and free them up
8081  * as well.
8082  */
8083 /*ARGSUSED*/
8084 static void
8085 sfmmu_hblkcache_reclaim(void *cdrarg)
8086 {
8087 	int i;
8088 	uint64_t hblkpa, prevpa, nx_pa;
8089 	struct hmehash_bucket *hmebp;
8090 	struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
8091 	static struct hmehash_bucket *uhmehash_reclaim_hand;
8092 	static struct hmehash_bucket *khmehash_reclaim_hand;
8093 	struct hme_blk *list = NULL;
8094 
8095 	hmebp = uhmehash_reclaim_hand;
8096 	if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
8097 		uhmehash_reclaim_hand = hmebp = uhme_hash;
8098 	uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
8099 
8100 	for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
8101 		if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
8102 			hmeblkp = hmebp->hmeblkp;
8103 			hblkpa = hmebp->hmeh_nextpa;
8104 			prevpa = 0;
8105 			pr_hblk = NULL;
8106 			while (hmeblkp) {
8107 				nx_hblk = hmeblkp->hblk_next;
8108 				nx_pa = hmeblkp->hblk_nextpa;
8109 				if (!hmeblkp->hblk_vcnt &&
8110 				    !hmeblkp->hblk_hmecnt) {
8111 					sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8112 						prevpa, pr_hblk);
8113 					sfmmu_hblk_free(hmebp, hmeblkp,
8114 					    hblkpa, &list);
8115 				} else {
8116 					pr_hblk = hmeblkp;
8117 					prevpa = hblkpa;
8118 				}
8119 				hmeblkp = nx_hblk;
8120 				hblkpa = nx_pa;
8121 			}
8122 			SFMMU_HASH_UNLOCK(hmebp);
8123 		}
8124 		if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
8125 			hmebp = uhme_hash;
8126 	}
8127 
8128 	hmebp = khmehash_reclaim_hand;
8129 	if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
8130 		khmehash_reclaim_hand = hmebp = khme_hash;
8131 	khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
8132 
8133 	for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
8134 		if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
8135 			hmeblkp = hmebp->hmeblkp;
8136 			hblkpa = hmebp->hmeh_nextpa;
8137 			prevpa = 0;
8138 			pr_hblk = NULL;
8139 			while (hmeblkp) {
8140 				nx_hblk = hmeblkp->hblk_next;
8141 				nx_pa = hmeblkp->hblk_nextpa;
8142 				if (!hmeblkp->hblk_vcnt &&
8143 				    !hmeblkp->hblk_hmecnt) {
8144 					sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8145 						prevpa, pr_hblk);
8146 					sfmmu_hblk_free(hmebp, hmeblkp,
8147 					    hblkpa, &list);
8148 				} else {
8149 					pr_hblk = hmeblkp;
8150 					prevpa = hblkpa;
8151 				}
8152 				hmeblkp = nx_hblk;
8153 				hblkpa = nx_pa;
8154 			}
8155 			SFMMU_HASH_UNLOCK(hmebp);
8156 		}
8157 		if (hmebp++ == &khme_hash[KHMEHASH_SZ])
8158 			hmebp = khme_hash;
8159 	}
8160 	sfmmu_hblks_list_purge(&list);
8161 }
8162 
8163 /*
8164  * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
8165  * same goes for sfmmu_get_addrvcolor().
8166  *
8167  * This function will return the virtual color for the specified page. The
8168  * virtual color corresponds to this page current mapping or its last mapping.
8169  * It is used by memory allocators to choose addresses with the correct
8170  * alignment so vac consistency is automatically maintained.  If the page
8171  * has no color it returns -1.
8172  */
8173 int
8174 sfmmu_get_ppvcolor(struct page *pp)
8175 {
8176 	int color;
8177 
8178 	if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
8179 		return (-1);
8180 	}
8181 	color = PP_GET_VCOLOR(pp);
8182 	ASSERT(color < mmu_btop(shm_alignment));
8183 	return (color);
8184 }
8185 
8186 /*
8187  * This function will return the desired alignment for vac consistency
8188  * (vac color) given a virtual address.  If no vac is present it returns -1.
8189  */
8190 int
8191 sfmmu_get_addrvcolor(caddr_t vaddr)
8192 {
8193 	if (cache & CACHE_VAC) {
8194 		return (addr_to_vcolor(vaddr));
8195 	} else {
8196 		return (-1);
8197 	}
8198 
8199 }
8200 
8201 /*
8202  * Check for conflicts.
8203  * A conflict exists if the new and existent mappings do not match in
8204  * their "shm_alignment fields. If conflicts exist, the existant mappings
8205  * are flushed unless one of them is locked. If one of them is locked, then
8206  * the mappings are flushed and converted to non-cacheable mappings.
8207  */
8208 static void
8209 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
8210 {
8211 	struct hat *tmphat;
8212 	struct sf_hment *sfhmep, *tmphme = NULL;
8213 	struct hme_blk *hmeblkp;
8214 	int vcolor;
8215 	tte_t tte;
8216 
8217 	ASSERT(sfmmu_mlist_held(pp));
8218 	ASSERT(!PP_ISNC(pp));		/* page better be cacheable */
8219 
8220 	vcolor = addr_to_vcolor(addr);
8221 	if (PP_NEWPAGE(pp)) {
8222 		PP_SET_VCOLOR(pp, vcolor);
8223 		return;
8224 	}
8225 
8226 	if (PP_GET_VCOLOR(pp) == vcolor) {
8227 		return;
8228 	}
8229 
8230 	if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
8231 		/*
8232 		 * Previous user of page had a different color
8233 		 * but since there are no current users
8234 		 * we just flush the cache and change the color.
8235 		 */
8236 		SFMMU_STAT(sf_pgcolor_conflict);
8237 		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
8238 		PP_SET_VCOLOR(pp, vcolor);
8239 		return;
8240 	}
8241 
8242 	/*
8243 	 * If we get here we have a vac conflict with a current
8244 	 * mapping.  VAC conflict policy is as follows.
8245 	 * - The default is to unload the other mappings unless:
8246 	 * - If we have a large mapping we uncache the page.
8247 	 * We need to uncache the rest of the large page too.
8248 	 * - If any of the mappings are locked we uncache the page.
8249 	 * - If the requested mapping is inconsistent
8250 	 * with another mapping and that mapping
8251 	 * is in the same address space we have to
8252 	 * make it non-cached.  The default thing
8253 	 * to do is unload the inconsistent mapping
8254 	 * but if they are in the same address space
8255 	 * we run the risk of unmapping the pc or the
8256 	 * stack which we will use as we return to the user,
8257 	 * in which case we can then fault on the thing
8258 	 * we just unloaded and get into an infinite loop.
8259 	 */
8260 	if (PP_ISMAPPED_LARGE(pp)) {
8261 		int sz;
8262 
8263 		/*
8264 		 * Existing mapping is for big pages. We don't unload
8265 		 * existing big mappings to satisfy new mappings.
8266 		 * Always convert all mappings to TNC.
8267 		 */
8268 		sz = fnd_mapping_sz(pp);
8269 		pp = PP_GROUPLEADER(pp, sz);
8270 		SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
8271 		sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
8272 			TTEPAGES(sz));
8273 
8274 		return;
8275 	}
8276 
8277 	/*
8278 	 * check if any mapping is in same as or if it is locked
8279 	 * since in that case we need to uncache.
8280 	 */
8281 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
8282 		tmphme = sfhmep->hme_next;
8283 		hmeblkp = sfmmu_hmetohblk(sfhmep);
8284 		if (hmeblkp->hblk_xhat_bit)
8285 			continue;
8286 		tmphat = hblktosfmmu(hmeblkp);
8287 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
8288 		ASSERT(TTE_IS_VALID(&tte));
8289 		if ((tmphat == hat) || hmeblkp->hblk_lckcnt) {
8290 			/*
8291 			 * We have an uncache conflict
8292 			 */
8293 			SFMMU_STAT(sf_uncache_conflict);
8294 			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
8295 			return;
8296 		}
8297 	}
8298 
8299 	/*
8300 	 * We have an unload conflict
8301 	 * We have already checked for LARGE mappings, therefore
8302 	 * the remaining mapping(s) must be TTE8K.
8303 	 */
8304 	SFMMU_STAT(sf_unload_conflict);
8305 
8306 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
8307 		tmphme = sfhmep->hme_next;
8308 		hmeblkp = sfmmu_hmetohblk(sfhmep);
8309 		if (hmeblkp->hblk_xhat_bit)
8310 			continue;
8311 		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
8312 	}
8313 
8314 	if (PP_ISMAPPED_KPM(pp))
8315 		sfmmu_kpm_vac_unload(pp, addr);
8316 
8317 	/*
8318 	 * Unloads only do TLB flushes so we need to flush the
8319 	 * cache here.
8320 	 */
8321 	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
8322 	PP_SET_VCOLOR(pp, vcolor);
8323 }
8324 
8325 /*
8326  * Whenever a mapping is unloaded and the page is in TNC state,
8327  * we see if the page can be made cacheable again. 'pp' is
8328  * the page that we just unloaded a mapping from, the size
8329  * of mapping that was unloaded is 'ottesz'.
8330  * Remark:
8331  * The recache policy for mpss pages can leave a performance problem
8332  * under the following circumstances:
8333  * . A large page in uncached mode has just been unmapped.
8334  * . All constituent pages are TNC due to a conflicting small mapping.
8335  * . There are many other, non conflicting, small mappings around for
8336  *   a lot of the constituent pages.
8337  * . We're called w/ the "old" groupleader page and the old ottesz,
8338  *   but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so
8339  *   we end up w/ TTE8K or npages == 1.
8340  * . We call tst_tnc w/ the old groupleader only, and if there is no
8341  *   conflict, we re-cache only this page.
8342  * . All other small mappings are not checked and will be left in TNC mode.
8343  * The problem is not very serious because:
8344  * . mpss is actually only defined for heap and stack, so the probability
8345  *   is not very high that a large page mapping exists in parallel to a small
8346  *   one (this is possible, but seems to be bad programming style in the
8347  *   appl).
8348  * . The problem gets a little bit more serious, when those TNC pages
8349  *   have to be mapped into kernel space, e.g. for networking.
8350  * . When VAC alias conflicts occur in applications, this is regarded
8351  *   as an application bug. So if kstat's show them, the appl should
8352  *   be changed anyway.
8353  */
8354 static void
8355 conv_tnc(page_t *pp, int ottesz)
8356 {
8357 	int cursz, dosz;
8358 	pgcnt_t curnpgs, dopgs;
8359 	pgcnt_t pg64k;
8360 	page_t *pp2;
8361 
8362 	/*
8363 	 * Determine how big a range we check for TNC and find
8364 	 * leader page. cursz is the size of the biggest
8365 	 * mapping that still exist on 'pp'.
8366 	 */
8367 	if (PP_ISMAPPED_LARGE(pp)) {
8368 		cursz = fnd_mapping_sz(pp);
8369 	} else {
8370 		cursz = TTE8K;
8371 	}
8372 
8373 	if (ottesz >= cursz) {
8374 		dosz = ottesz;
8375 		pp2 = pp;
8376 	} else {
8377 		dosz = cursz;
8378 		pp2 = PP_GROUPLEADER(pp, dosz);
8379 	}
8380 
8381 	pg64k = TTEPAGES(TTE64K);
8382 	dopgs = TTEPAGES(dosz);
8383 
8384 	ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
8385 
8386 	while (dopgs != 0) {
8387 		curnpgs = TTEPAGES(cursz);
8388 		if (tst_tnc(pp2, curnpgs)) {
8389 			SFMMU_STAT_ADD(sf_recache, curnpgs);
8390 			sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
8391 				curnpgs);
8392 		}
8393 
8394 		ASSERT(dopgs >= curnpgs);
8395 		dopgs -= curnpgs;
8396 
8397 		if (dopgs == 0) {
8398 			break;
8399 		}
8400 
8401 		pp2 = PP_PAGENEXT_N(pp2, curnpgs);
8402 		if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
8403 			cursz = fnd_mapping_sz(pp2);
8404 		} else {
8405 			cursz = TTE8K;
8406 		}
8407 	}
8408 }
8409 
8410 /*
8411  * Returns 1 if page(s) can be converted from TNC to cacheable setting,
8412  * returns 0 otherwise. Note that oaddr argument is valid for only
8413  * 8k pages.
8414  */
8415 static int
8416 tst_tnc(page_t *pp, pgcnt_t npages)
8417 {
8418 	struct	sf_hment *sfhme;
8419 	struct	hme_blk *hmeblkp;
8420 	tte_t	tte;
8421 	caddr_t	vaddr;
8422 	int	clr_valid = 0;
8423 	int 	color, color1, bcolor;
8424 	int	i, ncolors;
8425 
8426 	ASSERT(pp != NULL);
8427 	ASSERT(!(cache & CACHE_WRITEBACK));
8428 
8429 	if (npages > 1) {
8430 		ncolors = CACHE_NUM_COLOR;
8431 	}
8432 
8433 	for (i = 0; i < npages; i++) {
8434 		ASSERT(sfmmu_mlist_held(pp));
8435 		ASSERT(PP_ISTNC(pp));
8436 		ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
8437 
8438 		if (PP_ISPNC(pp)) {
8439 			return (0);
8440 		}
8441 
8442 		clr_valid = 0;
8443 		if (PP_ISMAPPED_KPM(pp)) {
8444 			caddr_t kpmvaddr;
8445 
8446 			ASSERT(kpm_enable);
8447 			kpmvaddr = hat_kpm_page2va(pp, 1);
8448 			ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
8449 			color1 = addr_to_vcolor(kpmvaddr);
8450 			clr_valid = 1;
8451 		}
8452 
8453 		for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
8454 			hmeblkp = sfmmu_hmetohblk(sfhme);
8455 			if (hmeblkp->hblk_xhat_bit)
8456 				continue;
8457 
8458 			sfmmu_copytte(&sfhme->hme_tte, &tte);
8459 			ASSERT(TTE_IS_VALID(&tte));
8460 
8461 			vaddr = tte_to_vaddr(hmeblkp, tte);
8462 			color = addr_to_vcolor(vaddr);
8463 
8464 			if (npages > 1) {
8465 				/*
8466 				 * If there is a big mapping, make sure
8467 				 * 8K mapping is consistent with the big
8468 				 * mapping.
8469 				 */
8470 				bcolor = i % ncolors;
8471 				if (color != bcolor) {
8472 					return (0);
8473 				}
8474 			}
8475 			if (!clr_valid) {
8476 				clr_valid = 1;
8477 				color1 = color;
8478 			}
8479 
8480 			if (color1 != color) {
8481 				return (0);
8482 			}
8483 		}
8484 
8485 		pp = PP_PAGENEXT(pp);
8486 	}
8487 
8488 	return (1);
8489 }
8490 
8491 static void
8492 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
8493 	pgcnt_t npages)
8494 {
8495 	kmutex_t *pmtx;
8496 	int i, ncolors, bcolor;
8497 	kpm_hlk_t *kpmp;
8498 	cpuset_t cpuset;
8499 
8500 	ASSERT(pp != NULL);
8501 	ASSERT(!(cache & CACHE_WRITEBACK));
8502 
8503 	kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
8504 	pmtx = sfmmu_page_enter(pp);
8505 
8506 	/*
8507 	 * Fast path caching single unmapped page
8508 	 */
8509 	if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
8510 	    flags == HAT_CACHE) {
8511 		PP_CLRTNC(pp);
8512 		PP_CLRPNC(pp);
8513 		sfmmu_page_exit(pmtx);
8514 		sfmmu_kpm_kpmp_exit(kpmp);
8515 		return;
8516 	}
8517 
8518 	/*
8519 	 * We need to capture all cpus in order to change cacheability
8520 	 * because we can't allow one cpu to access the same physical
8521 	 * page using a cacheable and a non-cachebale mapping at the same
8522 	 * time. Since we may end up walking the ism mapping list
8523 	 * have to grab it's lock now since we can't after all the
8524 	 * cpus have been captured.
8525 	 */
8526 	sfmmu_hat_lock_all();
8527 	mutex_enter(&ism_mlist_lock);
8528 	kpreempt_disable();
8529 	cpuset = cpu_ready_set;
8530 	xc_attention(cpuset);
8531 
8532 	if (npages > 1) {
8533 		/*
8534 		 * Make sure all colors are flushed since the
8535 		 * sfmmu_page_cache() only flushes one color-
8536 		 * it does not know big pages.
8537 		 */
8538 		ncolors = CACHE_NUM_COLOR;
8539 		if (flags & HAT_TMPNC) {
8540 			for (i = 0; i < ncolors; i++) {
8541 				sfmmu_cache_flushcolor(i, pp->p_pagenum);
8542 			}
8543 			cache_flush_flag = CACHE_NO_FLUSH;
8544 		}
8545 	}
8546 
8547 	for (i = 0; i < npages; i++) {
8548 
8549 		ASSERT(sfmmu_mlist_held(pp));
8550 
8551 		if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
8552 
8553 			if (npages > 1) {
8554 				bcolor = i % ncolors;
8555 			} else {
8556 				bcolor = NO_VCOLOR;
8557 			}
8558 
8559 			sfmmu_page_cache(pp, flags, cache_flush_flag,
8560 			    bcolor);
8561 		}
8562 
8563 		pp = PP_PAGENEXT(pp);
8564 	}
8565 
8566 	xt_sync(cpuset);
8567 	xc_dismissed(cpuset);
8568 	mutex_exit(&ism_mlist_lock);
8569 	sfmmu_hat_unlock_all();
8570 	sfmmu_page_exit(pmtx);
8571 	sfmmu_kpm_kpmp_exit(kpmp);
8572 	kpreempt_enable();
8573 }
8574 
8575 /*
8576  * This function changes the virtual cacheability of all mappings to a
8577  * particular page.  When changing from uncache to cacheable the mappings will
8578  * only be changed if all of them have the same virtual color.
8579  * We need to flush the cache in all cpus.  It is possible that
8580  * a process referenced a page as cacheable but has sinced exited
8581  * and cleared the mapping list.  We still to flush it but have no
8582  * state so all cpus is the only alternative.
8583  */
8584 static void
8585 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
8586 {
8587 	struct	sf_hment *sfhme;
8588 	struct	hme_blk *hmeblkp;
8589 	sfmmu_t *sfmmup;
8590 	tte_t	tte, ttemod;
8591 	caddr_t	vaddr;
8592 	int	ret, color;
8593 	pfn_t	pfn;
8594 
8595 	color = bcolor;
8596 	pfn = pp->p_pagenum;
8597 
8598 	for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
8599 
8600 		hmeblkp = sfmmu_hmetohblk(sfhme);
8601 
8602 		if (hmeblkp->hblk_xhat_bit)
8603 			continue;
8604 
8605 		sfmmu_copytte(&sfhme->hme_tte, &tte);
8606 		ASSERT(TTE_IS_VALID(&tte));
8607 		vaddr = tte_to_vaddr(hmeblkp, tte);
8608 		color = addr_to_vcolor(vaddr);
8609 
8610 #ifdef DEBUG
8611 		if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
8612 			ASSERT(color == bcolor);
8613 		}
8614 #endif
8615 
8616 		ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
8617 
8618 		ttemod = tte;
8619 		if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
8620 			TTE_CLR_VCACHEABLE(&ttemod);
8621 		} else {	/* flags & HAT_CACHE */
8622 			TTE_SET_VCACHEABLE(&ttemod);
8623 		}
8624 		ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
8625 		if (ret < 0) {
8626 			/*
8627 			 * Since all cpus are captured modifytte should not
8628 			 * fail.
8629 			 */
8630 			panic("sfmmu_page_cache: write to tte failed");
8631 		}
8632 
8633 		sfmmup = hblktosfmmu(hmeblkp);
8634 		if (cache_flush_flag == CACHE_FLUSH) {
8635 			/*
8636 			 * Flush TSBs, TLBs and caches
8637 			 */
8638 			if (sfmmup->sfmmu_ismhat) {
8639 				if (flags & HAT_CACHE) {
8640 					SFMMU_STAT(sf_ism_recache);
8641 				} else {
8642 					SFMMU_STAT(sf_ism_uncache);
8643 				}
8644 				sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
8645 				    pfn, CACHE_FLUSH);
8646 			} else {
8647 				sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
8648 				    pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
8649 			}
8650 
8651 			/*
8652 			 * all cache entries belonging to this pfn are
8653 			 * now flushed.
8654 			 */
8655 			cache_flush_flag = CACHE_NO_FLUSH;
8656 		} else {
8657 
8658 			/*
8659 			 * Flush only TSBs and TLBs.
8660 			 */
8661 			if (sfmmup->sfmmu_ismhat) {
8662 				if (flags & HAT_CACHE) {
8663 					SFMMU_STAT(sf_ism_recache);
8664 				} else {
8665 					SFMMU_STAT(sf_ism_uncache);
8666 				}
8667 				sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
8668 				    pfn, CACHE_NO_FLUSH);
8669 			} else {
8670 				sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
8671 			}
8672 		}
8673 	}
8674 
8675 	if (PP_ISMAPPED_KPM(pp))
8676 		sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
8677 
8678 	switch (flags) {
8679 
8680 		default:
8681 			panic("sfmmu_pagecache: unknown flags");
8682 			break;
8683 
8684 		case HAT_CACHE:
8685 			PP_CLRTNC(pp);
8686 			PP_CLRPNC(pp);
8687 			PP_SET_VCOLOR(pp, color);
8688 			break;
8689 
8690 		case HAT_TMPNC:
8691 			PP_SETTNC(pp);
8692 			PP_SET_VCOLOR(pp, NO_VCOLOR);
8693 			break;
8694 
8695 		case HAT_UNCACHE:
8696 			PP_SETPNC(pp);
8697 			PP_CLRTNC(pp);
8698 			PP_SET_VCOLOR(pp, NO_VCOLOR);
8699 			break;
8700 	}
8701 }
8702 
8703 
8704 /*
8705  * Wrapper routine used to return a context.
8706  *
8707  * It's the responsibility of the caller to guarantee that the
8708  * process serializes on calls here by taking the HAT lock for
8709  * the hat.
8710  *
8711  */
8712 static void
8713 sfmmu_get_ctx(sfmmu_t *sfmmup)
8714 {
8715 	mmu_ctx_t *mmu_ctxp;
8716 	uint_t pstate_save;
8717 
8718 	ASSERT(sfmmu_hat_lock_held(sfmmup));
8719 	ASSERT(sfmmup != ksfmmup);
8720 
8721 	kpreempt_disable();
8722 
8723 	mmu_ctxp = CPU_MMU_CTXP(CPU);
8724 	ASSERT(mmu_ctxp);
8725 	ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
8726 	ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
8727 
8728 	/*
8729 	 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU.
8730 	 */
8731 	if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
8732 		sfmmu_ctx_wrap_around(mmu_ctxp);
8733 
8734 	/*
8735 	 * Let the MMU set up the page sizes to use for
8736 	 * this context in the TLB. Don't program 2nd dtlb for ism hat.
8737 	 */
8738 	if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
8739 		mmu_set_ctx_page_sizes(sfmmup);
8740 	}
8741 
8742 	/*
8743 	 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
8744 	 * interrupts disabled to prevent race condition with wrap-around
8745 	 * ctx invalidatation. In sun4v, ctx invalidation also involves
8746 	 * a HV call to set the number of TSBs to 0. If interrupts are not
8747 	 * disabled until after sfmmu_load_mmustate is complete TSBs may
8748 	 * become assigned to INVALID_CONTEXT. This is not allowed.
8749 	 */
8750 	pstate_save = sfmmu_disable_intrs();
8751 
8752 	sfmmu_alloc_ctx(sfmmup, 1, CPU);
8753 	sfmmu_load_mmustate(sfmmup);
8754 
8755 	sfmmu_enable_intrs(pstate_save);
8756 
8757 	kpreempt_enable();
8758 }
8759 
8760 /*
8761  * When all cnums are used up in a MMU, cnum will wrap around to the
8762  * next generation and start from 2.
8763  */
8764 static void
8765 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp)
8766 {
8767 
8768 	/* caller must have disabled the preemption */
8769 	ASSERT(curthread->t_preempt >= 1);
8770 	ASSERT(mmu_ctxp != NULL);
8771 
8772 	/* acquire Per-MMU (PM) spin lock */
8773 	mutex_enter(&mmu_ctxp->mmu_lock);
8774 
8775 	/* re-check to see if wrap-around is needed */
8776 	if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
8777 		goto done;
8778 
8779 	SFMMU_MMU_STAT(mmu_wrap_around);
8780 
8781 	/* update gnum */
8782 	ASSERT(mmu_ctxp->mmu_gnum != 0);
8783 	mmu_ctxp->mmu_gnum++;
8784 	if (mmu_ctxp->mmu_gnum == 0 ||
8785 	    mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
8786 		cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.",
8787 		    (void *)mmu_ctxp);
8788 	}
8789 
8790 	if (mmu_ctxp->mmu_ncpus > 1) {
8791 		cpuset_t cpuset;
8792 
8793 		membar_enter(); /* make sure updated gnum visible */
8794 
8795 		SFMMU_XCALL_STATS(NULL);
8796 
8797 		/* xcall to others on the same MMU to invalidate ctx */
8798 		cpuset = mmu_ctxp->mmu_cpuset;
8799 		ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id));
8800 		CPUSET_DEL(cpuset, CPU->cpu_id);
8801 		CPUSET_AND(cpuset, cpu_ready_set);
8802 
8803 		/*
8804 		 * Pass in INVALID_CONTEXT as the first parameter to
8805 		 * sfmmu_raise_tsb_exception, which invalidates the context
8806 		 * of any process running on the CPUs in the MMU.
8807 		 */
8808 		xt_some(cpuset, sfmmu_raise_tsb_exception,
8809 		    INVALID_CONTEXT, INVALID_CONTEXT);
8810 		xt_sync(cpuset);
8811 
8812 		SFMMU_MMU_STAT(mmu_tsb_raise_exception);
8813 	}
8814 
8815 	if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
8816 		sfmmu_setctx_sec(INVALID_CONTEXT);
8817 		sfmmu_clear_utsbinfo();
8818 	}
8819 
8820 	/*
8821 	 * No xcall is needed here. For sun4u systems all CPUs in context
8822 	 * domain share a single physical MMU therefore it's enough to flush
8823 	 * TLB on local CPU. On sun4v systems we use 1 global context
8824 	 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception
8825 	 * handler. Note that vtag_flushall_uctxs() is called
8826 	 * for Ultra II machine, where the equivalent flushall functionality
8827 	 * is implemented in SW, and only user ctx TLB entries are flushed.
8828 	 */
8829 	if (&vtag_flushall_uctxs != NULL) {
8830 		vtag_flushall_uctxs();
8831 	} else {
8832 		vtag_flushall();
8833 	}
8834 
8835 	/* reset mmu cnum, skips cnum 0 and 1 */
8836 	mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
8837 
8838 done:
8839 	mutex_exit(&mmu_ctxp->mmu_lock);
8840 }
8841 
8842 
8843 /*
8844  * For multi-threaded process, set the process context to INVALID_CONTEXT
8845  * so that it faults and reloads the MMU state from TL=0. For single-threaded
8846  * process, we can just load the MMU state directly without having to
8847  * set context invalid. Caller must hold the hat lock since we don't
8848  * acquire it here.
8849  */
8850 static void
8851 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
8852 {
8853 	uint_t cnum;
8854 	uint_t pstate_save;
8855 
8856 	ASSERT(sfmmup != ksfmmup);
8857 	ASSERT(sfmmu_hat_lock_held(sfmmup));
8858 
8859 	kpreempt_disable();
8860 
8861 	/*
8862 	 * We check whether the pass'ed-in sfmmup is the same as the
8863 	 * current running proc. This is to makes sure the current proc
8864 	 * stays single-threaded if it already is.
8865 	 */
8866 	if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
8867 	    (curthread->t_procp->p_lwpcnt == 1)) {
8868 		/* single-thread */
8869 		cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
8870 		if (cnum != INVALID_CONTEXT) {
8871 			uint_t curcnum;
8872 			/*
8873 			 * Disable interrupts to prevent race condition
8874 			 * with sfmmu_ctx_wrap_around ctx invalidation.
8875 			 * In sun4v, ctx invalidation involves setting
8876 			 * TSB to NULL, hence, interrupts should be disabled
8877 			 * untill after sfmmu_load_mmustate is completed.
8878 			 */
8879 			pstate_save = sfmmu_disable_intrs();
8880 			curcnum = sfmmu_getctx_sec();
8881 			if (curcnum == cnum)
8882 				sfmmu_load_mmustate(sfmmup);
8883 			sfmmu_enable_intrs(pstate_save);
8884 			ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
8885 		}
8886 	} else {
8887 		/*
8888 		 * multi-thread
8889 		 * or when sfmmup is not the same as the curproc.
8890 		 */
8891 		sfmmu_invalidate_ctx(sfmmup);
8892 	}
8893 
8894 	kpreempt_enable();
8895 }
8896 
8897 
8898 /*
8899  * Replace the specified TSB with a new TSB.  This function gets called when
8900  * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
8901  * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
8902  * (8K).
8903  *
8904  * Caller must hold the HAT lock, but should assume any tsb_info
8905  * pointers it has are no longer valid after calling this function.
8906  *
8907  * Return values:
8908  *	TSB_ALLOCFAIL	Failed to allocate a TSB, due to memory constraints
8909  *	TSB_LOSTRACE	HAT is busy, i.e. another thread is already doing
8910  *			something to this tsbinfo/TSB
8911  *	TSB_SUCCESS	Operation succeeded
8912  */
8913 static tsb_replace_rc_t
8914 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
8915     hatlock_t *hatlockp, uint_t flags)
8916 {
8917 	struct tsb_info *new_tsbinfo = NULL;
8918 	struct tsb_info *curtsb, *prevtsb;
8919 	uint_t tte_sz_mask;
8920 	int i;
8921 
8922 	ASSERT(sfmmup != ksfmmup);
8923 	ASSERT(sfmmup->sfmmu_ismhat == 0);
8924 	ASSERT(sfmmu_hat_lock_held(sfmmup));
8925 	ASSERT(szc <= tsb_max_growsize);
8926 
8927 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
8928 		return (TSB_LOSTRACE);
8929 
8930 	/*
8931 	 * Find the tsb_info ahead of this one in the list, and
8932 	 * also make sure that the tsb_info passed in really
8933 	 * exists!
8934 	 */
8935 	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
8936 	    curtsb != old_tsbinfo && curtsb != NULL;
8937 	    prevtsb = curtsb, curtsb = curtsb->tsb_next);
8938 	ASSERT(curtsb != NULL);
8939 
8940 	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
8941 		/*
8942 		 * The process is swapped out, so just set the new size
8943 		 * code.  When it swaps back in, we'll allocate a new one
8944 		 * of the new chosen size.
8945 		 */
8946 		curtsb->tsb_szc = szc;
8947 		return (TSB_SUCCESS);
8948 	}
8949 	SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
8950 
8951 	tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
8952 
8953 	/*
8954 	 * All initialization is done inside of sfmmu_tsbinfo_alloc().
8955 	 * If we fail to allocate a TSB, exit.
8956 	 */
8957 	sfmmu_hat_exit(hatlockp);
8958 	if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, tte_sz_mask,
8959 	    flags, sfmmup)) {
8960 		(void) sfmmu_hat_enter(sfmmup);
8961 		if (!(flags & TSB_SWAPIN))
8962 			SFMMU_STAT(sf_tsb_resize_failures);
8963 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
8964 		return (TSB_ALLOCFAIL);
8965 	}
8966 	(void) sfmmu_hat_enter(sfmmup);
8967 
8968 	/*
8969 	 * Re-check to make sure somebody else didn't muck with us while we
8970 	 * didn't hold the HAT lock.  If the process swapped out, fine, just
8971 	 * exit; this can happen if we try to shrink the TSB from the context
8972 	 * of another process (such as on an ISM unmap), though it is rare.
8973 	 */
8974 	if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
8975 		SFMMU_STAT(sf_tsb_resize_failures);
8976 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
8977 		sfmmu_hat_exit(hatlockp);
8978 		sfmmu_tsbinfo_free(new_tsbinfo);
8979 		(void) sfmmu_hat_enter(sfmmup);
8980 		return (TSB_LOSTRACE);
8981 	}
8982 
8983 #ifdef	DEBUG
8984 	/* Reverify that the tsb_info still exists.. for debugging only */
8985 	for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
8986 	    curtsb != old_tsbinfo && curtsb != NULL;
8987 	    prevtsb = curtsb, curtsb = curtsb->tsb_next);
8988 	ASSERT(curtsb != NULL);
8989 #endif	/* DEBUG */
8990 
8991 	/*
8992 	 * Quiesce any CPUs running this process on their next TLB miss
8993 	 * so they atomically see the new tsb_info.  We temporarily set the
8994 	 * context to invalid context so new threads that come on processor
8995 	 * after we do the xcall to cpusran will also serialize behind the
8996 	 * HAT lock on TLB miss and will see the new TSB.  Since this short
8997 	 * race with a new thread coming on processor is relatively rare,
8998 	 * this synchronization mechanism should be cheaper than always
8999 	 * pausing all CPUs for the duration of the setup, which is what
9000 	 * the old implementation did.  This is particuarly true if we are
9001 	 * copying a huge chunk of memory around during that window.
9002 	 *
9003 	 * The memory barriers are to make sure things stay consistent
9004 	 * with resume() since it does not hold the HAT lock while
9005 	 * walking the list of tsb_info structures.
9006 	 */
9007 	if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
9008 		/* The TSB is either growing or shrinking. */
9009 		sfmmu_invalidate_ctx(sfmmup);
9010 	} else {
9011 		/*
9012 		 * It is illegal to swap in TSBs from a process other
9013 		 * than a process being swapped in.  This in turn
9014 		 * implies we do not have a valid MMU context here
9015 		 * since a process needs one to resolve translation
9016 		 * misses.
9017 		 */
9018 		ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
9019 	}
9020 
9021 #ifdef DEBUG
9022 	ASSERT(max_mmu_ctxdoms > 0);
9023 
9024 	/*
9025 	 * Process should have INVALID_CONTEXT on all MMUs
9026 	 */
9027 	for (i = 0; i < max_mmu_ctxdoms; i++) {
9028 
9029 		ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
9030 	}
9031 #endif
9032 
9033 	new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
9034 	membar_stst();	/* strict ordering required */
9035 	if (prevtsb)
9036 		prevtsb->tsb_next = new_tsbinfo;
9037 	else
9038 		sfmmup->sfmmu_tsb = new_tsbinfo;
9039 	membar_enter();	/* make sure new TSB globally visible */
9040 	sfmmu_setup_tsbinfo(sfmmup);
9041 
9042 	/*
9043 	 * We need to migrate TSB entries from the old TSB to the new TSB
9044 	 * if tsb_remap_ttes is set and the TSB is growing.
9045 	 */
9046 	if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
9047 		sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
9048 
9049 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9050 
9051 	/*
9052 	 * Drop the HAT lock to free our old tsb_info.
9053 	 */
9054 	sfmmu_hat_exit(hatlockp);
9055 
9056 	if ((flags & TSB_GROW) == TSB_GROW) {
9057 		SFMMU_STAT(sf_tsb_grow);
9058 	} else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
9059 		SFMMU_STAT(sf_tsb_shrink);
9060 	}
9061 
9062 	sfmmu_tsbinfo_free(old_tsbinfo);
9063 
9064 	(void) sfmmu_hat_enter(sfmmup);
9065 	return (TSB_SUCCESS);
9066 }
9067 
9068 /*
9069  * This function will re-program hat pgsz array, and invalidate the
9070  * process' context, forcing the process to switch to another
9071  * context on the next TLB miss, and therefore start using the
9072  * TLB that is reprogrammed for the new page sizes.
9073  */
9074 void
9075 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
9076 {
9077 	int i;
9078 	hatlock_t *hatlockp = NULL;
9079 
9080 	hatlockp = sfmmu_hat_enter(sfmmup);
9081 	/* USIII+-IV+ optimization, requires hat lock */
9082 	if (tmp_pgsz) {
9083 		for (i = 0; i < mmu_page_sizes; i++)
9084 			sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
9085 	}
9086 	SFMMU_STAT(sf_tlb_reprog_pgsz);
9087 
9088 	sfmmu_invalidate_ctx(sfmmup);
9089 
9090 	sfmmu_hat_exit(hatlockp);
9091 }
9092 
9093 /*
9094  * This function assumes that there are either four or six supported page
9095  * sizes and at most two programmable TLBs, so we need to decide which
9096  * page sizes are most important and then tell the MMU layer so it
9097  * can adjust the TLB page sizes accordingly (if supported).
9098  *
9099  * If these assumptions change, this function will need to be
9100  * updated to support whatever the new limits are.
9101  *
9102  * The growing flag is nonzero if we are growing the address space,
9103  * and zero if it is shrinking.  This allows us to decide whether
9104  * to grow or shrink our TSB, depending upon available memory
9105  * conditions.
9106  */
9107 static void
9108 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
9109 {
9110 	uint64_t ttecnt[MMU_PAGE_SIZES];
9111 	uint64_t tte8k_cnt, tte4m_cnt;
9112 	uint8_t i;
9113 	int sectsb_thresh;
9114 
9115 	/*
9116 	 * Kernel threads, processes with small address spaces not using
9117 	 * large pages, and dummy ISM HATs need not apply.
9118 	 */
9119 	if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
9120 		return;
9121 
9122 	if ((sfmmup->sfmmu_flags & HAT_LGPG_FLAGS) == 0 &&
9123 	    sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
9124 		return;
9125 
9126 	for (i = 0; i < mmu_page_sizes; i++) {
9127 		ttecnt[i] = SFMMU_TTE_CNT(sfmmup, i);
9128 	}
9129 
9130 	/* Check pagesizes in use, and possibly reprogram DTLB. */
9131 	if (&mmu_check_page_sizes)
9132 		mmu_check_page_sizes(sfmmup, ttecnt);
9133 
9134 	/*
9135 	 * Calculate the number of 8k ttes to represent the span of these
9136 	 * pages.
9137 	 */
9138 	tte8k_cnt = ttecnt[TTE8K] +
9139 	    (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
9140 	    (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
9141 	if (mmu_page_sizes == max_mmu_page_sizes) {
9142 		tte4m_cnt = ttecnt[TTE4M] +
9143 		    (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
9144 		    (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
9145 	} else {
9146 		tte4m_cnt = ttecnt[TTE4M];
9147 	}
9148 
9149 	/*
9150 	 * Inflate TSB sizes by a factor of 2 if this process
9151 	 * uses 4M text pages to minimize extra conflict misses
9152 	 * in the first TSB since without counting text pages
9153 	 * 8K TSB may become too small.
9154 	 *
9155 	 * Also double the size of the second TSB to minimize
9156 	 * extra conflict misses due to competition between 4M text pages
9157 	 * and data pages.
9158 	 *
9159 	 * We need to adjust the second TSB allocation threshold by the
9160 	 * inflation factor, since there is no point in creating a second
9161 	 * TSB when we know all the mappings can fit in the I/D TLBs.
9162 	 */
9163 	sectsb_thresh = tsb_sectsb_threshold;
9164 	if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
9165 		tte8k_cnt <<= 1;
9166 		tte4m_cnt <<= 1;
9167 		sectsb_thresh <<= 1;
9168 	}
9169 
9170 	/*
9171 	 * Check to see if our TSB is the right size; we may need to
9172 	 * grow or shrink it.  If the process is small, our work is
9173 	 * finished at this point.
9174 	 */
9175 	if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
9176 		return;
9177 	}
9178 	sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
9179 }
9180 
9181 static void
9182 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
9183 	uint64_t tte4m_cnt, int sectsb_thresh)
9184 {
9185 	int tsb_bits;
9186 	uint_t tsb_szc;
9187 	struct tsb_info *tsbinfop;
9188 	hatlock_t *hatlockp = NULL;
9189 
9190 	hatlockp = sfmmu_hat_enter(sfmmup);
9191 	ASSERT(hatlockp != NULL);
9192 	tsbinfop = sfmmup->sfmmu_tsb;
9193 	ASSERT(tsbinfop != NULL);
9194 
9195 	/*
9196 	 * If we're growing, select the size based on RSS.  If we're
9197 	 * shrinking, leave some room so we don't have to turn around and
9198 	 * grow again immediately.
9199 	 */
9200 	if (growing)
9201 		tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
9202 	else
9203 		tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
9204 
9205 	if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
9206 	    (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
9207 		(void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
9208 		    hatlockp, TSB_SHRINK);
9209 	} else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
9210 		(void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
9211 		    hatlockp, TSB_GROW);
9212 	}
9213 	tsbinfop = sfmmup->sfmmu_tsb;
9214 
9215 	/*
9216 	 * With the TLB and first TSB out of the way, we need to see if
9217 	 * we need a second TSB for 4M pages.  If we managed to reprogram
9218 	 * the TLB page sizes above, the process will start using this new
9219 	 * TSB right away; otherwise, it will start using it on the next
9220 	 * context switch.  Either way, it's no big deal so there's no
9221 	 * synchronization with the trap handlers here unless we grow the
9222 	 * TSB (in which case it's required to prevent using the old one
9223 	 * after it's freed). Note: second tsb is required for 32M/256M
9224 	 * page sizes.
9225 	 */
9226 	if (tte4m_cnt > sectsb_thresh) {
9227 		/*
9228 		 * If we're growing, select the size based on RSS.  If we're
9229 		 * shrinking, leave some room so we don't have to turn
9230 		 * around and grow again immediately.
9231 		 */
9232 		if (growing)
9233 			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
9234 		else
9235 			tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
9236 		if (tsbinfop->tsb_next == NULL) {
9237 			struct tsb_info *newtsb;
9238 			int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
9239 			    0 : TSB_ALLOC;
9240 
9241 			sfmmu_hat_exit(hatlockp);
9242 
9243 			/*
9244 			 * Try to allocate a TSB for 4[32|256]M pages.  If we
9245 			 * can't get the size we want, retry w/a minimum sized
9246 			 * TSB.  If that still didn't work, give up; we can
9247 			 * still run without one.
9248 			 */
9249 			tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
9250 			    TSB4M|TSB32M|TSB256M:TSB4M;
9251 			if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
9252 			    allocflags, sfmmup) != 0) &&
9253 			    (sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
9254 			    tsb_bits, allocflags, sfmmup) != 0)) {
9255 				return;
9256 			}
9257 
9258 			hatlockp = sfmmu_hat_enter(sfmmup);
9259 
9260 			if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
9261 				sfmmup->sfmmu_tsb->tsb_next = newtsb;
9262 				SFMMU_STAT(sf_tsb_sectsb_create);
9263 				sfmmu_setup_tsbinfo(sfmmup);
9264 				sfmmu_hat_exit(hatlockp);
9265 				return;
9266 			} else {
9267 				/*
9268 				 * It's annoying, but possible for us
9269 				 * to get here.. we dropped the HAT lock
9270 				 * because of locking order in the kmem
9271 				 * allocator, and while we were off getting
9272 				 * our memory, some other thread decided to
9273 				 * do us a favor and won the race to get a
9274 				 * second TSB for this process.  Sigh.
9275 				 */
9276 				sfmmu_hat_exit(hatlockp);
9277 				sfmmu_tsbinfo_free(newtsb);
9278 				return;
9279 			}
9280 		}
9281 
9282 		/*
9283 		 * We have a second TSB, see if it's big enough.
9284 		 */
9285 		tsbinfop = tsbinfop->tsb_next;
9286 
9287 		/*
9288 		 * Check to see if our second TSB is the right size;
9289 		 * we may need to grow or shrink it.
9290 		 * To prevent thrashing (e.g. growing the TSB on a
9291 		 * subsequent map operation), only try to shrink if
9292 		 * the TSB reach exceeds twice the virtual address
9293 		 * space size.
9294 		 */
9295 		if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
9296 		    (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
9297 			(void) sfmmu_replace_tsb(sfmmup, tsbinfop,
9298 			    tsb_szc, hatlockp, TSB_SHRINK);
9299 		} else if (growing && tsb_szc > tsbinfop->tsb_szc &&
9300 		    TSB_OK_GROW()) {
9301 			(void) sfmmu_replace_tsb(sfmmup, tsbinfop,
9302 			    tsb_szc, hatlockp, TSB_GROW);
9303 		}
9304 	}
9305 
9306 	sfmmu_hat_exit(hatlockp);
9307 }
9308 
9309 /*
9310  * Get the preferred page size code for a hat.
9311  * This is only advice, so locking is not done;
9312  * this transitory information could change
9313  * following the call anyway.  This interface is
9314  * sun4 private.
9315  */
9316 /*ARGSUSED*/
9317 uint_t
9318 hat_preferred_pgsz(struct hat *hat, caddr_t vaddr, size_t maplen, int maptype)
9319 {
9320 	sfmmu_t *sfmmup = (sfmmu_t *)hat;
9321 	uint_t szc, maxszc = mmu_page_sizes - 1;
9322 	size_t pgsz;
9323 
9324 	if (maptype == MAPPGSZ_ISM) {
9325 		for (szc = maxszc; szc >= TTE4M; szc--) {
9326 			if (disable_ism_large_pages & (1 << szc))
9327 				continue;
9328 
9329 			pgsz = hw_page_array[szc].hp_size;
9330 			if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz))
9331 				return (szc);
9332 		}
9333 		return (TTE4M);
9334 	} else if (&mmu_preferred_pgsz) { /* USIII+-USIV+ */
9335 		return (mmu_preferred_pgsz(sfmmup, vaddr, maplen));
9336 	} else {	/* USIII, USII, Niagara */
9337 		for (szc = maxszc; szc > TTE8K; szc--) {
9338 			if (disable_large_pages & (1 << szc))
9339 				continue;
9340 
9341 			pgsz = hw_page_array[szc].hp_size;
9342 			if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz))
9343 				return (szc);
9344 		}
9345 		return (TTE8K);
9346 	}
9347 }
9348 
9349 /*
9350  * Free up a sfmmu
9351  * Since the sfmmu is currently embedded in the hat struct we simply zero
9352  * out our fields and free up the ism map blk list if any.
9353  */
9354 static void
9355 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
9356 {
9357 	ism_blk_t	*blkp, *nx_blkp;
9358 #ifdef	DEBUG
9359 	ism_map_t	*map;
9360 	int 		i;
9361 #endif
9362 
9363 	ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
9364 	ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
9365 	ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
9366 	ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
9367 	ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
9368 	ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
9369 
9370 	sfmmup->sfmmu_free = 0;
9371 	sfmmup->sfmmu_ismhat = 0;
9372 
9373 	blkp = sfmmup->sfmmu_iblk;
9374 	sfmmup->sfmmu_iblk = NULL;
9375 
9376 	while (blkp) {
9377 #ifdef	DEBUG
9378 		map = blkp->iblk_maps;
9379 		for (i = 0; i < ISM_MAP_SLOTS; i++) {
9380 			ASSERT(map[i].imap_seg == 0);
9381 			ASSERT(map[i].imap_ismhat == NULL);
9382 			ASSERT(map[i].imap_ment == NULL);
9383 		}
9384 #endif
9385 		nx_blkp = blkp->iblk_next;
9386 		blkp->iblk_next = NULL;
9387 		blkp->iblk_nextpa = (uint64_t)-1;
9388 		kmem_cache_free(ism_blk_cache, blkp);
9389 		blkp = nx_blkp;
9390 	}
9391 }
9392 
9393 /*
9394  * Locking primitves accessed by HATLOCK macros
9395  */
9396 
9397 #define	SFMMU_SPL_MTX	(0x0)
9398 #define	SFMMU_ML_MTX	(0x1)
9399 
9400 #define	SFMMU_MLSPL_MTX(type, pg)	(((type) == SFMMU_SPL_MTX) ? \
9401 					    SPL_HASH(pg) : MLIST_HASH(pg))
9402 
9403 kmutex_t *
9404 sfmmu_page_enter(struct page *pp)
9405 {
9406 	return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
9407 }
9408 
9409 static void
9410 sfmmu_page_exit(kmutex_t *spl)
9411 {
9412 	mutex_exit(spl);
9413 }
9414 
9415 static int
9416 sfmmu_page_spl_held(struct page *pp)
9417 {
9418 	return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
9419 }
9420 
9421 kmutex_t *
9422 sfmmu_mlist_enter(struct page *pp)
9423 {
9424 	return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
9425 }
9426 
9427 void
9428 sfmmu_mlist_exit(kmutex_t *mml)
9429 {
9430 	mutex_exit(mml);
9431 }
9432 
9433 int
9434 sfmmu_mlist_held(struct page *pp)
9435 {
9436 
9437 	return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
9438 }
9439 
9440 /*
9441  * Common code for sfmmu_mlist_enter() and sfmmu_page_enter().  For
9442  * sfmmu_mlist_enter() case mml_table lock array is used and for
9443  * sfmmu_page_enter() sfmmu_page_lock lock array is used.
9444  *
9445  * The lock is taken on a root page so that it protects an operation on all
9446  * constituent pages of a large page pp belongs to.
9447  *
9448  * The routine takes a lock from the appropriate array. The lock is determined
9449  * by hashing the root page. After taking the lock this routine checks if the
9450  * root page has the same size code that was used to determine the root (i.e
9451  * that root hasn't changed).  If root page has the expected p_szc field we
9452  * have the right lock and it's returned to the caller. If root's p_szc
9453  * decreased we release the lock and retry from the beginning.  This case can
9454  * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
9455  * value and taking the lock. The number of retries due to p_szc decrease is
9456  * limited by the maximum p_szc value. If p_szc is 0 we return the lock
9457  * determined by hashing pp itself.
9458  *
9459  * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
9460  * possible that p_szc can increase. To increase p_szc a thread has to lock
9461  * all constituent pages EXCL and do hat_pageunload() on all of them. All the
9462  * callers that don't hold a page locked recheck if hmeblk through which pp
9463  * was found still maps this pp.  If it doesn't map it anymore returned lock
9464  * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
9465  * p_szc increase after taking the lock it returns this lock without further
9466  * retries because in this case the caller doesn't care about which lock was
9467  * taken. The caller will drop it right away.
9468  *
9469  * After the routine returns it's guaranteed that hat_page_demote() can't
9470  * change p_szc field of any of constituent pages of a large page pp belongs
9471  * to as long as pp was either locked at least SHARED prior to this call or
9472  * the caller finds that hment that pointed to this pp still references this
9473  * pp (this also assumes that the caller holds hme hash bucket lock so that
9474  * the same pp can't be remapped into the same hmeblk after it was unmapped by
9475  * hat_pageunload()).
9476  */
9477 static kmutex_t *
9478 sfmmu_mlspl_enter(struct page *pp, int type)
9479 {
9480 	kmutex_t	*mtx;
9481 	uint_t		prev_rszc = UINT_MAX;
9482 	page_t		*rootpp;
9483 	uint_t		szc;
9484 	uint_t		rszc;
9485 	uint_t		pszc = pp->p_szc;
9486 
9487 	ASSERT(pp != NULL);
9488 
9489 again:
9490 	if (pszc == 0) {
9491 		mtx = SFMMU_MLSPL_MTX(type, pp);
9492 		mutex_enter(mtx);
9493 		return (mtx);
9494 	}
9495 
9496 	/* The lock lives in the root page */
9497 	rootpp = PP_GROUPLEADER(pp, pszc);
9498 	mtx = SFMMU_MLSPL_MTX(type, rootpp);
9499 	mutex_enter(mtx);
9500 
9501 	/*
9502 	 * Return mml in the following 3 cases:
9503 	 *
9504 	 * 1) If pp itself is root since if its p_szc decreased before we took
9505 	 * the lock pp is still the root of smaller szc page. And if its p_szc
9506 	 * increased it doesn't matter what lock we return (see comment in
9507 	 * front of this routine).
9508 	 *
9509 	 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
9510 	 * large page we have the right lock since any previous potential
9511 	 * hat_page_demote() is done demoting from greater than current root's
9512 	 * p_szc because hat_page_demote() changes root's p_szc last. No
9513 	 * further hat_page_demote() can start or be in progress since it
9514 	 * would need the same lock we currently hold.
9515 	 *
9516 	 * 3) If rootpp's p_szc increased since previous iteration it doesn't
9517 	 * matter what lock we return (see comment in front of this routine).
9518 	 */
9519 	if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
9520 	    rszc >= prev_rszc) {
9521 		return (mtx);
9522 	}
9523 
9524 	/*
9525 	 * hat_page_demote() could have decreased root's p_szc.
9526 	 * In this case pp's p_szc must also be smaller than pszc.
9527 	 * Retry.
9528 	 */
9529 	if (rszc < pszc) {
9530 		szc = pp->p_szc;
9531 		if (szc < pszc) {
9532 			mutex_exit(mtx);
9533 			pszc = szc;
9534 			goto again;
9535 		}
9536 		/*
9537 		 * pp's p_szc increased after it was decreased.
9538 		 * page cannot be mapped. Return current lock. The caller
9539 		 * will drop it right away.
9540 		 */
9541 		return (mtx);
9542 	}
9543 
9544 	/*
9545 	 * root's p_szc is greater than pp's p_szc.
9546 	 * hat_page_demote() is not done with all pages
9547 	 * yet. Wait for it to complete.
9548 	 */
9549 	mutex_exit(mtx);
9550 	rootpp = PP_GROUPLEADER(rootpp, rszc);
9551 	mtx = SFMMU_MLSPL_MTX(type, rootpp);
9552 	mutex_enter(mtx);
9553 	mutex_exit(mtx);
9554 	prev_rszc = rszc;
9555 	goto again;
9556 }
9557 
9558 static int
9559 sfmmu_mlspl_held(struct page *pp, int type)
9560 {
9561 	kmutex_t	*mtx;
9562 
9563 	ASSERT(pp != NULL);
9564 	/* The lock lives in the root page */
9565 	pp = PP_PAGEROOT(pp);
9566 	ASSERT(pp != NULL);
9567 
9568 	mtx = SFMMU_MLSPL_MTX(type, pp);
9569 	return (MUTEX_HELD(mtx));
9570 }
9571 
9572 static uint_t
9573 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
9574 {
9575 	struct  hme_blk *hblkp;
9576 
9577 	if (freehblkp != NULL) {
9578 		mutex_enter(&freehblkp_lock);
9579 		if (freehblkp != NULL) {
9580 			/*
9581 			 * If the current thread is owning hblk_reserve,
9582 			 * let it succede even if freehblkcnt is really low.
9583 			 */
9584 			if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
9585 				SFMMU_STAT(sf_get_free_throttle);
9586 				mutex_exit(&freehblkp_lock);
9587 				return (0);
9588 			}
9589 			freehblkcnt--;
9590 			*hmeblkpp = freehblkp;
9591 			hblkp = *hmeblkpp;
9592 			freehblkp = hblkp->hblk_next;
9593 			mutex_exit(&freehblkp_lock);
9594 			hblkp->hblk_next = NULL;
9595 			SFMMU_STAT(sf_get_free_success);
9596 			return (1);
9597 		}
9598 		mutex_exit(&freehblkp_lock);
9599 	}
9600 	SFMMU_STAT(sf_get_free_fail);
9601 	return (0);
9602 }
9603 
9604 static uint_t
9605 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
9606 {
9607 	struct  hme_blk *hblkp;
9608 
9609 	/*
9610 	 * If the current thread is mapping into kernel space,
9611 	 * let it succede even if freehblkcnt is max
9612 	 * so that it will avoid freeing it to kmem.
9613 	 * This will prevent stack overflow due to
9614 	 * possible recursion since kmem_cache_free()
9615 	 * might require creation of a slab which
9616 	 * in turn needs an hmeblk to map that slab;
9617 	 * let's break this vicious chain at the first
9618 	 * opportunity.
9619 	 */
9620 	if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
9621 		mutex_enter(&freehblkp_lock);
9622 		if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
9623 			SFMMU_STAT(sf_put_free_success);
9624 			freehblkcnt++;
9625 			hmeblkp->hblk_next = freehblkp;
9626 			freehblkp = hmeblkp;
9627 			mutex_exit(&freehblkp_lock);
9628 			return (1);
9629 		}
9630 		mutex_exit(&freehblkp_lock);
9631 	}
9632 
9633 	/*
9634 	 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
9635 	 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
9636 	 * we are not in the process of mapping into kernel space.
9637 	 */
9638 	ASSERT(!critical);
9639 	while (freehblkcnt > HBLK_RESERVE_CNT) {
9640 		mutex_enter(&freehblkp_lock);
9641 		if (freehblkcnt > HBLK_RESERVE_CNT) {
9642 			freehblkcnt--;
9643 			hblkp = freehblkp;
9644 			freehblkp = hblkp->hblk_next;
9645 			mutex_exit(&freehblkp_lock);
9646 			ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
9647 			kmem_cache_free(sfmmu8_cache, hblkp);
9648 			continue;
9649 		}
9650 		mutex_exit(&freehblkp_lock);
9651 	}
9652 	SFMMU_STAT(sf_put_free_fail);
9653 	return (0);
9654 }
9655 
9656 static void
9657 sfmmu_hblk_swap(struct hme_blk *new)
9658 {
9659 	struct hme_blk *old, *hblkp, *prev;
9660 	uint64_t hblkpa, prevpa, newpa;
9661 	caddr_t	base, vaddr, endaddr;
9662 	struct hmehash_bucket *hmebp;
9663 	struct sf_hment *osfhme, *nsfhme;
9664 	page_t *pp;
9665 	kmutex_t *pml;
9666 	tte_t tte;
9667 
9668 #ifdef	DEBUG
9669 	hmeblk_tag		hblktag;
9670 	struct hme_blk		*found;
9671 #endif
9672 	old = HBLK_RESERVE;
9673 
9674 	/*
9675 	 * save pa before bcopy clobbers it
9676 	 */
9677 	newpa = new->hblk_nextpa;
9678 
9679 	base = (caddr_t)get_hblk_base(old);
9680 	endaddr = base + get_hblk_span(old);
9681 
9682 	/*
9683 	 * acquire hash bucket lock.
9684 	 */
9685 	hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K);
9686 
9687 	/*
9688 	 * copy contents from old to new
9689 	 */
9690 	bcopy((void *)old, (void *)new, HME8BLK_SZ);
9691 
9692 	/*
9693 	 * add new to hash chain
9694 	 */
9695 	sfmmu_hblk_hash_add(hmebp, new, newpa);
9696 
9697 	/*
9698 	 * search hash chain for hblk_reserve; this needs to be performed
9699 	 * after adding new, otherwise prevpa and prev won't correspond
9700 	 * to the hblk which is prior to old in hash chain when we call
9701 	 * sfmmu_hblk_hash_rm to remove old later.
9702 	 */
9703 	for (prevpa = 0, prev = NULL,
9704 	    hblkpa = hmebp->hmeh_nextpa, hblkp = hmebp->hmeblkp;
9705 	    hblkp != NULL && hblkp != old;
9706 	    prevpa = hblkpa, prev = hblkp,
9707 	    hblkpa = hblkp->hblk_nextpa, hblkp = hblkp->hblk_next);
9708 
9709 	if (hblkp != old)
9710 		panic("sfmmu_hblk_swap: hblk_reserve not found");
9711 
9712 	/*
9713 	 * p_mapping list is still pointing to hments in hblk_reserve;
9714 	 * fix up p_mapping list so that they point to hments in new.
9715 	 *
9716 	 * Since all these mappings are created by hblk_reserve_thread
9717 	 * on the way and it's using at least one of the buffers from each of
9718 	 * the newly minted slabs, there is no danger of any of these
9719 	 * mappings getting unloaded by another thread.
9720 	 *
9721 	 * tsbmiss could only modify ref/mod bits of hments in old/new.
9722 	 * Since all of these hments hold mappings established by segkmem
9723 	 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
9724 	 * have no meaning for the mappings in hblk_reserve.  hments in
9725 	 * old and new are identical except for ref/mod bits.
9726 	 */
9727 	for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
9728 
9729 		HBLKTOHME(osfhme, old, vaddr);
9730 		sfmmu_copytte(&osfhme->hme_tte, &tte);
9731 
9732 		if (TTE_IS_VALID(&tte)) {
9733 			if ((pp = osfhme->hme_page) == NULL)
9734 				panic("sfmmu_hblk_swap: page not mapped");
9735 
9736 			pml = sfmmu_mlist_enter(pp);
9737 
9738 			if (pp != osfhme->hme_page)
9739 				panic("sfmmu_hblk_swap: mapping changed");
9740 
9741 			HBLKTOHME(nsfhme, new, vaddr);
9742 
9743 			HME_ADD(nsfhme, pp);
9744 			HME_SUB(osfhme, pp);
9745 
9746 			sfmmu_mlist_exit(pml);
9747 		}
9748 	}
9749 
9750 	/*
9751 	 * remove old from hash chain
9752 	 */
9753 	sfmmu_hblk_hash_rm(hmebp, old, prevpa, prev);
9754 
9755 #ifdef	DEBUG
9756 
9757 	hblktag.htag_id = ksfmmup;
9758 	hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
9759 	hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
9760 	HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
9761 
9762 	if (found != new)
9763 		panic("sfmmu_hblk_swap: new hblk not found");
9764 #endif
9765 
9766 	SFMMU_HASH_UNLOCK(hmebp);
9767 
9768 	/*
9769 	 * Reset hblk_reserve
9770 	 */
9771 	bzero((void *)old, HME8BLK_SZ);
9772 	old->hblk_nextpa = va_to_pa((caddr_t)old);
9773 }
9774 
9775 /*
9776  * Grab the mlist mutex for both pages passed in.
9777  *
9778  * low and high will be returned as pointers to the mutexes for these pages.
9779  * low refers to the mutex residing in the lower bin of the mlist hash, while
9780  * high refers to the mutex residing in the higher bin of the mlist hash.  This
9781  * is due to the locking order restrictions on the same thread grabbing
9782  * multiple mlist mutexes.  The low lock must be acquired before the high lock.
9783  *
9784  * If both pages hash to the same mutex, only grab that single mutex, and
9785  * high will be returned as NULL
9786  * If the pages hash to different bins in the hash, grab the lower addressed
9787  * lock first and then the higher addressed lock in order to follow the locking
9788  * rules involved with the same thread grabbing multiple mlist mutexes.
9789  * low and high will both have non-NULL values.
9790  */
9791 static void
9792 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
9793     kmutex_t **low, kmutex_t **high)
9794 {
9795 	kmutex_t	*mml_targ, *mml_repl;
9796 
9797 	/*
9798 	 * no need to do the dance around szc as in sfmmu_mlist_enter()
9799 	 * because this routine is only called by hat_page_relocate() and all
9800 	 * targ and repl pages are already locked EXCL so szc can't change.
9801 	 */
9802 
9803 	mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
9804 	mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
9805 
9806 	if (mml_targ == mml_repl) {
9807 		*low = mml_targ;
9808 		*high = NULL;
9809 	} else {
9810 		if (mml_targ < mml_repl) {
9811 			*low = mml_targ;
9812 			*high = mml_repl;
9813 		} else {
9814 			*low = mml_repl;
9815 			*high = mml_targ;
9816 		}
9817 	}
9818 
9819 	mutex_enter(*low);
9820 	if (*high)
9821 		mutex_enter(*high);
9822 }
9823 
9824 static void
9825 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
9826 {
9827 	if (high)
9828 		mutex_exit(high);
9829 	mutex_exit(low);
9830 }
9831 
9832 static hatlock_t *
9833 sfmmu_hat_enter(sfmmu_t *sfmmup)
9834 {
9835 	hatlock_t	*hatlockp;
9836 
9837 	if (sfmmup != ksfmmup) {
9838 		hatlockp = TSB_HASH(sfmmup);
9839 		mutex_enter(HATLOCK_MUTEXP(hatlockp));
9840 		return (hatlockp);
9841 	}
9842 	return (NULL);
9843 }
9844 
9845 static hatlock_t *
9846 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
9847 {
9848 	hatlock_t	*hatlockp;
9849 
9850 	if (sfmmup != ksfmmup) {
9851 		hatlockp = TSB_HASH(sfmmup);
9852 		if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
9853 			return (NULL);
9854 		return (hatlockp);
9855 	}
9856 	return (NULL);
9857 }
9858 
9859 static void
9860 sfmmu_hat_exit(hatlock_t *hatlockp)
9861 {
9862 	if (hatlockp != NULL)
9863 		mutex_exit(HATLOCK_MUTEXP(hatlockp));
9864 }
9865 
9866 static void
9867 sfmmu_hat_lock_all(void)
9868 {
9869 	int i;
9870 	for (i = 0; i < SFMMU_NUM_LOCK; i++)
9871 		mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
9872 }
9873 
9874 static void
9875 sfmmu_hat_unlock_all(void)
9876 {
9877 	int i;
9878 	for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
9879 		mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
9880 }
9881 
9882 int
9883 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
9884 {
9885 	ASSERT(sfmmup != ksfmmup);
9886 	return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
9887 }
9888 
9889 /*
9890  * Locking primitives to provide consistency between ISM unmap
9891  * and other operations.  Since ISM unmap can take a long time, we
9892  * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
9893  * contention on the hatlock buckets while ISM segments are being
9894  * unmapped.  The tradeoff is that the flags don't prevent priority
9895  * inversion from occurring, so we must request kernel priority in
9896  * case we have to sleep to keep from getting buried while holding
9897  * the HAT_ISMBUSY flag set, which in turn could block other kernel
9898  * threads from running (for example, in sfmmu_uvatopfn()).
9899  */
9900 static void
9901 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
9902 {
9903 	hatlock_t *hatlockp;
9904 
9905 	THREAD_KPRI_REQUEST();
9906 	if (!hatlock_held)
9907 		hatlockp = sfmmu_hat_enter(sfmmup);
9908 	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
9909 		cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
9910 	SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
9911 	if (!hatlock_held)
9912 		sfmmu_hat_exit(hatlockp);
9913 }
9914 
9915 static void
9916 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
9917 {
9918 	hatlock_t *hatlockp;
9919 
9920 	if (!hatlock_held)
9921 		hatlockp = sfmmu_hat_enter(sfmmup);
9922 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
9923 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
9924 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
9925 	if (!hatlock_held)
9926 		sfmmu_hat_exit(hatlockp);
9927 	THREAD_KPRI_RELEASE();
9928 }
9929 
9930 /*
9931  *
9932  * Algorithm:
9933  *
9934  * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
9935  *	hblks.
9936  *
9937  * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
9938  *
9939  * 		(a) try to return an hblk from reserve pool of free hblks;
9940  *		(b) if the reserve pool is empty, acquire hblk_reserve_lock
9941  *		    and return hblk_reserve.
9942  *
9943  * (3) call kmem_cache_alloc() to allocate hblk;
9944  *
9945  *		(a) if hblk_reserve_lock is held by the current thread,
9946  *		    atomically replace hblk_reserve by the hblk that is
9947  *		    returned by kmem_cache_alloc; release hblk_reserve_lock
9948  *		    and call kmem_cache_alloc() again.
9949  *		(b) if reserve pool is not full, add the hblk that is
9950  *		    returned by kmem_cache_alloc to reserve pool and
9951  *		    call kmem_cache_alloc again.
9952  *
9953  */
9954 static struct hme_blk *
9955 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
9956 	struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
9957 	uint_t flags)
9958 {
9959 	struct hme_blk *hmeblkp = NULL;
9960 	struct hme_blk *newhblkp;
9961 	struct hme_blk *shw_hblkp = NULL;
9962 	struct kmem_cache *sfmmu_cache = NULL;
9963 	uint64_t hblkpa;
9964 	ulong_t index;
9965 	uint_t owner;		/* set to 1 if using hblk_reserve */
9966 	uint_t forcefree;
9967 	int sleep;
9968 
9969 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
9970 
9971 	/*
9972 	 * If segkmem is not created yet, allocate from static hmeblks
9973 	 * created at the end of startup_modules().  See the block comment
9974 	 * in startup_modules() describing how we estimate the number of
9975 	 * static hmeblks that will be needed during re-map.
9976 	 */
9977 	if (!hblk_alloc_dynamic) {
9978 
9979 		if (size == TTE8K) {
9980 			index = nucleus_hblk8.index;
9981 			if (index >= nucleus_hblk8.len) {
9982 				/*
9983 				 * If we panic here, see startup_modules() to
9984 				 * make sure that we are calculating the
9985 				 * number of hblk8's that we need correctly.
9986 				 */
9987 				panic("no nucleus hblk8 to allocate");
9988 			}
9989 			hmeblkp =
9990 			    (struct hme_blk *)&nucleus_hblk8.list[index];
9991 			nucleus_hblk8.index++;
9992 			SFMMU_STAT(sf_hblk8_nalloc);
9993 		} else {
9994 			index = nucleus_hblk1.index;
9995 			if (nucleus_hblk1.index >= nucleus_hblk1.len) {
9996 				/*
9997 				 * If we panic here, see startup_modules()
9998 				 * and H8TOH1; most likely you need to
9999 				 * update the calculation of the number
10000 				 * of hblk1's the kernel needs to boot.
10001 				 */
10002 				panic("no nucleus hblk1 to allocate");
10003 			}
10004 			hmeblkp =
10005 			    (struct hme_blk *)&nucleus_hblk1.list[index];
10006 			nucleus_hblk1.index++;
10007 			SFMMU_STAT(sf_hblk1_nalloc);
10008 		}
10009 
10010 		goto hblk_init;
10011 	}
10012 
10013 	SFMMU_HASH_UNLOCK(hmebp);
10014 
10015 	if (sfmmup != KHATID) {
10016 		if (mmu_page_sizes == max_mmu_page_sizes) {
10017 			if (size < TTE256M)
10018 				shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
10019 				    size, flags);
10020 		} else {
10021 			if (size < TTE4M)
10022 				shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
10023 				    size, flags);
10024 		}
10025 	}
10026 
10027 fill_hblk:
10028 	owner = (hblk_reserve_thread == curthread) ? 1 : 0;
10029 
10030 	if (owner && size == TTE8K) {
10031 
10032 		/*
10033 		 * We are really in a tight spot. We already own
10034 		 * hblk_reserve and we need another hblk.  In anticipation
10035 		 * of this kind of scenario, we specifically set aside
10036 		 * HBLK_RESERVE_MIN number of hblks to be used exclusively
10037 		 * by owner of hblk_reserve.
10038 		 */
10039 		SFMMU_STAT(sf_hblk_recurse_cnt);
10040 
10041 		if (!sfmmu_get_free_hblk(&hmeblkp, 1))
10042 			panic("sfmmu_hblk_alloc: reserve list is empty");
10043 
10044 		goto hblk_verify;
10045 	}
10046 
10047 	ASSERT(!owner);
10048 
10049 	if ((flags & HAT_NO_KALLOC) == 0) {
10050 
10051 		sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
10052 		sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
10053 
10054 		if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
10055 			hmeblkp = sfmmu_hblk_steal(size);
10056 		} else {
10057 			/*
10058 			 * if we are the owner of hblk_reserve,
10059 			 * swap hblk_reserve with hmeblkp and
10060 			 * start a fresh life.  Hope things go
10061 			 * better this time.
10062 			 */
10063 			if (hblk_reserve_thread == curthread) {
10064 				ASSERT(sfmmu_cache == sfmmu8_cache);
10065 				sfmmu_hblk_swap(hmeblkp);
10066 				hblk_reserve_thread = NULL;
10067 				mutex_exit(&hblk_reserve_lock);
10068 				goto fill_hblk;
10069 			}
10070 			/*
10071 			 * let's donate this hblk to our reserve list if
10072 			 * we are not mapping kernel range
10073 			 */
10074 			if (size == TTE8K && sfmmup != KHATID)
10075 				if (sfmmu_put_free_hblk(hmeblkp, 0))
10076 					goto fill_hblk;
10077 		}
10078 	} else {
10079 		/*
10080 		 * We are here to map the slab in sfmmu8_cache; let's
10081 		 * check if we could tap our reserve list; if successful,
10082 		 * this will avoid the pain of going thru sfmmu_hblk_swap
10083 		 */
10084 		SFMMU_STAT(sf_hblk_slab_cnt);
10085 		if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
10086 			/*
10087 			 * let's start hblk_reserve dance
10088 			 */
10089 			SFMMU_STAT(sf_hblk_reserve_cnt);
10090 			owner = 1;
10091 			mutex_enter(&hblk_reserve_lock);
10092 			hmeblkp = HBLK_RESERVE;
10093 			hblk_reserve_thread = curthread;
10094 		}
10095 	}
10096 
10097 hblk_verify:
10098 	ASSERT(hmeblkp != NULL);
10099 	set_hblk_sz(hmeblkp, size);
10100 	ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10101 	SFMMU_HASH_LOCK(hmebp);
10102 	HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
10103 	if (newhblkp != NULL) {
10104 		SFMMU_HASH_UNLOCK(hmebp);
10105 		if (hmeblkp != HBLK_RESERVE) {
10106 			/*
10107 			 * This is really tricky!
10108 			 *
10109 			 * vmem_alloc(vmem_seg_arena)
10110 			 *  vmem_alloc(vmem_internal_arena)
10111 			 *   segkmem_alloc(heap_arena)
10112 			 *    vmem_alloc(heap_arena)
10113 			 *    page_create()
10114 			 *    hat_memload()
10115 			 *	kmem_cache_free()
10116 			 *	 kmem_cache_alloc()
10117 			 *	  kmem_slab_create()
10118 			 *	   vmem_alloc(kmem_internal_arena)
10119 			 *	    segkmem_alloc(heap_arena)
10120 			 *		vmem_alloc(heap_arena)
10121 			 *		page_create()
10122 			 *		hat_memload()
10123 			 *		  kmem_cache_free()
10124 			 *		...
10125 			 *
10126 			 * Thus, hat_memload() could call kmem_cache_free
10127 			 * for enough number of times that we could easily
10128 			 * hit the bottom of the stack or run out of reserve
10129 			 * list of vmem_seg structs.  So, we must donate
10130 			 * this hblk to reserve list if it's allocated
10131 			 * from sfmmu8_cache *and* mapping kernel range.
10132 			 * We don't need to worry about freeing hmeblk1's
10133 			 * to kmem since they don't map any kmem slabs.
10134 			 *
10135 			 * Note: When segkmem supports largepages, we must
10136 			 * free hmeblk1's to reserve list as well.
10137 			 */
10138 			forcefree = (sfmmup == KHATID) ? 1 : 0;
10139 			if (size == TTE8K &&
10140 			    sfmmu_put_free_hblk(hmeblkp, forcefree)) {
10141 				goto re_verify;
10142 			}
10143 			ASSERT(sfmmup != KHATID);
10144 			kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
10145 		} else {
10146 			/*
10147 			 * Hey! we don't need hblk_reserve any more.
10148 			 */
10149 			ASSERT(owner);
10150 			hblk_reserve_thread = NULL;
10151 			mutex_exit(&hblk_reserve_lock);
10152 			owner = 0;
10153 		}
10154 re_verify:
10155 		/*
10156 		 * let's check if the goodies are still present
10157 		 */
10158 		SFMMU_HASH_LOCK(hmebp);
10159 		HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
10160 		if (newhblkp != NULL) {
10161 			/*
10162 			 * return newhblkp if it's not hblk_reserve;
10163 			 * if newhblkp is hblk_reserve, return it
10164 			 * _only if_ we are the owner of hblk_reserve.
10165 			 */
10166 			if (newhblkp != HBLK_RESERVE || owner) {
10167 				return (newhblkp);
10168 			} else {
10169 				/*
10170 				 * we just hit hblk_reserve in the hash and
10171 				 * we are not the owner of that;
10172 				 *
10173 				 * block until hblk_reserve_thread completes
10174 				 * swapping hblk_reserve and try the dance
10175 				 * once again.
10176 				 */
10177 				SFMMU_HASH_UNLOCK(hmebp);
10178 				mutex_enter(&hblk_reserve_lock);
10179 				mutex_exit(&hblk_reserve_lock);
10180 				SFMMU_STAT(sf_hblk_reserve_hit);
10181 				goto fill_hblk;
10182 			}
10183 		} else {
10184 			/*
10185 			 * it's no more! try the dance once again.
10186 			 */
10187 			SFMMU_HASH_UNLOCK(hmebp);
10188 			goto fill_hblk;
10189 		}
10190 	}
10191 
10192 hblk_init:
10193 	set_hblk_sz(hmeblkp, size);
10194 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10195 	hmeblkp->hblk_next = (struct hme_blk *)NULL;
10196 	hmeblkp->hblk_tag = hblktag;
10197 	hmeblkp->hblk_shadow = shw_hblkp;
10198 	hblkpa = hmeblkp->hblk_nextpa;
10199 	hmeblkp->hblk_nextpa = 0;
10200 
10201 	ASSERT(get_hblk_ttesz(hmeblkp) == size);
10202 	ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
10203 	ASSERT(hmeblkp->hblk_hmecnt == 0);
10204 	ASSERT(hmeblkp->hblk_vcnt == 0);
10205 	ASSERT(hmeblkp->hblk_lckcnt == 0);
10206 	ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
10207 	sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
10208 	return (hmeblkp);
10209 }
10210 
10211 /*
10212  * This function performs any cleanup required on the hme_blk
10213  * and returns it to the free list.
10214  */
10215 /* ARGSUSED */
10216 static void
10217 sfmmu_hblk_free(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
10218 	uint64_t hblkpa, struct hme_blk **listp)
10219 {
10220 	int shw_size, vshift;
10221 	struct hme_blk *shw_hblkp;
10222 	uint_t		shw_mask, newshw_mask;
10223 	uintptr_t	vaddr;
10224 	int		size;
10225 	uint_t		critical;
10226 
10227 	ASSERT(hmeblkp);
10228 	ASSERT(!hmeblkp->hblk_hmecnt);
10229 	ASSERT(!hmeblkp->hblk_vcnt);
10230 	ASSERT(!hmeblkp->hblk_lckcnt);
10231 	ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
10232 	ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
10233 
10234 	critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
10235 
10236 	size = get_hblk_ttesz(hmeblkp);
10237 	shw_hblkp = hmeblkp->hblk_shadow;
10238 	if (shw_hblkp) {
10239 		ASSERT(hblktosfmmu(hmeblkp) != KHATID);
10240 		if (mmu_page_sizes == max_mmu_page_sizes) {
10241 			ASSERT(size < TTE256M);
10242 		} else {
10243 			ASSERT(size < TTE4M);
10244 		}
10245 
10246 		shw_size = get_hblk_ttesz(shw_hblkp);
10247 		vaddr = get_hblk_base(hmeblkp);
10248 		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
10249 		ASSERT(vshift < 8);
10250 		/*
10251 		 * Atomically clear shadow mask bit
10252 		 */
10253 		do {
10254 			shw_mask = shw_hblkp->hblk_shw_mask;
10255 			ASSERT(shw_mask & (1 << vshift));
10256 			newshw_mask = shw_mask & ~(1 << vshift);
10257 			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
10258 				shw_mask, newshw_mask);
10259 		} while (newshw_mask != shw_mask);
10260 		hmeblkp->hblk_shadow = NULL;
10261 	}
10262 	hmeblkp->hblk_next = NULL;
10263 	hmeblkp->hblk_nextpa = hblkpa;
10264 	hmeblkp->hblk_shw_bit = 0;
10265 
10266 	if (hmeblkp->hblk_nuc_bit == 0) {
10267 
10268 		if (size == TTE8K && sfmmu_put_free_hblk(hmeblkp, critical))
10269 			return;
10270 
10271 		hmeblkp->hblk_next = *listp;
10272 		*listp = hmeblkp;
10273 	}
10274 }
10275 
10276 static void
10277 sfmmu_hblks_list_purge(struct hme_blk **listp)
10278 {
10279 	struct hme_blk	*hmeblkp;
10280 
10281 	while ((hmeblkp = *listp) != NULL) {
10282 		*listp = hmeblkp->hblk_next;
10283 		kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
10284 	}
10285 }
10286 
10287 #define	BUCKETS_TO_SEARCH_BEFORE_UNLOAD	30
10288 
10289 static uint_t sfmmu_hblk_steal_twice;
10290 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
10291 
10292 /*
10293  * Steal a hmeblk
10294  * Enough hmeblks were allocated at startup (nucleus hmeblks) and also
10295  * hmeblks were added dynamically. We should never ever not be able to
10296  * find one. Look for an unused/unlocked hmeblk in user hash table.
10297  */
10298 static struct hme_blk *
10299 sfmmu_hblk_steal(int size)
10300 {
10301 	static struct hmehash_bucket *uhmehash_steal_hand = NULL;
10302 	struct hmehash_bucket *hmebp;
10303 	struct hme_blk *hmeblkp = NULL, *pr_hblk;
10304 	uint64_t hblkpa, prevpa;
10305 	int i;
10306 
10307 	for (;;) {
10308 		hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
10309 			uhmehash_steal_hand;
10310 		ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
10311 
10312 		for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
10313 		    BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
10314 			SFMMU_HASH_LOCK(hmebp);
10315 			hmeblkp = hmebp->hmeblkp;
10316 			hblkpa = hmebp->hmeh_nextpa;
10317 			prevpa = 0;
10318 			pr_hblk = NULL;
10319 			while (hmeblkp) {
10320 				/*
10321 				 * check if it is a hmeblk that is not locked
10322 				 * and not shared. skip shadow hmeblks with
10323 				 * shadow_mask set i.e valid count non zero.
10324 				 */
10325 				if ((get_hblk_ttesz(hmeblkp) == size) &&
10326 				    (hmeblkp->hblk_shw_bit == 0 ||
10327 					hmeblkp->hblk_vcnt == 0) &&
10328 				    (hmeblkp->hblk_lckcnt == 0)) {
10329 					/*
10330 					 * there is a high probability that we
10331 					 * will find a free one. search some
10332 					 * buckets for a free hmeblk initially
10333 					 * before unloading a valid hmeblk.
10334 					 */
10335 					if ((hmeblkp->hblk_vcnt == 0 &&
10336 					    hmeblkp->hblk_hmecnt == 0) || (i >=
10337 					    BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
10338 						if (sfmmu_steal_this_hblk(hmebp,
10339 						    hmeblkp, hblkpa, prevpa,
10340 						    pr_hblk)) {
10341 							/*
10342 							 * Hblk is unloaded
10343 							 * successfully
10344 							 */
10345 							break;
10346 						}
10347 					}
10348 				}
10349 				pr_hblk = hmeblkp;
10350 				prevpa = hblkpa;
10351 				hblkpa = hmeblkp->hblk_nextpa;
10352 				hmeblkp = hmeblkp->hblk_next;
10353 			}
10354 
10355 			SFMMU_HASH_UNLOCK(hmebp);
10356 			if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
10357 				hmebp = uhme_hash;
10358 		}
10359 		uhmehash_steal_hand = hmebp;
10360 
10361 		if (hmeblkp != NULL)
10362 			break;
10363 
10364 		/*
10365 		 * in the worst case, look for a free one in the kernel
10366 		 * hash table.
10367 		 */
10368 		for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
10369 			SFMMU_HASH_LOCK(hmebp);
10370 			hmeblkp = hmebp->hmeblkp;
10371 			hblkpa = hmebp->hmeh_nextpa;
10372 			prevpa = 0;
10373 			pr_hblk = NULL;
10374 			while (hmeblkp) {
10375 				/*
10376 				 * check if it is free hmeblk
10377 				 */
10378 				if ((get_hblk_ttesz(hmeblkp) == size) &&
10379 				    (hmeblkp->hblk_lckcnt == 0) &&
10380 				    (hmeblkp->hblk_vcnt == 0) &&
10381 				    (hmeblkp->hblk_hmecnt == 0)) {
10382 					if (sfmmu_steal_this_hblk(hmebp,
10383 					    hmeblkp, hblkpa, prevpa, pr_hblk)) {
10384 						break;
10385 					} else {
10386 						/*
10387 						 * Cannot fail since we have
10388 						 * hash lock.
10389 						 */
10390 						panic("fail to steal?");
10391 					}
10392 				}
10393 
10394 				pr_hblk = hmeblkp;
10395 				prevpa = hblkpa;
10396 				hblkpa = hmeblkp->hblk_nextpa;
10397 				hmeblkp = hmeblkp->hblk_next;
10398 			}
10399 
10400 			SFMMU_HASH_UNLOCK(hmebp);
10401 			if (hmebp++ == &khme_hash[KHMEHASH_SZ])
10402 				hmebp = khme_hash;
10403 		}
10404 
10405 		if (hmeblkp != NULL)
10406 			break;
10407 		sfmmu_hblk_steal_twice++;
10408 	}
10409 	return (hmeblkp);
10410 }
10411 
10412 /*
10413  * This routine does real work to prepare a hblk to be "stolen" by
10414  * unloading the mappings, updating shadow counts ....
10415  * It returns 1 if the block is ready to be reused (stolen), or 0
10416  * means the block cannot be stolen yet- pageunload is still working
10417  * on this hblk.
10418  */
10419 static int
10420 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
10421 	uint64_t hblkpa, uint64_t prevpa, struct hme_blk *pr_hblk)
10422 {
10423 	int shw_size, vshift;
10424 	struct hme_blk *shw_hblkp;
10425 	uintptr_t vaddr;
10426 	uint_t shw_mask, newshw_mask;
10427 
10428 	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10429 
10430 	/*
10431 	 * check if the hmeblk is free, unload if necessary
10432 	 */
10433 	if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
10434 		sfmmu_t *sfmmup;
10435 		demap_range_t dmr;
10436 
10437 		sfmmup = hblktosfmmu(hmeblkp);
10438 		DEMAP_RANGE_INIT(sfmmup, &dmr);
10439 		(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
10440 		    (caddr_t)get_hblk_base(hmeblkp),
10441 		    get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
10442 		DEMAP_RANGE_FLUSH(&dmr);
10443 		if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
10444 			/*
10445 			 * Pageunload is working on the same hblk.
10446 			 */
10447 			return (0);
10448 		}
10449 
10450 		sfmmu_hblk_steal_unload_count++;
10451 	}
10452 
10453 	ASSERT(hmeblkp->hblk_lckcnt == 0);
10454 	ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
10455 
10456 	sfmmu_hblk_hash_rm(hmebp, hmeblkp, prevpa, pr_hblk);
10457 	hmeblkp->hblk_nextpa = hblkpa;
10458 
10459 	shw_hblkp = hmeblkp->hblk_shadow;
10460 	if (shw_hblkp) {
10461 		shw_size = get_hblk_ttesz(shw_hblkp);
10462 		vaddr = get_hblk_base(hmeblkp);
10463 		vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
10464 		ASSERT(vshift < 8);
10465 		/*
10466 		 * Atomically clear shadow mask bit
10467 		 */
10468 		do {
10469 			shw_mask = shw_hblkp->hblk_shw_mask;
10470 			ASSERT(shw_mask & (1 << vshift));
10471 			newshw_mask = shw_mask & ~(1 << vshift);
10472 			newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
10473 				shw_mask, newshw_mask);
10474 		} while (newshw_mask != shw_mask);
10475 		hmeblkp->hblk_shadow = NULL;
10476 	}
10477 
10478 	/*
10479 	 * remove shadow bit if we are stealing an unused shadow hmeblk.
10480 	 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if
10481 	 * we are indeed allocating a shadow hmeblk.
10482 	 */
10483 	hmeblkp->hblk_shw_bit = 0;
10484 
10485 	sfmmu_hblk_steal_count++;
10486 	SFMMU_STAT(sf_steal_count);
10487 
10488 	return (1);
10489 }
10490 
10491 struct hme_blk *
10492 sfmmu_hmetohblk(struct sf_hment *sfhme)
10493 {
10494 	struct hme_blk *hmeblkp;
10495 	struct sf_hment *sfhme0;
10496 	struct hme_blk *hblk_dummy = 0;
10497 
10498 	/*
10499 	 * No dummy sf_hments, please.
10500 	 */
10501 	ASSERT(sfhme->hme_tte.ll != 0);
10502 
10503 	sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
10504 	hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
10505 		(uintptr_t)&hblk_dummy->hblk_hme[0]);
10506 
10507 	return (hmeblkp);
10508 }
10509 
10510 /*
10511  * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
10512  * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
10513  * KM_SLEEP allocation.
10514  *
10515  * Return 0 on success, -1 otherwise.
10516  */
10517 static void
10518 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
10519 {
10520 	struct tsb_info *tsbinfop, *next;
10521 	tsb_replace_rc_t rc;
10522 	boolean_t gotfirst = B_FALSE;
10523 
10524 	ASSERT(sfmmup != ksfmmup);
10525 	ASSERT(sfmmu_hat_lock_held(sfmmup));
10526 
10527 	while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
10528 		cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10529 	}
10530 
10531 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10532 		SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
10533 	} else {
10534 		return;
10535 	}
10536 
10537 	ASSERT(sfmmup->sfmmu_tsb != NULL);
10538 
10539 	/*
10540 	 * Loop over all tsbinfo's replacing them with ones that actually have
10541 	 * a TSB.  If any of the replacements ever fail, bail out of the loop.
10542 	 */
10543 	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
10544 		ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
10545 		next = tsbinfop->tsb_next;
10546 		rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
10547 		    hatlockp, TSB_SWAPIN);
10548 		if (rc != TSB_SUCCESS) {
10549 			break;
10550 		}
10551 		gotfirst = B_TRUE;
10552 	}
10553 
10554 	switch (rc) {
10555 	case TSB_SUCCESS:
10556 		SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
10557 		cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10558 		return;
10559 	case TSB_ALLOCFAIL:
10560 		break;
10561 	default:
10562 		panic("sfmmu_replace_tsb returned unrecognized failure code "
10563 		    "%d", rc);
10564 	}
10565 
10566 	/*
10567 	 * In this case, we failed to get one of our TSBs.  If we failed to
10568 	 * get the first TSB, get one of minimum size (8KB).  Walk the list
10569 	 * and throw away the tsbinfos, starting where the allocation failed;
10570 	 * we can get by with just one TSB as long as we don't leave the
10571 	 * SWAPPED tsbinfo structures lying around.
10572 	 */
10573 	tsbinfop = sfmmup->sfmmu_tsb;
10574 	next = tsbinfop->tsb_next;
10575 	tsbinfop->tsb_next = NULL;
10576 
10577 	sfmmu_hat_exit(hatlockp);
10578 	for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
10579 		next = tsbinfop->tsb_next;
10580 		sfmmu_tsbinfo_free(tsbinfop);
10581 	}
10582 	hatlockp = sfmmu_hat_enter(sfmmup);
10583 
10584 	/*
10585 	 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
10586 	 * pages.
10587 	 */
10588 	if (!gotfirst) {
10589 		tsbinfop = sfmmup->sfmmu_tsb;
10590 		rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
10591 		    hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
10592 		ASSERT(rc == TSB_SUCCESS);
10593 	}
10594 
10595 	SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
10596 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10597 }
10598 
10599 /*
10600  * Handle exceptions for low level tsb_handler.
10601  *
10602  * There are many scenarios that could land us here:
10603  *
10604  * If the context is invalid we land here. The context can be invalid
10605  * for 3 reasons: 1) we couldn't allocate a new context and now need to
10606  * perform a wrap around operation in order to allocate a new context.
10607  * 2) Context was invalidated to change pagesize programming 3) ISMs or
10608  * TSBs configuration is changeing for this process and we are forced into
10609  * here to do a syncronization operation. If the context is valid we can
10610  * be here from window trap hanlder. In this case just call trap to handle
10611  * the fault.
10612  *
10613  * Note that the process will run in INVALID_CONTEXT before
10614  * faulting into here and subsequently loading the MMU registers
10615  * (including the TSB base register) associated with this process.
10616  * For this reason, the trap handlers must all test for
10617  * INVALID_CONTEXT before attempting to access any registers other
10618  * than the context registers.
10619  */
10620 void
10621 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
10622 {
10623 	sfmmu_t *sfmmup;
10624 	uint_t ctxnum;
10625 	klwp_id_t lwp;
10626 	char lwp_save_state;
10627 	hatlock_t *hatlockp;
10628 	struct tsb_info *tsbinfop;
10629 
10630 	SFMMU_STAT(sf_tsb_exceptions);
10631 	SFMMU_MMU_STAT(mmu_tsb_exceptions);
10632 	sfmmup = astosfmmu(curthread->t_procp->p_as);
10633 	ctxnum = tagaccess & TAGACC_CTX_MASK;
10634 
10635 	ASSERT(sfmmup != ksfmmup && ctxnum != KCONTEXT);
10636 	ASSERT(sfmmup->sfmmu_ismhat == 0);
10637 	/*
10638 	 * First, make sure we come out of here with a valid ctx,
10639 	 * since if we don't get one we'll simply loop on the
10640 	 * faulting instruction.
10641 	 *
10642 	 * If the ISM mappings are changing, the TSB is being relocated, or
10643 	 * the process is swapped out we serialize behind the controlling
10644 	 * thread with the sfmmu_flags and sfmmu_tsb_cv condition variable.
10645 	 * Otherwise we synchronize with the context stealer or the thread
10646 	 * that required us to change out our MMU registers (such
10647 	 * as a thread changing out our TSB while we were running) by
10648 	 * locking the HAT and grabbing the rwlock on the context as a
10649 	 * reader temporarily.
10650 	 */
10651 	ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
10652 	    ctxnum == INVALID_CONTEXT);
10653 
10654 	if (ctxnum == INVALID_CONTEXT) {
10655 		/*
10656 		 * Must set lwp state to LWP_SYS before
10657 		 * trying to acquire any adaptive lock
10658 		 */
10659 		lwp = ttolwp(curthread);
10660 		ASSERT(lwp);
10661 		lwp_save_state = lwp->lwp_state;
10662 		lwp->lwp_state = LWP_SYS;
10663 
10664 		hatlockp = sfmmu_hat_enter(sfmmup);
10665 retry:
10666 		for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
10667 		    tsbinfop = tsbinfop->tsb_next) {
10668 			if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
10669 				cv_wait(&sfmmup->sfmmu_tsb_cv,
10670 				    HATLOCK_MUTEXP(hatlockp));
10671 				goto retry;
10672 			}
10673 		}
10674 
10675 		/*
10676 		 * Wait for ISM maps to be updated.
10677 		 */
10678 		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
10679 			cv_wait(&sfmmup->sfmmu_tsb_cv,
10680 			    HATLOCK_MUTEXP(hatlockp));
10681 			goto retry;
10682 		}
10683 
10684 		/*
10685 		 * If we're swapping in, get TSB(s).  Note that we must do
10686 		 * this before we get a ctx or load the MMU state.  Once
10687 		 * we swap in we have to recheck to make sure the TSB(s) and
10688 		 * ISM mappings didn't change while we slept.
10689 		 */
10690 		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10691 			sfmmu_tsb_swapin(sfmmup, hatlockp);
10692 			goto retry;
10693 		}
10694 
10695 		sfmmu_get_ctx(sfmmup);
10696 
10697 		sfmmu_hat_exit(hatlockp);
10698 		/*
10699 		 * Must restore lwp_state if not calling
10700 		 * trap() for further processing. Restore
10701 		 * it anyway.
10702 		 */
10703 		lwp->lwp_state = lwp_save_state;
10704 		if (sfmmup->sfmmu_ttecnt[TTE8K] != 0 ||
10705 		    sfmmup->sfmmu_ttecnt[TTE64K] != 0 ||
10706 		    sfmmup->sfmmu_ttecnt[TTE512K] != 0 ||
10707 		    sfmmup->sfmmu_ttecnt[TTE4M] != 0 ||
10708 		    sfmmup->sfmmu_ttecnt[TTE32M] != 0 ||
10709 		    sfmmup->sfmmu_ttecnt[TTE256M] != 0) {
10710 			return;
10711 		}
10712 		if (traptype == T_DATA_PROT) {
10713 			traptype = T_DATA_MMU_MISS;
10714 		}
10715 	}
10716 	trap(rp, (caddr_t)tagaccess, traptype, 0);
10717 }
10718 
10719 /*
10720  * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and
10721  * TTE_SUSPENDED bit set in tte we block on aquiring a page lock
10722  * rather than spinning to avoid send mondo timeouts with
10723  * interrupts enabled. When the lock is acquired it is immediately
10724  * released and we return back to sfmmu_vatopfn just after
10725  * the GET_TTE call.
10726  */
10727 void
10728 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
10729 {
10730 	struct page	**pp;
10731 
10732 	(void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
10733 	as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
10734 }
10735 
10736 /*
10737  * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and
10738  * TTE_SUSPENDED bit set in tte. We do this so that we can handle
10739  * cross traps which cannot be handled while spinning in the
10740  * trap handlers. Simply enter and exit the kpr_suspendlock spin
10741  * mutex, which is held by the holder of the suspend bit, and then
10742  * retry the trapped instruction after unwinding.
10743  */
10744 /*ARGSUSED*/
10745 void
10746 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
10747 {
10748 	ASSERT(curthread != kreloc_thread);
10749 	mutex_enter(&kpr_suspendlock);
10750 	mutex_exit(&kpr_suspendlock);
10751 }
10752 
10753 /*
10754  * Special routine to flush out ism mappings- TSBs, TLBs and D-caches.
10755  * This routine may be called with all cpu's captured. Therefore, the
10756  * caller is responsible for holding all locks and disabling kernel
10757  * preemption.
10758  */
10759 /* ARGSUSED */
10760 static void
10761 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
10762 	struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
10763 {
10764 	cpuset_t 	cpuset;
10765 	caddr_t 	va;
10766 	ism_ment_t	*ment;
10767 	sfmmu_t		*sfmmup;
10768 	int 		vcolor;
10769 	int		ttesz;
10770 
10771 	/*
10772 	 * Walk the ism_hat's mapping list and flush the page
10773 	 * from every hat sharing this ism_hat. This routine
10774 	 * may be called while all cpu's have been captured.
10775 	 * Therefore we can't attempt to grab any locks. For now
10776 	 * this means we will protect the ism mapping list under
10777 	 * a single lock which will be grabbed by the caller.
10778 	 * If hat_share/unshare scalibility becomes a performance
10779 	 * problem then we may need to re-think ism mapping list locking.
10780 	 */
10781 	ASSERT(ism_sfmmup->sfmmu_ismhat);
10782 	ASSERT(MUTEX_HELD(&ism_mlist_lock));
10783 	addr = addr - ISMID_STARTADDR;
10784 	for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
10785 
10786 		sfmmup = ment->iment_hat;
10787 
10788 		va = ment->iment_base_va;
10789 		va = (caddr_t)((uintptr_t)va  + (uintptr_t)addr);
10790 
10791 		/*
10792 		 * Flush TSB of ISM mappings.
10793 		 */
10794 		ttesz = get_hblk_ttesz(hmeblkp);
10795 		if (ttesz == TTE8K || ttesz == TTE4M) {
10796 			sfmmu_unload_tsb(sfmmup, va, ttesz);
10797 		} else {
10798 			caddr_t sva = va;
10799 			caddr_t eva;
10800 			ASSERT(addr == (caddr_t)get_hblk_base(hmeblkp));
10801 			eva = sva + get_hblk_span(hmeblkp);
10802 			sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);
10803 		}
10804 
10805 		cpuset = sfmmup->sfmmu_cpusran;
10806 		CPUSET_AND(cpuset, cpu_ready_set);
10807 		CPUSET_DEL(cpuset, CPU->cpu_id);
10808 
10809 		SFMMU_XCALL_STATS(sfmmup);
10810 		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
10811 		    (uint64_t)sfmmup);
10812 
10813 		vtag_flushpage(va, (uint64_t)sfmmup);
10814 
10815 		/*
10816 		 * Flush D$
10817 		 * When flushing D$ we must flush all
10818 		 * cpu's. See sfmmu_cache_flush().
10819 		 */
10820 		if (cache_flush_flag == CACHE_FLUSH) {
10821 			cpuset = cpu_ready_set;
10822 			CPUSET_DEL(cpuset, CPU->cpu_id);
10823 
10824 			SFMMU_XCALL_STATS(sfmmup);
10825 			vcolor = addr_to_vcolor(va);
10826 			xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
10827 			vac_flushpage(pfnum, vcolor);
10828 		}
10829 	}
10830 }
10831 
10832 /*
10833  * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of
10834  * a particular virtual address and ctx.  If noflush is set we do not
10835  * flush the TLB/TSB.  This function may or may not be called with the
10836  * HAT lock held.
10837  */
10838 static void
10839 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
10840 	pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
10841 	int hat_lock_held)
10842 {
10843 	int vcolor;
10844 	cpuset_t cpuset;
10845 	hatlock_t *hatlockp;
10846 
10847 	/*
10848 	 * There is no longer a need to protect against ctx being
10849 	 * stolen here since we don't store the ctx in the TSB anymore.
10850 	 */
10851 	vcolor = addr_to_vcolor(addr);
10852 
10853 	/*
10854 	 * We must hold the hat lock during the flush of TLB,
10855 	 * to avoid a race with sfmmu_invalidate_ctx(), where
10856 	 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
10857 	 * causing TLB demap routine to skip flush on that MMU.
10858 	 * If the context on a MMU has already been set to
10859 	 * INVALID_CONTEXT, we just get an extra flush on
10860 	 * that MMU.
10861 	 */
10862 	if (!hat_lock_held && !tlb_noflush)
10863 		hatlockp = sfmmu_hat_enter(sfmmup);
10864 
10865 	kpreempt_disable();
10866 	if (!tlb_noflush) {
10867 		/*
10868 		 * Flush the TSB and TLB.
10869 		 */
10870 		SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp);
10871 
10872 		cpuset = sfmmup->sfmmu_cpusran;
10873 		CPUSET_AND(cpuset, cpu_ready_set);
10874 		CPUSET_DEL(cpuset, CPU->cpu_id);
10875 
10876 		SFMMU_XCALL_STATS(sfmmup);
10877 
10878 		xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
10879 		    (uint64_t)sfmmup);
10880 
10881 		vtag_flushpage(addr, (uint64_t)sfmmup);
10882 
10883 	}
10884 
10885 	if (!hat_lock_held && !tlb_noflush)
10886 		sfmmu_hat_exit(hatlockp);
10887 
10888 
10889 	/*
10890 	 * Flush the D$
10891 	 *
10892 	 * Even if the ctx is stolen, we need to flush the
10893 	 * cache. Our ctx stealer only flushes the TLBs.
10894 	 */
10895 	if (cache_flush_flag == CACHE_FLUSH) {
10896 		if (cpu_flag & FLUSH_ALL_CPUS) {
10897 			cpuset = cpu_ready_set;
10898 		} else {
10899 			cpuset = sfmmup->sfmmu_cpusran;
10900 			CPUSET_AND(cpuset, cpu_ready_set);
10901 		}
10902 		CPUSET_DEL(cpuset, CPU->cpu_id);
10903 		SFMMU_XCALL_STATS(sfmmup);
10904 		xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
10905 		vac_flushpage(pfnum, vcolor);
10906 	}
10907 	kpreempt_enable();
10908 }
10909 
10910 /*
10911  * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
10912  * address and ctx.  If noflush is set we do not currently do anything.
10913  * This function may or may not be called with the HAT lock held.
10914  */
10915 static void
10916 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
10917 	int tlb_noflush, int hat_lock_held)
10918 {
10919 	cpuset_t cpuset;
10920 	hatlock_t *hatlockp;
10921 
10922 	/*
10923 	 * If the process is exiting we have nothing to do.
10924 	 */
10925 	if (tlb_noflush)
10926 		return;
10927 
10928 	/*
10929 	 * Flush TSB.
10930 	 */
10931 	if (!hat_lock_held)
10932 		hatlockp = sfmmu_hat_enter(sfmmup);
10933 	SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp);
10934 
10935 	kpreempt_disable();
10936 
10937 	cpuset = sfmmup->sfmmu_cpusran;
10938 	CPUSET_AND(cpuset, cpu_ready_set);
10939 	CPUSET_DEL(cpuset, CPU->cpu_id);
10940 
10941 	SFMMU_XCALL_STATS(sfmmup);
10942 	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
10943 
10944 	vtag_flushpage(addr, (uint64_t)sfmmup);
10945 
10946 	if (!hat_lock_held)
10947 		sfmmu_hat_exit(hatlockp);
10948 
10949 	kpreempt_enable();
10950 
10951 }
10952 
10953 /*
10954  * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
10955  * call handler that can flush a range of pages to save on xcalls.
10956  */
10957 static int sfmmu_xcall_save;
10958 
10959 static void
10960 sfmmu_tlb_range_demap(demap_range_t *dmrp)
10961 {
10962 	sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
10963 	hatlock_t *hatlockp;
10964 	cpuset_t cpuset;
10965 	uint64_t sfmmu_pgcnt;
10966 	pgcnt_t pgcnt = 0;
10967 	int pgunload = 0;
10968 	int dirtypg = 0;
10969 	caddr_t addr = dmrp->dmr_addr;
10970 	caddr_t eaddr;
10971 	uint64_t bitvec = dmrp->dmr_bitvec;
10972 
10973 	ASSERT(bitvec & 1);
10974 
10975 	/*
10976 	 * Flush TSB and calculate number of pages to flush.
10977 	 */
10978 	while (bitvec != 0) {
10979 		dirtypg = 0;
10980 		/*
10981 		 * Find the first page to flush and then count how many
10982 		 * pages there are after it that also need to be flushed.
10983 		 * This way the number of TSB flushes is minimized.
10984 		 */
10985 		while ((bitvec & 1) == 0) {
10986 			pgcnt++;
10987 			addr += MMU_PAGESIZE;
10988 			bitvec >>= 1;
10989 		}
10990 		while (bitvec & 1) {
10991 			dirtypg++;
10992 			bitvec >>= 1;
10993 		}
10994 		eaddr = addr + ptob(dirtypg);
10995 		hatlockp = sfmmu_hat_enter(sfmmup);
10996 		sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
10997 		sfmmu_hat_exit(hatlockp);
10998 		pgunload += dirtypg;
10999 		addr = eaddr;
11000 		pgcnt += dirtypg;
11001 	}
11002 
11003 	ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
11004 	if (sfmmup->sfmmu_free == 0) {
11005 		addr = dmrp->dmr_addr;
11006 		bitvec = dmrp->dmr_bitvec;
11007 
11008 		/*
11009 		 * make sure it has SFMMU_PGCNT_SHIFT bits only,
11010 		 * as it will be used to pack argument for xt_some
11011 		 */
11012 		ASSERT((pgcnt > 0) &&
11013 		    (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
11014 
11015 		/*
11016 		 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in
11017 		 * the low 6 bits of sfmmup. This is doable since pgcnt
11018 		 * always >= 1.
11019 		 */
11020 		ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
11021 		sfmmu_pgcnt = (uint64_t)sfmmup |
11022 		    ((pgcnt - 1) & SFMMU_PGCNT_MASK);
11023 
11024 		/*
11025 		 * We must hold the hat lock during the flush of TLB,
11026 		 * to avoid a race with sfmmu_invalidate_ctx(), where
11027 		 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
11028 		 * causing TLB demap routine to skip flush on that MMU.
11029 		 * If the context on a MMU has already been set to
11030 		 * INVALID_CONTEXT, we just get an extra flush on
11031 		 * that MMU.
11032 		 */
11033 		hatlockp = sfmmu_hat_enter(sfmmup);
11034 		kpreempt_disable();
11035 
11036 		cpuset = sfmmup->sfmmu_cpusran;
11037 		CPUSET_AND(cpuset, cpu_ready_set);
11038 		CPUSET_DEL(cpuset, CPU->cpu_id);
11039 
11040 		SFMMU_XCALL_STATS(sfmmup);
11041 		xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
11042 		    sfmmu_pgcnt);
11043 
11044 		for (; bitvec != 0; bitvec >>= 1) {
11045 			if (bitvec & 1)
11046 				vtag_flushpage(addr, (uint64_t)sfmmup);
11047 			addr += MMU_PAGESIZE;
11048 		}
11049 		kpreempt_enable();
11050 		sfmmu_hat_exit(hatlockp);
11051 
11052 		sfmmu_xcall_save += (pgunload-1);
11053 	}
11054 	dmrp->dmr_bitvec = 0;
11055 }
11056 
11057 /*
11058  * In cases where we need to synchronize with TLB/TSB miss trap
11059  * handlers, _and_ need to flush the TLB, it's a lot easier to
11060  * throw away the context from the process than to do a
11061  * special song and dance to keep things consistent for the
11062  * handlers.
11063  *
11064  * Since the process suddenly ends up without a context and our caller
11065  * holds the hat lock, threads that fault after this function is called
11066  * will pile up on the lock.  We can then do whatever we need to
11067  * atomically from the context of the caller.  The first blocked thread
11068  * to resume executing will get the process a new context, and the
11069  * process will resume executing.
11070  *
11071  * One added advantage of this approach is that on MMUs that
11072  * support a "flush all" operation, we will delay the flush until
11073  * cnum wrap-around, and then flush the TLB one time.  This
11074  * is rather rare, so it's a lot less expensive than making 8000
11075  * x-calls to flush the TLB 8000 times.
11076  *
11077  * A per-process (PP) lock is used to synchronize ctx allocations in
11078  * resume() and ctx invalidations here.
11079  */
11080 static void
11081 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
11082 {
11083 	cpuset_t cpuset;
11084 	int cnum, currcnum;
11085 	mmu_ctx_t *mmu_ctxp;
11086 	int i;
11087 	uint_t pstate_save;
11088 
11089 	SFMMU_STAT(sf_ctx_inv);
11090 
11091 	ASSERT(sfmmu_hat_lock_held(sfmmup));
11092 	ASSERT(sfmmup != ksfmmup);
11093 
11094 	kpreempt_disable();
11095 
11096 	mmu_ctxp = CPU_MMU_CTXP(CPU);
11097 	ASSERT(mmu_ctxp);
11098 	ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
11099 	ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
11100 
11101 	currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
11102 
11103 	pstate_save = sfmmu_disable_intrs();
11104 
11105 	lock_set(&sfmmup->sfmmu_ctx_lock);	/* acquire PP lock */
11106 	/* set HAT cnum invalid across all context domains. */
11107 	for (i = 0; i < max_mmu_ctxdoms; i++) {
11108 
11109 		cnum = 	sfmmup->sfmmu_ctxs[i].cnum;
11110 		if (cnum == INVALID_CONTEXT) {
11111 			continue;
11112 		}
11113 
11114 		sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
11115 	}
11116 	membar_enter();	/* make sure globally visible to all CPUs */
11117 	lock_clear(&sfmmup->sfmmu_ctx_lock);	/* release PP lock */
11118 
11119 	sfmmu_enable_intrs(pstate_save);
11120 
11121 	cpuset = sfmmup->sfmmu_cpusran;
11122 	CPUSET_DEL(cpuset, CPU->cpu_id);
11123 	CPUSET_AND(cpuset, cpu_ready_set);
11124 	if (!CPUSET_ISNULL(cpuset)) {
11125 		SFMMU_XCALL_STATS(sfmmup);
11126 		xt_some(cpuset, sfmmu_raise_tsb_exception,
11127 		    (uint64_t)sfmmup, INVALID_CONTEXT);
11128 		xt_sync(cpuset);
11129 		SFMMU_STAT(sf_tsb_raise_exception);
11130 		SFMMU_MMU_STAT(mmu_tsb_raise_exception);
11131 	}
11132 
11133 	/*
11134 	 * If the hat to-be-invalidated is the same as the current
11135 	 * process on local CPU we need to invalidate
11136 	 * this CPU context as well.
11137 	 */
11138 	if ((sfmmu_getctx_sec() == currcnum) &&
11139 	    (currcnum != INVALID_CONTEXT)) {
11140 		sfmmu_setctx_sec(INVALID_CONTEXT);
11141 		sfmmu_clear_utsbinfo();
11142 	}
11143 
11144 	kpreempt_enable();
11145 
11146 	/*
11147 	 * we hold the hat lock, so nobody should allocate a context
11148 	 * for us yet
11149 	 */
11150 	ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
11151 }
11152 
11153 /*
11154  * We need to flush the cache in all cpus.  It is possible that
11155  * a process referenced a page as cacheable but has sinced exited
11156  * and cleared the mapping list.  We still to flush it but have no
11157  * state so all cpus is the only alternative.
11158  */
11159 void
11160 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
11161 {
11162 	cpuset_t cpuset;
11163 
11164 	kpreempt_disable();
11165 	cpuset = cpu_ready_set;
11166 	CPUSET_DEL(cpuset, CPU->cpu_id);
11167 	SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
11168 	xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
11169 	xt_sync(cpuset);
11170 	vac_flushpage(pfnum, vcolor);
11171 	kpreempt_enable();
11172 }
11173 
11174 void
11175 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
11176 {
11177 	cpuset_t cpuset;
11178 
11179 	ASSERT(vcolor >= 0);
11180 
11181 	kpreempt_disable();
11182 	cpuset = cpu_ready_set;
11183 	CPUSET_DEL(cpuset, CPU->cpu_id);
11184 	SFMMU_XCALL_STATS(NULL);	/* account to any ctx */
11185 	xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
11186 	xt_sync(cpuset);
11187 	vac_flushcolor(vcolor, pfnum);
11188 	kpreempt_enable();
11189 }
11190 
11191 /*
11192  * We need to prevent processes from accessing the TSB using a cached physical
11193  * address.  It's alright if they try to access the TSB via virtual address
11194  * since they will just fault on that virtual address once the mapping has
11195  * been suspended.
11196  */
11197 #pragma weak sendmondo_in_recover
11198 
11199 /* ARGSUSED */
11200 static int
11201 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
11202 {
11203 	hatlock_t *hatlockp;
11204 	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
11205 	sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
11206 	extern uint32_t sendmondo_in_recover;
11207 
11208 	if (flags != HAT_PRESUSPEND)
11209 		return (0);
11210 
11211 	hatlockp = sfmmu_hat_enter(sfmmup);
11212 
11213 	tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
11214 
11215 	/*
11216 	 * For Cheetah+ Erratum 25:
11217 	 * Wait for any active recovery to finish.  We can't risk
11218 	 * relocating the TSB of the thread running mondo_recover_proc()
11219 	 * since, if we did that, we would deadlock.  The scenario we are
11220 	 * trying to avoid is as follows:
11221 	 *
11222 	 * THIS CPU			RECOVER CPU
11223 	 * --------			-----------
11224 	 *				Begins recovery, walking through TSB
11225 	 * hat_pagesuspend() TSB TTE
11226 	 *				TLB miss on TSB TTE, spins at TL1
11227 	 * xt_sync()
11228 	 *	send_mondo_timeout()
11229 	 *	mondo_recover_proc()
11230 	 *	((deadlocked))
11231 	 *
11232 	 * The second half of the workaround is that mondo_recover_proc()
11233 	 * checks to see if the tsb_info has the RELOC flag set, and if it
11234 	 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
11235 	 * and hence avoiding the TLB miss that could result in a deadlock.
11236 	 */
11237 	if (&sendmondo_in_recover) {
11238 		membar_enter();	/* make sure RELOC flag visible */
11239 		while (sendmondo_in_recover) {
11240 			drv_usecwait(1);
11241 			membar_consumer();
11242 		}
11243 	}
11244 
11245 	sfmmu_invalidate_ctx(sfmmup);
11246 	sfmmu_hat_exit(hatlockp);
11247 
11248 	return (0);
11249 }
11250 
11251 /* ARGSUSED */
11252 static int
11253 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
11254 	void *tsbinfo, pfn_t newpfn)
11255 {
11256 	hatlock_t *hatlockp;
11257 	struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
11258 	sfmmu_t	*sfmmup = tsbinfop->tsb_sfmmu;
11259 
11260 	if (flags != HAT_POSTUNSUSPEND)
11261 		return (0);
11262 
11263 	hatlockp = sfmmu_hat_enter(sfmmup);
11264 
11265 	SFMMU_STAT(sf_tsb_reloc);
11266 
11267 	/*
11268 	 * The process may have swapped out while we were relocating one
11269 	 * of its TSBs.  If so, don't bother doing the setup since the
11270 	 * process can't be using the memory anymore.
11271 	 */
11272 	if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
11273 		ASSERT(va == tsbinfop->tsb_va);
11274 		sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
11275 		sfmmu_setup_tsbinfo(sfmmup);
11276 
11277 		if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
11278 			sfmmu_inv_tsb(tsbinfop->tsb_va,
11279 			    TSB_BYTES(tsbinfop->tsb_szc));
11280 			tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
11281 		}
11282 	}
11283 
11284 	membar_exit();
11285 	tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
11286 	cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11287 
11288 	sfmmu_hat_exit(hatlockp);
11289 
11290 	return (0);
11291 }
11292 
11293 /*
11294  * Allocate and initialize a tsb_info structure.  Note that we may or may not
11295  * allocate a TSB here, depending on the flags passed in.
11296  */
11297 static int
11298 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
11299 	uint_t flags, sfmmu_t *sfmmup)
11300 {
11301 	int err;
11302 
11303 	*tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
11304 	    sfmmu_tsbinfo_cache, KM_SLEEP);
11305 
11306 	if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
11307 	    tsb_szc, flags, sfmmup)) != 0) {
11308 		kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
11309 		SFMMU_STAT(sf_tsb_allocfail);
11310 		*tsbinfopp = NULL;
11311 		return (err);
11312 	}
11313 	SFMMU_STAT(sf_tsb_alloc);
11314 
11315 	/*
11316 	 * Bump the TSB size counters for this TSB size.
11317 	 */
11318 	(*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
11319 	return (0);
11320 }
11321 
11322 static void
11323 sfmmu_tsb_free(struct tsb_info *tsbinfo)
11324 {
11325 	caddr_t tsbva = tsbinfo->tsb_va;
11326 	uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
11327 	struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
11328 	vmem_t	*vmp = tsbinfo->tsb_vmp;
11329 
11330 	/*
11331 	 * If we allocated this TSB from relocatable kernel memory, then we
11332 	 * need to uninstall the callback handler.
11333 	 */
11334 	if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
11335 		uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
11336 		caddr_t slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
11337 		page_t **ppl;
11338 		int ret;
11339 
11340 		ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
11341 		ASSERT(ret == 0);
11342 		hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
11343 		    0, NULL);
11344 		as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
11345 	}
11346 
11347 	if (kmem_cachep != NULL) {
11348 		kmem_cache_free(kmem_cachep, tsbva);
11349 	} else {
11350 		vmem_xfree(vmp, (void *)tsbva, tsb_size);
11351 	}
11352 	tsbinfo->tsb_va = (caddr_t)0xbad00bad;
11353 	atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
11354 }
11355 
11356 static void
11357 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
11358 {
11359 	if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
11360 		sfmmu_tsb_free(tsbinfo);
11361 	}
11362 	kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
11363 
11364 }
11365 
11366 /*
11367  * Setup all the references to physical memory for this tsbinfo.
11368  * The underlying page(s) must be locked.
11369  */
11370 static void
11371 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
11372 {
11373 	ASSERT(pfn != PFN_INVALID);
11374 	ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
11375 
11376 #ifndef sun4v
11377 	if (tsbinfo->tsb_szc == 0) {
11378 		sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
11379 		    PROT_WRITE|PROT_READ, TTE8K);
11380 	} else {
11381 		/*
11382 		 * Round down PA and use a large mapping; the handlers will
11383 		 * compute the TSB pointer at the correct offset into the
11384 		 * big virtual page.  NOTE: this assumes all TSBs larger
11385 		 * than 8K must come from physically contiguous slabs of
11386 		 * size tsb_slab_size.
11387 		 */
11388 		sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
11389 		    PROT_WRITE|PROT_READ, tsb_slab_ttesz);
11390 	}
11391 	tsbinfo->tsb_pa = ptob(pfn);
11392 
11393 	TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
11394 	TTE_SET_MOD(&tsbinfo->tsb_tte);    /* enable writes */
11395 
11396 	ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
11397 	ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
11398 #else /* sun4v */
11399 	tsbinfo->tsb_pa = ptob(pfn);
11400 #endif /* sun4v */
11401 }
11402 
11403 
11404 /*
11405  * Returns zero on success, ENOMEM if over the high water mark,
11406  * or EAGAIN if the caller needs to retry with a smaller TSB
11407  * size (or specify TSB_FORCEALLOC if the allocation can't fail).
11408  *
11409  * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
11410  * is specified and the TSB requested is PAGESIZE, though it
11411  * may sleep waiting for memory if sufficient memory is not
11412  * available.
11413  */
11414 static int
11415 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
11416     int tsbcode, uint_t flags, sfmmu_t *sfmmup)
11417 {
11418 	caddr_t vaddr = NULL;
11419 	caddr_t slab_vaddr;
11420 	uintptr_t slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
11421 	int tsbbytes = TSB_BYTES(tsbcode);
11422 	int lowmem = 0;
11423 	struct kmem_cache *kmem_cachep = NULL;
11424 	vmem_t *vmp = NULL;
11425 	lgrp_id_t lgrpid = LGRP_NONE;
11426 	pfn_t pfn;
11427 	uint_t cbflags = HAC_SLEEP;
11428 	page_t **pplist;
11429 	int ret;
11430 
11431 	if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
11432 		flags |= TSB_ALLOC;
11433 
11434 	ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
11435 
11436 	tsbinfo->tsb_sfmmu = sfmmup;
11437 
11438 	/*
11439 	 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
11440 	 * return.
11441 	 */
11442 	if ((flags & TSB_ALLOC) == 0) {
11443 		tsbinfo->tsb_szc = tsbcode;
11444 		tsbinfo->tsb_ttesz_mask = tteszmask;
11445 		tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
11446 		tsbinfo->tsb_pa = -1;
11447 		tsbinfo->tsb_tte.ll = 0;
11448 		tsbinfo->tsb_next = NULL;
11449 		tsbinfo->tsb_flags = TSB_SWAPPED;
11450 		tsbinfo->tsb_cache = NULL;
11451 		tsbinfo->tsb_vmp = NULL;
11452 		return (0);
11453 	}
11454 
11455 #ifdef DEBUG
11456 	/*
11457 	 * For debugging:
11458 	 * Randomly force allocation failures every tsb_alloc_mtbf
11459 	 * tries if TSB_FORCEALLOC is not specified.  This will
11460 	 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
11461 	 * it is even, to allow testing of both failure paths...
11462 	 */
11463 	if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
11464 	    (tsb_alloc_count++ == tsb_alloc_mtbf)) {
11465 		tsb_alloc_count = 0;
11466 		tsb_alloc_fail_mtbf++;
11467 		return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
11468 	}
11469 #endif	/* DEBUG */
11470 
11471 	/*
11472 	 * Enforce high water mark if we are not doing a forced allocation
11473 	 * and are not shrinking a process' TSB.
11474 	 */
11475 	if ((flags & TSB_SHRINK) == 0 &&
11476 	    (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
11477 		if ((flags & TSB_FORCEALLOC) == 0)
11478 			return (ENOMEM);
11479 		lowmem = 1;
11480 	}
11481 
11482 	/*
11483 	 * Allocate from the correct location based upon the size of the TSB
11484 	 * compared to the base page size, and what memory conditions dictate.
11485 	 * Note we always do nonblocking allocations from the TSB arena since
11486 	 * we don't want memory fragmentation to cause processes to block
11487 	 * indefinitely waiting for memory; until the kernel algorithms that
11488 	 * coalesce large pages are improved this is our best option.
11489 	 *
11490 	 * Algorithm:
11491 	 *	If allocating a "large" TSB (>8K), allocate from the
11492 	 *		appropriate kmem_tsb_default_arena vmem arena
11493 	 *	else if low on memory or the TSB_FORCEALLOC flag is set or
11494 	 *	tsb_forceheap is set
11495 	 *		Allocate from kernel heap via sfmmu_tsb8k_cache with
11496 	 *		KM_SLEEP (never fails)
11497 	 *	else
11498 	 *		Allocate from appropriate sfmmu_tsb_cache with
11499 	 *		KM_NOSLEEP
11500 	 *	endif
11501 	 */
11502 	if (tsb_lgrp_affinity)
11503 		lgrpid = lgrp_home_id(curthread);
11504 	if (lgrpid == LGRP_NONE)
11505 		lgrpid = 0;	/* use lgrp of boot CPU */
11506 
11507 	if (tsbbytes > MMU_PAGESIZE) {
11508 		vmp = kmem_tsb_default_arena[lgrpid];
11509 		vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 0, 0,
11510 		    NULL, NULL, VM_NOSLEEP);
11511 #ifdef	DEBUG
11512 	} else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
11513 #else	/* !DEBUG */
11514 	} else if (lowmem || (flags & TSB_FORCEALLOC)) {
11515 #endif	/* DEBUG */
11516 		kmem_cachep = sfmmu_tsb8k_cache;
11517 		vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
11518 		ASSERT(vaddr != NULL);
11519 	} else {
11520 		kmem_cachep = sfmmu_tsb_cache[lgrpid];
11521 		vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
11522 	}
11523 
11524 	tsbinfo->tsb_cache = kmem_cachep;
11525 	tsbinfo->tsb_vmp = vmp;
11526 
11527 	if (vaddr == NULL) {
11528 		return (EAGAIN);
11529 	}
11530 
11531 	atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
11532 	kmem_cachep = tsbinfo->tsb_cache;
11533 
11534 	/*
11535 	 * If we are allocating from outside the cage, then we need to
11536 	 * register a relocation callback handler.  Note that for now
11537 	 * since pseudo mappings always hang off of the slab's root page,
11538 	 * we need only lock the first 8K of the TSB slab.  This is a bit
11539 	 * hacky but it is good for performance.
11540 	 */
11541 	if (kmem_cachep != sfmmu_tsb8k_cache) {
11542 		slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
11543 		ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
11544 		ASSERT(ret == 0);
11545 		ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
11546 		    cbflags, (void *)tsbinfo, &pfn, NULL);
11547 
11548 		/*
11549 		 * Need to free up resources if we could not successfully
11550 		 * add the callback function and return an error condition.
11551 		 */
11552 		if (ret != 0) {
11553 			if (kmem_cachep) {
11554 				kmem_cache_free(kmem_cachep, vaddr);
11555 			} else {
11556 				vmem_xfree(vmp, (void *)vaddr, tsbbytes);
11557 			}
11558 			as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
11559 			    S_WRITE);
11560 			return (EAGAIN);
11561 		}
11562 	} else {
11563 		/*
11564 		 * Since allocation of 8K TSBs from heap is rare and occurs
11565 		 * during memory pressure we allocate them from permanent
11566 		 * memory rather than using callbacks to get the PFN.
11567 		 */
11568 		pfn = hat_getpfnum(kas.a_hat, vaddr);
11569 	}
11570 
11571 	tsbinfo->tsb_va = vaddr;
11572 	tsbinfo->tsb_szc = tsbcode;
11573 	tsbinfo->tsb_ttesz_mask = tteszmask;
11574 	tsbinfo->tsb_next = NULL;
11575 	tsbinfo->tsb_flags = 0;
11576 
11577 	sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
11578 
11579 	if (kmem_cachep != sfmmu_tsb8k_cache) {
11580 		as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
11581 	}
11582 
11583 	sfmmu_inv_tsb(vaddr, tsbbytes);
11584 	return (0);
11585 }
11586 
11587 /*
11588  * Initialize per cpu tsb and per cpu tsbmiss_area
11589  */
11590 void
11591 sfmmu_init_tsbs(void)
11592 {
11593 	int i;
11594 	struct tsbmiss	*tsbmissp;
11595 	struct kpmtsbm	*kpmtsbmp;
11596 #ifndef sun4v
11597 	extern int	dcache_line_mask;
11598 #endif /* sun4v */
11599 	extern uint_t	vac_colors;
11600 
11601 	/*
11602 	 * Init. tsb miss area.
11603 	 */
11604 	tsbmissp = tsbmiss_area;
11605 
11606 	for (i = 0; i < NCPU; tsbmissp++, i++) {
11607 		/*
11608 		 * initialize the tsbmiss area.
11609 		 * Do this for all possible CPUs as some may be added
11610 		 * while the system is running. There is no cost to this.
11611 		 */
11612 		tsbmissp->ksfmmup = ksfmmup;
11613 #ifndef sun4v
11614 		tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
11615 #endif /* sun4v */
11616 		tsbmissp->khashstart =
11617 		    (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
11618 		tsbmissp->uhashstart =
11619 		    (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
11620 		tsbmissp->khashsz = khmehash_num;
11621 		tsbmissp->uhashsz = uhmehash_num;
11622 	}
11623 
11624 	sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
11625 	    sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
11626 
11627 	if (kpm_enable == 0)
11628 		return;
11629 
11630 	/* -- Begin KPM specific init -- */
11631 
11632 	if (kpm_smallpages) {
11633 		/*
11634 		 * If we're using base pagesize pages for seg_kpm
11635 		 * mappings, we use the kernel TSB since we can't afford
11636 		 * to allocate a second huge TSB for these mappings.
11637 		 */
11638 		kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
11639 		kpm_tsbsz = ktsb_szcode;
11640 		kpmsm_tsbbase = kpm_tsbbase;
11641 		kpmsm_tsbsz = kpm_tsbsz;
11642 	} else {
11643 		/*
11644 		 * In VAC conflict case, just put the entries in the
11645 		 * kernel 8K indexed TSB for now so we can find them.
11646 		 * This could really be changed in the future if we feel
11647 		 * the need...
11648 		 */
11649 		kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
11650 		kpmsm_tsbsz = ktsb_szcode;
11651 		kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
11652 		kpm_tsbsz = ktsb4m_szcode;
11653 	}
11654 
11655 	kpmtsbmp = kpmtsbm_area;
11656 	for (i = 0; i < NCPU; kpmtsbmp++, i++) {
11657 		/*
11658 		 * Initialize the kpmtsbm area.
11659 		 * Do this for all possible CPUs as some may be added
11660 		 * while the system is running. There is no cost to this.
11661 		 */
11662 		kpmtsbmp->vbase = kpm_vbase;
11663 		kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
11664 		kpmtsbmp->sz_shift = kpm_size_shift;
11665 		kpmtsbmp->kpmp_shift = kpmp_shift;
11666 		kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
11667 		if (kpm_smallpages == 0) {
11668 			kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
11669 			kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
11670 		} else {
11671 			kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
11672 			kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
11673 		}
11674 		kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
11675 		kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
11676 #ifdef	DEBUG
11677 		kpmtsbmp->flags |= (kpm_tsbmtl) ?  KPMTSBM_TLTSBM_FLAG : 0;
11678 #endif	/* DEBUG */
11679 		if (ktsb_phys)
11680 			kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
11681 	}
11682 
11683 	/* -- End KPM specific init -- */
11684 }
11685 
11686 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */
11687 struct tsb_info ktsb_info[2];
11688 
11689 /*
11690  * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
11691  */
11692 void
11693 sfmmu_init_ktsbinfo()
11694 {
11695 	ASSERT(ksfmmup != NULL);
11696 	ASSERT(ksfmmup->sfmmu_tsb == NULL);
11697 	/*
11698 	 * Allocate tsbinfos for kernel and copy in data
11699 	 * to make debug easier and sun4v setup easier.
11700 	 */
11701 	ktsb_info[0].tsb_sfmmu = ksfmmup;
11702 	ktsb_info[0].tsb_szc = ktsb_szcode;
11703 	ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
11704 	ktsb_info[0].tsb_va = ktsb_base;
11705 	ktsb_info[0].tsb_pa = ktsb_pbase;
11706 	ktsb_info[0].tsb_flags = 0;
11707 	ktsb_info[0].tsb_tte.ll = 0;
11708 	ktsb_info[0].tsb_cache = NULL;
11709 
11710 	ktsb_info[1].tsb_sfmmu = ksfmmup;
11711 	ktsb_info[1].tsb_szc = ktsb4m_szcode;
11712 	ktsb_info[1].tsb_ttesz_mask = TSB4M;
11713 	ktsb_info[1].tsb_va = ktsb4m_base;
11714 	ktsb_info[1].tsb_pa = ktsb4m_pbase;
11715 	ktsb_info[1].tsb_flags = 0;
11716 	ktsb_info[1].tsb_tte.ll = 0;
11717 	ktsb_info[1].tsb_cache = NULL;
11718 
11719 	/* Link them into ksfmmup. */
11720 	ktsb_info[0].tsb_next = &ktsb_info[1];
11721 	ktsb_info[1].tsb_next = NULL;
11722 	ksfmmup->sfmmu_tsb = &ktsb_info[0];
11723 
11724 	sfmmu_setup_tsbinfo(ksfmmup);
11725 }
11726 
11727 /*
11728  * Cache the last value returned from va_to_pa().  If the VA specified
11729  * in the current call to cached_va_to_pa() maps to the same Page (as the
11730  * previous call to cached_va_to_pa()), then compute the PA using
11731  * cached info, else call va_to_pa().
11732  *
11733  * Note: this function is neither MT-safe nor consistent in the presence
11734  * of multiple, interleaved threads.  This function was created to enable
11735  * an optimization used during boot (at a point when there's only one thread
11736  * executing on the "boot CPU", and before startup_vm() has been called).
11737  */
11738 static uint64_t
11739 cached_va_to_pa(void *vaddr)
11740 {
11741 	static uint64_t prev_vaddr_base = 0;
11742 	static uint64_t prev_pfn = 0;
11743 
11744 	if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
11745 		return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
11746 	} else {
11747 		uint64_t pa = va_to_pa(vaddr);
11748 
11749 		if (pa != ((uint64_t)-1)) {
11750 			/*
11751 			 * Computed physical address is valid.  Cache its
11752 			 * related info for the next cached_va_to_pa() call.
11753 			 */
11754 			prev_pfn = pa & MMU_PAGEMASK;
11755 			prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
11756 		}
11757 
11758 		return (pa);
11759 	}
11760 }
11761 
11762 /*
11763  * Carve up our nucleus hblk region.  We may allocate more hblks than
11764  * asked due to rounding errors but we are guaranteed to have at least
11765  * enough space to allocate the requested number of hblk8's and hblk1's.
11766  */
11767 void
11768 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
11769 {
11770 	struct hme_blk *hmeblkp;
11771 	size_t hme8blk_sz, hme1blk_sz;
11772 	size_t i;
11773 	size_t hblk8_bound;
11774 	ulong_t j = 0, k = 0;
11775 
11776 	ASSERT(addr != NULL && size != 0);
11777 
11778 	/* Need to use proper structure alignment */
11779 	hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
11780 	hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
11781 
11782 	nucleus_hblk8.list = (void *)addr;
11783 	nucleus_hblk8.index = 0;
11784 
11785 	/*
11786 	 * Use as much memory as possible for hblk8's since we
11787 	 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
11788 	 * We need to hold back enough space for the hblk1's which
11789 	 * we'll allocate next.
11790 	 */
11791 	hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
11792 	for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
11793 		hmeblkp = (struct hme_blk *)addr;
11794 		addr += hme8blk_sz;
11795 		hmeblkp->hblk_nuc_bit = 1;
11796 		hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
11797 	}
11798 	nucleus_hblk8.len = j;
11799 	ASSERT(j >= nhblk8);
11800 	SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
11801 
11802 	nucleus_hblk1.list = (void *)addr;
11803 	nucleus_hblk1.index = 0;
11804 	for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
11805 		hmeblkp = (struct hme_blk *)addr;
11806 		addr += hme1blk_sz;
11807 		hmeblkp->hblk_nuc_bit = 1;
11808 		hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
11809 	}
11810 	ASSERT(k >= nhblk1);
11811 	nucleus_hblk1.len = k;
11812 	SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
11813 }
11814 
11815 /*
11816  * This function is currently not supported on this platform. For what
11817  * it's supposed to do, see hat.c and hat_srmmu.c
11818  */
11819 /* ARGSUSED */
11820 faultcode_t
11821 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
11822     uint_t flags)
11823 {
11824 	ASSERT(hat->sfmmu_xhat_provider == NULL);
11825 	return (FC_NOSUPPORT);
11826 }
11827 
11828 /*
11829  * Searchs the mapping list of the page for a mapping of the same size. If not
11830  * found the corresponding bit is cleared in the p_index field. When large
11831  * pages are more prevalent in the system, we can maintain the mapping list
11832  * in order and we don't have to traverse the list each time. Just check the
11833  * next and prev entries, and if both are of different size, we clear the bit.
11834  */
11835 static void
11836 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
11837 {
11838 	struct sf_hment *sfhmep;
11839 	struct hme_blk *hmeblkp;
11840 	int	index;
11841 	pgcnt_t	npgs;
11842 
11843 	ASSERT(ttesz > TTE8K);
11844 
11845 	ASSERT(sfmmu_mlist_held(pp));
11846 
11847 	ASSERT(PP_ISMAPPED_LARGE(pp));
11848 
11849 	/*
11850 	 * Traverse mapping list looking for another mapping of same size.
11851 	 * since we only want to clear index field if all mappings of
11852 	 * that size are gone.
11853 	 */
11854 
11855 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
11856 		hmeblkp = sfmmu_hmetohblk(sfhmep);
11857 		if (hmeblkp->hblk_xhat_bit)
11858 			continue;
11859 		if (hme_size(sfhmep) == ttesz) {
11860 			/*
11861 			 * another mapping of the same size. don't clear index.
11862 			 */
11863 			return;
11864 		}
11865 	}
11866 
11867 	/*
11868 	 * Clear the p_index bit for large page.
11869 	 */
11870 	index = PAGESZ_TO_INDEX(ttesz);
11871 	npgs = TTEPAGES(ttesz);
11872 	while (npgs-- > 0) {
11873 		ASSERT(pp->p_index & index);
11874 		pp->p_index &= ~index;
11875 		pp = PP_PAGENEXT(pp);
11876 	}
11877 }
11878 
11879 /*
11880  * return supported features
11881  */
11882 /* ARGSUSED */
11883 int
11884 hat_supported(enum hat_features feature, void *arg)
11885 {
11886 	switch (feature) {
11887 	case    HAT_SHARED_PT:
11888 	case	HAT_DYNAMIC_ISM_UNMAP:
11889 	case	HAT_VMODSORT:
11890 		return (1);
11891 	default:
11892 		return (0);
11893 	}
11894 }
11895 
11896 void
11897 hat_enter(struct hat *hat)
11898 {
11899 	hatlock_t	*hatlockp;
11900 
11901 	if (hat != ksfmmup) {
11902 		hatlockp = TSB_HASH(hat);
11903 		mutex_enter(HATLOCK_MUTEXP(hatlockp));
11904 	}
11905 }
11906 
11907 void
11908 hat_exit(struct hat *hat)
11909 {
11910 	hatlock_t	*hatlockp;
11911 
11912 	if (hat != ksfmmup) {
11913 		hatlockp = TSB_HASH(hat);
11914 		mutex_exit(HATLOCK_MUTEXP(hatlockp));
11915 	}
11916 }
11917 
11918 /*ARGSUSED*/
11919 void
11920 hat_reserve(struct as *as, caddr_t addr, size_t len)
11921 {
11922 }
11923 
11924 static void
11925 hat_kstat_init(void)
11926 {
11927 	kstat_t *ksp;
11928 
11929 	ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
11930 		KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
11931 		KSTAT_FLAG_VIRTUAL);
11932 	if (ksp) {
11933 		ksp->ks_data = (void *) &sfmmu_global_stat;
11934 		kstat_install(ksp);
11935 	}
11936 	ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
11937 		KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
11938 		KSTAT_FLAG_VIRTUAL);
11939 	if (ksp) {
11940 		ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
11941 		kstat_install(ksp);
11942 	}
11943 	ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
11944 		KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
11945 		KSTAT_FLAG_WRITABLE);
11946 	if (ksp) {
11947 		ksp->ks_update = sfmmu_kstat_percpu_update;
11948 		kstat_install(ksp);
11949 	}
11950 }
11951 
11952 /* ARGSUSED */
11953 static int
11954 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
11955 {
11956 	struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
11957 	struct tsbmiss *tsbm = tsbmiss_area;
11958 	struct kpmtsbm *kpmtsbm = kpmtsbm_area;
11959 	int i;
11960 
11961 	ASSERT(cpu_kstat);
11962 	if (rw == KSTAT_READ) {
11963 		for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
11964 			cpu_kstat->sf_itlb_misses = tsbm->itlb_misses;
11965 			cpu_kstat->sf_dtlb_misses = tsbm->dtlb_misses;
11966 			cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
11967 				tsbm->uprot_traps;
11968 			cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
11969 				kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
11970 
11971 			if (tsbm->itlb_misses > 0 && tsbm->dtlb_misses > 0) {
11972 				cpu_kstat->sf_tsb_hits =
11973 				(tsbm->itlb_misses + tsbm->dtlb_misses) -
11974 				(tsbm->utsb_misses + tsbm->ktsb_misses +
11975 				kpmtsbm->kpm_tsb_misses);
11976 			} else {
11977 				cpu_kstat->sf_tsb_hits = 0;
11978 			}
11979 			cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
11980 			cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
11981 		}
11982 	} else {
11983 		/* KSTAT_WRITE is used to clear stats */
11984 		for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
11985 			tsbm->itlb_misses = 0;
11986 			tsbm->dtlb_misses = 0;
11987 			tsbm->utsb_misses = 0;
11988 			tsbm->ktsb_misses = 0;
11989 			tsbm->uprot_traps = 0;
11990 			tsbm->kprot_traps = 0;
11991 			kpmtsbm->kpm_dtlb_misses = 0;
11992 			kpmtsbm->kpm_tsb_misses = 0;
11993 		}
11994 	}
11995 	return (0);
11996 }
11997 
11998 #ifdef	DEBUG
11999 
12000 tte_t  *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
12001 
12002 /*
12003  * A tte checker. *orig_old is the value we read before cas.
12004  *	*cur is the value returned by cas.
12005  *	*new is the desired value when we do the cas.
12006  *
12007  *	*hmeblkp is currently unused.
12008  */
12009 
12010 /* ARGSUSED */
12011 void
12012 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
12013 {
12014 	pfn_t i, j, k;
12015 	int cpuid = CPU->cpu_id;
12016 
12017 	gorig[cpuid] = orig_old;
12018 	gcur[cpuid] = cur;
12019 	gnew[cpuid] = new;
12020 
12021 #ifdef lint
12022 	hmeblkp = hmeblkp;
12023 #endif
12024 
12025 	if (TTE_IS_VALID(orig_old)) {
12026 		if (TTE_IS_VALID(cur)) {
12027 			i = TTE_TO_TTEPFN(orig_old);
12028 			j = TTE_TO_TTEPFN(cur);
12029 			k = TTE_TO_TTEPFN(new);
12030 			if (i != j) {
12031 				/* remap error? */
12032 				panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
12033 			}
12034 
12035 			if (i != k) {
12036 				/* remap error? */
12037 				panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
12038 			}
12039 		} else {
12040 			if (TTE_IS_VALID(new)) {
12041 				panic("chk_tte: invalid cur? ");
12042 			}
12043 
12044 			i = TTE_TO_TTEPFN(orig_old);
12045 			k = TTE_TO_TTEPFN(new);
12046 			if (i != k) {
12047 				panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
12048 			}
12049 		}
12050 	} else {
12051 		if (TTE_IS_VALID(cur)) {
12052 			j = TTE_TO_TTEPFN(cur);
12053 			if (TTE_IS_VALID(new)) {
12054 				k = TTE_TO_TTEPFN(new);
12055 				if (j != k) {
12056 					panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
12057 					    j, k);
12058 				}
12059 			} else {
12060 				panic("chk_tte: why here?");
12061 			}
12062 		} else {
12063 			if (!TTE_IS_VALID(new)) {
12064 				panic("chk_tte: why here2 ?");
12065 			}
12066 		}
12067 	}
12068 }
12069 
12070 #endif /* DEBUG */
12071 
12072 extern void prefetch_tsbe_read(struct tsbe *);
12073 extern void prefetch_tsbe_write(struct tsbe *);
12074 
12075 
12076 /*
12077  * We want to prefetch 7 cache lines ahead for our read prefetch.  This gives
12078  * us optimal performance on Cheetah+.  You can only have 8 outstanding
12079  * prefetches at any one time, so we opted for 7 read prefetches and 1 write
12080  * prefetch to make the most utilization of the prefetch capability.
12081  */
12082 #define	TSBE_PREFETCH_STRIDE (7)
12083 
12084 void
12085 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
12086 {
12087 	int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
12088 	int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
12089 	int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
12090 	int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
12091 	struct tsbe *old;
12092 	struct tsbe *new;
12093 	struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
12094 	uint64_t va;
12095 	int new_offset;
12096 	int i;
12097 	int vpshift;
12098 	int last_prefetch;
12099 
12100 	if (old_bytes == new_bytes) {
12101 		bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
12102 	} else {
12103 
12104 		/*
12105 		 * A TSBE is 16 bytes which means there are four TSBE's per
12106 		 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
12107 		 */
12108 		old = (struct tsbe *)old_tsbinfo->tsb_va;
12109 		last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
12110 		for (i = 0; i < old_entries; i++, old++) {
12111 			if (((i & (4-1)) == 0) && (i < last_prefetch))
12112 				prefetch_tsbe_read(old);
12113 			if (!old->tte_tag.tag_invalid) {
12114 				/*
12115 				 * We have a valid TTE to remap.  Check the
12116 				 * size.  We won't remap 64K or 512K TTEs
12117 				 * because they span more than one TSB entry
12118 				 * and are indexed using an 8K virt. page.
12119 				 * Ditto for 32M and 256M TTEs.
12120 				 */
12121 				if (TTE_CSZ(&old->tte_data) == TTE64K ||
12122 				    TTE_CSZ(&old->tte_data) == TTE512K)
12123 					continue;
12124 				if (mmu_page_sizes == max_mmu_page_sizes) {
12125 				    if (TTE_CSZ(&old->tte_data) == TTE32M ||
12126 					TTE_CSZ(&old->tte_data) == TTE256M)
12127 					    continue;
12128 				}
12129 
12130 				/* clear the lower 22 bits of the va */
12131 				va = *(uint64_t *)old << 22;
12132 				/* turn va into a virtual pfn */
12133 				va >>= 22 - TSB_START_SIZE;
12134 				/*
12135 				 * or in bits from the offset in the tsb
12136 				 * to get the real virtual pfn. These
12137 				 * correspond to bits [21:13] in the va
12138 				 */
12139 				vpshift =
12140 				    TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
12141 				    0x1ff;
12142 				va |= (i << vpshift);
12143 				va >>= vpshift;
12144 				new_offset = va & (new_entries - 1);
12145 				new = new_base + new_offset;
12146 				prefetch_tsbe_write(new);
12147 				*new = *old;
12148 			}
12149 		}
12150 	}
12151 }
12152 
12153 /*
12154  * Kernel Physical Mapping (kpm) facility
12155  */
12156 
12157 /* -- hat_kpm interface section -- */
12158 
12159 /*
12160  * Mapin a locked page and return the vaddr.
12161  * When a kpme is provided by the caller it is added to
12162  * the page p_kpmelist. The page to be mapped in must
12163  * be at least read locked (p_selock).
12164  */
12165 caddr_t
12166 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
12167 {
12168 	kmutex_t	*pml;
12169 	caddr_t		vaddr;
12170 
12171 	if (kpm_enable == 0) {
12172 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
12173 		return ((caddr_t)NULL);
12174 	}
12175 
12176 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
12177 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
12178 		return ((caddr_t)NULL);
12179 	}
12180 
12181 	pml = sfmmu_mlist_enter(pp);
12182 	ASSERT(pp->p_kpmref >= 0);
12183 
12184 	vaddr = (pp->p_kpmref == 0) ?
12185 		sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1);
12186 
12187 	if (kpme != NULL) {
12188 		/*
12189 		 * Tolerate multiple mapins for the same kpme to avoid
12190 		 * the need for an extra serialization.
12191 		 */
12192 		if ((sfmmu_kpme_lookup(kpme, pp)) == 0)
12193 			sfmmu_kpme_add(kpme, pp);
12194 
12195 		ASSERT(pp->p_kpmref > 0);
12196 
12197 	} else {
12198 		pp->p_kpmref++;
12199 	}
12200 
12201 	sfmmu_mlist_exit(pml);
12202 	return (vaddr);
12203 }
12204 
12205 /*
12206  * Mapout a locked page.
12207  * When a kpme is provided by the caller it is removed from
12208  * the page p_kpmelist. The page to be mapped out must be at
12209  * least read locked (p_selock).
12210  * Note: The seg_kpm layer provides a mapout interface for the
12211  * case that a kpme is used and the underlying page is unlocked.
12212  * This can be used instead of calling this function directly.
12213  */
12214 void
12215 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
12216 {
12217 	kmutex_t	*pml;
12218 
12219 	if (kpm_enable == 0) {
12220 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
12221 		return;
12222 	}
12223 
12224 	if (IS_KPM_ADDR(vaddr) == 0) {
12225 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
12226 		return;
12227 	}
12228 
12229 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
12230 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
12231 		return;
12232 	}
12233 
12234 	if (kpme != NULL) {
12235 		ASSERT(pp == kpme->kpe_page);
12236 		pp = kpme->kpe_page;
12237 		pml = sfmmu_mlist_enter(pp);
12238 
12239 		if (sfmmu_kpme_lookup(kpme, pp) == 0)
12240 			panic("hat_kpm_mapout: kpme not found pp=%p",
12241 				(void *)pp);
12242 
12243 		ASSERT(pp->p_kpmref > 0);
12244 		sfmmu_kpme_sub(kpme, pp);
12245 
12246 	} else {
12247 		pml = sfmmu_mlist_enter(pp);
12248 		pp->p_kpmref--;
12249 	}
12250 
12251 	ASSERT(pp->p_kpmref >= 0);
12252 	if (pp->p_kpmref == 0)
12253 		sfmmu_kpm_mapout(pp, vaddr);
12254 
12255 	sfmmu_mlist_exit(pml);
12256 }
12257 
12258 /*
12259  * Return the kpm virtual address for the page at pp.
12260  * If checkswap is non zero and the page is backed by a
12261  * swap vnode the physical address is used rather than
12262  * p_offset to determine the kpm region.
12263  * Note: The function has to be used w/ extreme care. The
12264  * stability of the page identity is in the responsibility
12265  * of the caller.
12266  */
12267 caddr_t
12268 hat_kpm_page2va(struct page *pp, int checkswap)
12269 {
12270 	int		vcolor, vcolor_pa;
12271 	uintptr_t	paddr, vaddr;
12272 
12273 	ASSERT(kpm_enable);
12274 
12275 	paddr = ptob(pp->p_pagenum);
12276 	vcolor_pa = addr_to_vcolor(paddr);
12277 
12278 	if (checkswap && pp->p_vnode && IS_SWAPFSVP(pp->p_vnode))
12279 		vcolor = (PP_ISNC(pp)) ? vcolor_pa : PP_GET_VCOLOR(pp);
12280 	else
12281 		vcolor = addr_to_vcolor(pp->p_offset);
12282 
12283 	vaddr = (uintptr_t)kpm_vbase + paddr;
12284 
12285 	if (vcolor_pa != vcolor) {
12286 		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
12287 		vaddr += (vcolor_pa > vcolor) ?
12288 			((uintptr_t)vcolor_pa << kpm_size_shift) :
12289 			((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
12290 	}
12291 
12292 	return ((caddr_t)vaddr);
12293 }
12294 
12295 /*
12296  * Return the page for the kpm virtual address vaddr.
12297  * Caller is responsible for the kpm mapping and lock
12298  * state of the page.
12299  */
12300 page_t *
12301 hat_kpm_vaddr2page(caddr_t vaddr)
12302 {
12303 	uintptr_t	paddr;
12304 	pfn_t		pfn;
12305 
12306 	ASSERT(IS_KPM_ADDR(vaddr));
12307 
12308 	SFMMU_KPM_VTOP(vaddr, paddr);
12309 	pfn = (pfn_t)btop(paddr);
12310 
12311 	return (page_numtopp_nolock(pfn));
12312 }
12313 
12314 /* page to kpm_page */
12315 #define	PP2KPMPG(pp, kp) {						\
12316 	struct memseg	*mseg;						\
12317 	pgcnt_t		inx;						\
12318 	pfn_t		pfn;						\
12319 									\
12320 	pfn = pp->p_pagenum;						\
12321 	mseg = page_numtomemseg_nolock(pfn);				\
12322 	ASSERT(mseg);							\
12323 	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);		\
12324 	ASSERT(inx < mseg->kpm_nkpmpgs);				\
12325 	kp = &mseg->kpm_pages[inx];					\
12326 }
12327 
12328 /* page to kpm_spage */
12329 #define	PP2KPMSPG(pp, ksp) {						\
12330 	struct memseg	*mseg;						\
12331 	pgcnt_t		inx;						\
12332 	pfn_t		pfn;						\
12333 									\
12334 	pfn = pp->p_pagenum;						\
12335 	mseg = page_numtomemseg_nolock(pfn);				\
12336 	ASSERT(mseg);							\
12337 	inx = pfn - mseg->kpm_pbase;					\
12338 	ksp = &mseg->kpm_spages[inx];					\
12339 }
12340 
12341 /*
12342  * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurred
12343  * which could not be resolved by the trap level tsbmiss handler for the
12344  * following reasons:
12345  * . The vaddr is in VAC alias range (always PAGESIZE mapping size).
12346  * . The kpm (s)page range of vaddr is in a VAC alias prevention state.
12347  * . tsbmiss handling at trap level is not desired (DEBUG kernel only,
12348  *   kpm_tsbmtl == 0).
12349  */
12350 int
12351 hat_kpm_fault(struct hat *hat, caddr_t vaddr)
12352 {
12353 	int		error;
12354 	uintptr_t	paddr;
12355 	pfn_t		pfn;
12356 	struct memseg	*mseg;
12357 	page_t	*pp;
12358 
12359 	if (kpm_enable == 0) {
12360 		cmn_err(CE_WARN, "hat_kpm_fault: kpm_enable not set");
12361 		return (ENOTSUP);
12362 	}
12363 
12364 	ASSERT(hat == ksfmmup);
12365 	ASSERT(IS_KPM_ADDR(vaddr));
12366 
12367 	SFMMU_KPM_VTOP(vaddr, paddr);
12368 	pfn = (pfn_t)btop(paddr);
12369 	mseg = page_numtomemseg_nolock(pfn);
12370 	if (mseg == NULL)
12371 		return (EFAULT);
12372 
12373 	pp = &mseg->pages[(pgcnt_t)(pfn - mseg->pages_base)];
12374 	ASSERT((pfn_t)pp->p_pagenum == pfn);
12375 
12376 	if (!PAGE_LOCKED(pp))
12377 		return (EFAULT);
12378 
12379 	if (kpm_smallpages == 0)
12380 		error = sfmmu_kpm_fault(vaddr, mseg, pp);
12381 	else
12382 		error = sfmmu_kpm_fault_small(vaddr, mseg, pp);
12383 
12384 	return (error);
12385 }
12386 
12387 extern  krwlock_t memsegslock;
12388 
12389 /*
12390  * memseg_hash[] was cleared, need to clear memseg_phash[] too.
12391  */
12392 void
12393 hat_kpm_mseghash_clear(int nentries)
12394 {
12395 	pgcnt_t i;
12396 
12397 	if (kpm_enable == 0)
12398 		return;
12399 
12400 	for (i = 0; i < nentries; i++)
12401 		memseg_phash[i] = MSEG_NULLPTR_PA;
12402 }
12403 
12404 /*
12405  * Update memseg_phash[inx] when memseg_hash[inx] was changed.
12406  */
12407 void
12408 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
12409 {
12410 	if (kpm_enable == 0)
12411 		return;
12412 
12413 	memseg_phash[inx] = (msp) ? va_to_pa(msp) : MSEG_NULLPTR_PA;
12414 }
12415 
12416 /*
12417  * Update kpm memseg members from basic memseg info.
12418  */
12419 void
12420 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
12421 	offset_t kpm_pages_off)
12422 {
12423 	if (kpm_enable == 0)
12424 		return;
12425 
12426 	msp->kpm_pages = (kpm_page_t *)((caddr_t)msp->pages + kpm_pages_off);
12427 	msp->kpm_nkpmpgs = nkpmpgs;
12428 	msp->kpm_pbase = kpmptop(ptokpmp(msp->pages_base));
12429 	msp->pagespa = va_to_pa(msp->pages);
12430 	msp->epagespa = va_to_pa(msp->epages);
12431 	msp->kpm_pagespa = va_to_pa(msp->kpm_pages);
12432 }
12433 
12434 /*
12435  * Setup nextpa when a memseg is inserted.
12436  * Assumes that the memsegslock is already held.
12437  */
12438 void
12439 hat_kpm_addmem_mseg_insert(struct memseg *msp)
12440 {
12441 	if (kpm_enable == 0)
12442 		return;
12443 
12444 	ASSERT(RW_LOCK_HELD(&memsegslock));
12445 	msp->nextpa = (memsegs) ? va_to_pa(memsegs) : MSEG_NULLPTR_PA;
12446 }
12447 
12448 /*
12449  * Setup memsegspa when a memseg is (head) inserted.
12450  * Called before memsegs is updated to complete a
12451  * memseg insert operation.
12452  * Assumes that the memsegslock is already held.
12453  */
12454 void
12455 hat_kpm_addmem_memsegs_update(struct memseg *msp)
12456 {
12457 	if (kpm_enable == 0)
12458 		return;
12459 
12460 	ASSERT(RW_LOCK_HELD(&memsegslock));
12461 	ASSERT(memsegs);
12462 	memsegspa = va_to_pa(msp);
12463 }
12464 
12465 /*
12466  * Return end of metadata for an already setup memseg.
12467  *
12468  * Note: kpm_pages and kpm_spages are aliases and the underlying
12469  * member of struct memseg is a union, therefore they always have
12470  * the same address within a memseg. They must be differentiated
12471  * when pointer arithmetic is used with them.
12472  */
12473 caddr_t
12474 hat_kpm_mseg_reuse(struct memseg *msp)
12475 {
12476 	caddr_t end;
12477 
12478 	if (kpm_smallpages == 0)
12479 		end = (caddr_t)(msp->kpm_pages + msp->kpm_nkpmpgs);
12480 	else
12481 		end = (caddr_t)(msp->kpm_spages + msp->kpm_nkpmpgs);
12482 
12483 	return (end);
12484 }
12485 
12486 /*
12487  * Update memsegspa (when first memseg in list
12488  * is deleted) or nextpa  when a memseg deleted.
12489  * Assumes that the memsegslock is already held.
12490  */
12491 void
12492 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
12493 {
12494 	struct memseg *lmsp;
12495 
12496 	if (kpm_enable == 0)
12497 		return;
12498 
12499 	ASSERT(RW_LOCK_HELD(&memsegslock));
12500 
12501 	if (mspp == &memsegs) {
12502 		memsegspa = (msp->next) ?
12503 				va_to_pa(msp->next) : MSEG_NULLPTR_PA;
12504 	} else {
12505 		lmsp = (struct memseg *)
12506 			((uint64_t)mspp - offsetof(struct memseg, next));
12507 		lmsp->nextpa = (msp->next) ?
12508 				va_to_pa(msp->next) : MSEG_NULLPTR_PA;
12509 	}
12510 }
12511 
12512 /*
12513  * Update kpm members for all memseg's involved in a split operation
12514  * and do the atomic update of the physical memseg chain.
12515  *
12516  * Note: kpm_pages and kpm_spages are aliases and the underlying member
12517  * of struct memseg is a union, therefore they always have the same
12518  * address within a memseg. With that the direct assignments and
12519  * va_to_pa conversions below don't have to be distinguished wrt. to
12520  * kpm_smallpages. They must be differentiated when pointer arithmetic
12521  * is used with them.
12522  *
12523  * Assumes that the memsegslock is already held.
12524  */
12525 void
12526 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
12527 	struct memseg *lo, struct memseg *mid, struct memseg *hi)
12528 {
12529 	pgcnt_t start, end, kbase, kstart, num;
12530 	struct memseg *lmsp;
12531 
12532 	if (kpm_enable == 0)
12533 		return;
12534 
12535 	ASSERT(RW_LOCK_HELD(&memsegslock));
12536 	ASSERT(msp && mid && msp->kpm_pages);
12537 
12538 	kbase = ptokpmp(msp->kpm_pbase);
12539 
12540 	if (lo) {
12541 		num = lo->pages_end - lo->pages_base;
12542 		start = kpmptop(ptokpmp(lo->pages_base));
12543 		/* align end to kpm page size granularity */
12544 		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
12545 		lo->kpm_pbase = start;
12546 		lo->kpm_nkpmpgs = ptokpmp(end - start);
12547 		lo->kpm_pages = msp->kpm_pages;
12548 		lo->kpm_pagespa = va_to_pa(lo->kpm_pages);
12549 		lo->pagespa = va_to_pa(lo->pages);
12550 		lo->epagespa = va_to_pa(lo->epages);
12551 		lo->nextpa = va_to_pa(lo->next);
12552 	}
12553 
12554 	/* mid */
12555 	num = mid->pages_end - mid->pages_base;
12556 	kstart = ptokpmp(mid->pages_base);
12557 	start = kpmptop(kstart);
12558 	/* align end to kpm page size granularity */
12559 	end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
12560 	mid->kpm_pbase = start;
12561 	mid->kpm_nkpmpgs = ptokpmp(end - start);
12562 	if (kpm_smallpages == 0) {
12563 		mid->kpm_pages = msp->kpm_pages + (kstart - kbase);
12564 	} else {
12565 		mid->kpm_spages = msp->kpm_spages + (kstart - kbase);
12566 	}
12567 	mid->kpm_pagespa = va_to_pa(mid->kpm_pages);
12568 	mid->pagespa = va_to_pa(mid->pages);
12569 	mid->epagespa = va_to_pa(mid->epages);
12570 	mid->nextpa = (mid->next) ?  va_to_pa(mid->next) : MSEG_NULLPTR_PA;
12571 
12572 	if (hi) {
12573 		num = hi->pages_end - hi->pages_base;
12574 		kstart = ptokpmp(hi->pages_base);
12575 		start = kpmptop(kstart);
12576 		/* align end to kpm page size granularity */
12577 		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
12578 		hi->kpm_pbase = start;
12579 		hi->kpm_nkpmpgs = ptokpmp(end - start);
12580 		if (kpm_smallpages == 0) {
12581 			hi->kpm_pages = msp->kpm_pages + (kstart - kbase);
12582 		} else {
12583 			hi->kpm_spages = msp->kpm_spages + (kstart - kbase);
12584 		}
12585 		hi->kpm_pagespa = va_to_pa(hi->kpm_pages);
12586 		hi->pagespa = va_to_pa(hi->pages);
12587 		hi->epagespa = va_to_pa(hi->epages);
12588 		hi->nextpa = (hi->next) ? va_to_pa(hi->next) : MSEG_NULLPTR_PA;
12589 	}
12590 
12591 	/*
12592 	 * Atomic update of the physical memseg chain
12593 	 */
12594 	if (mspp == &memsegs) {
12595 		memsegspa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
12596 	} else {
12597 		lmsp = (struct memseg *)
12598 			((uint64_t)mspp - offsetof(struct memseg, next));
12599 		lmsp->nextpa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
12600 	}
12601 }
12602 
12603 /*
12604  * Walk the memsegs chain, applying func to each memseg span and vcolor.
12605  */
12606 void
12607 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
12608 {
12609 	pfn_t	pbase, pend;
12610 	int	vcolor;
12611 	void	*base;
12612 	size_t	size;
12613 	struct memseg *msp;
12614 	extern uint_t vac_colors;
12615 
12616 	for (msp = memsegs; msp; msp = msp->next) {
12617 		pbase = msp->pages_base;
12618 		pend = msp->pages_end;
12619 		for (vcolor = 0; vcolor < vac_colors; vcolor++) {
12620 			base = ptob(pbase) + kpm_vbase + kpm_size * vcolor;
12621 			size = ptob(pend - pbase);
12622 			func(arg, base, size);
12623 		}
12624 	}
12625 }
12626 
12627 
12628 /* -- sfmmu_kpm internal section -- */
12629 
12630 /*
12631  * Return the page frame number if a valid segkpm mapping exists
12632  * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
12633  * Should only be used by other sfmmu routines.
12634  */
12635 pfn_t
12636 sfmmu_kpm_vatopfn(caddr_t vaddr)
12637 {
12638 	uintptr_t	paddr;
12639 	pfn_t		pfn;
12640 	page_t	*pp;
12641 
12642 	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));
12643 
12644 	SFMMU_KPM_VTOP(vaddr, paddr);
12645 	pfn = (pfn_t)btop(paddr);
12646 	pp = page_numtopp_nolock(pfn);
12647 	if (pp && pp->p_kpmref)
12648 		return (pfn);
12649 	else
12650 		return ((pfn_t)PFN_INVALID);
12651 }
12652 
12653 /*
12654  * Lookup a kpme in the p_kpmelist.
12655  */
12656 static int
12657 sfmmu_kpme_lookup(struct kpme *kpme, page_t *pp)
12658 {
12659 	struct kpme	*p;
12660 
12661 	for (p = pp->p_kpmelist; p; p = p->kpe_next) {
12662 		if (p == kpme)
12663 			return (1);
12664 	}
12665 	return (0);
12666 }
12667 
12668 /*
12669  * Insert a kpme into the p_kpmelist and increment
12670  * the per page kpm reference count.
12671  */
12672 static void
12673 sfmmu_kpme_add(struct kpme *kpme, page_t *pp)
12674 {
12675 	ASSERT(pp->p_kpmref >= 0);
12676 
12677 	/* head insert */
12678 	kpme->kpe_prev = NULL;
12679 	kpme->kpe_next = pp->p_kpmelist;
12680 
12681 	if (pp->p_kpmelist)
12682 		pp->p_kpmelist->kpe_prev = kpme;
12683 
12684 	pp->p_kpmelist = kpme;
12685 	kpme->kpe_page = pp;
12686 	pp->p_kpmref++;
12687 }
12688 
12689 /*
12690  * Remove a kpme from the p_kpmelist and decrement
12691  * the per page kpm reference count.
12692  */
12693 static void
12694 sfmmu_kpme_sub(struct kpme *kpme, page_t *pp)
12695 {
12696 	ASSERT(pp->p_kpmref > 0);
12697 
12698 	if (kpme->kpe_prev) {
12699 		ASSERT(pp->p_kpmelist != kpme);
12700 		ASSERT(kpme->kpe_prev->kpe_page == pp);
12701 		kpme->kpe_prev->kpe_next = kpme->kpe_next;
12702 	} else {
12703 		ASSERT(pp->p_kpmelist == kpme);
12704 		pp->p_kpmelist = kpme->kpe_next;
12705 	}
12706 
12707 	if (kpme->kpe_next) {
12708 		ASSERT(kpme->kpe_next->kpe_page == pp);
12709 		kpme->kpe_next->kpe_prev = kpme->kpe_prev;
12710 	}
12711 
12712 	kpme->kpe_next = kpme->kpe_prev = NULL;
12713 	kpme->kpe_page = NULL;
12714 	pp->p_kpmref--;
12715 }
12716 
12717 /*
12718  * Mapin a single page, it is called every time a page changes it's state
12719  * from kpm-unmapped to kpm-mapped. It may not be called, when only a new
12720  * kpm instance does a mapin and wants to share the mapping.
12721  * Assumes that the mlist mutex is already grabbed.
12722  */
12723 static caddr_t
12724 sfmmu_kpm_mapin(page_t *pp)
12725 {
12726 	kpm_page_t	*kp;
12727 	kpm_hlk_t	*kpmp;
12728 	caddr_t		vaddr;
12729 	int		kpm_vac_range;
12730 	pfn_t		pfn;
12731 	tte_t		tte;
12732 	kmutex_t	*pmtx;
12733 	int		uncached;
12734 	kpm_spage_t	*ksp;
12735 	kpm_shlk_t	*kpmsp;
12736 	int		oldval;
12737 
12738 	ASSERT(sfmmu_mlist_held(pp));
12739 	ASSERT(pp->p_kpmref == 0);
12740 
12741 	vaddr = sfmmu_kpm_getvaddr(pp, &kpm_vac_range);
12742 
12743 	ASSERT(IS_KPM_ADDR(vaddr));
12744 	uncached = PP_ISNC(pp);
12745 	pfn = pp->p_pagenum;
12746 
12747 	if (kpm_smallpages)
12748 		goto smallpages_mapin;
12749 
12750 	PP2KPMPG(pp, kp);
12751 
12752 	kpmp = KPMP_HASH(kp);
12753 	mutex_enter(&kpmp->khl_mutex);
12754 
12755 	ASSERT(PP_ISKPMC(pp) == 0);
12756 	ASSERT(PP_ISKPMS(pp) == 0);
12757 
12758 	if (uncached) {
12759 		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
12760 		if (kpm_vac_range == 0) {
12761 			if (kp->kp_refcnts == 0) {
12762 				/*
12763 				 * Must remove large page mapping if it exists.
12764 				 * Pages in uncached state can only be mapped
12765 				 * small (PAGESIZE) within the regular kpm
12766 				 * range.
12767 				 */
12768 				if (kp->kp_refcntc == -1) {
12769 					/* remove go indication */
12770 					sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
12771 						&kpmp->khl_lock, KPMTSBM_STOP);
12772 				}
12773 				if (kp->kp_refcnt > 0 && kp->kp_refcntc == 0)
12774 					sfmmu_kpm_demap_large(vaddr);
12775 			}
12776 			ASSERT(kp->kp_refcntc >= 0);
12777 			kp->kp_refcntc++;
12778 		}
12779 		pmtx = sfmmu_page_enter(pp);
12780 		PP_SETKPMC(pp);
12781 		sfmmu_page_exit(pmtx);
12782 	}
12783 
12784 	if ((kp->kp_refcntc > 0 || kp->kp_refcnts > 0) && kpm_vac_range == 0) {
12785 		/*
12786 		 * Have to do a small (PAGESIZE) mapin within this kpm_page
12787 		 * range since it is marked to be in VAC conflict mode or
12788 		 * when there are still other small mappings around.
12789 		 */
12790 
12791 		/* tte assembly */
12792 		if (uncached == 0)
12793 			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
12794 		else
12795 			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
12796 
12797 		/* tsb dropin */
12798 		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
12799 
12800 		pmtx = sfmmu_page_enter(pp);
12801 		PP_SETKPMS(pp);
12802 		sfmmu_page_exit(pmtx);
12803 
12804 		kp->kp_refcnts++;
12805 		ASSERT(kp->kp_refcnts > 0);
12806 		goto exit;
12807 	}
12808 
12809 	if (kpm_vac_range == 0) {
12810 		/*
12811 		 * Fast path / regular case, no VAC conflict handling
12812 		 * in progress within this kpm_page range.
12813 		 */
12814 		if (kp->kp_refcnt == 0) {
12815 
12816 			/* tte assembly */
12817 			KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);
12818 
12819 			/* tsb dropin */
12820 			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);
12821 
12822 			/* Set go flag for TL tsbmiss handler */
12823 			if (kp->kp_refcntc == 0)
12824 				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
12825 						&kpmp->khl_lock, KPMTSBM_START);
12826 
12827 			ASSERT(kp->kp_refcntc == -1);
12828 		}
12829 		kp->kp_refcnt++;
12830 		ASSERT(kp->kp_refcnt);
12831 
12832 	} else {
12833 		/*
12834 		 * The page is not setup according to the common VAC
12835 		 * prevention rules for the regular and kpm mapping layer
12836 		 * E.g. the page layer was not able to deliver a right
12837 		 * vcolor'ed page for a given vaddr corresponding to
12838 		 * the wanted p_offset. It has to be mapped in small in
12839 		 * within the corresponding kpm vac range in order to
12840 		 * prevent VAC alias conflicts.
12841 		 */
12842 
12843 		/* tte assembly */
12844 		if (uncached == 0) {
12845 			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
12846 		} else {
12847 			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
12848 		}
12849 
12850 		/* tsb dropin */
12851 		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
12852 
12853 		kp->kp_refcnta++;
12854 		if (kp->kp_refcntc == -1) {
12855 			ASSERT(kp->kp_refcnt > 0);
12856 
12857 			/* remove go indication */
12858 			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
12859 					KPMTSBM_STOP);
12860 		}
12861 		ASSERT(kp->kp_refcntc >= 0);
12862 	}
12863 exit:
12864 	mutex_exit(&kpmp->khl_mutex);
12865 	return (vaddr);
12866 
12867 smallpages_mapin:
12868 	if (uncached == 0) {
12869 		/* tte assembly */
12870 		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
12871 	} else {
12872 		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
12873 		pmtx = sfmmu_page_enter(pp);
12874 		PP_SETKPMC(pp);
12875 		sfmmu_page_exit(pmtx);
12876 		/* tte assembly */
12877 		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
12878 	}
12879 
12880 	/* tsb dropin */
12881 	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
12882 
12883 	PP2KPMSPG(pp, ksp);
12884 	kpmsp = KPMP_SHASH(ksp);
12885 
12886 	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, &kpmsp->kshl_lock,
12887 				(uncached) ? KPM_MAPPEDSC : KPM_MAPPEDS);
12888 
12889 	if (oldval != 0)
12890 		panic("sfmmu_kpm_mapin: stale smallpages mapping");
12891 
12892 	return (vaddr);
12893 }
12894 
12895 /*
12896  * Mapout a single page, it is called every time a page changes it's state
12897  * from kpm-mapped to kpm-unmapped. It may not be called, when only a kpm
12898  * instance calls mapout and there are still other instances mapping the
12899  * page. Assumes that the mlist mutex is already grabbed.
12900  *
12901  * Note: In normal mode (no VAC conflict prevention pending) TLB's are
12902  * not flushed. This is the core segkpm behavior to avoid xcalls. It is
12903  * no problem because a translation from a segkpm virtual address to a
12904  * physical address is always the same. The only downside is a slighty
12905  * increased window of vulnerability for misbehaving _kernel_ modules.
12906  */
12907 static void
12908 sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
12909 {
12910 	kpm_page_t	*kp;
12911 	kpm_hlk_t	*kpmp;
12912 	int		alias_range;
12913 	kmutex_t	*pmtx;
12914 	kpm_spage_t	*ksp;
12915 	kpm_shlk_t	*kpmsp;
12916 	int		oldval;
12917 
12918 	ASSERT(sfmmu_mlist_held(pp));
12919 	ASSERT(pp->p_kpmref == 0);
12920 
12921 	alias_range = IS_KPM_ALIAS_RANGE(vaddr);
12922 
12923 	if (kpm_smallpages)
12924 		goto smallpages_mapout;
12925 
12926 	PP2KPMPG(pp, kp);
12927 	kpmp = KPMP_HASH(kp);
12928 	mutex_enter(&kpmp->khl_mutex);
12929 
12930 	if (alias_range) {
12931 		ASSERT(PP_ISKPMS(pp) == 0);
12932 		if (kp->kp_refcnta <= 0) {
12933 			panic("sfmmu_kpm_mapout: bad refcnta kp=%p",
12934 				(void *)kp);
12935 		}
12936 
12937 		if (PP_ISTNC(pp))  {
12938 			if (PP_ISKPMC(pp) == 0) {
12939 				/*
12940 				 * Uncached kpm mappings must always have
12941 				 * forced "small page" mode.
12942 				 */
12943 				panic("sfmmu_kpm_mapout: uncached page not "
12944 					"kpm marked");
12945 			}
12946 			sfmmu_kpm_demap_small(vaddr);
12947 
12948 			pmtx = sfmmu_page_enter(pp);
12949 			PP_CLRKPMC(pp);
12950 			sfmmu_page_exit(pmtx);
12951 
12952 			/*
12953 			 * Check if we can resume cached mode. This might
12954 			 * be the case if the kpm mapping was the only
12955 			 * mapping in conflict with other non rule
12956 			 * compliant mappings. The page is no more marked
12957 			 * as kpm mapped, so the conv_tnc path will not
12958 			 * change kpm state.
12959 			 */
12960 			conv_tnc(pp, TTE8K);
12961 
12962 		} else if (PP_ISKPMC(pp) == 0) {
12963 			/* remove TSB entry only */
12964 			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
12965 
12966 		} else {
12967 			/* already demapped */
12968 			pmtx = sfmmu_page_enter(pp);
12969 			PP_CLRKPMC(pp);
12970 			sfmmu_page_exit(pmtx);
12971 		}
12972 		kp->kp_refcnta--;
12973 		goto exit;
12974 	}
12975 
12976 	if (kp->kp_refcntc <= 0 && kp->kp_refcnts == 0) {
12977 		/*
12978 		 * Fast path / regular case.
12979 		 */
12980 		ASSERT(kp->kp_refcntc >= -1);
12981 		ASSERT(!(pp->p_nrm & (P_KPMC | P_KPMS | P_TNC | P_PNC)));
12982 
12983 		if (kp->kp_refcnt <= 0)
12984 			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);
12985 
12986 		if (--kp->kp_refcnt == 0) {
12987 			/* remove go indication */
12988 			if (kp->kp_refcntc == -1) {
12989 				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
12990 					&kpmp->khl_lock, KPMTSBM_STOP);
12991 			}
12992 			ASSERT(kp->kp_refcntc == 0);
12993 
12994 			/* remove TSB entry */
12995 			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
12996 #ifdef	DEBUG
12997 			if (kpm_tlb_flush)
12998 				sfmmu_kpm_demap_tlbs(vaddr);
12999 #endif
13000 		}
13001 
13002 	} else {
13003 		/*
13004 		 * The VAC alias path.
13005 		 * We come here if the kpm vaddr is not in any alias_range
13006 		 * and we are unmapping a page within the regular kpm_page
13007 		 * range. The kpm_page either holds conflict pages and/or
13008 		 * is in "small page" mode. If the page is not marked
13009 		 * P_KPMS it couldn't have a valid PAGESIZE sized TSB
13010 		 * entry. Dcache flushing is done lazy and follows the
13011 		 * rules of the regular virtual page coloring scheme.
13012 		 *
13013 		 * Per page states and required actions:
13014 		 *   P_KPMC: remove a kpm mapping that is conflicting.
13015 		 *   P_KPMS: remove a small kpm mapping within a kpm_page.
13016 		 *   P_TNC:  check if we can re-cache the page.
13017 		 *   P_PNC:  we cannot re-cache, sorry.
13018 		 * Per kpm_page:
13019 		 *   kp_refcntc > 0: page is part of a kpm_page with conflicts.
13020 		 *   kp_refcnts > 0: rm a small mapped page within a kpm_page.
13021 		 */
13022 
13023 		if (PP_ISKPMS(pp)) {
13024 			if (kp->kp_refcnts < 1) {
13025 				panic("sfmmu_kpm_mapout: bad refcnts kp=%p",
13026 					(void *)kp);
13027 			}
13028 			sfmmu_kpm_demap_small(vaddr);
13029 
13030 			/*
13031 			 * Check if we can resume cached mode. This might
13032 			 * be the case if the kpm mapping was the only
13033 			 * mapping in conflict with other non rule
13034 			 * compliant mappings. The page is no more marked
13035 			 * as kpm mapped, so the conv_tnc path will not
13036 			 * change kpm state.
13037 			 */
13038 			if (PP_ISTNC(pp))  {
13039 				if (!PP_ISKPMC(pp)) {
13040 					/*
13041 					 * Uncached kpm mappings must always
13042 					 * have forced "small page" mode.
13043 					 */
13044 					panic("sfmmu_kpm_mapout: uncached "
13045 						"page not kpm marked");
13046 				}
13047 				conv_tnc(pp, TTE8K);
13048 			}
13049 			kp->kp_refcnts--;
13050 			kp->kp_refcnt++;
13051 			pmtx = sfmmu_page_enter(pp);
13052 			PP_CLRKPMS(pp);
13053 			sfmmu_page_exit(pmtx);
13054 		}
13055 
13056 		if (PP_ISKPMC(pp)) {
13057 			if (kp->kp_refcntc < 1) {
13058 				panic("sfmmu_kpm_mapout: bad refcntc kp=%p",
13059 					(void *)kp);
13060 			}
13061 			pmtx = sfmmu_page_enter(pp);
13062 			PP_CLRKPMC(pp);
13063 			sfmmu_page_exit(pmtx);
13064 			kp->kp_refcntc--;
13065 		}
13066 
13067 		if (kp->kp_refcnt-- < 1)
13068 			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);
13069 	}
13070 exit:
13071 	mutex_exit(&kpmp->khl_mutex);
13072 	return;
13073 
13074 smallpages_mapout:
13075 	PP2KPMSPG(pp, ksp);
13076 	kpmsp = KPMP_SHASH(ksp);
13077 
13078 	if (PP_ISKPMC(pp) == 0) {
13079 		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
13080 					&kpmsp->kshl_lock, 0);
13081 
13082 		if (oldval != KPM_MAPPEDS) {
13083 			/*
13084 			 * When we're called after sfmmu_kpm_hme_unload,
13085 			 * KPM_MAPPEDSC is valid too.
13086 			 */
13087 			if (oldval != KPM_MAPPEDSC)
13088 				panic("sfmmu_kpm_mapout: incorrect mapping");
13089 		}
13090 
13091 		/* remove TSB entry */
13092 		sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
13093 #ifdef	DEBUG
13094 		if (kpm_tlb_flush)
13095 			sfmmu_kpm_demap_tlbs(vaddr);
13096 #endif
13097 
13098 	} else if (PP_ISTNC(pp)) {
13099 		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
13100 					&kpmsp->kshl_lock, 0);
13101 
13102 		if (oldval != KPM_MAPPEDSC || PP_ISKPMC(pp) == 0)
13103 			panic("sfmmu_kpm_mapout: inconsistent TNC mapping");
13104 
13105 		sfmmu_kpm_demap_small(vaddr);
13106 
13107 		pmtx = sfmmu_page_enter(pp);
13108 		PP_CLRKPMC(pp);
13109 		sfmmu_page_exit(pmtx);
13110 
13111 		/*
13112 		 * Check if we can resume cached mode. This might be
13113 		 * the case if the kpm mapping was the only mapping
13114 		 * in conflict with other non rule compliant mappings.
13115 		 * The page is no more marked as kpm mapped, so the
13116 		 * conv_tnc path will not change the kpm state.
13117 		 */
13118 		conv_tnc(pp, TTE8K);
13119 
13120 	} else {
13121 		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
13122 					&kpmsp->kshl_lock, 0);
13123 
13124 		if (oldval != KPM_MAPPEDSC)
13125 			panic("sfmmu_kpm_mapout: inconsistent mapping");
13126 
13127 		pmtx = sfmmu_page_enter(pp);
13128 		PP_CLRKPMC(pp);
13129 		sfmmu_page_exit(pmtx);
13130 	}
13131 }
13132 
13133 #define	abs(x)  ((x) < 0 ? -(x) : (x))
13134 
13135 /*
13136  * Determine appropriate kpm mapping address and handle any kpm/hme
13137  * conflicts. Page mapping list and its vcolor parts must be protected.
13138  */
13139 static caddr_t
13140 sfmmu_kpm_getvaddr(page_t *pp, int *kpm_vac_rangep)
13141 {
13142 	int		vcolor, vcolor_pa;
13143 	caddr_t		vaddr;
13144 	uintptr_t	paddr;
13145 
13146 
13147 	ASSERT(sfmmu_mlist_held(pp));
13148 
13149 	paddr = ptob(pp->p_pagenum);
13150 	vcolor_pa = addr_to_vcolor(paddr);
13151 
13152 	if (pp->p_vnode && IS_SWAPFSVP(pp->p_vnode)) {
13153 		vcolor = (PP_NEWPAGE(pp) || PP_ISNC(pp)) ?
13154 		    vcolor_pa : PP_GET_VCOLOR(pp);
13155 	} else {
13156 		vcolor = addr_to_vcolor(pp->p_offset);
13157 	}
13158 
13159 	vaddr = kpm_vbase + paddr;
13160 	*kpm_vac_rangep = 0;
13161 
13162 	if (vcolor_pa != vcolor) {
13163 		*kpm_vac_rangep = abs(vcolor - vcolor_pa);
13164 		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
13165 		vaddr += (vcolor_pa > vcolor) ?
13166 			((uintptr_t)vcolor_pa << kpm_size_shift) :
13167 			((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
13168 
13169 		ASSERT(!PP_ISMAPPED_LARGE(pp));
13170 	}
13171 
13172 	if (PP_ISNC(pp))
13173 		return (vaddr);
13174 
13175 	if (PP_NEWPAGE(pp)) {
13176 		PP_SET_VCOLOR(pp, vcolor);
13177 		return (vaddr);
13178 	}
13179 
13180 	if (PP_GET_VCOLOR(pp) == vcolor)
13181 		return (vaddr);
13182 
13183 	ASSERT(!PP_ISMAPPED_KPM(pp));
13184 	sfmmu_kpm_vac_conflict(pp, vaddr);
13185 
13186 	return (vaddr);
13187 }
13188 
13189 /*
13190  * VAC conflict state bit values.
13191  * The following defines are used to make the handling of the
13192  * various input states more concise. For that the kpm states
13193  * per kpm_page and per page are combined in a summary state.
13194  * Each single state has a corresponding bit value in the
13195  * summary state. These defines only apply for kpm large page
13196  * mappings. Within comments the abbreviations "kc, c, ks, s"
13197  * are used as short form of the actual state, e.g. "kc" for
13198  * "kp_refcntc > 0", etc.
13199  */
13200 #define	KPM_KC	0x00000008	/* kpm_page: kp_refcntc > 0 */
13201 #define	KPM_C	0x00000004	/* page: P_KPMC set */
13202 #define	KPM_KS	0x00000002	/* kpm_page: kp_refcnts > 0 */
13203 #define	KPM_S	0x00000001	/* page: P_KPMS set */
13204 
13205 /*
13206  * Summary states used in sfmmu_kpm_fault (KPM_TSBM_*).
13207  * See also more detailed comments within in the sfmmu_kpm_fault switch.
13208  * Abbreviations used:
13209  * CONFL: VAC conflict(s) within a kpm_page.
13210  * MAPS:  Mapped small: Page mapped in using a regular page size kpm mapping.
13211  * RASM:  Re-assembling of a large page mapping possible.
13212  * RPLS:  Replace: TSB miss due to TSB replacement only.
13213  * BRKO:  Breakup Other: A large kpm mapping has to be broken because another
13214  *        page within the kpm_page is already involved in a VAC conflict.
13215  * BRKT:  Breakup This: A large kpm mapping has to be broken, this page is
13216  *        is involved in a VAC conflict.
13217  */
13218 #define	KPM_TSBM_CONFL_GONE	(0)
13219 #define	KPM_TSBM_MAPS_RASM	(KPM_KS)
13220 #define	KPM_TSBM_RPLS_RASM	(KPM_KS | KPM_S)
13221 #define	KPM_TSBM_MAPS_BRKO	(KPM_KC)
13222 #define	KPM_TSBM_MAPS		(KPM_KC | KPM_KS)
13223 #define	KPM_TSBM_RPLS		(KPM_KC | KPM_KS | KPM_S)
13224 #define	KPM_TSBM_MAPS_BRKT	(KPM_KC | KPM_C)
13225 #define	KPM_TSBM_MAPS_CONFL	(KPM_KC | KPM_C | KPM_KS)
13226 #define	KPM_TSBM_RPLS_CONFL	(KPM_KC | KPM_C | KPM_KS | KPM_S)
13227 
13228 /*
13229  * kpm fault handler for mappings with large page size.
13230  */
13231 int
13232 sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
13233 {
13234 	int		error;
13235 	pgcnt_t		inx;
13236 	kpm_page_t	*kp;
13237 	tte_t		tte;
13238 	pfn_t		pfn = pp->p_pagenum;
13239 	kpm_hlk_t	*kpmp;
13240 	kmutex_t	*pml;
13241 	int		alias_range;
13242 	int		uncached = 0;
13243 	kmutex_t	*pmtx;
13244 	int		badstate;
13245 	uint_t		tsbmcase;
13246 
13247 	alias_range = IS_KPM_ALIAS_RANGE(vaddr);
13248 
13249 	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);
13250 	if (inx >= mseg->kpm_nkpmpgs) {
13251 		cmn_err(CE_PANIC, "sfmmu_kpm_fault: kpm overflow in memseg "
13252 			"0x%p  pp 0x%p", (void *)mseg, (void *)pp);
13253 	}
13254 
13255 	kp = &mseg->kpm_pages[inx];
13256 	kpmp = KPMP_HASH(kp);
13257 
13258 	pml = sfmmu_mlist_enter(pp);
13259 
13260 	if (!PP_ISMAPPED_KPM(pp)) {
13261 		sfmmu_mlist_exit(pml);
13262 		return (EFAULT);
13263 	}
13264 
13265 	mutex_enter(&kpmp->khl_mutex);
13266 
13267 	if (alias_range) {
13268 		ASSERT(!PP_ISMAPPED_LARGE(pp));
13269 		if (kp->kp_refcnta > 0) {
13270 			if (PP_ISKPMC(pp)) {
13271 				pmtx = sfmmu_page_enter(pp);
13272 				PP_CLRKPMC(pp);
13273 				sfmmu_page_exit(pmtx);
13274 			}
13275 			/*
13276 			 * Check for vcolor conflicts. Return here
13277 			 * w/ either no conflict (fast path), removed hme
13278 			 * mapping chains (unload conflict) or uncached
13279 			 * (uncache conflict). VACaches are cleaned and
13280 			 * p_vcolor and PP_TNC are set accordingly for the
13281 			 * conflict cases.  Drop kpmp for uncache conflict
13282 			 * cases since it will be grabbed within
13283 			 * sfmmu_kpm_page_cache in case of an uncache
13284 			 * conflict.
13285 			 */
13286 			mutex_exit(&kpmp->khl_mutex);
13287 			sfmmu_kpm_vac_conflict(pp, vaddr);
13288 			mutex_enter(&kpmp->khl_mutex);
13289 
13290 			if (PP_ISNC(pp)) {
13291 				uncached = 1;
13292 				pmtx = sfmmu_page_enter(pp);
13293 				PP_SETKPMC(pp);
13294 				sfmmu_page_exit(pmtx);
13295 			}
13296 			goto smallexit;
13297 
13298 		} else {
13299 			/*
13300 			 * We got a tsbmiss on a not active kpm_page range.
13301 			 * Let segkpm_fault decide how to panic.
13302 			 */
13303 			error = EFAULT;
13304 		}
13305 		goto exit;
13306 	}
13307 
13308 	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
13309 	if (kp->kp_refcntc == -1) {
13310 		/*
13311 		 * We should come here only if trap level tsb miss
13312 		 * handler is disabled.
13313 		 */
13314 		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
13315 			PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
13316 
13317 		if (badstate == 0)
13318 			goto largeexit;
13319 	}
13320 
13321 	if (badstate || kp->kp_refcntc < 0)
13322 		goto badstate_exit;
13323 
13324 	/*
13325 	 * Combine the per kpm_page and per page kpm VAC states to
13326 	 * a summary state in order to make the kpm fault handling
13327 	 * more concise.
13328 	 */
13329 	tsbmcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
13330 			((kp->kp_refcnts > 0) ? KPM_KS : 0) |
13331 			(PP_ISKPMC(pp) ? KPM_C : 0) |
13332 			(PP_ISKPMS(pp) ? KPM_S : 0));
13333 
13334 	switch (tsbmcase) {
13335 	case KPM_TSBM_CONFL_GONE:		/* - - - - */
13336 		/*
13337 		 * That's fine, we either have no more vac conflict in
13338 		 * this kpm page or someone raced in and has solved the
13339 		 * vac conflict for us -- call sfmmu_kpm_vac_conflict
13340 		 * to take care for correcting the vcolor and flushing
13341 		 * the dcache if required.
13342 		 */
13343 		mutex_exit(&kpmp->khl_mutex);
13344 		sfmmu_kpm_vac_conflict(pp, vaddr);
13345 		mutex_enter(&kpmp->khl_mutex);
13346 
13347 		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
13348 		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
13349 			panic("sfmmu_kpm_fault: inconsistent CONFL_GONE "
13350 				"state, pp=%p", (void *)pp);
13351 		}
13352 		goto largeexit;
13353 
13354 	case KPM_TSBM_MAPS_RASM:		/* - - ks - */
13355 		/*
13356 		 * All conflicts in this kpm page are gone but there are
13357 		 * already small mappings around, so we also map this
13358 		 * page small. This could be the trigger case for a
13359 		 * small mapping reaper, if this is really needed.
13360 		 * For now fall thru to the KPM_TSBM_MAPS handling.
13361 		 */
13362 
13363 	case KPM_TSBM_MAPS:			/* kc - ks - */
13364 		/*
13365 		 * Large page mapping is already broken, this page is not
13366 		 * conflicting, so map it small. Call sfmmu_kpm_vac_conflict
13367 		 * to take care for correcting the vcolor and flushing
13368 		 * the dcache if required.
13369 		 */
13370 		mutex_exit(&kpmp->khl_mutex);
13371 		sfmmu_kpm_vac_conflict(pp, vaddr);
13372 		mutex_enter(&kpmp->khl_mutex);
13373 
13374 		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
13375 		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
13376 			panic("sfmmu_kpm_fault:  inconsistent MAPS state, "
13377 				"pp=%p", (void *)pp);
13378 		}
13379 		kp->kp_refcnt--;
13380 		kp->kp_refcnts++;
13381 		pmtx = sfmmu_page_enter(pp);
13382 		PP_SETKPMS(pp);
13383 		sfmmu_page_exit(pmtx);
13384 		goto smallexit;
13385 
13386 	case KPM_TSBM_RPLS_RASM:		/* - - ks s */
13387 		/*
13388 		 * All conflicts in this kpm page are gone but this page
13389 		 * is mapped small. This could be the trigger case for a
13390 		 * small mapping reaper, if this is really needed.
13391 		 * For now we drop it in small again. Fall thru to the
13392 		 * KPM_TSBM_RPLS handling.
13393 		 */
13394 
13395 	case KPM_TSBM_RPLS:			/* kc - ks s */
13396 		/*
13397 		 * Large page mapping is already broken, this page is not
13398 		 * conflicting but already mapped small, so drop it in
13399 		 * small again.
13400 		 */
13401 		if (PP_ISNC(pp) ||
13402 		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
13403 			panic("sfmmu_kpm_fault:  inconsistent RPLS state, "
13404 				"pp=%p", (void *)pp);
13405 		}
13406 		goto smallexit;
13407 
13408 	case KPM_TSBM_MAPS_BRKO:		/* kc - - - */
13409 		/*
13410 		 * The kpm page where we live in is marked conflicting
13411 		 * but this page is not conflicting. So we have to map it
13412 		 * in small. Call sfmmu_kpm_vac_conflict to take care for
13413 		 * correcting the vcolor and flushing the dcache if required.
13414 		 */
13415 		mutex_exit(&kpmp->khl_mutex);
13416 		sfmmu_kpm_vac_conflict(pp, vaddr);
13417 		mutex_enter(&kpmp->khl_mutex);
13418 
13419 		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
13420 		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
13421 			panic("sfmmu_kpm_fault:  inconsistent MAPS_BRKO state, "
13422 				"pp=%p", (void *)pp);
13423 		}
13424 		kp->kp_refcnt--;
13425 		kp->kp_refcnts++;
13426 		pmtx = sfmmu_page_enter(pp);
13427 		PP_SETKPMS(pp);
13428 		sfmmu_page_exit(pmtx);
13429 		goto smallexit;
13430 
13431 	case KPM_TSBM_MAPS_BRKT:		/* kc c - - */
13432 	case KPM_TSBM_MAPS_CONFL:		/* kc c ks - */
13433 		if (!PP_ISMAPPED(pp)) {
13434 			/*
13435 			 * We got a tsbmiss on kpm large page range that is
13436 			 * marked to contain vac conflicting pages introduced
13437 			 * by hme mappings. The hme mappings are all gone and
13438 			 * must have bypassed the kpm alias prevention logic.
13439 			 */
13440 			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
13441 				(void *)pp);
13442 		}
13443 
13444 		/*
13445 		 * Check for vcolor conflicts. Return here w/ either no
13446 		 * conflict (fast path), removed hme mapping chains
13447 		 * (unload conflict) or uncached (uncache conflict).
13448 		 * Dcache is cleaned and p_vcolor and P_TNC are set
13449 		 * accordingly. Drop kpmp for uncache conflict cases
13450 		 * since it will be grabbed within sfmmu_kpm_page_cache
13451 		 * in case of an uncache conflict.
13452 		 */
13453 		mutex_exit(&kpmp->khl_mutex);
13454 		sfmmu_kpm_vac_conflict(pp, vaddr);
13455 		mutex_enter(&kpmp->khl_mutex);
13456 
13457 		if (kp->kp_refcnt <= 0)
13458 			panic("sfmmu_kpm_fault: bad refcnt kp=%p", (void *)kp);
13459 
13460 		if (PP_ISNC(pp)) {
13461 			uncached = 1;
13462 		} else {
13463 			/*
13464 			 * When an unload conflict is solved and there are
13465 			 * no other small mappings around, we can resume
13466 			 * largepage mode. Otherwise we have to map or drop
13467 			 * in small. This could be a trigger for a small
13468 			 * mapping reaper when this was the last conflict
13469 			 * within the kpm page and when there are only
13470 			 * other small mappings around.
13471 			 */
13472 			ASSERT(addr_to_vcolor(vaddr) == PP_GET_VCOLOR(pp));
13473 			ASSERT(kp->kp_refcntc > 0);
13474 			kp->kp_refcntc--;
13475 			pmtx = sfmmu_page_enter(pp);
13476 			PP_CLRKPMC(pp);
13477 			sfmmu_page_exit(pmtx);
13478 			ASSERT(PP_ISKPMS(pp) == 0);
13479 			if (kp->kp_refcntc == 0 && kp->kp_refcnts == 0)
13480 				goto largeexit;
13481 		}
13482 
13483 		kp->kp_refcnt--;
13484 		kp->kp_refcnts++;
13485 		pmtx = sfmmu_page_enter(pp);
13486 		PP_SETKPMS(pp);
13487 		sfmmu_page_exit(pmtx);
13488 		goto smallexit;
13489 
13490 	case KPM_TSBM_RPLS_CONFL:		/* kc c ks s */
13491 		if (!PP_ISMAPPED(pp)) {
13492 			/*
13493 			 * We got a tsbmiss on kpm large page range that is
13494 			 * marked to contain vac conflicting pages introduced
13495 			 * by hme mappings. They are all gone and must have
13496 			 * somehow bypassed the kpm alias prevention logic.
13497 			 */
13498 			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
13499 				(void *)pp);
13500 		}
13501 
13502 		/*
13503 		 * This state is only possible for an uncached mapping.
13504 		 */
13505 		if (!PP_ISNC(pp)) {
13506 			panic("sfmmu_kpm_fault: page not uncached, pp=%p",
13507 				(void *)pp);
13508 		}
13509 		uncached = 1;
13510 		goto smallexit;
13511 
13512 	default:
13513 badstate_exit:
13514 		panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p "
13515 			"pp=%p", (void *)vaddr, (void *)kp, (void *)pp);
13516 	}
13517 
13518 smallexit:
13519 	/* tte assembly */
13520 	if (uncached == 0)
13521 		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
13522 	else
13523 		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
13524 
13525 	/* tsb dropin */
13526 	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13527 
13528 	error = 0;
13529 	goto exit;
13530 
13531 largeexit:
13532 	if (kp->kp_refcnt > 0) {
13533 
13534 		/* tte assembly */
13535 		KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);
13536 
13537 		/* tsb dropin */
13538 		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);
13539 
13540 		if (kp->kp_refcntc == 0) {
13541 			/* Set "go" flag for TL tsbmiss handler */
13542 			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
13543 					KPMTSBM_START);
13544 		}
13545 		ASSERT(kp->kp_refcntc == -1);
13546 		error = 0;
13547 
13548 	} else
13549 		error = EFAULT;
13550 exit:
13551 	mutex_exit(&kpmp->khl_mutex);
13552 	sfmmu_mlist_exit(pml);
13553 	return (error);
13554 }
13555 
13556 /*
13557  * kpm fault handler for mappings with small page size.
13558  */
13559 int
13560 sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp)
13561 {
13562 	int		error = 0;
13563 	pgcnt_t		inx;
13564 	kpm_spage_t	*ksp;
13565 	kpm_shlk_t	*kpmsp;
13566 	kmutex_t	*pml;
13567 	pfn_t		pfn = pp->p_pagenum;
13568 	tte_t		tte;
13569 	kmutex_t	*pmtx;
13570 	int		oldval;
13571 
13572 	inx = pfn - mseg->kpm_pbase;
13573 	ksp = &mseg->kpm_spages[inx];
13574 	kpmsp = KPMP_SHASH(ksp);
13575 
13576 	pml = sfmmu_mlist_enter(pp);
13577 
13578 	if (!PP_ISMAPPED_KPM(pp)) {
13579 		sfmmu_mlist_exit(pml);
13580 		return (EFAULT);
13581 	}
13582 
13583 	/*
13584 	 * kp_mapped lookup protected by mlist mutex
13585 	 */
13586 	if (ksp->kp_mapped == KPM_MAPPEDS) {
13587 		/*
13588 		 * Fast path tsbmiss
13589 		 */
13590 		ASSERT(!PP_ISKPMC(pp));
13591 		ASSERT(!PP_ISNC(pp));
13592 
13593 		/* tte assembly */
13594 		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
13595 
13596 		/* tsb dropin */
13597 		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13598 
13599 	} else if (ksp->kp_mapped == KPM_MAPPEDSC) {
13600 		/*
13601 		 * Got here due to existing or gone kpm/hme VAC conflict.
13602 		 * Recheck for vcolor conflicts. Return here w/ either
13603 		 * no conflict, removed hme mapping chain (unload
13604 		 * conflict) or uncached (uncache conflict). VACaches
13605 		 * are cleaned and p_vcolor and PP_TNC are set accordingly
13606 		 * for the conflict cases.
13607 		 */
13608 		sfmmu_kpm_vac_conflict(pp, vaddr);
13609 
13610 		if (PP_ISNC(pp)) {
13611 			/* ASSERT(pp->p_share); XXX use hat_page_getshare */
13612 
13613 			/* tte assembly */
13614 			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
13615 
13616 			/* tsb dropin */
13617 			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13618 
13619 		} else {
13620 			if (PP_ISKPMC(pp)) {
13621 				pmtx = sfmmu_page_enter(pp);
13622 				PP_CLRKPMC(pp);
13623 				sfmmu_page_exit(pmtx);
13624 			}
13625 
13626 			/* tte assembly */
13627 			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
13628 
13629 			/* tsb dropin */
13630 			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
13631 
13632 			oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
13633 					&kpmsp->kshl_lock, KPM_MAPPEDS);
13634 
13635 			if (oldval != KPM_MAPPEDSC)
13636 				panic("sfmmu_kpm_fault_small: "
13637 					"stale smallpages mapping");
13638 		}
13639 
13640 	} else {
13641 		/*
13642 		 * We got a tsbmiss on a not active kpm_page range.
13643 		 * Let decide segkpm_fault how to panic.
13644 		 */
13645 		error = EFAULT;
13646 	}
13647 
13648 	sfmmu_mlist_exit(pml);
13649 	return (error);
13650 }
13651 
13652 /*
13653  * Check/handle potential hme/kpm mapping conflicts
13654  */
13655 static void
13656 sfmmu_kpm_vac_conflict(page_t *pp, caddr_t vaddr)
13657 {
13658 	int		vcolor;
13659 	struct sf_hment	*sfhmep;
13660 	struct hat	*tmphat;
13661 	struct sf_hment	*tmphme = NULL;
13662 	struct hme_blk	*hmeblkp;
13663 	tte_t		tte;
13664 
13665 	ASSERT(sfmmu_mlist_held(pp));
13666 
13667 	if (PP_ISNC(pp))
13668 		return;
13669 
13670 	vcolor = addr_to_vcolor(vaddr);
13671 	if (PP_GET_VCOLOR(pp) == vcolor)
13672 		return;
13673 
13674 	/*
13675 	 * There could be no vcolor conflict between a large cached
13676 	 * hme page and a non alias range kpm page (neither large nor
13677 	 * small mapped). So if a hme conflict already exists between
13678 	 * a constituent page of a large hme mapping and a shared small
13679 	 * conflicting hme mapping, both mappings must be already
13680 	 * uncached at this point.
13681 	 */
13682 	ASSERT(!PP_ISMAPPED_LARGE(pp));
13683 
13684 	if (!PP_ISMAPPED(pp)) {
13685 		/*
13686 		 * Previous hme user of page had a different color
13687 		 * but since there are no current users
13688 		 * we just flush the cache and change the color.
13689 		 */
13690 		SFMMU_STAT(sf_pgcolor_conflict);
13691 		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
13692 		PP_SET_VCOLOR(pp, vcolor);
13693 		return;
13694 	}
13695 
13696 	/*
13697 	 * If we get here we have a vac conflict with a current hme
13698 	 * mapping. This must have been established by forcing a wrong
13699 	 * colored mapping, e.g. by using mmap(2) with MAP_FIXED.
13700 	 */
13701 
13702 	/*
13703 	 * Check if any mapping is in same as or if it is locked
13704 	 * since in that case we need to uncache.
13705 	 */
13706 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
13707 		tmphme = sfhmep->hme_next;
13708 		hmeblkp = sfmmu_hmetohblk(sfhmep);
13709 		if (hmeblkp->hblk_xhat_bit)
13710 			continue;
13711 		tmphat = hblktosfmmu(hmeblkp);
13712 		sfmmu_copytte(&sfhmep->hme_tte, &tte);
13713 		ASSERT(TTE_IS_VALID(&tte));
13714 		if ((tmphat == ksfmmup) || hmeblkp->hblk_lckcnt) {
13715 			/*
13716 			 * We have an uncache conflict
13717 			 */
13718 			SFMMU_STAT(sf_uncache_conflict);
13719 			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
13720 			return;
13721 		}
13722 	}
13723 
13724 	/*
13725 	 * We have an unload conflict
13726 	 */
13727 	SFMMU_STAT(sf_unload_conflict);
13728 
13729 	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
13730 		tmphme = sfhmep->hme_next;
13731 		hmeblkp = sfmmu_hmetohblk(sfhmep);
13732 		if (hmeblkp->hblk_xhat_bit)
13733 			continue;
13734 		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
13735 	}
13736 
13737 	/*
13738 	 * Unloads only does tlb flushes so we need to flush the
13739 	 * dcache vcolor here.
13740 	 */
13741 	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
13742 	PP_SET_VCOLOR(pp, vcolor);
13743 }
13744 
13745 /*
13746  * Remove all kpm mappings using kpme's for pp and check that
13747  * all kpm mappings (w/ and w/o kpme's) are gone.
13748  */
13749 static void
13750 sfmmu_kpm_pageunload(page_t *pp)
13751 {
13752 	caddr_t		vaddr;
13753 	struct kpme	*kpme, *nkpme;
13754 
13755 	ASSERT(pp != NULL);
13756 	ASSERT(pp->p_kpmref);
13757 	ASSERT(sfmmu_mlist_held(pp));
13758 
13759 	vaddr = hat_kpm_page2va(pp, 1);
13760 
13761 	for (kpme = pp->p_kpmelist; kpme; kpme = nkpme) {
13762 		ASSERT(kpme->kpe_page == pp);
13763 
13764 		if (pp->p_kpmref == 0)
13765 			panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p "
13766 				"kpme=%p", (void *)pp, (void *)kpme);
13767 
13768 		nkpme = kpme->kpe_next;
13769 
13770 		/* Add instance callback here here if needed later */
13771 		sfmmu_kpme_sub(kpme, pp);
13772 	}
13773 
13774 	/*
13775 	 * Also correct after mixed kpme/nonkpme mappings. If nonkpme
13776 	 * segkpm clients have unlocked the page and forgot to mapout
13777 	 * we panic here.
13778 	 */
13779 	if (pp->p_kpmref != 0)
13780 		panic("sfmmu_kpm_pageunload: bad refcnt pp=%p", (void *)pp);
13781 
13782 	sfmmu_kpm_mapout(pp, vaddr);
13783 }
13784 
13785 /*
13786  * Remove a large kpm mapping from kernel TSB and all TLB's.
13787  */
13788 static void
13789 sfmmu_kpm_demap_large(caddr_t vaddr)
13790 {
13791 	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
13792 	sfmmu_kpm_demap_tlbs(vaddr);
13793 }
13794 
13795 /*
13796  * Remove a small kpm mapping from kernel TSB and all TLB's.
13797  */
13798 static void
13799 sfmmu_kpm_demap_small(caddr_t vaddr)
13800 {
13801 	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
13802 	sfmmu_kpm_demap_tlbs(vaddr);
13803 }
13804 
13805 /*
13806  * Demap a kpm mapping in all TLB's.
13807  */
13808 static void
13809 sfmmu_kpm_demap_tlbs(caddr_t vaddr)
13810 {
13811 	cpuset_t cpuset;
13812 
13813 	kpreempt_disable();
13814 	cpuset = ksfmmup->sfmmu_cpusran;
13815 	CPUSET_AND(cpuset, cpu_ready_set);
13816 	CPUSET_DEL(cpuset, CPU->cpu_id);
13817 	SFMMU_XCALL_STATS(ksfmmup);
13818 
13819 	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr,
13820 	    (uint64_t)ksfmmup);
13821 	vtag_flushpage(vaddr, (uint64_t)ksfmmup);
13822 
13823 	kpreempt_enable();
13824 }
13825 
13826 /*
13827  * Summary states used in sfmmu_kpm_vac_unload (KPM_VUL__*).
13828  * See also more detailed comments within in the sfmmu_kpm_vac_unload switch.
13829  * Abbreviations used:
13830  * BIG:   Large page kpm mapping in use.
13831  * CONFL: VAC conflict(s) within a kpm_page.
13832  * INCR:  Count of conflicts within a kpm_page is going to be incremented.
13833  * DECR:  Count of conflicts within a kpm_page is going to be decremented.
13834  * UNMAP_SMALL: A small (regular page size) mapping is going to be unmapped.
13835  * TNC:   Temporary non cached: a kpm mapped page is mapped in TNC state.
13836  */
13837 #define	KPM_VUL_BIG		(0)
13838 #define	KPM_VUL_CONFL_INCR1	(KPM_KS)
13839 #define	KPM_VUL_UNMAP_SMALL1	(KPM_KS | KPM_S)
13840 #define	KPM_VUL_CONFL_INCR2	(KPM_KC)
13841 #define	KPM_VUL_CONFL_INCR3	(KPM_KC | KPM_KS)
13842 #define	KPM_VUL_UNMAP_SMALL2	(KPM_KC | KPM_KS | KPM_S)
13843 #define	KPM_VUL_CONFL_DECR1	(KPM_KC | KPM_C)
13844 #define	KPM_VUL_CONFL_DECR2	(KPM_KC | KPM_C | KPM_KS)
13845 #define	KPM_VUL_TNC		(KPM_KC | KPM_C | KPM_KS | KPM_S)
13846 
13847 /*
13848  * Handle VAC unload conflicts introduced by hme mappings or vice
13849  * versa when a hme conflict mapping is replaced by a non conflict
13850  * one. Perform actions and state transitions according to the
13851  * various page and kpm_page entry states. VACache flushes are in
13852  * the responsibiliy of the caller. We still hold the mlist lock.
13853  */
13854 static void
13855 sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
13856 {
13857 	kpm_page_t	*kp;
13858 	kpm_hlk_t	*kpmp;
13859 	caddr_t		kpmvaddr = hat_kpm_page2va(pp, 1);
13860 	int		newcolor;
13861 	kmutex_t	*pmtx;
13862 	uint_t		vacunlcase;
13863 	int		badstate = 0;
13864 	kpm_spage_t	*ksp;
13865 	kpm_shlk_t	*kpmsp;
13866 
13867 	ASSERT(PAGE_LOCKED(pp));
13868 	ASSERT(sfmmu_mlist_held(pp));
13869 	ASSERT(!PP_ISNC(pp));
13870 
13871 	newcolor = addr_to_vcolor(kpmvaddr) != addr_to_vcolor(vaddr);
13872 	if (kpm_smallpages)
13873 		goto smallpages_vac_unload;
13874 
13875 	PP2KPMPG(pp, kp);
13876 	kpmp = KPMP_HASH(kp);
13877 	mutex_enter(&kpmp->khl_mutex);
13878 
13879 	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
13880 		if (kp->kp_refcnta < 1) {
13881 			panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n",
13882 				(void *)kp);
13883 		}
13884 
13885 		if (PP_ISKPMC(pp) == 0) {
13886 			if (newcolor == 0)
13887 				goto exit;
13888 			sfmmu_kpm_demap_small(kpmvaddr);
13889 			pmtx = sfmmu_page_enter(pp);
13890 			PP_SETKPMC(pp);
13891 			sfmmu_page_exit(pmtx);
13892 
13893 		} else if (newcolor == 0) {
13894 			pmtx = sfmmu_page_enter(pp);
13895 			PP_CLRKPMC(pp);
13896 			sfmmu_page_exit(pmtx);
13897 
13898 		} else {
13899 			badstate++;
13900 		}
13901 
13902 		goto exit;
13903 	}
13904 
13905 	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
13906 	if (kp->kp_refcntc == -1) {
13907 		/*
13908 		 * We should come here only if trap level tsb miss
13909 		 * handler is disabled.
13910 		 */
13911 		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
13912 			PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
13913 	} else {
13914 		badstate |= (kp->kp_refcntc < 0);
13915 	}
13916 
13917 	if (badstate)
13918 		goto exit;
13919 
13920 	if (PP_ISKPMC(pp) == 0 && newcolor == 0) {
13921 		ASSERT(PP_ISKPMS(pp) == 0);
13922 		goto exit;
13923 	}
13924 
13925 	/*
13926 	 * Combine the per kpm_page and per page kpm VAC states
13927 	 * to a summary state in order to make the vac unload
13928 	 * handling more concise.
13929 	 */
13930 	vacunlcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
13931 			((kp->kp_refcnts > 0) ? KPM_KS : 0) |
13932 			(PP_ISKPMC(pp) ? KPM_C : 0) |
13933 			(PP_ISKPMS(pp) ? KPM_S : 0));
13934 
13935 	switch (vacunlcase) {
13936 	case KPM_VUL_BIG:				/* - - - - */
13937 		/*
13938 		 * Have to breakup the large page mapping to be
13939 		 * able to handle the conflicting hme vaddr.
13940 		 */
13941 		if (kp->kp_refcntc == -1) {
13942 			/* remove go indication */
13943 			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
13944 					&kpmp->khl_lock, KPMTSBM_STOP);
13945 		}
13946 		sfmmu_kpm_demap_large(kpmvaddr);
13947 
13948 		ASSERT(kp->kp_refcntc == 0);
13949 		kp->kp_refcntc++;
13950 		pmtx = sfmmu_page_enter(pp);
13951 		PP_SETKPMC(pp);
13952 		sfmmu_page_exit(pmtx);
13953 		break;
13954 
13955 	case KPM_VUL_UNMAP_SMALL1:			/* -  - ks s */
13956 	case KPM_VUL_UNMAP_SMALL2:			/* kc - ks s */
13957 		/*
13958 		 * New conflict w/ an active kpm page, actually mapped
13959 		 * in by small TSB/TLB entries. Remove the mapping and
13960 		 * update states.
13961 		 */
13962 		ASSERT(newcolor);
13963 		sfmmu_kpm_demap_small(kpmvaddr);
13964 		kp->kp_refcnts--;
13965 		kp->kp_refcnt++;
13966 		kp->kp_refcntc++;
13967 		pmtx = sfmmu_page_enter(pp);
13968 		PP_CLRKPMS(pp);
13969 		PP_SETKPMC(pp);
13970 		sfmmu_page_exit(pmtx);
13971 		break;
13972 
13973 	case KPM_VUL_CONFL_INCR1:			/* -  - ks - */
13974 	case KPM_VUL_CONFL_INCR2:			/* kc - -  - */
13975 	case KPM_VUL_CONFL_INCR3:			/* kc - ks - */
13976 		/*
13977 		 * New conflict on a active kpm mapped page not yet in
13978 		 * TSB/TLB. Mark page and increment the kpm_page conflict
13979 		 * count.
13980 		 */
13981 		ASSERT(newcolor);
13982 		kp->kp_refcntc++;
13983 		pmtx = sfmmu_page_enter(pp);
13984 		PP_SETKPMC(pp);
13985 		sfmmu_page_exit(pmtx);
13986 		break;
13987 
13988 	case KPM_VUL_CONFL_DECR1:			/* kc c -  - */
13989 	case KPM_VUL_CONFL_DECR2:			/* kc c ks - */
13990 		/*
13991 		 * A conflicting hme mapping is removed for an active
13992 		 * kpm page not yet in TSB/TLB. Unmark page and decrement
13993 		 * the kpm_page conflict count.
13994 		 */
13995 		ASSERT(newcolor == 0);
13996 		kp->kp_refcntc--;
13997 		pmtx = sfmmu_page_enter(pp);
13998 		PP_CLRKPMC(pp);
13999 		sfmmu_page_exit(pmtx);
14000 		break;
14001 
14002 	case KPM_VUL_TNC:				/* kc c ks s */
14003 		cmn_err(CE_NOTE, "sfmmu_kpm_vac_unload: "
14004 			"page not in NC state");
14005 		/* FALLTHRU */
14006 
14007 	default:
14008 		badstate++;
14009 	}
14010 exit:
14011 	if (badstate) {
14012 		panic("sfmmu_kpm_vac_unload: inconsistent VAC state, "
14013 			"kpmvaddr=%p kp=%p pp=%p",
14014 			(void *)kpmvaddr, (void *)kp, (void *)pp);
14015 	}
14016 	mutex_exit(&kpmp->khl_mutex);
14017 
14018 	return;
14019 
14020 smallpages_vac_unload:
14021 	if (newcolor == 0)
14022 		return;
14023 
14024 	PP2KPMSPG(pp, ksp);
14025 	kpmsp = KPMP_SHASH(ksp);
14026 
14027 	if (PP_ISKPMC(pp) == 0) {
14028 		if (ksp->kp_mapped == KPM_MAPPEDS) {
14029 			/*
14030 			 * Stop TL tsbmiss handling
14031 			 */
14032 			(void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
14033 					&kpmsp->kshl_lock, KPM_MAPPEDSC);
14034 
14035 			sfmmu_kpm_demap_small(kpmvaddr);
14036 
14037 		} else if (ksp->kp_mapped != KPM_MAPPEDSC) {
14038 			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
14039 		}
14040 
14041 		pmtx = sfmmu_page_enter(pp);
14042 		PP_SETKPMC(pp);
14043 		sfmmu_page_exit(pmtx);
14044 
14045 	} else {
14046 		if (ksp->kp_mapped != KPM_MAPPEDSC)
14047 			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
14048 	}
14049 }
14050 
14051 /*
14052  * Page is marked to be in VAC conflict to an existing kpm mapping
14053  * or is kpm mapped using only the regular pagesize. Called from
14054  * sfmmu_hblk_unload when a mlist is completely removed.
14055  */
14056 static void
14057 sfmmu_kpm_hme_unload(page_t *pp)
14058 {
14059 	/* tte assembly */
14060 	kpm_page_t	*kp;
14061 	kpm_hlk_t	*kpmp;
14062 	caddr_t		vaddr;
14063 	kmutex_t	*pmtx;
14064 	uint_t		flags;
14065 	kpm_spage_t	*ksp;
14066 
14067 	ASSERT(sfmmu_mlist_held(pp));
14068 	ASSERT(PP_ISMAPPED_KPM(pp));
14069 
14070 	flags = pp->p_nrm & (P_KPMC | P_KPMS);
14071 	if (kpm_smallpages)
14072 		goto smallpages_hme_unload;
14073 
14074 	if (flags == (P_KPMC | P_KPMS)) {
14075 		panic("sfmmu_kpm_hme_unload: page should be uncached");
14076 
14077 	} else if (flags == P_KPMS) {
14078 		/*
14079 		 * Page mapped small but not involved in VAC conflict
14080 		 */
14081 		return;
14082 	}
14083 
14084 	vaddr = hat_kpm_page2va(pp, 1);
14085 
14086 	PP2KPMPG(pp, kp);
14087 	kpmp = KPMP_HASH(kp);
14088 	mutex_enter(&kpmp->khl_mutex);
14089 
14090 	if (IS_KPM_ALIAS_RANGE(vaddr)) {
14091 		if (kp->kp_refcnta < 1) {
14092 			panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n",
14093 				(void *)kp);
14094 		}
14095 
14096 	} else {
14097 		if (kp->kp_refcntc < 1) {
14098 			panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n",
14099 				(void *)kp);
14100 		}
14101 		kp->kp_refcntc--;
14102 	}
14103 
14104 	pmtx = sfmmu_page_enter(pp);
14105 	PP_CLRKPMC(pp);
14106 	sfmmu_page_exit(pmtx);
14107 
14108 	mutex_exit(&kpmp->khl_mutex);
14109 	return;
14110 
14111 smallpages_hme_unload:
14112 	if (flags != P_KPMC)
14113 		panic("sfmmu_kpm_hme_unload: page should be uncached");
14114 
14115 	vaddr = hat_kpm_page2va(pp, 1);
14116 	PP2KPMSPG(pp, ksp);
14117 
14118 	if (ksp->kp_mapped != KPM_MAPPEDSC)
14119 		panic("sfmmu_kpm_hme_unload: inconsistent mapping");
14120 
14121 	/*
14122 	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
14123 	 * prevents TL tsbmiss handling and force a hat_kpm_fault.
14124 	 * There we can start over again.
14125 	 */
14126 
14127 	pmtx = sfmmu_page_enter(pp);
14128 	PP_CLRKPMC(pp);
14129 	sfmmu_page_exit(pmtx);
14130 }
14131 
14132 /*
14133  * Special hooks for sfmmu_page_cache_array() when changing the
14134  * cacheability of a page. It is used to obey the hat_kpm lock
14135  * ordering (mlist -> kpmp -> spl, and back).
14136  */
14137 static kpm_hlk_t *
14138 sfmmu_kpm_kpmp_enter(page_t *pp, pgcnt_t npages)
14139 {
14140 	kpm_page_t	*kp;
14141 	kpm_hlk_t	*kpmp;
14142 
14143 	ASSERT(sfmmu_mlist_held(pp));
14144 
14145 	if (kpm_smallpages || PP_ISMAPPED_KPM(pp) == 0)
14146 		return (NULL);
14147 
14148 	ASSERT(npages <= kpmpnpgs);
14149 
14150 	PP2KPMPG(pp, kp);
14151 	kpmp = KPMP_HASH(kp);
14152 	mutex_enter(&kpmp->khl_mutex);
14153 
14154 	return (kpmp);
14155 }
14156 
14157 static void
14158 sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp)
14159 {
14160 	if (kpm_smallpages || kpmp == NULL)
14161 		return;
14162 
14163 	mutex_exit(&kpmp->khl_mutex);
14164 }
14165 
14166 /*
14167  * Summary states used in sfmmu_kpm_page_cache (KPM_*).
14168  * See also more detailed comments within in the sfmmu_kpm_page_cache switch.
14169  * Abbreviations used:
14170  * UNC:     Input state for an uncache request.
14171  *   BIG:     Large page kpm mapping in use.
14172  *   SMALL:   Page has a small kpm mapping within a kpm_page range.
14173  *   NODEMAP: No demap needed.
14174  *   NOP:     No operation needed on this input state.
14175  * CACHE:   Input state for a re-cache request.
14176  *   MAPS:    Page is in TNC and kpm VAC conflict state and kpm mapped small.
14177  *   NOMAP:   Page is in TNC and kpm VAC conflict state, but not small kpm
14178  *            mapped.
14179  *   NOMAPO:  Page is in TNC and kpm VAC conflict state, but not small kpm
14180  *            mapped. There are also other small kpm mappings within this
14181  *            kpm_page.
14182  */
14183 #define	KPM_UNC_BIG		(0)
14184 #define	KPM_UNC_NODEMAP1	(KPM_KS)
14185 #define	KPM_UNC_SMALL1		(KPM_KS | KPM_S)
14186 #define	KPM_UNC_NODEMAP2	(KPM_KC)
14187 #define	KPM_UNC_NODEMAP3	(KPM_KC | KPM_KS)
14188 #define	KPM_UNC_SMALL2		(KPM_KC | KPM_KS | KPM_S)
14189 #define	KPM_UNC_NOP1		(KPM_KC | KPM_C)
14190 #define	KPM_UNC_NOP2		(KPM_KC | KPM_C | KPM_KS)
14191 #define	KPM_CACHE_NOMAP		(KPM_KC | KPM_C)
14192 #define	KPM_CACHE_NOMAPO	(KPM_KC | KPM_C | KPM_KS)
14193 #define	KPM_CACHE_MAPS		(KPM_KC | KPM_C | KPM_KS | KPM_S)
14194 
14195 /*
14196  * This function is called when the virtual cacheability of a page
14197  * is changed and the page has an actice kpm mapping. The mlist mutex,
14198  * the spl hash lock and the kpmp mutex (if needed) are already grabbed.
14199  */
14200 static void
14201 sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
14202 {
14203 	kpm_page_t	*kp;
14204 	kpm_hlk_t	*kpmp;
14205 	caddr_t		kpmvaddr;
14206 	int		badstate = 0;
14207 	uint_t		pgcacase;
14208 	kpm_spage_t	*ksp;
14209 	kpm_shlk_t	*kpmsp;
14210 	int		oldval;
14211 
14212 	ASSERT(PP_ISMAPPED_KPM(pp));
14213 	ASSERT(sfmmu_mlist_held(pp));
14214 	ASSERT(sfmmu_page_spl_held(pp));
14215 
14216 	if (flags != HAT_TMPNC && flags != HAT_CACHE)
14217 		panic("sfmmu_kpm_page_cache: bad flags");
14218 
14219 	kpmvaddr = hat_kpm_page2va(pp, 1);
14220 
14221 	if (flags == HAT_TMPNC && cache_flush_tag == CACHE_FLUSH) {
14222 		pfn_t pfn = pp->p_pagenum;
14223 		int vcolor = addr_to_vcolor(kpmvaddr);
14224 		cpuset_t cpuset = cpu_ready_set;
14225 
14226 		/* Flush vcolor in DCache */
14227 		CPUSET_DEL(cpuset, CPU->cpu_id);
14228 		SFMMU_XCALL_STATS(ksfmmup);
14229 		xt_some(cpuset, vac_flushpage_tl1, pfn, vcolor);
14230 		vac_flushpage(pfn, vcolor);
14231 	}
14232 
14233 	if (kpm_smallpages)
14234 		goto smallpages_page_cache;
14235 
14236 	PP2KPMPG(pp, kp);
14237 	kpmp = KPMP_HASH(kp);
14238 	ASSERT(MUTEX_HELD(&kpmp->khl_mutex));
14239 
14240 	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
14241 		if (kp->kp_refcnta < 1) {
14242 			panic("sfmmu_kpm_page_cache: bad refcnta "
14243 				"kpm_page=%p\n", (void *)kp);
14244 		}
14245 		sfmmu_kpm_demap_small(kpmvaddr);
14246 		if (flags == HAT_TMPNC) {
14247 			PP_SETKPMC(pp);
14248 			ASSERT(!PP_ISKPMS(pp));
14249 		} else {
14250 			ASSERT(PP_ISKPMC(pp));
14251 			PP_CLRKPMC(pp);
14252 		}
14253 		goto exit;
14254 	}
14255 
14256 	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
14257 	if (kp->kp_refcntc == -1) {
14258 		/*
14259 		 * We should come here only if trap level tsb miss
14260 		 * handler is disabled.
14261 		 */
14262 		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
14263 			PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
14264 	} else {
14265 		badstate |= (kp->kp_refcntc < 0);
14266 	}
14267 
14268 	if (badstate)
14269 		goto exit;
14270 
14271 	/*
14272 	 * Combine the per kpm_page and per page kpm VAC states to
14273 	 * a summary state in order to make the VAC cache/uncache
14274 	 * handling more concise.
14275 	 */
14276 	pgcacase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
14277 			((kp->kp_refcnts > 0) ? KPM_KS : 0) |
14278 			(PP_ISKPMC(pp) ? KPM_C : 0) |
14279 			(PP_ISKPMS(pp) ? KPM_S : 0));
14280 
14281 	if (flags == HAT_CACHE) {
14282 		switch (pgcacase) {
14283 		case KPM_CACHE_MAPS:			/* kc c ks s */
14284 			sfmmu_kpm_demap_small(kpmvaddr);
14285 			if (kp->kp_refcnts < 1) {
14286 				panic("sfmmu_kpm_page_cache: bad refcnts "
14287 				"kpm_page=%p\n", (void *)kp);
14288 			}
14289 			kp->kp_refcnts--;
14290 			kp->kp_refcnt++;
14291 			PP_CLRKPMS(pp);
14292 			/* FALLTHRU */
14293 
14294 		case KPM_CACHE_NOMAP:			/* kc c -  - */
14295 		case KPM_CACHE_NOMAPO:			/* kc c ks - */
14296 			kp->kp_refcntc--;
14297 			PP_CLRKPMC(pp);
14298 			break;
14299 
14300 		default:
14301 			badstate++;
14302 		}
14303 		goto exit;
14304 	}
14305 
14306 	switch (pgcacase) {
14307 	case KPM_UNC_BIG:				/* - - - - */
14308 		if (kp->kp_refcnt < 1) {
14309 			panic("sfmmu_kpm_page_cache: bad refcnt "
14310 				"kpm_page=%p\n", (void *)kp);
14311 		}
14312 
14313 		/*
14314 		 * Have to breakup the large page mapping in preparation
14315 		 * to the upcoming TNC mode handled by small mappings.
14316 		 * The demap can already be done due to another conflict
14317 		 * within the kpm_page.
14318 		 */
14319 		if (kp->kp_refcntc == -1) {
14320 			/* remove go indication */
14321 			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
14322 				&kpmp->khl_lock, KPMTSBM_STOP);
14323 		}
14324 		ASSERT(kp->kp_refcntc == 0);
14325 		sfmmu_kpm_demap_large(kpmvaddr);
14326 		kp->kp_refcntc++;
14327 		PP_SETKPMC(pp);
14328 		break;
14329 
14330 	case KPM_UNC_SMALL1:				/* -  - ks s */
14331 	case KPM_UNC_SMALL2:				/* kc - ks s */
14332 		/*
14333 		 * Have to demap an already small kpm mapping in preparation
14334 		 * to the upcoming TNC mode. The demap can already be done
14335 		 * due to another conflict within the kpm_page.
14336 		 */
14337 		sfmmu_kpm_demap_small(kpmvaddr);
14338 		kp->kp_refcntc++;
14339 		kp->kp_refcnts--;
14340 		kp->kp_refcnt++;
14341 		PP_CLRKPMS(pp);
14342 		PP_SETKPMC(pp);
14343 		break;
14344 
14345 	case KPM_UNC_NODEMAP1:				/* -  - ks - */
14346 		/* fallthru */
14347 
14348 	case KPM_UNC_NODEMAP2:				/* kc - -  - */
14349 	case KPM_UNC_NODEMAP3:				/* kc - ks - */
14350 		kp->kp_refcntc++;
14351 		PP_SETKPMC(pp);
14352 		break;
14353 
14354 	case KPM_UNC_NOP1:				/* kc c -  - */
14355 	case KPM_UNC_NOP2:				/* kc c ks - */
14356 		break;
14357 
14358 	default:
14359 		badstate++;
14360 	}
14361 exit:
14362 	if (badstate) {
14363 		panic("sfmmu_kpm_page_cache: inconsistent VAC state "
14364 			"kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
14365 			(void *)kp, (void *)pp);
14366 	}
14367 	return;
14368 
14369 smallpages_page_cache:
14370 	PP2KPMSPG(pp, ksp);
14371 	kpmsp = KPMP_SHASH(ksp);
14372 
14373 	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
14374 				&kpmsp->kshl_lock, KPM_MAPPEDSC);
14375 
14376 	if (!(oldval == KPM_MAPPEDS || oldval == KPM_MAPPEDSC))
14377 		panic("smallpages_page_cache: inconsistent mapping");
14378 
14379 	sfmmu_kpm_demap_small(kpmvaddr);
14380 
14381 	if (flags == HAT_TMPNC) {
14382 		PP_SETKPMC(pp);
14383 		ASSERT(!PP_ISKPMS(pp));
14384 
14385 	} else {
14386 		ASSERT(PP_ISKPMC(pp));
14387 		PP_CLRKPMC(pp);
14388 	}
14389 
14390 	/*
14391 	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
14392 	 * prevents TL tsbmiss handling and force a hat_kpm_fault.
14393 	 * There we can start over again.
14394 	 */
14395 }
14396 
14397 /*
14398  * unused in sfmmu
14399  */
14400 void
14401 hat_dump(void)
14402 {
14403 }
14404 
14405 /*
14406  * Called when a thread is exiting and we have switched to the kernel address
14407  * space.  Perform the same VM initialization resume() uses when switching
14408  * processes.
14409  *
14410  * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
14411  * we call it anyway in case the semantics change in the future.
14412  */
14413 /*ARGSUSED*/
14414 void
14415 hat_thread_exit(kthread_t *thd)
14416 {
14417 	uint64_t pgsz_cnum;
14418 	uint_t pstate_save;
14419 
14420 	ASSERT(thd->t_procp->p_as == &kas);
14421 
14422 	pgsz_cnum = KCONTEXT;
14423 #ifdef sun4u
14424 	pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
14425 #endif
14426 	/*
14427 	 * Note that sfmmu_load_mmustate() is currently a no-op for
14428 	 * kernel threads. We need to disable interrupts here,
14429 	 * simply because otherwise sfmmu_load_mmustate() would panic
14430 	 * if the caller does not disable interrupts.
14431 	 */
14432 	pstate_save = sfmmu_disable_intrs();
14433 	sfmmu_setctx_sec(pgsz_cnum);
14434 	sfmmu_load_mmustate(ksfmmup);
14435 	sfmmu_enable_intrs(pstate_save);
14436 }
14437