xref: /titanic_50/usr/src/uts/i86pc/vm/hat_i86.c (revision 41efec2219526a9b3ecce26f97aba761ef1e1d0d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * VM - Hardware Address Translation management for i386 and amd64
30  *
31  * Implementation of the interfaces described in <common/vm/hat.h>
32  *
33  * Nearly all the details of how the hardware is managed should not be
34  * visible outside this layer except for misc. machine specific functions
35  * that work in conjunction with this code.
36  *
37  * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
38  */
39 
40 #include <sys/machparam.h>
41 #include <sys/machsystm.h>
42 #include <sys/mman.h>
43 #include <sys/types.h>
44 #include <sys/systm.h>
45 #include <sys/cpuvar.h>
46 #include <sys/thread.h>
47 #include <sys/proc.h>
48 #include <sys/cpu.h>
49 #include <sys/kmem.h>
50 #include <sys/disp.h>
51 #include <sys/shm.h>
52 #include <sys/sysmacros.h>
53 #include <sys/machparam.h>
54 #include <sys/vmem.h>
55 #include <sys/vmsystm.h>
56 #include <sys/promif.h>
57 #include <sys/var.h>
58 #include <sys/x86_archext.h>
59 #include <sys/atomic.h>
60 #include <sys/bitmap.h>
61 #include <sys/controlregs.h>
62 #include <sys/bootconf.h>
63 #include <sys/bootsvcs.h>
64 #include <sys/bootinfo.h>
65 #include <sys/archsystm.h>
66 
67 #include <vm/seg_kmem.h>
68 #include <vm/hat_i86.h>
69 #include <vm/as.h>
70 #include <vm/seg.h>
71 #include <vm/page.h>
72 #include <vm/seg_kp.h>
73 #include <vm/seg_kpm.h>
74 #include <vm/vm_dep.h>
75 #include <vm/kboot_mmu.h>
76 #include <vm/seg_spt.h>
77 
78 #include <sys/cmn_err.h>
79 
80 /*
81  * Basic parameters for hat operation.
82  */
83 struct hat_mmu_info mmu;
84 
85 /*
86  * The page that is the kernel's top level pagetable.
87  *
88  * For 32 bit VLP support, the kernel hat will use the 1st 4 entries
89  * on this 4K page for its top level page table. The remaining groups of
90  * 4 entries are used for per processor copies of user VLP pagetables for
91  * running threads.  See hat_switch() and reload_pae32() for details.
92  *
93  * vlp_page[0] - 0th level==2 PTE for kernel HAT (will be zero)
94  * vlp_page[1] - 1st level==2 PTE for kernel HAT (will be zero)
95  * vlp_page[2] - 2nd level==2 PTE for kernel HAT (zero for small memory)
96  * vlp_page[3] - 3rd level==2 PTE for kernel
97  *
98  * vlp_page[4] - 0th level==2 PTE for user thread on cpu 0
99  * vlp_page[5] - 1st level==2 PTE for user thread on cpu 0
100  * vlp_page[6] - 2nd level==2 PTE for user thread on cpu 0
101  * vlp_page[7] - probably copy of kernel PTE
102  *
103  * vlp_page[8]  - 0th level==2 PTE for user thread on cpu 1
104  * vlp_page[9]  - 1st level==2 PTE for user thread on cpu 1
105  * vlp_page[10] - 2nd level==2 PTE for user thread on cpu 1
106  * vlp_page[11] - probably copy of kernel PTE
107  * ...
108  *
109  * when / where the kernel PTE's are (entry 2 or 3 or none) depends
110  * on kernelbase.
111  */
112 static x86pte_t *vlp_page;
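/*
 * Illustrative index math (see reload_pae32() and hat_switch() below):
 * the group of entries for cpu N starts at vlp_page[(N + 1) * VLP_NUM_PTES],
 * so with 4 PTEs per group cpu 0 owns entries 4-7 and cpu 1 owns entries
 * 8-11, matching the layout sketched above.
 */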
113 
114 /*
115  * forward declaration of internal utility routines
116  */
117 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
118 	x86pte_t new);
119 
120 /*
121  * The kernel address space exists in all HATs. To implement this the
122  * kernel reserves a fixed number of entries in every topmost level page
123  * table. The values are setup in hat_init() and then copied to every hat
124  * created by hat_alloc(). This means that kernelbase must be:
125  *
126  *	  4Meg aligned for 32 bit kernels
127  *	512Gig aligned for x86_64 64 bit kernel
128  *
129  * The PAE 32 bit hat is handled as a special case. Otherwise requiring 1Gig
130  * alignment would use too much VA for the kernel.
131  *
132  */
133 static uint_t	khat_start;	/* index of 1st entry in kernel's top ptable */
134 static uint_t	khat_entries;	/* number of entries in kernel's top ptable */
135 
136 #if defined(__i386)
137 
138 static htable_t	*khat_pae32_htable = NULL;
139 static uint_t	khat_pae32_start;
140 static uint_t	khat_pae32_entries;
141 
142 #endif
143 
144 uint_t use_boot_reserve = 1;	/* cleared after early boot process */
145 uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */
146 
147 /*
148  * A cpuset for all cpus. This is used for kernel address cross calls, since
149  * the kernel addresses apply to all cpus.
150  */
151 cpuset_t khat_cpuset;
152 
153 /*
154  * management stuff for hat structures
155  */
156 kmutex_t	hat_list_lock;
157 kcondvar_t	hat_list_cv;
158 kmem_cache_t	*hat_cache;
159 kmem_cache_t	*hat_hash_cache;
160 kmem_cache_t	*vlp_hash_cache;
161 
162 /*
163  * Simple statistics
164  */
165 struct hatstats hatstat;
166 
167 /*
168  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
169  */
170 extern void atomic_orb(uchar_t *addr, uchar_t val);
171 extern void atomic_andb(uchar_t *addr, uchar_t val);
172 
173 #define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
174 #define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
175 #define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
176 #define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)
177 
178 #define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
179 #define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
180 #define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
181 #define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)
182 
183 #define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
184 #define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
185 #define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
186 #define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
187 #define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
188 
189 /*
190  * kmem cache constructor for struct hat
191  */
192 /*ARGSUSED*/
193 static int
194 hati_constructor(void *buf, void *handle, int kmflags)
195 {
196 	hat_t	*hat = buf;
197 
198 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
199 	bzero(hat->hat_pages_mapped,
200 	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
201 	hat->hat_ism_pgcnt = 0;
202 	hat->hat_stats = 0;
203 	hat->hat_flags = 0;
204 	CPUSET_ZERO(hat->hat_cpus);
205 	hat->hat_htable = NULL;
206 	hat->hat_ht_hash = NULL;
207 	return (0);
208 }
209 
210 /*
211  * Allocate a hat structure for as. We also create the top level
212  * htable and initialize it to contain the kernel hat entries.
213  */
214 hat_t *
215 hat_alloc(struct as *as)
216 {
217 	hat_t		*hat;
218 	htable_t	*ht;	/* top level htable */
219 	uint_t		use_vlp;
220 
221 	/*
222 	 * Once we start creating user process HATs we can enable
223 	 * the htable_steal() code.
224 	 */
225 	if (can_steal_post_boot == 0)
226 		can_steal_post_boot = 1;
227 
228 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
229 	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
230 	hat->hat_as = as;
231 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
232 	ASSERT(hat->hat_flags == 0);
233 
234 	/*
235 	 * a 32 bit process uses a VLP style hat when using PAE
236 	 */
237 #if defined(__amd64)
238 	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
239 #elif defined(__i386)
240 	use_vlp = mmu.pae_hat;
241 #endif
242 	if (use_vlp) {
243 		hat->hat_flags = HAT_VLP;
244 		bzero(hat->hat_vlp_ptes, VLP_SIZE);
245 	}
246 
247 	/*
248 	 * Allocate the htable hash
249 	 */
250 	if ((hat->hat_flags & HAT_VLP)) {
251 		hat->hat_num_hash = mmu.vlp_hash_cnt;
252 		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
253 	} else {
254 		hat->hat_num_hash = mmu.hash_cnt;
255 		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
256 	}
257 	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
258 
259 	/*
260 	 * Initialize Kernel HAT entries at the top of the top level page
261 	 * table for the new hat.
262 	 *
263 	 * Note that we don't call htable_release() for the top level; that
264 	 * happens when the hat is destroyed in hat_free_end()
265 	 */
266 	hat->hat_htable = NULL;
267 	hat->hat_ht_cached = NULL;
268 	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
269 
270 	if (!(hat->hat_flags & HAT_VLP))
271 		x86pte_copy(kas.a_hat->hat_htable, ht, khat_start,
272 		    khat_entries);
273 #if defined(__i386)
274 	else if (khat_entries > 0)
275 		bcopy(vlp_page + khat_start, hat->hat_vlp_ptes + khat_start,
276 		    khat_entries * sizeof (x86pte_t));
277 #endif
278 	hat->hat_htable = ht;
279 
280 #if defined(__i386)
281 	/*
282 	 * PAE32 HAT alignment is less restrictive than the others to keep
283 	 * the kernel from using too much VA. Because of this we may need
284 	 * one layer further down when kernelbase isn't 1Gig aligned.
285 	 * See hat_free_end() for the htable_release() that goes with this
286 	 * htable_create()
287 	 */
288 	if (khat_pae32_htable != NULL) {
289 		ht = htable_create(hat, kernelbase,
290 		    khat_pae32_htable->ht_level, NULL);
291 		x86pte_copy(khat_pae32_htable, ht, khat_pae32_start,
292 		    khat_pae32_entries);
293 		ht->ht_valid_cnt = khat_pae32_entries;
294 	}
295 #endif
296 
297 	/*
298 	 * Put it at the start of the global list of all hats (used by stealing)
299 	 *
300 	 * kas.a_hat is not in the list but is instead used to find the
301 	 * first and last items in the list.
302 	 *
303 	 * - kas.a_hat->hat_next points to the start of the user hats.
304 	 *   The list ends where hat->hat_next == NULL
305 	 *
306 	 * - kas.a_hat->hat_prev points to the last of the user hats.
307 	 *   The list begins where hat->hat_prev == NULL
308 	 */
309 	mutex_enter(&hat_list_lock);
310 	hat->hat_prev = NULL;
311 	hat->hat_next = kas.a_hat->hat_next;
312 	if (hat->hat_next)
313 		hat->hat_next->hat_prev = hat;
314 	else
315 		kas.a_hat->hat_prev = hat;
316 	kas.a_hat->hat_next = hat;
317 	mutex_exit(&hat_list_lock);
318 
319 	return (hat);
320 }
321 
322 /*
323  * process has finished executing but as has not been cleaned up yet.
324  */
325 /*ARGSUSED*/
326 void
327 hat_free_start(hat_t *hat)
328 {
329 	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
330 
331 	/*
332 	 * If the hat is currently a stealing victim, wait for the stealing
333 	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
334 	 * won't look at its pagetables anymore.
335 	 */
336 	mutex_enter(&hat_list_lock);
337 	while (hat->hat_flags & HAT_VICTIM)
338 		cv_wait(&hat_list_cv, &hat_list_lock);
339 	hat->hat_flags |= HAT_FREEING;
340 	mutex_exit(&hat_list_lock);
341 }
342 
343 /*
344  * An address space is being destroyed, so we destroy the associated hat.
345  */
346 void
347 hat_free_end(hat_t *hat)
348 {
349 	int i;
350 	kmem_cache_t *cache;
351 
352 #ifdef DEBUG
353 	for (i = 0; i <= mmu.max_page_level; i++)
354 		ASSERT(hat->hat_pages_mapped[i] == 0);
355 #endif
356 	ASSERT(hat->hat_flags & HAT_FREEING);
357 
358 	/*
359 	 * must not be running on the given hat
360 	 */
361 	ASSERT(CPU->cpu_current_hat != hat);
362 
363 	/*
364 	 * Remove it from the list of HATs
365 	 */
366 	mutex_enter(&hat_list_lock);
367 	if (hat->hat_prev)
368 		hat->hat_prev->hat_next = hat->hat_next;
369 	else
370 		kas.a_hat->hat_next = hat->hat_next;
371 	if (hat->hat_next)
372 		hat->hat_next->hat_prev = hat->hat_prev;
373 	else
374 		kas.a_hat->hat_prev = hat->hat_prev;
375 	mutex_exit(&hat_list_lock);
376 	hat->hat_next = hat->hat_prev = NULL;
377 
378 	/*
379 	 * Make a pass through the htables freeing them all up.
380 	 */
381 	htable_purge_hat(hat);
382 
383 	/*
384 	 * Decide which kmem cache the hash table came from, then free it.
385 	 */
386 	if (hat->hat_flags & HAT_VLP)
387 		cache = vlp_hash_cache;
388 	else
389 		cache = hat_hash_cache;
390 	kmem_cache_free(cache, hat->hat_ht_hash);
391 	hat->hat_ht_hash = NULL;
392 
393 	hat->hat_flags = 0;
394 	kmem_cache_free(hat_cache, hat);
395 }
396 
397 /*
398  * round kernelbase down to a supported value to use for _userlimit
399  *
400  * userlimit must be aligned down to an entry in the top level htable.
401  * The one exception is for 32 bit HAT's running PAE.
402  */
403 uintptr_t
404 hat_kernelbase(uintptr_t va)
405 {
406 #if defined(__i386)
407 	va &= LEVEL_MASK(1);
408 #endif
409 	if (IN_VA_HOLE(va))
410 		panic("_userlimit %p will fall in VA hole\n", (void *)va);
411 	return (va);
412 }
413 
414 /*
415  * Initialize hat data structures based on processor MMU information.
416  */
417 void
418 mmu_init(void)
419 {
420 	uint_t max_htables;
421 	uint_t pa_bits;
422 	uint_t va_bits;
423 	int i;
424 
425 	/*
426 	 * If the CPU enabled the page table global bit, use it for the kernel.
427 	 * This is bit 7 in CR4 (PGE - Page Global Enable).
428 	 */
429 	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
430 		mmu.pt_global = PT_GLOBAL;
431 
432 	/*
433 	 * Detect NX and PAE usage.
434 	 */
435 	mmu.pae_hat = kbm_pae_support;
436 	if (kbm_nx_support)
437 		mmu.pt_nx = PT_NX;
438 	else
439 		mmu.pt_nx = 0;
440 
441 	/*
442 	 * Intel CPUs allow speculative caching (in TLB-like h/w) of
443 	 * entries in upper page tables even though there may not be
444 	 * any valid entries in lower tables. This implies we have to
445 	 * re-INVLPG at every upper page table entry invalidation.
446 	 */
447 	if (cpuid_getvendor(CPU) == X86_VENDOR_Intel)
448 		mmu.inval_nonleaf = 1;
449 	else
450 		mmu.inval_nonleaf = 0;
451 	/*
452 	 * Use CPU info to set various MMU parameters
453 	 */
454 	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
455 
456 	if (va_bits < sizeof (void *) * NBBY) {
457 		mmu.hole_start = (1ul << (va_bits - 1));
458 		mmu.hole_end = 0ul - mmu.hole_start - 1;
459 	} else {
460 		mmu.hole_end = 0;
461 		mmu.hole_start = mmu.hole_end - 1;
462 	}
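	/*
	 * Worked example (assuming, for illustration, a CPU that reports
	 * va_bits == 48): hole_start = 1ul << 47 = 0x0000800000000000 and
	 * hole_end = 0 - hole_start - 1 = 0xffff7fffffffffff, i.e. the
	 * non-canonical gap between the two halves of the address space.
	 */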
463 #if defined(OPTERON_ERRATUM_121)
464 	/*
465 	 * If erratum 121 has already been detected at this time, hole_start
466 	 * contains the value to be subtracted from mmu.hole_start.
467 	 */
468 	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
469 	hole_start = mmu.hole_start - hole_start;
470 #else
471 	hole_start = mmu.hole_start;
472 #endif
473 	hole_end = mmu.hole_end;
474 
475 	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
476 	if (mmu.pae_hat == 0 && pa_bits > 32)
477 		mmu.highest_pfn = PFN_4G - 1;
478 
479 	if (mmu.pae_hat) {
480 		mmu.pte_size = 8;	/* 8 byte PTEs */
481 		mmu.pte_size_shift = 3;
482 	} else {
483 		mmu.pte_size = 4;	/* 4 byte PTEs */
484 		mmu.pte_size_shift = 2;
485 	}
486 
487 	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
488 		panic("Processor does not support PAE");
489 
490 	if ((x86_feature & X86_CX8) == 0)
491 		panic("Processor does not support cmpxchg8b instruction");
492 
493 	/*
494 	 * Initialize parameters based on the 64 or 32 bit kernels and
495 	 * for the 32 bit kernel decide if we should use PAE.
496 	 */
497 	if (kbm_largepage_support)
498 		mmu.max_page_level = 1;
499 	else
500 		mmu.max_page_level = 0;
501 	mmu_page_sizes = mmu.max_page_level + 1;
502 	mmu_exported_page_sizes = mmu_page_sizes;
503 
504 #if defined(__amd64)
505 
506 	mmu.num_level = 4;
507 	mmu.max_level = 3;
508 	mmu.ptes_per_table = 512;
509 	mmu.top_level_count = 512;
510 
511 	mmu.level_shift[0] = 12;
512 	mmu.level_shift[1] = 21;
513 	mmu.level_shift[2] = 30;
514 	mmu.level_shift[3] = 39;
515 
516 #elif defined(__i386)
517 
518 	if (mmu.pae_hat) {
519 		mmu.num_level = 3;
520 		mmu.max_level = 2;
521 		mmu.ptes_per_table = 512;
522 		mmu.top_level_count = 4;
523 
524 		mmu.level_shift[0] = 12;
525 		mmu.level_shift[1] = 21;
526 		mmu.level_shift[2] = 30;
527 
528 	} else {
529 		mmu.num_level = 2;
530 		mmu.max_level = 1;
531 		mmu.ptes_per_table = 1024;
532 		mmu.top_level_count = 1024;
533 
534 		mmu.level_shift[0] = 12;
535 		mmu.level_shift[1] = 22;
536 	}
537 
538 #endif	/* __i386 */
539 
540 	for (i = 0; i < mmu.num_level; ++i) {
541 		mmu.level_size[i] = 1UL << mmu.level_shift[i];
542 		mmu.level_offset[i] = mmu.level_size[i] - 1;
543 		mmu.level_mask[i] = ~mmu.level_offset[i];
544 	}
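	/*
	 * For illustration, with the amd64 shifts above this loop yields
	 * mapping sizes of 4K (1 << 12), 2M (1 << 21), 1G (1 << 30) and
	 * 512G (1 << 39) for levels 0 through 3.
	 */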
545 
546 	for (i = 0; i <= mmu.max_page_level; ++i) {
547 		mmu.pte_bits[i] = PT_VALID;
548 		if (i > 0)
549 			mmu.pte_bits[i] |= PT_PAGESIZE;
550 	}
551 
552 	/*
553 	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
554 	 */
555 	for (i = 1; i < mmu.num_level; ++i)
556 		mmu.ptp_bits[i] = PT_PTPBITS;
557 
558 #if defined(__i386)
559 	mmu.ptp_bits[2] = PT_VALID;
560 #endif
561 
562 	/*
563 	 * Compute how many hash table entries to have per process for htables.
564 	 * We start with 1 page's worth of entries.
565 	 *
566 	 * If physical memory is small, reduce the amount needed to cover it.
567 	 */
568 	max_htables = physmax / mmu.ptes_per_table;
569 	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
570 	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
571 		mmu.hash_cnt >>= 1;
572 	mmu.vlp_hash_cnt = mmu.hash_cnt;
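	/*
	 * Illustrative sizing (assuming a 4K MMU_PAGESIZE and 8 byte htable
	 * pointers on a 64 bit kernel): hash_cnt starts at 512 buckets and
	 * the loop above halves it, never below 16, while it still meets or
	 * exceeds the number of htables physical memory could require.
	 */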
573 
574 #if defined(__amd64)
575 	/*
576 	 * If running in 64 bits and physical memory is large,
577 	 * increase the size of the cache to cover all of memory for
578 	 * a 64 bit process.
579 	 */
580 #define	HASH_MAX_LENGTH 4
581 	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
582 		mmu.hash_cnt <<= 1;
583 #endif
584 }
585 
586 
587 /*
588  * initialize hat data structures
589  */
590 void
591 hat_init()
592 {
593 #if defined(__i386)
594 	/*
595 	 * _userlimit must be aligned correctly
596 	 */
597 	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
598 		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
599 		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
600 		halt("hat_init(): Unable to continue");
601 	}
602 #endif
603 
604 	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
605 
606 	/*
607 	 * initialize kmem caches
608 	 */
609 	htable_init();
610 	hment_init();
611 
612 	hat_cache = kmem_cache_create("hat_t",
613 	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
614 	    NULL, 0, 0);
615 
616 	hat_hash_cache = kmem_cache_create("HatHash",
617 	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
618 	    NULL, 0, 0);
619 
620 	/*
621 	 * VLP hats can use a smaller hash table size on large memory machines
622 	 */
623 	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
624 		vlp_hash_cache = hat_hash_cache;
625 	} else {
626 		vlp_hash_cache = kmem_cache_create("HatVlpHash",
627 		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
628 		    NULL, 0, 0);
629 	}
630 
631 	/*
632 	 * Set up the kernel's hat
633 	 */
634 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
635 	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
636 	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
637 	kas.a_hat->hat_as = &kas;
638 	kas.a_hat->hat_flags = 0;
639 	AS_LOCK_EXIT(&kas, &kas.a_lock);
640 
641 	CPUSET_ZERO(khat_cpuset);
642 	CPUSET_ADD(khat_cpuset, CPU->cpu_id);
643 
644 	/*
645 	 * The kernel hat's next pointer serves as the head of the hat list.
646 	 * The kernel hat's prev pointer tracks the last hat on the list for
647 	 * htable_steal() to use.
648 	 */
649 	kas.a_hat->hat_next = NULL;
650 	kas.a_hat->hat_prev = NULL;
651 
652 	/*
653 	 * Allocate an htable hash bucket for the kernel
654 	 * XX64 - tune for 64 bit procs
655 	 */
656 	kas.a_hat->hat_num_hash = mmu.hash_cnt;
657 	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
658 	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
659 
660 	/*
661 	 * zero out the top level and cached htable pointers
662 	 */
663 	kas.a_hat->hat_ht_cached = NULL;
664 	kas.a_hat->hat_htable = NULL;
665 
666 	/*
667 	 * Pre-allocate hrm_hashtab before enabling the collection of
668 	 * refmod statistics.  Allocating on the fly would run the
669 	 * risk of suffering recursive mutex enters or
670 	 * deadlocks.
671 	 */
672 	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
673 	    KM_SLEEP);
674 }
675 
676 /*
677  * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
678  *
679  * Each CPU has a set of 2 pagetables that are reused for any 32 bit
680  * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
681  * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
682  */
683 /*ARGSUSED*/
684 static void
685 hat_vlp_setup(struct cpu *cpu)
686 {
687 #if defined(__amd64)
688 	struct hat_cpu_info *hci = cpu->cpu_hat_info;
689 	pfn_t pfn;
690 
691 	/*
692 	 * allocate the level==2 page table for the bottommost
693 	 * 512Gig of address space (this is where 32 bit apps live)
694 	 */
695 	ASSERT(hci != NULL);
696 	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
697 
698 	/*
699 	 * Allocate a top level pagetable and copy the kernel's
700 	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
701 	 */
702 	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
703 	hci->hci_vlp_pfn =
704 	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
705 	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
706 	bcopy(vlp_page + khat_start, hci->hci_vlp_l3ptes + khat_start,
707 	    khat_entries * sizeof (x86pte_t));
708 
709 	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
710 	ASSERT(pfn != PFN_INVALID);
711 	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
712 #endif /* __amd64 */
713 }
714 
715 /*ARGSUSED*/
716 static void
717 hat_vlp_teardown(cpu_t *cpu)
718 {
719 #if defined(__amd64)
720 	struct hat_cpu_info *hci;
721 
722 	if ((hci = cpu->cpu_hat_info) == NULL)
723 		return;
724 	if (hci->hci_vlp_l2ptes)
725 		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
726 	if (hci->hci_vlp_l3ptes)
727 		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
728 #endif	/* __amd64 */
729 }
730 
731 /*
732  * Finish filling in the kernel hat.
733  * Pre-fill all top level kernel page table entries for the kernel's
734  * part of the address range.  From this point on we can't use any new
735  * kernel large pages if they need PTE's at max_level.
736  *
737  * Create the kmap mappings.
738  */
739 void
740 hat_init_finish(void)
741 {
742 	htable_t	*top = kas.a_hat->hat_htable;
743 	htable_t	*ht;
744 	uint_t		e;
745 	x86pte_t	pte;
746 	uintptr_t	va = kernelbase;
747 	size_t		size;
748 
749 
750 #if defined(__i386)
751 	ASSERT((va & LEVEL_MASK(1)) == va);
752 
753 	/*
754 	 * Deal with kernelbase not 1Gig aligned for 32 bit PAE hats.
755 	 */
756 	if (!mmu.pae_hat || (va & LEVEL_OFFSET(mmu.max_level)) == 0) {
757 		khat_pae32_htable = NULL;
758 	} else {
759 		ASSERT(mmu.max_level == 2);
760 		ASSERT((va & LEVEL_OFFSET(mmu.max_level - 1)) == 0);
761 		khat_pae32_htable =
762 		    htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
763 		khat_pae32_start = htable_va2entry(va, khat_pae32_htable);
764 		khat_pae32_entries = mmu.ptes_per_table - khat_pae32_start;
765 		for (e = khat_pae32_start; e < mmu.ptes_per_table;
766 		    ++e, va += LEVEL_SIZE(mmu.max_level - 1)) {
767 			pte = x86pte_get(khat_pae32_htable, e);
768 			if (PTE_ISVALID(pte))
769 				continue;
770 			ht = htable_create(kas.a_hat, va, mmu.max_level - 2,
771 			    NULL);
772 			ASSERT(ht != NULL);
773 		}
774 	}
775 #endif
776 
777 	/*
778 	 * The kernel hat will need fixed values in the highest level
779 	 * ptable for copying to all other hat's. This implies
780 	 * alignment restrictions on _userlimit.
781 	 *
782 	 * Note we don't htable_release() these htables. This keeps them
783 	 * from ever being stolen or free'd.
784 	 *
785 	 * top_level_count is used instead of ptes_per_table, since
786 	 * on 32-bit PAE we only have 4 usable entries at the top level ptable.
787 	 */
788 	if (va == 0)
789 		khat_start = mmu.top_level_count;
790 	else
791 		khat_start = htable_va2entry(va, kas.a_hat->hat_htable);
792 	khat_entries = mmu.top_level_count - khat_start;
793 	for (e = khat_start; e < mmu.top_level_count;
794 	    ++e, va += LEVEL_SIZE(mmu.max_level)) {
795 		if (IN_HYPERVISOR_VA(va))
796 			continue;
797 		pte = x86pte_get(top, e);
798 		if (PTE_ISVALID(pte))
799 			continue;
800 		ht = htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
801 		ASSERT(ht != NULL);
802 	}
803 
804 	/*
805 	 * We are now effectively running on the kernel hat.
806 	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
807 	 * reserve for all HAT allocations.  From here on, the reserves are
808 	 * only used when mapping in memory for the hat's own allocations.
809 	 */
810 	use_boot_reserve = 0;
811 	htable_adjust_reserve();
812 
813 	/*
814 	 * A 32 bit kernel uses only 4 of the 512 entries in its top level
815 	 * pagetable. We'll use the remainder for the "per CPU" page tables
816 	 * for VLP processes.
817 	 *
818 	 * We also map the top level kernel pagetable into the kernel to make
819 	 * it easy to use bcopy to initialize new address spaces.
820 	 */
821 	if (mmu.pae_hat) {
822 		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
823 		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
824 		    kas.a_hat->hat_htable->ht_pfn,
825 		    PROT_WRITE |
826 		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
827 		    HAT_LOAD | HAT_LOAD_NOCONSIST);
828 	}
829 	hat_vlp_setup(CPU);
830 
831 	/*
832 	 * Create kmap (cached mappings of kernel PTEs)
833 	 * for 32 bit we map from segmap_start .. ekernelheap
834 	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
835 	 */
836 #if defined(__i386)
837 	size = (uintptr_t)ekernelheap - segmap_start;
838 #elif defined(__amd64)
839 	size = segmapsize;
840 #endif
841 	hat_kmap_init((uintptr_t)segmap_start, size);
842 }
843 
844 /*
845  * In 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
846  * are 32 bit, so for safety we must use cas64() to install these.
847  */
848 #ifdef __i386
849 static void
850 reload_pae32(hat_t *hat, cpu_t *cpu)
851 {
852 	x86pte_t *src;
853 	x86pte_t *dest;
854 	x86pte_t pte;
855 	int i;
856 
857 	/*
858 	 * Load the 4 entries of the level 2 page table into this
859 	 * cpu's range of the vlp_page and point cr3 at them.
860 	 */
861 	ASSERT(mmu.pae_hat);
862 	src = hat->hat_vlp_ptes;
863 	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
864 	for (i = 0; i < VLP_NUM_PTES; ++i) {
865 		for (;;) {
866 			pte = dest[i];
867 			if (pte == src[i])
868 				break;
869 			if (cas64(dest + i, pte, src[i]) != src[i])
870 				break;
871 		}
872 	}
873 }
874 #endif
875 
876 /*
877  * Switch to a new active hat, maintaining bit masks to track active CPUs.
878  */
879 void
880 hat_switch(hat_t *hat)
881 {
882 	uintptr_t	newcr3;
883 	cpu_t		*cpu = CPU;
884 	hat_t		*old = cpu->cpu_current_hat;
885 
886 	/*
887 	 * set up this information first, so we don't miss any cross calls
888 	 */
889 	if (old != NULL) {
890 		if (old == hat)
891 			return;
892 		if (old != kas.a_hat)
893 			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
894 	}
895 
896 	/*
897 	 * Add this CPU to the active set for this HAT.
898 	 */
899 	if (hat != kas.a_hat) {
900 		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
901 	}
902 	cpu->cpu_current_hat = hat;
903 
904 	/*
905 	 * now go ahead and load cr3
906 	 */
907 	if (hat->hat_flags & HAT_VLP) {
908 #if defined(__amd64)
909 		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
910 
911 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
912 		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
913 #elif defined(__i386)
914 		reload_pae32(hat, cpu);
915 		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
916 		    (cpu->cpu_id + 1) * VLP_SIZE;
917 #endif
918 	} else {
919 		newcr3 = MAKECR3(hat->hat_htable->ht_pfn);
920 	}
921 	setcr3(newcr3);
922 	ASSERT(cpu == CPU);
923 }
924 
925 /*
926  * Utility to return a valid x86pte_t from protections, pfn, and level number
927  */
928 static x86pte_t
929 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
930 {
931 	x86pte_t	pte;
932 	uint_t		cache_attr = attr & HAT_ORDER_MASK;
933 
934 	pte = MAKEPTE(pfn, level);
935 
936 	if (attr & PROT_WRITE)
937 		PTE_SET(pte, PT_WRITABLE);
938 
939 	if (attr & PROT_USER)
940 		PTE_SET(pte, PT_USER);
941 
942 	if (!(attr & PROT_EXEC))
943 		PTE_SET(pte, mmu.pt_nx);
944 
945 	/*
946 	 * Set the software bits used to track ref/mod sync's and hments.
947 	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
948 	 */
949 	if (flags & HAT_LOAD_NOCONSIST)
950 		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
951 	else if (attr & HAT_NOSYNC)
952 		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
953 
954 	/*
955 	 * Set the caching attributes in the PTE. The combination
956 	 * of attributes is poorly defined, so we pay attention
957 	 * to them in the given order.
958 	 *
959 	 * The test for HAT_STRICTORDER is different because it's defined
960 	 * as "0" - which was a stupid thing to do, but it's too late to change!
961 	 */
962 	if (cache_attr == HAT_STRICTORDER) {
963 		PTE_SET(pte, PT_NOCACHE);
964 	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
965 	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
966 		/* nothing to set */;
967 	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
968 		PTE_SET(pte, PT_NOCACHE);
969 		if (x86_feature & X86_PAT)
970 			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
971 		else
972 			PTE_SET(pte, PT_WRITETHRU);
973 	} else {
974 		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
975 	}
976 
977 	return (pte);
978 }
979 
980 /*
981  * Duplicate address translations of the parent to the child.
982  * This function really isn't used anymore.
983  */
984 /*ARGSUSED*/
985 int
986 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
987 {
988 	ASSERT((uintptr_t)addr < kernelbase);
989 	ASSERT(new != kas.a_hat);
990 	ASSERT(old != kas.a_hat);
991 	return (0);
992 }
993 
994 /*
995  * Allocate any hat resources required for a process being swapped in.
996  */
997 /*ARGSUSED*/
998 void
999 hat_swapin(hat_t *hat)
1000 {
1001 	/* do nothing - we let everything fault back in */
1002 }
1003 
1004 /*
1005  * Unload all translations associated with an address space of a process
1006  * that is being swapped out.
1007  */
1008 void
1009 hat_swapout(hat_t *hat)
1010 {
1011 	uintptr_t	vaddr = (uintptr_t)0;
1012 	uintptr_t	eaddr = _userlimit;
1013 	htable_t	*ht = NULL;
1014 	level_t		l;
1015 
1016 	/*
1017 	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1018 	 * seg_spt and shared pagetables can't be swapped out.
1019 	 * Take a look at segspt_shmswapout() - it's a big no-op.
1020 	 *
1021 	 * Instead we'll walk through all the address space and unload
1022 	 * any mappings which we are sure are not shared and not locked.
1023 	 */
1024 	ASSERT(IS_PAGEALIGNED(vaddr));
1025 	ASSERT(IS_PAGEALIGNED(eaddr));
1026 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1027 	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1028 		eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1029 
1030 	while (vaddr < eaddr) {
1031 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
1032 		if (ht == NULL)
1033 			break;
1034 
1035 		ASSERT(!IN_VA_HOLE(vaddr));
1036 
1037 		/*
1038 		 * If the page table is shared, skip its entire range.
1039 		 * This code knows that only level 0 page tables are shared.
1040 		 */
1041 		l = ht->ht_level;
1042 		if (ht->ht_flags & HTABLE_SHARED_PFN) {
1043 			ASSERT(l == 0);
1044 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1045 			htable_release(ht);
1046 			ht = NULL;
1047 			continue;
1048 		}
1049 
1050 		/*
1051 		 * If the page table has no locked entries, unload this one.
1052 		 */
1053 		if (ht->ht_lock_cnt == 0)
1054 			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1055 			    HAT_UNLOAD_UNMAP);
1056 
1057 		/*
1058 		 * If we have a level 0 page table with locked entries,
1059 		 * skip the entire page table, otherwise skip just one entry.
1060 		 */
1061 		if (ht->ht_lock_cnt > 0 && l == 0)
1062 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1063 		else
1064 			vaddr += LEVEL_SIZE(l);
1065 	}
1066 	if (ht)
1067 		htable_release(ht);
1068 
1069 	/*
1070 	 * We're in swapout because the system is low on memory, so
1071 	 * go back and flush all the htables off the cached list.
1072 	 */
1073 	htable_purge_hat(hat);
1074 }
1075 
1076 /*
1077  * returns number of bytes that have valid mappings in hat.
1078  */
1079 size_t
1080 hat_get_mapped_size(hat_t *hat)
1081 {
1082 	size_t total = 0;
1083 	int l;
1084 
1085 	for (l = 0; l <= mmu.max_page_level; l++)
1086 		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1087 	total += hat->hat_ism_pgcnt;
1088 
1089 	return (total);
1090 }
1091 
1092 /*
1093  * enable/disable collection of stats for hat.
1094  */
1095 int
1096 hat_stats_enable(hat_t *hat)
1097 {
1098 	atomic_add_32(&hat->hat_stats, 1);
1099 	return (1);
1100 }
1101 
1102 void
1103 hat_stats_disable(hat_t *hat)
1104 {
1105 	atomic_add_32(&hat->hat_stats, -1);
1106 }
1107 
1108 /*
1109  * Utility to sync the ref/mod bits from a page table entry to the page_t.
1110  * We must be holding the mapping list lock when this is called.
1111  */
1112 static void
1113 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1114 {
1115 	uint_t	rm = 0;
1116 	pgcnt_t	pgcnt;
1117 
1118 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1119 		return;
1120 
1121 	if (PTE_GET(pte, PT_REF))
1122 		rm |= P_REF;
1123 
1124 	if (PTE_GET(pte, PT_MOD))
1125 		rm |= P_MOD;
1126 
1127 	if (rm == 0)
1128 		return;
1129 
1130 	/*
1131 	 * sync to all constituent pages of a large page
1132 	 */
1133 	ASSERT(x86_hm_held(pp));
1134 	pgcnt = page_get_pagecnt(level);
1135 	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1136 	for (; pgcnt > 0; --pgcnt) {
1137 		/*
1138 		 * hat_page_demote() can't decrease
1139 		 * pszc below this mapping size
1140 		 * since this large mapping existed after we
1141 		 * took mlist lock.
1142 		 */
1143 		ASSERT(pp->p_szc >= level);
1144 		hat_page_setattr(pp, rm);
1145 		++pp;
1146 	}
1147 }
1148 
1149 /*
1150  * This is the set of PTE bits for PFN, permissions and caching
1151  * that require a TLB flush (hat_tlb_inval) if changed on a HAT_LOAD_REMAP.
1152  */
1153 #define	PT_REMAP_BITS							\
1154 	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
1155 	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE)
1156 
1157 #define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
1158 /*
1159  * Do the low-level work to get a mapping entered into a HAT's pagetables
1160  * and in the mapping list of the associated page_t.
1161  */
1162 static int
1163 hati_pte_map(
1164 	htable_t	*ht,
1165 	uint_t		entry,
1166 	page_t		*pp,
1167 	x86pte_t	pte,
1168 	int		flags,
1169 	void		*pte_ptr)
1170 {
1171 	hat_t		*hat = ht->ht_hat;
1172 	x86pte_t	old_pte;
1173 	level_t		l = ht->ht_level;
1174 	hment_t		*hm;
1175 	uint_t		is_consist;
1176 	int		rv = 0;
1177 
1178 	/*
1179 	 * Is this a consistent (ie. need mapping list lock) mapping?
1180 	 */
1181 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1182 
1183 	/*
1184 	 * Track locked mapping count in the htable.  Do this first,
1185 	 * as we track locking even if there already is a mapping present.
1186 	 */
1187 	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
1188 		HTABLE_LOCK_INC(ht);
1189 
1190 	/*
1191 	 * Acquire the page's mapping list lock and get an hment to use.
1192 	 * Note that hment_prepare() might return NULL.
1193 	 */
1194 	if (is_consist) {
1195 		x86_hm_enter(pp);
1196 		hm = hment_prepare(ht, entry, pp);
1197 	}
1198 
1199 	/*
1200 	 * Set the new pte, retrieving the old one at the same time.
1201 	 */
1202 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1203 
1204 	/*
1205 	 * did we get a large page / page table collision?
1206 	 */
1207 	if (old_pte == LPAGE_ERROR) {
1208 		rv = -1;
1209 		goto done;
1210 	}
1211 
1212 	/*
1213 	 * If the mapping didn't change there is nothing more to do.
1214 	 */
1215 	if (PTE_EQUIV(pte, old_pte))
1216 		goto done;
1217 
1218 	/*
1219 	 * Install a new mapping in the page's mapping list
1220 	 */
1221 	if (!PTE_ISVALID(old_pte)) {
1222 		if (is_consist) {
1223 			hment_assign(ht, entry, pp, hm);
1224 			x86_hm_exit(pp);
1225 		} else {
1226 			ASSERT(flags & HAT_LOAD_NOCONSIST);
1227 		}
1228 		HTABLE_INC(ht->ht_valid_cnt);
1229 		PGCNT_INC(hat, l);
1230 		return (rv);
1231 	}
1232 
1233 	/*
1234 	 * Remap's are more complicated:
1235 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
1236 	 *    We also require that NOCONSIST be specified.
1237 	 *  - Otherwise only permission or caching bits may change.
1238 	 */
1239 	if (!PTE_ISPAGE(old_pte, l))
1240 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1241 
1242 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1243 		REMAPASSERT(flags & HAT_LOAD_REMAP);
1244 		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1245 		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1246 		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1247 		    pf_is_memory(PTE2PFN(pte, l)));
1248 		REMAPASSERT(!is_consist);
1249 	}
1250 
1251 	/*
1252 	 * We only let remaps change the bits for PFNs, permissions
1253 	 * or caching type.
1254 	 */
1255 	ASSERT(PTE_GET(old_pte, ~(PT_REMAP_BITS | PT_REF | PT_MOD)) ==
1256 	    PTE_GET(pte, ~PT_REMAP_BITS));
1257 
1258 	/*
1259 	 * We don't create any mapping list entries on a remap, so release
1260 	 * any allocated hment after we drop the mapping list lock.
1261 	 */
1262 done:
1263 	if (is_consist) {
1264 		x86_hm_exit(pp);
1265 		if (hm != NULL)
1266 			hment_free(hm);
1267 	}
1268 	return (rv);
1269 }
1270 
1271 /*
1272  * Internal routine to load a single page table entry. This only fails if
1273  * we attempt to overwrite a page table link with a large page.
1274  */
1275 static int
1276 hati_load_common(
1277 	hat_t		*hat,
1278 	uintptr_t	va,
1279 	page_t		*pp,
1280 	uint_t		attr,
1281 	uint_t		flags,
1282 	level_t		level,
1283 	pfn_t		pfn)
1284 {
1285 	htable_t	*ht;
1286 	uint_t		entry;
1287 	x86pte_t	pte;
1288 	int		rv = 0;
1289 
1290 	/*
1291 	 * The number 16 is arbitrary and here to catch a recursion problem
1292 	 * early before we blow out the kernel stack.
1293 	 */
1294 	++curthread->t_hatdepth;
1295 	ASSERT(curthread->t_hatdepth < 16);
1296 
1297 	ASSERT(hat == kas.a_hat ||
1298 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1299 
1300 	if (flags & HAT_LOAD_SHARE)
1301 		hat->hat_flags |= HAT_SHARED;
1302 
1303 	/*
1304 	 * Find the page table that maps this page if it already exists.
1305 	 */
1306 	ht = htable_lookup(hat, va, level);
1307 
1308 	/*
1309 	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1310 	 */
1311 	if (pp == NULL)
1312 		flags |= HAT_LOAD_NOCONSIST;
1313 
1314 	if (ht == NULL) {
1315 		ht = htable_create(hat, va, level, NULL);
1316 		ASSERT(ht != NULL);
1317 	}
1318 	entry = htable_va2entry(va, ht);
1319 
1320 	/*
1321 	 * a bunch of paranoid error checking
1322 	 */
1323 	ASSERT(ht->ht_busy > 0);
1324 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1325 		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
1326 	ASSERT(ht->ht_level == level);
1327 
1328 	/*
1329 	 * construct the new PTE
1330 	 */
1331 	if (hat == kas.a_hat)
1332 		attr &= ~PROT_USER;
1333 	pte = hati_mkpte(pfn, attr, level, flags);
1334 	if (hat == kas.a_hat && va >= kernelbase)
1335 		PTE_SET(pte, mmu.pt_global);
1336 
1337 	/*
1338 	 * establish the mapping
1339 	 */
1340 	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1341 
1342 	/*
1343 	 * release the htable and any reserves
1344 	 */
1345 	htable_release(ht);
1346 	--curthread->t_hatdepth;
1347 	return (rv);
1348 }
1349 
1350 /*
1351  * special case of hat_memload to deal with some kernel addrs for performance
1352  */
1353 static void
1354 hat_kmap_load(
1355 	caddr_t		addr,
1356 	page_t		*pp,
1357 	uint_t		attr,
1358 	uint_t		flags)
1359 {
1360 	uintptr_t	va = (uintptr_t)addr;
1361 	x86pte_t	pte;
1362 	pfn_t		pfn = page_pptonum(pp);
1363 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
1364 	htable_t	*ht;
1365 	uint_t		entry;
1366 	void		*pte_ptr;
1367 
1368 	/*
1369 	 * construct the requested PTE
1370 	 */
1371 	attr &= ~PROT_USER;
1372 	attr |= HAT_STORECACHING_OK;
1373 	pte = hati_mkpte(pfn, attr, 0, flags);
1374 	PTE_SET(pte, mmu.pt_global);
1375 
1376 	/*
1377 	 * Figure out the pte_ptr and htable and use common code to finish up
1378 	 */
1379 	if (mmu.pae_hat)
1380 		pte_ptr = mmu.kmap_ptes + pg_off;
1381 	else
1382 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1383 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1384 	    LEVEL_SHIFT(1)];
1385 	entry = htable_va2entry(va, ht);
1386 	++curthread->t_hatdepth;
1387 	ASSERT(curthread->t_hatdepth < 16);
1388 	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1389 	--curthread->t_hatdepth;
1390 }
1391 
1392 /*
1393  * hat_memload() - load a translation to the given page struct
1394  *
1395  * Flags for hat_memload/hat_devload/hat_*attr.
1396  *
1397  * 	HAT_LOAD	Default flags to load a translation to the page.
1398  *
1399  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
1400  *			and hat_devload().
1401  *
1402  *	HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1403  *			sets PT_NOCONSIST
1404  *
1405  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
1406  *			that map some user pages (not kas) are shared by more
1407  *			than one process (eg. ISM).
1408  *
1409  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
1410  *
1411  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
1412  *			point, it's setting up mapping to allocate internal
1413  *			hat layer data structures.  This flag forces hat layer
1414  *			to tap its reserves in order to prevent infinite
1415  *			recursion.
1416  *
1417  * The following is a protection attribute (like PROT_READ, etc.)
1418  *
1419  *	HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
1420  *			are never cleared.
1421  *
1422  * Installing new valid PTE's and creation of the mapping list
1423  * entry are controlled under the same lock. It's derived from the
1424  * page_t being mapped.
1425  */
1426 static uint_t supported_memload_flags =
1427 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1428 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
1429 
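/*
 * A hypothetical sketch of a typical call (not taken from a real caller):
 * a segment driver faulting in an anonymous page "pp" at "addr" might do
 * something like:
 *
 *	hat_memload(seg->s_as->a_hat, addr, pp,
 *	    PROT_READ | PROT_WRITE, HAT_LOAD);
 */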
1430 void
1431 hat_memload(
1432 	hat_t		*hat,
1433 	caddr_t		addr,
1434 	page_t		*pp,
1435 	uint_t		attr,
1436 	uint_t		flags)
1437 {
1438 	uintptr_t	va = (uintptr_t)addr;
1439 	level_t		level = 0;
1440 	pfn_t		pfn = page_pptonum(pp);
1441 
1442 	ASSERT(IS_PAGEALIGNED(va));
1443 	ASSERT(hat == kas.a_hat || va < _userlimit);
1444 	ASSERT(hat == kas.a_hat ||
1445 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1446 	ASSERT((flags & supported_memload_flags) == flags);
1447 
1448 	ASSERT(!IN_VA_HOLE(va));
1449 	ASSERT(!PP_ISFREE(pp));
1450 
1451 	/*
1452 	 * kernel address special case for performance.
1453 	 */
1454 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1455 		ASSERT(hat == kas.a_hat);
1456 		hat_kmap_load(addr, pp, attr, flags);
1457 		return;
1458 	}
1459 
1460 	/*
1461 	 * This is used for memory with normal caching enabled, so
1462 	 * always set HAT_STORECACHING_OK.
1463 	 */
1464 	attr |= HAT_STORECACHING_OK;
1465 	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1466 		panic("unexpected hati_load_common() failure");
1467 }
1468 
1469 /* ARGSUSED */
1470 void
1471 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1472     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1473 {
1474 	hat_memload(hat, addr, pp, attr, flags);
1475 }
1476 
1477 /*
1478  * Load the given array of page structs using large pages when possible
1479  */
1480 void
1481 hat_memload_array(
1482 	hat_t		*hat,
1483 	caddr_t		addr,
1484 	size_t		len,
1485 	page_t		**pages,
1486 	uint_t		attr,
1487 	uint_t		flags)
1488 {
1489 	uintptr_t	va = (uintptr_t)addr;
1490 	uintptr_t	eaddr = va + len;
1491 	level_t		level;
1492 	size_t		pgsize;
1493 	pgcnt_t		pgindx = 0;
1494 	pfn_t		pfn;
1495 	pgcnt_t		i;
1496 
1497 	ASSERT(IS_PAGEALIGNED(va));
1498 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1499 	ASSERT(hat == kas.a_hat ||
1500 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1501 	ASSERT((flags & supported_memload_flags) == flags);
1502 
1503 	/*
1504 	 * memload is used for memory with full caching enabled, so
1505 	 * set HAT_STORECACHING_OK.
1506 	 */
1507 	attr |= HAT_STORECACHING_OK;
1508 
1509 	/*
1510 	 * handle all pages using largest possible pagesize
1511 	 */
1512 	while (va < eaddr) {
1513 		/*
1514 		 * decide what level mapping to use (ie. pagesize)
1515 		 */
1516 		pfn = page_pptonum(pages[pgindx]);
1517 		for (level = mmu.max_page_level; ; --level) {
1518 			pgsize = LEVEL_SIZE(level);
1519 			if (level == 0)
1520 				break;
1521 
1522 			if (!IS_P2ALIGNED(va, pgsize) ||
1523 			    (eaddr - va) < pgsize ||
1524 			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1525 				continue;
1526 
1527 			/*
1528 			 * To use a large mapping of this size, all the
1529 			 * pages we are passed must be sequential subpages
1530 			 * of the large page.
1531 			 * hat_page_demote() can't change p_szc because
1532 			 * all pages are locked.
1533 			 */
1534 			if (pages[pgindx]->p_szc >= level) {
1535 				for (i = 0; i < mmu_btop(pgsize); ++i) {
1536 					if (pfn + i !=
1537 					    page_pptonum(pages[pgindx + i]))
1538 						break;
1539 					ASSERT(pages[pgindx + i]->p_szc >=
1540 					    level);
1541 					ASSERT(pages[pgindx] + i ==
1542 					    pages[pgindx + i]);
1543 				}
1544 				if (i == mmu_btop(pgsize))
1545 					break;
1546 			}
1547 		}
1548 
1549 		/*
1550 		 * Load this page mapping. If the load fails, try a smaller
1551 		 * pagesize.
1552 		 */
1553 		ASSERT(!IN_VA_HOLE(va));
1554 		while (hati_load_common(hat, va, pages[pgindx], attr,
1555 		    flags, level, pfn) != 0) {
1556 			if (level == 0)
1557 				panic("unexpected hati_load_common() failure");
1558 			--level;
1559 			pgsize = LEVEL_SIZE(level);
1560 		}
1561 
1562 		/*
1563 		 * move to next page
1564 		 */
1565 		va += pgsize;
1566 		pgindx += mmu_btop(pgsize);
1567 	}
1568 }
1569 
1570 /* ARGSUSED */
1571 void
1572 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1573     struct page **pps, uint_t attr, uint_t flags,
1574     hat_region_cookie_t rcookie)
1575 {
1576 	hat_memload_array(hat, addr, len, pps, attr, flags);
1577 }
1578 
1579 /*
1580  * void hat_devload(hat, addr, len, pf, attr, flags)
1581  *	load/lock the given page frame number
1582  *
1583  * Advisory ordering attributes. Apply only to device mappings.
1584  *
1585  * HAT_STRICTORDER: the CPU must issue the references in order, as the
1586  *	programmer specified.  This is the default.
1587  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1588  *	of reordering; store or load with store or load).
1589  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1590  *	to consecutive locations (for example, turn two consecutive byte
1591  *	stores into one halfword store), and it may batch individual loads
1592  *	(for example, turn two consecutive byte loads into one halfword load).
1593  *	This also implies re-ordering.
1594  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1595  *	until another store occurs.  The default is to fetch new data
1596  *	on every load.  This also implies merging.
1597  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1598  *	the device (perhaps with other data) at a later time.  The default is
1599  *	to push the data right away.  This also implies load caching.
1600  *
1601  * Equivalent of hat_memload(), but can be used for device memory where
1602  * there are no page_t's and we support additional flags (write merging, etc).
1603  * Note that we can have large page mappings with this interface.
1604  */
1605 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1606 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1607 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
1608 
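/*
 * A hypothetical sketch of a typical use (the names "va" and "pa" are just
 * for illustration): locking down one page of device registers at physical
 * address "pa" might look like:
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, mmu_btop(pa),
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 */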
1609 void
1610 hat_devload(
1611 	hat_t		*hat,
1612 	caddr_t		addr,
1613 	size_t		len,
1614 	pfn_t		pfn,
1615 	uint_t		attr,
1616 	int		flags)
1617 {
1618 	uintptr_t	va = ALIGN2PAGE(addr);
1619 	uintptr_t	eva = va + len;
1620 	level_t		level;
1621 	size_t		pgsize;
1622 	page_t		*pp;
1623 	int		f;	/* per PTE copy of flags  - maybe modified */
1624 	uint_t		a;	/* per PTE copy of attr */
1625 
1626 	ASSERT(IS_PAGEALIGNED(va));
1627 	ASSERT(hat == kas.a_hat || eva <= _userlimit);
1628 	ASSERT(hat == kas.a_hat ||
1629 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1630 	ASSERT((flags & supported_devload_flags) == flags);
1631 
1632 	/*
1633 	 * handle all pages
1634 	 */
1635 	while (va < eva) {
1636 
1637 		/*
1638 		 * decide what level mapping to use (ie. pagesize)
1639 		 */
1640 		for (level = mmu.max_page_level; ; --level) {
1641 			pgsize = LEVEL_SIZE(level);
1642 			if (level == 0)
1643 				break;
1644 			if (IS_P2ALIGNED(va, pgsize) &&
1645 			    (eva - va) >= pgsize &&
1646 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize)))
1647 				break;
1648 		}
1649 
1650 		/*
1651 		 * If this is just memory then allow caching (this happens
1652 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1653 		 * to override that. If we don't have a page_t then make sure
1654 		 * NOCONSIST is set.
1655 		 */
1656 		a = attr;
1657 		f = flags;
1658 		if (pf_is_memory(pfn)) {
1659 			if (!(a & HAT_PLAT_NOCACHE))
1660 				a |= HAT_STORECACHING_OK;
1661 
1662 			if (f & HAT_LOAD_NOCONSIST)
1663 				pp = NULL;
1664 			else
1665 				pp = page_numtopp_nolock(pfn);
1666 		} else {
1667 			pp = NULL;
1668 			f |= HAT_LOAD_NOCONSIST;
1669 		}
1670 
1671 		/*
1672 		 * load this page mapping
1673 		 */
1674 		ASSERT(!IN_VA_HOLE(va));
1675 		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1676 			if (level == 0)
1677 				panic("unexpected hati_load_common() failure");
1678 			--level;
1679 			pgsize = LEVEL_SIZE(level);
1680 		}
1681 
1682 		/*
1683 		 * move to next page
1684 		 */
1685 		va += pgsize;
1686 		pfn += mmu_btop(pgsize);
1687 	}
1688 }
1689 
1690 /*
1691  * void hat_unlock(hat, addr, len)
1692  *	unlock the mappings to a given range of addresses
1693  *
1694  * Locks are tracked by ht_lock_cnt in the htable.
1695  */
1696 void
1697 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1698 {
1699 	uintptr_t	vaddr = (uintptr_t)addr;
1700 	uintptr_t	eaddr = vaddr + len;
1701 	htable_t	*ht = NULL;
1702 
1703 	/*
1704 	 * kernel entries are always locked; we don't track lock counts
1705 	 */
1706 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1707 	ASSERT(IS_PAGEALIGNED(vaddr));
1708 	ASSERT(IS_PAGEALIGNED(eaddr));
1709 	if (hat == kas.a_hat)
1710 		return;
1711 	if (eaddr > _userlimit)
1712 		panic("hat_unlock() address out of range - above _userlimit");
1713 
1714 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1715 	while (vaddr < eaddr) {
1716 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
1717 		if (ht == NULL)
1718 			break;
1719 
1720 		ASSERT(!IN_VA_HOLE(vaddr));
1721 
1722 		if (ht->ht_lock_cnt < 1)
1723 			panic("hat_unlock(): lock_cnt < 1, "
1724 			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
1725 		HTABLE_LOCK_DEC(ht);
1726 
1727 		vaddr += LEVEL_SIZE(ht->ht_level);
1728 	}
1729 	if (ht)
1730 		htable_release(ht);
1731 }
1732 
1733 /* ARGSUSED */
1734 void
1735 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
1736     hat_region_cookie_t rcookie)
1737 {
1738 	panic("No shared region support on x86");
1739 }
1740 
1741 /*
1742  * Cross call service routine to demap a virtual page on
1743  * the current CPU or flush all mappings in TLB.
1744  */
1745 /*ARGSUSED*/
1746 static int
1747 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1748 {
1749 	hat_t	*hat = (hat_t *)a1;
1750 	caddr_t	addr = (caddr_t)a2;
1751 
1752 	/*
1753 	 * If the target hat isn't the kernel and this CPU isn't operating
1754 	 * in the target hat, we can ignore the cross call.
1755 	 */
1756 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1757 		return (0);
1758 
1759 	/*
1760 	 * For a normal address, we just flush one page mapping
1761 	 */
1762 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1763 		mmu_tlbflush_entry(addr);
1764 		return (0);
1765 	}
1766 
1767 	/*
1768 	 * Otherwise we reload cr3 to effect a complete TLB flush.
1769 	 *
1770 	 * A reload of cr3 on a VLP process also means we must recopy in
1771 	 * the pte values from the struct hat
1772 	 */
1773 	if (hat->hat_flags & HAT_VLP) {
1774 #if defined(__amd64)
1775 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1776 
1777 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1778 #elif defined(__i386)
1779 		reload_pae32(hat, CPU);
1780 #endif
1781 	}
1782 	reload_cr3();
1783 	return (0);
1784 }
1785 
1786 /*
1787  * Flush all TLB entries, including global (ie. kernel) ones.
1788  */
1789 static void
1790 flush_all_tlb_entries(void)
1791 {
1792 	ulong_t cr4 = getcr4();
1793 
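	/*
	 * Toggling CR4.PGE below flushes the entire TLB, including global
	 * (kernel) entries that a plain cr3 reload would leave intact;
	 * without PGE a cr3 reload alone suffices.
	 */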
1794 	if (cr4 & CR4_PGE) {
1795 		setcr4(cr4 & ~(ulong_t)CR4_PGE);
1796 		setcr4(cr4);
1797 
1798 		/*
1799 		 * 32 bit PAE also needs to always reload_cr3()
1800 		 */
1801 		if (mmu.max_level == 2)
1802 			reload_cr3();
1803 	} else {
1804 		reload_cr3();
1805 	}
1806 }
1807 
1808 #define	TLB_CPU_HALTED	(01ul)
1809 #define	TLB_INVAL_ALL	(02ul)
1810 #define	CAS_TLB_INFO(cpu, old, new)	\
1811 	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
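/*
 * Summary of the deferred flush protocol used below and in hat_tlb_inval():
 * a CPU going idle sets TLB_CPU_HALTED in its mcpu_tlb_info. Rather than
 * cross call a halted CPU, hat_tlb_inval() marks it with TLB_INVAL_ALL (via
 * compare-and-swap, only while it remains halted); when the CPU resumes,
 * tlb_service() clears the word and, if TLB_INVAL_ALL was set, performs the
 * flush via flush_all_tlb_entries().
 */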
1812 
1813 /*
1814  * Record that a CPU is going idle
1815  */
1816 void
1817 tlb_going_idle(void)
1818 {
1819 	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
1820 }
1821 
1822 /*
1823  * Service a delayed TLB flush if coming out of being idle.
1824  */
1825 void
1826 tlb_service(void)
1827 {
1828 	ulong_t flags = getflags();
1829 	ulong_t tlb_info;
1830 	ulong_t found;
1831 
1832 	/*
1833 	 * Be sure interrupts are off while doing this so that
1834 	 * higher level interrupts correctly wait for flushes to finish.
1835 	 */
1836 	if (flags & PS_IE)
1837 		flags = intr_clear();
1838 
1839 	/*
1840 	 * We only have to do something if coming out of being idle.
1841 	 */
1842 	tlb_info = CPU->cpu_m.mcpu_tlb_info;
1843 	if (tlb_info & TLB_CPU_HALTED) {
1844 		ASSERT(CPU->cpu_current_hat == kas.a_hat);
1845 
1846 		/*
1847 		 * Atomic clear and fetch of old state.
1848 		 */
1849 		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
1850 			ASSERT(found & TLB_CPU_HALTED);
1851 			tlb_info = found;
1852 			SMT_PAUSE();
1853 		}
1854 		if (tlb_info & TLB_INVAL_ALL)
1855 			flush_all_tlb_entries();
1856 	}
1857 
1858 	/*
1859 	 * Restore interrupt enable control bit.
1860 	 */
1861 	if (flags & PS_IE)
1862 		sti();
1863 }
1864 
1865 /*
1866  * Internal routine to do cross calls to invalidate a range of pages on
1867  * all CPUs using a given hat.
1868  */
1869 void
1870 hat_tlb_inval(hat_t *hat, uintptr_t va)
1871 {
1872 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
1873 	cpuset_t	justme;
1874 	cpuset_t	check_cpus;
1875 	cpuset_t	cpus_to_shootdown;
1876 	cpu_t		*cpup;
1877 	int		c;
1878 
1879 	/*
1880 	 * If the hat is being destroyed, there are no more users, so
1881 	 * demap need not do anything.
1882 	 */
1883 	if (hat->hat_flags & HAT_FREEING)
1884 		return;
1885 
1886 	/*
1887 	 * If demapping from a shared pagetable, we best demap the
1888 	 * entire set of user TLBs, since we don't know what addresses
1889 	 * these were shared at.
1890 	 */
1891 	if (hat->hat_flags & HAT_SHARED) {
1892 		hat = kas.a_hat;
1893 		va = DEMAP_ALL_ADDR;
1894 	}
1895 
1896 	/*
1897 	 * if not running with multiple CPUs, don't use cross calls
1898 	 */
1899 	if (panicstr || !flushes_require_xcalls) {
1900 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
1901 		return;
1902 	}
1903 
1904 
1905 	/*
1906 	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
1907 	 * Otherwise it's just CPUs currently executing in this hat.
1908 	 */
1909 	kpreempt_disable();
1910 	CPUSET_ONLY(justme, CPU->cpu_id);
1911 	if (hat == kas.a_hat)
1912 		cpus_to_shootdown = khat_cpuset;
1913 	else
1914 		cpus_to_shootdown = hat->hat_cpus;
1915 
1916 	/*
1917 	 * If any CPUs in the set are idle, just request a delayed flush
1918 	 * and avoid waking them up.
1919 	 */
1920 	check_cpus = cpus_to_shootdown;
1921 	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
1922 		ulong_t tlb_info;
1923 
1924 		if (!CPU_IN_SET(check_cpus, c))
1925 			continue;
1926 		CPUSET_DEL(check_cpus, c);
1927 		cpup = cpu[c];
1928 		if (cpup == NULL)
1929 			continue;
1930 
1931 		tlb_info = cpup->cpu_m.mcpu_tlb_info;
1932 		while (tlb_info == TLB_CPU_HALTED) {
1933 			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
1934 			    TLB_CPU_HALTED | TLB_INVAL_ALL);
1935 			SMT_PAUSE();
1936 			tlb_info = cpup->cpu_m.mcpu_tlb_info;
1937 		}
1938 		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
1939 			HATSTAT_INC(hs_tlb_inval_delayed);
1940 			CPUSET_DEL(cpus_to_shootdown, c);
1941 		}
1942 	}
1943 
1944 	if (CPUSET_ISNULL(cpus_to_shootdown) ||
1945 	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
1946 
1947 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
1948 
1949 	} else {
1950 
1951 		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
1952 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI,
1953 		    cpus_to_shootdown, hati_demap_func);
1954 
1955 	}
1956 	kpreempt_enable();
1957 }
1958 
1959 /*
1960  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
1961  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
1962  * handle releasing of the htables.
1963  */
1964 void
1965 hat_pte_unmap(
1966 	htable_t	*ht,
1967 	uint_t		entry,
1968 	uint_t		flags,
1969 	x86pte_t	old_pte,
1970 	void		*pte_ptr)
1971 {
1972 	hat_t		*hat = ht->ht_hat;
1973 	hment_t		*hm = NULL;
1974 	page_t		*pp = NULL;
1975 	level_t		l = ht->ht_level;
1976 	pfn_t		pfn;
1977 
1978 	/*
1979 	 * We always track the locking counts, even if nothing is unmapped
1980 	 */
1981 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
1982 		ASSERT(ht->ht_lock_cnt > 0);
1983 		HTABLE_LOCK_DEC(ht);
1984 	}
1985 
1986 	/*
1987 	 * Figure out which page's mapping list lock to acquire using the PFN
1988 	 * passed in "old" PTE. We then attempt to invalidate the PTE.
1989 	 * If another thread, probably a hat_pageunload, has asynchronously
1990 	 * unmapped/remapped this address we'll loop here.
1991 	 */
1992 	ASSERT(ht->ht_busy > 0);
1993 	while (PTE_ISVALID(old_pte)) {
1994 		pfn = PTE2PFN(old_pte, l);
1995 		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
1996 			pp = NULL;
1997 		} else {
1998 			pp = page_numtopp_nolock(pfn);
1999 			if (pp == NULL) {
2000 				panic("no page_t, not NOCONSIST: old_pte="
2001 				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2002 				    old_pte, (uintptr_t)ht, entry,
2003 				    (uintptr_t)pte_ptr);
2004 			}
2005 			x86_hm_enter(pp);
2006 		}
2007 
2008 		/*
2009 		 * If freeing the address space, check that the PTE
2010 		 * hasn't changed. Since the mappings are no longer in use by
2011 		 * any thread, invalidation is unnecessary.
2012 		 * If not freeing, do a full invalidate.
2013 		 */
2014 		if (hat->hat_flags & HAT_FREEING)
2015 			old_pte = x86pte_get(ht, entry);
2016 		else
2017 			old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
2018 
2019 		/*
2020 		 * If the page hadn't changed we've unmapped it and can proceed
2021 		 */
2022 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2023 			break;
2024 
2025 		/*
2026 		 * Otherwise, we'll have to retry with the current old_pte.
2027 		 * Drop the hment lock, since the pfn may have changed.
2028 		 */
2029 		if (pp != NULL) {
2030 			x86_hm_exit(pp);
2031 			pp = NULL;
2032 		} else {
2033 			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2034 		}
2035 	}
2036 
2037 	/*
2038 	 * If the old mapping wasn't valid, there's nothing more to do
2039 	 */
2040 	if (!PTE_ISVALID(old_pte)) {
2041 		if (pp != NULL)
2042 			x86_hm_exit(pp);
2043 		return;
2044 	}
2045 
2046 	/*
2047 	 * Take care of syncing any MOD/REF bits and removing the hment.
2048 	 */
2049 	if (pp != NULL) {
2050 		if (!(flags & HAT_UNLOAD_NOSYNC))
2051 			hati_sync_pte_to_page(pp, old_pte, l);
2052 		hm = hment_remove(pp, ht, entry);
2053 		x86_hm_exit(pp);
2054 		if (hm != NULL)
2055 			hment_free(hm);
2056 	}
2057 
2058 	/*
2059 	 * Handle book keeping in the htable and hat
2060 	 */
2061 	ASSERT(ht->ht_valid_cnt > 0);
2062 	HTABLE_DEC(ht->ht_valid_cnt);
2063 	PGCNT_DEC(hat, l);
2064 }
2065 
2066 /*
2067  * very cheap unload implementation to special case some kernel addresses
2068  */
2069 static void
2070 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2071 {
2072 	uintptr_t	va = (uintptr_t)addr;
2073 	uintptr_t	eva = va + len;
2074 	pgcnt_t		pg_index;
2075 	htable_t	*ht;
2076 	uint_t		entry;
2077 	x86pte_t	*pte_ptr;
2078 	x86pte_t	old_pte;
2079 
2080 	for (; va < eva; va += MMU_PAGESIZE) {
2081 		/*
2082 		 * Get the PTE
2083 		 */
2084 		pg_index = mmu_btop(va - mmu.kmap_addr);
2085 		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2086 		old_pte = GET_PTE(pte_ptr);
2087 
2088 		/*
2089 		 * get the htable / entry
2090 		 */
2091 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2092 		    >> LEVEL_SHIFT(1)];
2093 		entry = htable_va2entry(va, ht);
2094 
2095 		/*
2096 		 * use mostly common code to unmap it.
2097 		 */
2098 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
2099 	}
2100 }
2101 
2102 
2103 /*
2104  * unload a range of virtual address space (no callback)
2105  */
2106 void
2107 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2108 {
2109 	uintptr_t va = (uintptr_t)addr;
2110 
2111 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2112 
2113 	/*
2114 	 * special case for performance.
2115 	 */
2116 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2117 		ASSERT(hat == kas.a_hat);
2118 		hat_kmap_unload(addr, len, flags);
2119 	} else {
2120 		hat_unload_callback(hat, addr, len, flags, NULL);
2121 	}
2122 }
2123 
2124 /*
2125  * Do the callbacks for ranges being unloaded.
2126  */
2127 typedef struct range_info {
2128 	uintptr_t	rng_va;
2129 	ulong_t		rng_cnt;
2130 	level_t		rng_level;
2131 } range_info_t;
2132 
2133 static void
2134 handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
2135 {
2136 	/*
2137 	 * do callbacks to upper level VM system
2138 	 */
2139 	while (cb != NULL && cnt > 0) {
2140 		--cnt;
2141 		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2142 		cb->hcb_end_addr = cb->hcb_start_addr;
2143 		cb->hcb_end_addr +=
2144 		    range[cnt].rng_cnt << LEVEL_SIZE(range[cnt].rng_level);
2145 		cb->hcb_function(cb);
2146 	}
2147 }
2148 
2149 /*
2150  * Unload a given range of addresses (has optional callback)
2151  *
2152  * Flags:
2153  * define	HAT_UNLOAD		0x00
2154  * define	HAT_UNLOAD_NOSYNC	0x02
2155  * define	HAT_UNLOAD_UNLOCK	0x04
2156  * define	HAT_UNLOAD_OTHER	0x08 - not used
2157  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
2158  */
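/*
 * Unloaded mappings are coalesced into at most MAX_UNLOAD_CNT contiguous
 * ranges; when the array fills up, handle_ranges() issues the callbacks
 * and the array is reused.
 */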
2159 #define	MAX_UNLOAD_CNT (8)
2160 void
2161 hat_unload_callback(
2162 	hat_t		*hat,
2163 	caddr_t		addr,
2164 	size_t		len,
2165 	uint_t		flags,
2166 	hat_callback_t	*cb)
2167 {
2168 	uintptr_t	vaddr = (uintptr_t)addr;
2169 	uintptr_t	eaddr = vaddr + len;
2170 	htable_t	*ht = NULL;
2171 	uint_t		entry;
2172 	uintptr_t	contig_va = (uintptr_t)-1L;
2173 	range_info_t	r[MAX_UNLOAD_CNT];
2174 	uint_t		r_cnt = 0;
2175 	x86pte_t	old_pte;
2176 
2177 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2178 	ASSERT(IS_PAGEALIGNED(vaddr));
2179 	ASSERT(IS_PAGEALIGNED(eaddr));
2180 
2181 	/*
2182 	 * Special case a single page being unloaded for speed. This happens
2183 	 * quite frequently, COW faults after a fork() for example.
2184 	 */
2185 	if (cb == NULL && len == MMU_PAGESIZE) {
2186 		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2187 		if (ht != NULL) {
2188 			if (PTE_ISVALID(old_pte))
2189 				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2190 			htable_release(ht);
2191 		}
2192 		return;
2193 	}
2194 
2195 	while (vaddr < eaddr) {
2196 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2197 		if (ht == NULL)
2198 			break;
2199 
2200 		ASSERT(!IN_VA_HOLE(vaddr));
2201 
2202 		if (vaddr < (uintptr_t)addr)
2203 			panic("hat_unload_callback(): unmap inside large page");
2204 
2205 		/*
2206 		 * We'll do the call backs for contiguous ranges
2207 		 */
2208 		if (vaddr != contig_va ||
2209 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2210 			if (r_cnt == MAX_UNLOAD_CNT) {
2211 				handle_ranges(cb, r_cnt, r);
2212 				r_cnt = 0;
2213 			}
2214 			r[r_cnt].rng_va = vaddr;
2215 			r[r_cnt].rng_cnt = 0;
2216 			r[r_cnt].rng_level = ht->ht_level;
2217 			++r_cnt;
2218 		}
2219 
2220 		/*
2221 		 * Unload one mapping from the page tables.
2222 		 */
2223 		entry = htable_va2entry(vaddr, ht);
2224 		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2225 		ASSERT(ht->ht_level <= mmu.max_page_level);
2226 		vaddr += LEVEL_SIZE(ht->ht_level);
2227 		contig_va = vaddr;
2228 		++r[r_cnt - 1].rng_cnt;
2229 	}
2230 	if (ht)
2231 		htable_release(ht);
2232 
2233 	/*
2234 	 * handle last range for callbacks
2235 	 */
2236 	if (r_cnt > 0)
2237 		handle_ranges(cb, r_cnt, r);
2238 }
2239 
2240 /*
2241  * synchronize mapping with software data structures
2242  *
2243  * This interface is currently only used by the working set monitor
2244  * driver.
2245  */
2246 /*ARGSUSED*/
2247 void
2248 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2249 {
2250 	uintptr_t	vaddr = (uintptr_t)addr;
2251 	uintptr_t	eaddr = vaddr + len;
2252 	htable_t	*ht = NULL;
2253 	uint_t		entry;
2254 	x86pte_t	pte;
2255 	x86pte_t	save_pte;
2256 	x86pte_t	new;
2257 	page_t		*pp;
2258 
2259 	ASSERT(!IN_VA_HOLE(vaddr));
2260 	ASSERT(IS_PAGEALIGNED(vaddr));
2261 	ASSERT(IS_PAGEALIGNED(eaddr));
2262 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2263 
2264 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2265 try_again:
2266 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
2267 		if (ht == NULL)
2268 			break;
2269 		entry = htable_va2entry(vaddr, ht);
2270 
2271 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2272 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
2273 			continue;
2274 
2275 		/*
2276 		 * We need to acquire the mapping list lock to protect
2277 		 * against hat_pageunload(), hat_unload(), etc.
2278 		 */
2279 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2280 		if (pp == NULL)
2281 			break;
2282 		x86_hm_enter(pp);
2283 		save_pte = pte;
2284 		pte = x86pte_get(ht, entry);
2285 		if (pte != save_pte) {
2286 			x86_hm_exit(pp);
2287 			goto try_again;
2288 		}
2289 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2290 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2291 			x86_hm_exit(pp);
2292 			continue;
2293 		}
2294 
2295 		/*
2296 		 * Need to clear ref or mod bits. We may compete with
2297 		 * hardware updating the R/M bits and have to try again.
2298 		 */
2299 		if (flags == HAT_SYNC_ZERORM) {
2300 			new = pte;
2301 			PTE_CLR(new, PT_REF | PT_MOD);
2302 			pte = hati_update_pte(ht, entry, pte, new);
2303 			if (pte != 0) {
2304 				x86_hm_exit(pp);
2305 				goto try_again;
2306 			}
2307 		} else {
2308 			/*
2309 			 * sync the PTE to the page_t
2310 			 */
2311 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2312 		}
2313 		x86_hm_exit(pp);
2314 	}
2315 	if (ht)
2316 		htable_release(ht);
2317 }
2318 
2319 /*
2320  * void	hat_map(hat, addr, len, flags)
2321  */
2322 /*ARGSUSED*/
2323 void
2324 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2325 {
2326 	/* does nothing */
2327 }
2328 
2329 /*
2330  * uint_t hat_getattr(hat, addr, *attr)
2331  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
2332  *	mapping and *attr is valid, nonzero if there was no mapping and
2333  *	*attr is not valid.
2334  */
2335 uint_t
2336 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2337 {
2338 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2339 	htable_t	*ht = NULL;
2340 	x86pte_t	pte;
2341 
2342 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2343 
2344 	if (IN_VA_HOLE(vaddr))
2345 		return ((uint_t)-1);
2346 
2347 	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2348 	if (ht == NULL)
2349 		return ((uint_t)-1);
2350 
2351 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2352 		htable_release(ht);
2353 		return ((uint_t)-1);
2354 	}
2355 
2356 	*attr = PROT_READ;
2357 	if (PTE_GET(pte, PT_WRITABLE))
2358 		*attr |= PROT_WRITE;
2359 	if (PTE_GET(pte, PT_USER))
2360 		*attr |= PROT_USER;
2361 	if (!PTE_GET(pte, mmu.pt_nx))
2362 		*attr |= PROT_EXEC;
2363 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2364 		*attr |= HAT_NOSYNC;
2365 	htable_release(ht);
2366 	return (0);
2367 }
2368 
2369 /*
2370  * hat_updateattr() applies the given attribute change to an existing mapping
2371  */
2372 #define	HAT_LOAD_ATTR		1
2373 #define	HAT_SET_ATTR		2
2374 #define	HAT_CLR_ATTR		3
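
/*
 * Of the attributes handled here (PROT_WRITE, PROT_EXEC and HAT_NOSYNC),
 * HAT_SET_ATTR only adds those requested, HAT_CLR_ATTR only removes those
 * requested, and HAT_LOAD_ATTR sets them to exactly the requested values.
 */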
2375 
2376 static void
2377 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2378 {
2379 	uintptr_t	vaddr = (uintptr_t)addr;
2380 	uintptr_t	eaddr = (uintptr_t)addr + len;
2381 	htable_t	*ht = NULL;
2382 	uint_t		entry;
2383 	x86pte_t	oldpte, newpte;
2384 	page_t		*pp;
2385 
2386 	ASSERT(IS_PAGEALIGNED(vaddr));
2387 	ASSERT(IS_PAGEALIGNED(eaddr));
2388 	ASSERT(hat == kas.a_hat ||
2389 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2390 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2391 try_again:
2392 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2393 		if (ht == NULL)
2394 			break;
2395 		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2396 			continue;
2397 
2398 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2399 		if (pp == NULL)
2400 			continue;
2401 		x86_hm_enter(pp);
2402 
2403 		newpte = oldpte;
2404 		/*
2405 		 * We found a page table entry in the desired range,
2406 		 * figure out the new attributes.
2407 		 */
2408 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2409 			if ((attr & PROT_WRITE) &&
2410 			    !PTE_GET(oldpte, PT_WRITABLE))
2411 				newpte |= PT_WRITABLE;
2412 
2413 			if ((attr & HAT_NOSYNC) &&
2414 			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2415 				newpte |= PT_NOSYNC;
2416 
2417 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2418 				newpte &= ~mmu.pt_nx;
2419 		}
2420 
2421 		if (what == HAT_LOAD_ATTR) {
2422 			if (!(attr & PROT_WRITE) &&
2423 			    PTE_GET(oldpte, PT_WRITABLE))
2424 				newpte &= ~PT_WRITABLE;
2425 
2426 			if (!(attr & HAT_NOSYNC) &&
2427 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2428 				newpte &= ~PT_SOFTWARE;
2429 
2430 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2431 				newpte |= mmu.pt_nx;
2432 		}
2433 
2434 		if (what == HAT_CLR_ATTR) {
2435 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2436 				newpte &= ~PT_WRITABLE;
2437 
2438 			if ((attr & HAT_NOSYNC) &&
2439 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2440 				newpte &= ~PT_SOFTWARE;
2441 
2442 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2443 				newpte |= mmu.pt_nx;
2444 		}
2445 
2446 		/*
2447 		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2448 		 * x86pte_set() depends on this.
2449 		 */
2450 		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2451 			newpte |= PT_REF | PT_MOD;
2452 
2453 		/*
2454 		 * what about PROT_READ or others? this code only handles:
2455 		 * EXEC, WRITE, NOSYNC
2456 		 */
2457 
2458 		/*
2459 		 * If new PTE really changed, update the table.
2460 		 */
2461 		if (newpte != oldpte) {
2462 			entry = htable_va2entry(vaddr, ht);
2463 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2464 			if (oldpte != 0) {
2465 				x86_hm_exit(pp);
2466 				goto try_again;
2467 			}
2468 		}
2469 		x86_hm_exit(pp);
2470 	}
2471 	if (ht)
2472 		htable_release(ht);
2473 }
2474 
2475 /*
2476  * Various wrappers for hat_updateattr()
2477  */
2478 void
2479 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2480 {
2481 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2482 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2483 }
2484 
2485 void
2486 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2487 {
2488 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2489 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2490 }
2491 
2492 void
2493 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2494 {
2495 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2496 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2497 }
2498 
2499 void
2500 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2501 {
2502 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2503 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2504 }
2505 
2506 /*
2507  * size_t hat_getpagesize(hat, addr)
2508  *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
2509  *	no mapping. This is an advisory call.
2510  */
2511 ssize_t
2512 hat_getpagesize(hat_t *hat, caddr_t addr)
2513 {
2514 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2515 	htable_t	*ht;
2516 	size_t		pagesize;
2517 
2518 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2519 	if (IN_VA_HOLE(vaddr))
2520 		return (-1);
2521 	ht = htable_getpage(hat, vaddr, NULL);
2522 	if (ht == NULL)
2523 		return (-1);
2524 	pagesize = LEVEL_SIZE(ht->ht_level);
2525 	htable_release(ht);
2526 	return (pagesize);
2527 }
2528 
2529 
2530 
2531 /*
2532  * pfn_t hat_getpfnum(hat, addr)
2533  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2534  */
2535 pfn_t
2536 hat_getpfnum(hat_t *hat, caddr_t addr)
2537 {
2538 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2539 	htable_t	*ht;
2540 	uint_t		entry;
2541 	pfn_t		pfn = PFN_INVALID;
2542 
2543 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2544 	if (khat_running == 0)
2545 		return (PFN_INVALID);
2546 
2547 	if (IN_VA_HOLE(vaddr))
2548 		return (PFN_INVALID);
2549 
2550 	/*
2551 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2552 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2553 	 * this up.
2554 	 */
2555 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2556 		x86pte_t pte;
2557 		pgcnt_t pg_index;
2558 
2559 		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2560 		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2561 		if (!PTE_ISVALID(pte))
2562 			return (PFN_INVALID);
2563 		/*LINTED [use of constant 0 causes a silly lint warning] */
2564 		return (PTE2PFN(pte, 0));
2565 	}
2566 
2567 	ht = htable_getpage(hat, vaddr, &entry);
2568 	if (ht == NULL)
2569 		return (PFN_INVALID);
2570 	ASSERT(vaddr >= ht->ht_vaddr);
2571 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2572 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
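	/*
	 * For a large page mapping, add the page offset of vaddr within
	 * the mapping to the base pfn.
	 */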
2573 	if (ht->ht_level > 0)
2574 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2575 	htable_release(ht);
2576 	return (pfn);
2577 }
2578 
2579 /*
2580  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
2581  * Use hat_getpfnum(kas.a_hat, ...) instead.
2582  *
2583  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
2584  * but can't right now due to the fact that some software has grown to use
2585  * this interface incorrectly. So for now when the interface is misused,
2586  * return a warning to the user that in the future it won't work in the
2587  * way they're abusing it, and carry on.
2588  *
2589  * Note that hat_getkpfnum() is never supported on amd64.
2590  */
2591 #if !defined(__amd64)
2592 pfn_t
2593 hat_getkpfnum(caddr_t addr)
2594 {
2595 	pfn_t	pfn;
2596 	int badcaller = 0;
2597 
2598 	if (khat_running == 0)
2599 		panic("hat_getkpfnum(): called too early\n");
2600 	if ((uintptr_t)addr < kernelbase)
2601 		return (PFN_INVALID);
2602 
2603 
2604 	if (segkpm && IS_KPM_ADDR(addr)) {
2605 		badcaller = 1;
2606 		pfn = hat_kpm_va2pfn(addr);
2607 	} else {
2608 		pfn = hat_getpfnum(kas.a_hat, addr);
2609 		badcaller = pf_is_memory(pfn);
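		/*
		 * If the pfn is ordinary memory it has a page_t, which is
		 * exactly the case this interface is not supposed to be
		 * used for.
		 */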
2610 	}
2611 
2612 	if (badcaller)
2613 		hat_getkpfnum_badcall(caller());
2614 	return (pfn);
2615 }
2616 #endif /* __amd64 */
2617 
2618 /*
2619  * int hat_probe(hat, addr)
2620  *	return 0 if no valid mapping is present.  Faster version
2621  *	of hat_getattr in certain architectures.
2622  */
2623 int
2624 hat_probe(hat_t *hat, caddr_t addr)
2625 {
2626 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2627 	uint_t		entry;
2628 	htable_t	*ht;
2629 	pgcnt_t		pg_off;
2630 
2631 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2632 	ASSERT(hat == kas.a_hat ||
2633 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2634 	if (IN_VA_HOLE(vaddr))
2635 		return (0);
2636 
2637 	/*
2638 	 * Most common use of hat_probe is from segmap. We special case it
2639 	 * for performance.
2640 	 */
2641 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2642 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2643 		if (mmu.pae_hat)
2644 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2645 		else
2646 			return (PTE_ISVALID(
2647 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2648 	}
2649 
2650 	ht = htable_getpage(hat, vaddr, &entry);
2651 	if (ht == NULL)
2652 		return (0);
2653 	htable_release(ht);
2654 	return (1);
2655 }
2656 
2657 /*
2658  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2659  */
2660 static int
2661 is_it_dism(hat_t *hat, caddr_t va)
2662 {
2663 	struct seg *seg;
2664 	struct shm_data *shmd;
2665 	struct spt_data *sptd;
2666 
2667 	seg = as_findseg(hat->hat_as, va, 0);
2668 	ASSERT(seg != NULL);
2669 	ASSERT(seg->s_base <= va);
2670 	shmd = (struct shm_data *)seg->s_data;
2671 	ASSERT(shmd != NULL);
2672 	sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2673 	ASSERT(sptd != NULL);
2674 	if (sptd->spt_flags & SHM_PAGEABLE)
2675 		return (1);
2676 	return (0);
2677 }
2678 
2679 /*
2680  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2681  * except that we use the ism_hat's existing mappings to determine the pages
2682  * and protections to use for this hat. If we find a full properly aligned
2683  * and sized pagetable, we will attempt to share the pagetable itself.
2684  */
2685 /*ARGSUSED*/
2686 int
2687 hat_share(
2688 	hat_t		*hat,
2689 	caddr_t		addr,
2690 	hat_t		*ism_hat,
2691 	caddr_t		src_addr,
2692 	size_t		len,	/* almost useless value, see below.. */
2693 	uint_t		ismszc)
2694 {
2695 	uintptr_t	vaddr_start = (uintptr_t)addr;
2696 	uintptr_t	vaddr;
2697 	uintptr_t	eaddr = vaddr_start + len;
2698 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
2699 	uintptr_t	ism_addr = ism_addr_start;
2700 	uintptr_t	e_ism_addr = ism_addr + len;
2701 	htable_t	*ism_ht = NULL;
2702 	htable_t	*ht;
2703 	x86pte_t	pte;
2704 	page_t		*pp;
2705 	pfn_t		pfn;
2706 	level_t		l;
2707 	pgcnt_t		pgcnt;
2708 	uint_t		prot;
2709 	int		is_dism;
2710 	int		flags;
2711 
2712 	/*
2713 	 * We might be asked to share an empty DISM hat by as_dup()
2714 	 */
2715 	ASSERT(hat != kas.a_hat);
2716 	ASSERT(eaddr <= _userlimit);
2717 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
2718 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
2719 		return (0);
2720 	}
2721 
2722 	/*
2723 	 * The SPT segment driver often passes us a size larger than there are
2724 	 * valid mappings. That's because it rounds the segment size up to a
2725 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
2726 	 */
2727 	ASSERT(IS_PAGEALIGNED(vaddr_start));
2728 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
2729 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
2730 	is_dism = is_it_dism(hat, addr);
2731 	while (ism_addr < e_ism_addr) {
2732 		/*
2733 		 * use htable_walk to get the next valid ISM mapping
2734 		 */
2735 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2736 		if (ism_ht == NULL)
2737 			break;
2738 
2739 		/*
2740 		 * First check to see if we already share the page table.
2741 		 */
2742 		l = ism_ht->ht_level;
2743 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
2744 		ht = htable_lookup(hat, vaddr, l);
2745 		if (ht != NULL) {
2746 			if (ht->ht_flags & HTABLE_SHARED_PFN)
2747 				goto shared;
2748 			htable_release(ht);
2749 			goto not_shared;
2750 		}
2751 
2752 		/*
2753 		 * Can't ever share top table.
2754 		 */
2755 		if (l == mmu.max_level)
2756 			goto not_shared;
2757 
2758 		/*
2759 		 * Avoid level mismatches later due to DISM faults.
2760 		 */
2761 		if (is_dism && l > 0)
2762 			goto not_shared;
2763 
2764 		/*
2765 		 * addresses and lengths must align
2766 		 * table must be fully populated
2767 		 * no lower level page tables
2768 		 */
2769 		if (ism_addr != ism_ht->ht_vaddr ||
2770 		    (vaddr & LEVEL_OFFSET(l + 1)) != 0)
2771 			goto not_shared;
2772 
2773 		/*
2774 		 * The range of address space must cover a full table.
2775 		 */
2776 		if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
2777 			goto not_shared;
2778 
2779 		/*
2780 		 * All entries in the ISM page table must be leaf PTEs.
2781 		 */
2782 		if (l > 0) {
2783 			int e;
2784 
2785 			/*
2786 			 * We know the 0th is from htable_walk() above.
2787 			 */
2788 			for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
2789 				x86pte_t pte;
2790 				pte = x86pte_get(ism_ht, e);
2791 				if (!PTE_ISPAGE(pte, l))
2792 					goto not_shared;
2793 			}
2794 		}
2795 
2796 		/*
2797 		 * share the page table
2798 		 */
2799 		ht = htable_create(hat, vaddr, l, ism_ht);
2800 shared:
2801 		ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
2802 		ASSERT(ht->ht_shares == ism_ht);
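		/*
		 * Account for the pages that become visible through the
		 * shared pagetable: the difference between the ISM table's
		 * valid count and ours, converted to base pages.
		 */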
2803 		hat->hat_ism_pgcnt +=
2804 		    (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
2805 		    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
2806 		ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
2807 		htable_release(ht);
2808 		ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
2809 		htable_release(ism_ht);
2810 		ism_ht = NULL;
2811 		continue;
2812 
2813 not_shared:
2814 		/*
2815 		 * Unable to share the page table. Instead we will
2816 		 * create new mappings from the values in the ISM mappings.
2817 		 * Figure out what level size mappings to use.
2818 		 */
2819 		for (l = ism_ht->ht_level; l > 0; --l) {
2820 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
2821 			    (vaddr & LEVEL_OFFSET(l)) == 0)
2822 				break;
2823 		}
2824 
2825 		/*
2826 		 * The ISM mapping might be larger than the share area,
2827 		 * be careful to truncate it if needed.
2828 		 */
2829 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
2830 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
2831 		} else {
2832 			pgcnt = mmu_btop(eaddr - vaddr);
2833 			l = 0;
2834 		}
2835 
2836 		pfn = PTE2PFN(pte, ism_ht->ht_level);
2837 		ASSERT(pfn != PFN_INVALID);
2838 		while (pgcnt > 0) {
2839 			/*
2840 			 * Make a new pte for the PFN for this level.
2841 			 * Copy protections for the pte from the ISM pte.
2842 			 */
2843 			pp = page_numtopp_nolock(pfn);
2844 			ASSERT(pp != NULL);
2845 
2846 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
2847 			if (PTE_GET(pte, PT_WRITABLE))
2848 				prot |= PROT_WRITE;
2849 			if (!PTE_GET(pte, PT_NX))
2850 				prot |= PROT_EXEC;
2851 
2852 			flags = HAT_LOAD;
2853 			if (!is_dism)
2854 				flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
2855 			while (hati_load_common(hat, vaddr, pp, prot, flags,
2856 			    l, pfn) != 0) {
2857 				if (l == 0)
2858 					panic("hati_load_common() failure");
2859 				--l;
2860 			}
2861 
2862 			vaddr += LEVEL_SIZE(l);
2863 			ism_addr += LEVEL_SIZE(l);
2864 			pfn += mmu_btop(LEVEL_SIZE(l));
2865 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
2866 		}
2867 	}
2868 	if (ism_ht != NULL)
2869 		htable_release(ism_ht);
2870 	return (0);
2871 }
2872 
2873 
2874 /*
2875  * hat_unshare() is similar to hat_unload_callback(), but
2876  * we have to look for empty shared pagetables. Note that
2877  * hat_unshare() is always invoked against an entire segment.
2878  */
2879 /*ARGSUSED*/
2880 void
2881 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
2882 {
2883 	uintptr_t	vaddr = (uintptr_t)addr;
2884 	uintptr_t	eaddr = vaddr + len;
2885 	htable_t	*ht = NULL;
2886 	uint_t		need_demaps = 0;
2887 	int		flags = HAT_UNLOAD_UNMAP;
2888 	level_t		l;
2889 
2890 	ASSERT(hat != kas.a_hat);
2891 	ASSERT(eaddr <= _userlimit);
2892 	ASSERT(IS_PAGEALIGNED(vaddr));
2893 	ASSERT(IS_PAGEALIGNED(eaddr));
2894 
2895 	/*
2896 	 * First go through and remove any shared pagetables.
2897 	 *
2898 	 * Note that it's ok to delay the TLB shootdown till the entire range is
2899 	 * finished, because if hat_pageunload() were to unload a shared
2900 	 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
2901 	 */
2902 	l = mmu.max_page_level;
2903 	if (l == mmu.max_level)
2904 		--l;
2905 	for (; l >= 0; --l) {
2906 		for (vaddr = (uintptr_t)addr; vaddr < eaddr;
2907 		    vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
2908 			ASSERT(!IN_VA_HOLE(vaddr));
2909 			/*
2910 			 * find a pagetable that maps the current address
2911 			 */
2912 			ht = htable_lookup(hat, vaddr, l);
2913 			if (ht == NULL)
2914 				continue;
2915 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
2916 				/*
2917 				 * clear page count, set valid_cnt to 0,
2918 				 * let htable_release() finish the job
2919 				 */
2920 				hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
2921 				    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
2922 				ht->ht_valid_cnt = 0;
2923 				need_demaps = 1;
2924 			}
2925 			htable_release(ht);
2926 		}
2927 	}
2928 
2929 	/*
2930 	 * flush the TLBs - since we're probably dealing with MANY mappings
2931 	 * we do just one CR3 reload.
2932 	 */
2933 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
2934 		hat_tlb_inval(hat, DEMAP_ALL_ADDR);
2935 
2936 	/*
2937 	 * Now go back and clean up any unaligned mappings that
2938 	 * couldn't share pagetables.
2939 	 */
2940 	if (!is_it_dism(hat, addr))
2941 		flags |= HAT_UNLOAD_UNLOCK;
2942 	hat_unload(hat, addr, len, flags);
2943 }
2944 
2945 
2946 /*
2947  * hat_reserve() does nothing
2948  */
2949 /*ARGSUSED*/
2950 void
2951 hat_reserve(struct as *as, caddr_t addr, size_t len)
2952 {
2953 }
2954 
2955 
2956 /*
2957  * Called when all mappings to a page should have write permission removed.
2958  * Mostly stolen from hat_pagesync()
2959  */
2960 static void
2961 hati_page_clrwrt(struct page *pp)
2962 {
2963 	hment_t		*hm = NULL;
2964 	htable_t	*ht;
2965 	uint_t		entry;
2966 	x86pte_t	old;
2967 	x86pte_t	new;
2968 	uint_t		pszc = 0;
2969 
2970 next_size:
2971 	/*
2972 	 * walk thru the mapping list clearing write permission
2973 	 */
2974 	x86_hm_enter(pp);
2975 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
2976 		if (ht->ht_level < pszc)
2977 			continue;
2978 		old = x86pte_get(ht, entry);
2979 
2980 		for (;;) {
2981 			/*
2982 			 * Is this mapping of interest?
2983 			 */
2984 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
2985 			    PTE_GET(old, PT_WRITABLE) == 0)
2986 				break;
2987 
2988 			/*
2989 			 * Clear ref/mod writable bits. This requires cross
2990 			 * calls to ensure any executing TLBs see cleared bits.
2991 			 */
2992 			new = old;
2993 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
2994 			old = hati_update_pte(ht, entry, old, new);
2995 			if (old != 0)
2996 				continue;
2997 
2998 			break;
2999 		}
3000 	}
3001 	x86_hm_exit(pp);
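	/*
	 * If the page is part of a larger page, move up to the group
	 * leader of the next size and clear write permission on any
	 * larger mappings as well.
	 */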
3002 	while (pszc < pp->p_szc) {
3003 		page_t *tpp;
3004 		pszc++;
3005 		tpp = PP_GROUPLEADER(pp, pszc);
3006 		if (pp != tpp) {
3007 			pp = tpp;
3008 			goto next_size;
3009 		}
3010 	}
3011 }
3012 
3013 /*
3014  * void hat_page_setattr(pp, flag)
3015  * void hat_page_clrattr(pp, flag)
3016  *	used to set/clr ref/mod bits.
3017  */
3018 void
3019 hat_page_setattr(struct page *pp, uint_t flag)
3020 {
3021 	vnode_t		*vp = pp->p_vnode;
3022 	kmutex_t	*vphm = NULL;
3023 	page_t		**listp;
3024 	int		noshuffle;
3025 
3026 	noshuffle = flag & P_NSH;
3027 	flag &= ~P_NSH;
3028 
3029 	if (PP_GETRM(pp, flag) == flag)
3030 		return;
3031 
3032 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3033 	    !noshuffle) {
3034 		vphm = page_vnode_mutex(vp);
3035 		mutex_enter(vphm);
3036 	}
3037 
3038 	PP_SETRM(pp, flag);
3039 
3040 	if (vphm != NULL) {
3041 
3042 		/*
3043 		 * Some File Systems examine v_pages for NULL w/o
3044 		 * grabbing the vphm mutex. Must not let it become NULL when
3045 		 * pp is the only page on the list.
3046 		 */
3047 		if (pp->p_vpnext != pp) {
3048 			page_vpsub(&vp->v_pages, pp);
3049 			if (vp->v_pages != NULL)
3050 				listp = &vp->v_pages->p_vpprev->p_vpnext;
3051 			else
3052 				listp = &vp->v_pages;
3053 			page_vpadd(listp, pp);
3054 		}
3055 		mutex_exit(vphm);
3056 	}
3057 }
3058 
3059 void
3060 hat_page_clrattr(struct page *pp, uint_t flag)
3061 {
3062 	vnode_t		*vp = pp->p_vnode;
3063 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3064 
3065 	/*
3066 	 * Caller is expected to hold page's io lock for VMODSORT to work
3067 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3068 	 * bit is cleared.
3069 	 * We don't assert this, to avoid tripping some existing third party
3070 	 * code. The dirty page is moved back to top of the v_page list
3071 	 * after IO is done in pvn_write_done().
3072 	 */
3073 	PP_CLRRM(pp, flag);
3074 
3075 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3076 
3077 		/*
3078 		 * VMODSORT works by removing write permissions and getting
3079 		 * a fault when a page is made dirty. At this point
3080 		 * we need to remove write permission from all mappings
3081 		 * to this page.
3082 		 */
3083 		hati_page_clrwrt(pp);
3084 	}
3085 }
3086 
3087 /*
3088  *	If flag is specified, returns 0 if attribute is disabled
3089  *	and nonzero if enabled.  If flag specifies multiple attributes
3090  *	then returns 0 if ALL attributes are disabled.  This is an advisory
3091  *	call.
3092  */
3093 uint_t
3094 hat_page_getattr(struct page *pp, uint_t flag)
3095 {
3096 	return (PP_GETRM(pp, flag));
3097 }
3098 
3099 
3100 /*
3101  * common code used by hat_pageunload() and hment_steal()
3102  */
3103 hment_t *
3104 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3105 {
3106 	x86pte_t old_pte;
3107 	pfn_t pfn = pp->p_pagenum;
3108 	hment_t *hm;
3109 
3110 	/*
3111 	 * We need to acquire a hold on the htable in order to
3112 	 * do the invalidate. We know the htable must exist, since
3113 	 * unmaps don't release the htable until after removing any
3114 	 * hment. Having x86_hm_enter() keeps that from proceeding.
3115 	 */
3116 	htable_acquire(ht);
3117 
3118 	/*
3119 	 * Invalidate the PTE and remove the hment.
3120 	 */
3121 	old_pte = x86pte_inval(ht, entry, 0, NULL);
3122 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3123 		panic("x86pte_inval() failure found PTE = " FMT_PTE
3124 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3125 		    old_pte, pfn, (uintptr_t)ht, entry);
3126 	}
3127 
3128 	/*
3129 	 * Clean up all the htable information for this mapping
3130 	 */
3131 	ASSERT(ht->ht_valid_cnt > 0);
3132 	HTABLE_DEC(ht->ht_valid_cnt);
3133 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
3134 
3135 	/*
3136 	 * sync ref/mod bits to the page_t
3137 	 */
3138 	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3139 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3140 
3141 	/*
3142 	 * Remove the mapping list entry for this page.
3143 	 */
3144 	hm = hment_remove(pp, ht, entry);
3145 
3146 	/*
3147 	 * drop the mapping list lock so that we might free the
3148 	 * hment and htable.
3149 	 */
3150 	x86_hm_exit(pp);
3151 	htable_release(ht);
3152 	return (hm);
3153 }
3154 
3155 extern int	vpm_enable;
3156 /*
3157  * Unload all translations to a page. If the page is a subpage of a large
3158  * page, the large page mappings are also removed.
3159  *
3160  * The forceflags are unused.
3161  */
3162 
3163 /*ARGSUSED*/
3164 static int
3165 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3166 {
3167 	page_t		*cur_pp = pp;
3168 	hment_t		*hm;
3169 	hment_t		*prev;
3170 	htable_t	*ht;
3171 	uint_t		entry;
3172 	level_t		level;
3173 
3174 #if defined(__amd64)
3175 	/*
3176 	 * clear the vpm ref.
3177 	 */
3178 	if (vpm_enable) {
3179 		pp->p_vpmref = 0;
3180 	}
3181 #endif
3182 	/*
3183 	 * The loop with next_size handles pages with multiple pagesize mappings
3184 	 */
3185 next_size:
3186 	for (;;) {
3187 
3188 		/*
3189 		 * Get a mapping list entry
3190 		 */
3191 		x86_hm_enter(cur_pp);
3192 		for (prev = NULL; ; prev = hm) {
3193 			hm = hment_walk(cur_pp, &ht, &entry, prev);
3194 			if (hm == NULL) {
3195 				x86_hm_exit(cur_pp);
3196 
3197 				/*
3198 				 * If not part of a larger page, we're done.
3199 				 */
3200 				if (cur_pp->p_szc <= pg_szcd) {
3201 					return (0);
3202 				}
3203 
3204 				/*
3205 				 * Else check the next larger page size.
3206 				 * hat_page_demote() may decrease p_szc
3207 				 * but that's ok, we'll just take an extra
3208 				 * trip to discover there are no larger mappings
3209 				 * and return.
3210 				 */
3211 				++pg_szcd;
3212 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3213 				goto next_size;
3214 			}
3215 
3216 			/*
3217 			 * If this mapping size matches, remove it.
3218 			 */
3219 			level = ht->ht_level;
3220 			if (level == pg_szcd)
3221 				break;
3222 		}
3223 
3224 		/*
3225 		 * Remove the mapping list entry for this page.
3226 		 * Note this does the x86_hm_exit() for us.
3227 		 */
3228 		hm = hati_page_unmap(cur_pp, ht, entry);
3229 		if (hm != NULL)
3230 			hment_free(hm);
3231 	}
3232 }
3233 
3234 int
3235 hat_pageunload(struct page *pp, uint_t forceflag)
3236 {
3237 	ASSERT(PAGE_EXCL(pp));
3238 	return (hati_pageunload(pp, 0, forceflag));
3239 }
3240 
3241 /*
3242  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3243  * page level that included pp.
3244  *
3245  * pp must be locked EXCL. Even though no other constituent pages are locked
3246  * it's legal to unload large mappings to pp because all constituent pages of
3247  * large locked mappings have to be locked SHARED.  Therefore if we have EXCL
3248  * lock on one of constituent pages none of the large mappings to pp are
3249  * locked.
3250  *
3251  * Change (always decrease) p_szc field starting from the last constituent
3252  * page and ending with root constituent page so that root's pszc always shows
3253  * the area where hat_page_demote() may be active.
3254  *
3255  * This mechanism is only used for file system pages where it's not always
3256  * possible to get EXCL locks on all constituent pages to demote the size code
3257  * (as is done for anonymous or kernel large pages).
3258  */
3259 void
3260 hat_page_demote(page_t *pp)
3261 {
3262 	uint_t		pszc;
3263 	uint_t		rszc;
3264 	uint_t		szc;
3265 	page_t		*rootpp;
3266 	page_t		*firstpp;
3267 	page_t		*lastpp;
3268 	pgcnt_t		pgcnt;
3269 
3270 	ASSERT(PAGE_EXCL(pp));
3271 	ASSERT(!PP_ISFREE(pp));
3272 	ASSERT(page_szc_lock_assert(pp));
3273 
3274 	if (pp->p_szc == 0)
3275 		return;
3276 
3277 	rootpp = PP_GROUPLEADER(pp, 1);
3278 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3279 
3280 	/*
3281 	 * all large mappings to pp are gone
3282 	 * and no new ones can be set up since pp is locked exclusively.
3283 	 *
3284 	 * Lock the root to make sure there's only one hat_page_demote()
3285 	 * outstanding within the area of this root's pszc.
3286 	 *
3287 	 * Second potential hat_page_demote() is already eliminated by upper
3288 	 * VM layer via page_szc_lock() but we don't rely on it and use our
3289 	 * own locking (so that upper layer locking can be changed without
3290 	 * assumptions that hat depends on upper layer VM to prevent multiple
3291 	 * hat_page_demote() to be issued simultaneously to the same large
3292 	 * page).
3293 	 */
3294 again:
3295 	pszc = pp->p_szc;
3296 	if (pszc == 0)
3297 		return;
3298 	rootpp = PP_GROUPLEADER(pp, pszc);
3299 	x86_hm_enter(rootpp);
3300 	/*
3301 	 * If root's p_szc is different from pszc we raced with another
3302 	 * hat_page_demote().  Drop the lock and try to find the root again.
3303 	 * If root's p_szc is greater than pszc previous hat_page_demote() is
3304 	 * not done yet.  Take and release mlist lock of root's root to wait
3305 	 * for previous hat_page_demote() to complete.
3306 	 */
3307 	if ((rszc = rootpp->p_szc) != pszc) {
3308 		x86_hm_exit(rootpp);
3309 		if (rszc > pszc) {
3310 			/* p_szc of a locked non free page can't increase */
3311 			ASSERT(pp != rootpp);
3312 
3313 			rootpp = PP_GROUPLEADER(rootpp, rszc);
3314 			x86_hm_enter(rootpp);
3315 			x86_hm_exit(rootpp);
3316 		}
3317 		goto again;
3318 	}
3319 	ASSERT(pp->p_szc == pszc);
3320 
3321 	/*
3322 	 * Decrement by 1 p_szc of every constituent page of a region that
3323 	 * covered pp. For example if original szc is 3 it gets changed to 2
3324 	 * everywhere except in region 2 that covered pp. Region 2 that
3325 	 * covered pp gets demoted to 1 everywhere except in region 1 that
3326 	 * covered pp. The region 1 that covered pp is demoted to region
3327 	 * 0. It's done this way because from region 3 we removed level 3
3328 	 * mappings, from region 2 that covered pp we removed level 2 mappings
3329 	 * and from region 1 that covered pp we removed level 1 mappings.  All
3330 	 * changes are done from high pfn's to low pfn's so that roots
3331 	 * are changed last, allowing one to know the largest region where
3332 	 * hat_page_demote() is still active by only looking at the root page.
3333 	 *
3334 	 * This algorithm is implemented in 2 while loops. First loop changes
3335 	 * p_szc of pages to the right of pp's level 1 region and second
3336 	 * loop changes p_szc of pages of level 1 region that covers pp
3337 	 * and all pages to the left of level 1 region that covers pp.
3338 	 * In the first loop p_szc keeps dropping with every iteration
3339 	 * and in the second loop it keeps increasing with every iteration.
3340 	 *
3341 	 * First loop description: Demote pages to the right of pp outside of
3342 	 * level 1 region that covers pp.  In every iteration of the while
3343 	 * loop below find the last page of szc region and the first page of
3344 	 * (szc - 1) region that is immediately to the right of (szc - 1)
3345 	 * region that covers pp.  From last such page to first such page
3346 	 * change every page's szc to szc - 1. Decrement szc and continue
3347 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
3348 	 * of szc region skip to the next iteration.
3349 	 */
3350 	szc = pszc;
3351 	while (szc > 1) {
3352 		lastpp = PP_GROUPLEADER(pp, szc);
3353 		pgcnt = page_get_pagecnt(szc);
3354 		lastpp += pgcnt - 1;
3355 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
3356 		pgcnt = page_get_pagecnt(szc - 1);
3357 		if (lastpp - firstpp < pgcnt) {
3358 			szc--;
3359 			continue;
3360 		}
3361 		firstpp += pgcnt;
3362 		while (lastpp != firstpp) {
3363 			ASSERT(lastpp->p_szc == pszc);
3364 			lastpp->p_szc = szc - 1;
3365 			lastpp--;
3366 		}
3367 		firstpp->p_szc = szc - 1;
3368 		szc--;
3369 	}
3370 
3371 	/*
3372 	 * Second loop description:
3373 	 * First iteration changes p_szc to 0 of every
3374 	 * page of level 1 region that covers pp.
3375 	 * Subsequent iterations find last page of szc region
3376 	 * immediately to the left of szc region that covered pp
3377 	 * and first page of (szc + 1) region that covers pp.
3378 	 * From last to first page change p_szc of every page to szc.
3379 	 * Increment szc and continue looping until szc is pszc.
3380 	 * If pp belongs to the first szc region of (szc + 1) region
3381 	 * skip to the next iteration.
3382 	 *
3383 	 */
3384 	szc = 0;
3385 	while (szc < pszc) {
3386 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
3387 		if (szc == 0) {
3388 			pgcnt = page_get_pagecnt(1);
3389 			lastpp = firstpp + (pgcnt - 1);
3390 		} else {
3391 			lastpp = PP_GROUPLEADER(pp, szc);
3392 			if (firstpp == lastpp) {
3393 				szc++;
3394 				continue;
3395 			}
3396 			lastpp--;
3397 			pgcnt = page_get_pagecnt(szc);
3398 		}
3399 		while (lastpp != firstpp) {
3400 			ASSERT(lastpp->p_szc == pszc);
3401 			lastpp->p_szc = szc;
3402 			lastpp--;
3403 		}
3404 		firstpp->p_szc = szc;
3405 		if (firstpp == rootpp)
3406 			break;
3407 		szc++;
3408 	}
3409 	x86_hm_exit(rootpp);
3410 }
3411 
3412 /*
3413  * get hw stats from hardware into page struct and reset hw stats
3414  * returns attributes of page
3415  * Flags for hat_pagesync, hat_getstat, hat_sync
3416  *
3417  * define	HAT_SYNC_ZERORM		0x01
3418  *
3419  * Additional flags for hat_pagesync
3420  *
3421  * define	HAT_SYNC_STOPON_REF	0x02
3422  * define	HAT_SYNC_STOPON_MOD	0x04
3423  * define	HAT_SYNC_STOPON_RM	0x06
3424  * define	HAT_SYNC_STOPON_SHARED	0x08
3425  */
3426 uint_t
3427 hat_pagesync(struct page *pp, uint_t flags)
3428 {
3429 	hment_t		*hm = NULL;
3430 	htable_t	*ht;
3431 	uint_t		entry;
3432 	x86pte_t	old, save_old;
3433 	x86pte_t	new;
3434 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
3435 	extern ulong_t	po_share;
3436 	page_t		*save_pp = pp;
3437 	uint_t		pszc = 0;
3438 
3439 	ASSERT(PAGE_LOCKED(pp) || panicstr);
3440 
3441 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3442 		return (pp->p_nrm & nrmbits);
3443 
3444 	if ((flags & HAT_SYNC_ZERORM) == 0) {
3445 
3446 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3447 			return (pp->p_nrm & nrmbits);
3448 
3449 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3450 			return (pp->p_nrm & nrmbits);
3451 
3452 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3453 		    hat_page_getshare(pp) > po_share) {
3454 			if (PP_ISRO(pp))
3455 				PP_SETREF(pp);
3456 			return (pp->p_nrm & nrmbits);
3457 		}
3458 	}
3459 
3460 next_size:
3461 	/*
3462 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
3463 	 */
3464 	x86_hm_enter(pp);
3465 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3466 		if (ht->ht_level < pszc)
3467 			continue;
3468 		old = x86pte_get(ht, entry);
3469 try_again:
3470 
3471 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3472 
3473 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3474 			continue;
3475 
3476 		save_old = old;
3477 		if ((flags & HAT_SYNC_ZERORM) != 0) {
3478 
3479 			/*
3480 			 * Need to clear ref or mod bits. Need to demap
3481 			 * to make sure any executing TLBs see cleared bits.
3482 			 */
3483 			new = old;
3484 			PTE_CLR(new, PT_REF | PT_MOD);
3485 			old = hati_update_pte(ht, entry, old, new);
3486 			if (old != 0)
3487 				goto try_again;
3488 
3489 			old = save_old;
3490 		}
3491 
3492 		/*
3493 		 * Sync the PTE
3494 		 */
3495 		if (!(flags & HAT_SYNC_ZERORM) &&
3496 		    PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3497 			hati_sync_pte_to_page(pp, old, ht->ht_level);
3498 
3499 		/*
3500 		 * can stop short if we found a ref'd or mod'd page
3501 		 */
3502 		if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3503 		    (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3504 			x86_hm_exit(pp);
3505 			goto done;
3506 		}
3507 	}
3508 	x86_hm_exit(pp);
3509 	while (pszc < pp->p_szc) {
3510 		page_t *tpp;
3511 		pszc++;
3512 		tpp = PP_GROUPLEADER(pp, pszc);
3513 		if (pp != tpp) {
3514 			pp = tpp;
3515 			goto next_size;
3516 		}
3517 	}
3518 done:
3519 	return (save_pp->p_nrm & nrmbits);
3520 }
3521 
3522 /*
3523  * returns approx number of mappings to this pp.  A return of 0 implies
3524  * there are no mappings to the page.
3525  */
3526 ulong_t
3527 hat_page_getshare(page_t *pp)
3528 {
3529 	uint_t cnt;
3530 	cnt = hment_mapcnt(pp);
3531 #if defined(__amd64)
3532 	if (vpm_enable && pp->p_vpmref) {
3533 		cnt += 1;
3534 	}
3535 #endif
3536 	return (cnt);
3537 }
3538 
3539 /*
3540  * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3541  * otherwise.
3542  */
3543 int
3544 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3545 {
3546 	return (hat_page_getshare(pp) > sh_thresh);
3547 }
3548 
3549 /*
3550  * hat_softlock isn't supported anymore
3551  */
3552 /*ARGSUSED*/
3553 faultcode_t
3554 hat_softlock(
3555 	hat_t *hat,
3556 	caddr_t addr,
3557 	size_t *len,
3558 	struct page **page_array,
3559 	uint_t flags)
3560 {
3561 	return (FC_NOSUPPORT);
3562 }
3563 
3564 
3565 
3566 /*
3567  * Routine to expose supported HAT features to platform independent code.
3568  */
3569 /*ARGSUSED*/
3570 int
3571 hat_supported(enum hat_features feature, void *arg)
3572 {
3573 	switch (feature) {
3574 
3575 	case HAT_SHARED_PT:	/* this is really ISM */
3576 		return (1);
3577 
3578 	case HAT_DYNAMIC_ISM_UNMAP:
3579 		return (0);
3580 
3581 	case HAT_VMODSORT:
3582 		return (1);
3583 
3584 	case HAT_SHARED_REGIONS:
3585 		return (0);
3586 
3587 	default:
3588 		panic("hat_supported() - unknown feature");
3589 	}
3590 	return (0);
3591 }
3592 
3593 /*
3594  * Called when a thread is exiting and has been switched to the kernel AS
3595  */
3596 void
3597 hat_thread_exit(kthread_t *thd)
3598 {
3599 	ASSERT(thd->t_procp->p_as == &kas);
3600 	hat_switch(thd->t_procp->p_as->a_hat);
3601 }
3602 
3603 /*
3604  * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
3605  */
3606 /*ARGSUSED*/
3607 void
3608 hat_setup(hat_t *hat, int flags)
3609 {
3610 	kpreempt_disable();
3611 
3612 	hat_switch(hat);
3613 
3614 	kpreempt_enable();
3615 }
3616 
3617 /*
3618  * Prepare for a CPU private mapping for the given address.
3619  *
3620  * The address can only be used from a single CPU and can be remapped
3621  * using hat_mempte_remap().  Return the address of the PTE.
3622  *
3623  * We do the htable_create() if necessary and increment the valid count so
3624  * the htable can't disappear.  We also hat_devload() the page table into
3625  * kernel so that the PTE is quickly accessed.
3626  */
3627 hat_mempte_t
3628 hat_mempte_setup(caddr_t addr)
3629 {
3630 	uintptr_t	va = (uintptr_t)addr;
3631 	htable_t	*ht;
3632 	uint_t		entry;
3633 	x86pte_t	oldpte;
3634 	hat_mempte_t	p;
3635 
3636 	ASSERT(IS_PAGEALIGNED(va));
3637 	ASSERT(!IN_VA_HOLE(va));
3638 	++curthread->t_hatdepth;
3639 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3640 	if (ht == NULL) {
3641 		ht = htable_create(kas.a_hat, va, 0, NULL);
3642 		entry = htable_va2entry(va, ht);
3643 		ASSERT(ht->ht_level == 0);
3644 		oldpte = x86pte_get(ht, entry);
3645 	}
3646 	if (PTE_ISVALID(oldpte))
3647 		panic("hat_mempte_setup(): address already mapped"
3648 		    " ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte);
3649 
3650 	/*
3651 	 * increment ht_valid_cnt so that the pagetable can't disappear
3652 	 */
3653 	HTABLE_INC(ht->ht_valid_cnt);
3654 
3655 	/*
3656 	 * return the PTE physical address to the caller.
3657 	 */
3658 	htable_release(ht);
3659 	p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3660 	--curthread->t_hatdepth;
3661 	return (p);
3662 }
3663 
3664 /*
3665  * Release a CPU private mapping for the given address.
3666  * We decrement the htable valid count so it might be destroyed.
3667  */
3668 /*ARGSUSED1*/
3669 void
3670 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3671 {
3672 	htable_t	*ht;
3673 
3674 	/*
3675 	 * invalidate any left over mapping and decrement the htable valid count
3676 	 */
3677 	{
3678 		x86pte_t *pteptr;
3679 
3680 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
3681 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3682 		if (mmu.pae_hat)
3683 			*pteptr = 0;
3684 		else
3685 			*(x86pte32_t *)pteptr = 0;
3686 		mmu_tlbflush_entry(addr);
3687 		x86pte_mapout();
3688 	}
3689 
3690 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3691 	if (ht == NULL)
3692 		panic("hat_mempte_release(): invalid address");
3693 	ASSERT(ht->ht_level == 0);
3694 	HTABLE_DEC(ht->ht_valid_cnt);
3695 	htable_release(ht);
3696 }
3697 
3698 /*
3699  * Apply a temporary CPU private mapping to a page. We flush the TLB only
3700  * on this CPU, so this ought to have been called with preemption disabled.
3701  */
3702 void
3703 hat_mempte_remap(
3704 	pfn_t		pfn,
3705 	caddr_t		addr,
3706 	hat_mempte_t	pte_pa,
3707 	uint_t		attr,
3708 	uint_t		flags)
3709 {
3710 	uintptr_t	va = (uintptr_t)addr;
3711 	x86pte_t	pte;
3712 
3713 	/*
3714 	 * Remap the given PTE to the new page's PFN. Invalidate only
3715 	 * on this CPU.
3716 	 */
3717 #ifdef DEBUG
3718 	htable_t	*ht;
3719 	uint_t		entry;
3720 
3721 	ASSERT(IS_PAGEALIGNED(va));
3722 	ASSERT(!IN_VA_HOLE(va));
3723 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3724 	ASSERT(ht != NULL);
3725 	ASSERT(ht->ht_level == 0);
3726 	ASSERT(ht->ht_valid_cnt > 0);
3727 	ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3728 	htable_release(ht);
3729 #endif
3730 	pte = hati_mkpte(pfn, attr, 0, flags);
3731 	{
3732 		x86pte_t *pteptr;
3733 
3734 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
3735 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3736 		if (mmu.pae_hat)
3737 			*(x86pte_t *)pteptr = pte;
3738 		else
3739 			*(x86pte32_t *)pteptr = (x86pte32_t)pte;
3740 		mmu_tlbflush_entry(addr);
3741 		x86pte_mapout();
3742 	}
3743 }
3744 
3745 
3746 
3747 /*
3748  * Hat locking functions
3749  * XXX - these two functions are currently being used by hatstats
3750  * 	they can be removed by using a per-as mutex for hatstats.
3751  */
3752 void
3753 hat_enter(hat_t *hat)
3754 {
3755 	mutex_enter(&hat->hat_mutex);
3756 }
3757 
3758 void
3759 hat_exit(hat_t *hat)
3760 {
3761 	mutex_exit(&hat->hat_mutex);
3762 }
3763 
3764 /*
3765  * HAT part of cpu initialization.
3766  */
3767 void
3768 hat_cpu_online(struct cpu *cpup)
3769 {
3770 	if (cpup != CPU) {
3771 		x86pte_cpu_init(cpup);
3772 		hat_vlp_setup(cpup);
3773 	}
3774 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
3775 }
3776 
3777 /*
3778  * HAT part of cpu deletion.
3779  * (currently, we only call this after the cpu is safely passivated.)
3780  */
3781 void
3782 hat_cpu_offline(struct cpu *cpup)
3783 {
3784 	ASSERT(cpup != CPU);
3785 
3786 	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
3787 	x86pte_cpu_fini(cpup);
3788 	hat_vlp_teardown(cpup);
3789 }
3790 
3791 /*
3792  * Function called after all CPUs are brought online.
3793  * Used to remove low address boot mappings.
3794  */
3795 void
3796 clear_boot_mappings(uintptr_t low, uintptr_t high)
3797 {
3798 	uintptr_t vaddr = low;
3799 	htable_t *ht = NULL;
3800 	level_t level;
3801 	uint_t entry;
3802 	x86pte_t pte;
3803 
3804 	/*
3805 	 * On 1st CPU we can unload the prom mappings, basically we blow away
3806 	 * all virtual mappings under _userlimit.
3807 	 */
3808 	while (vaddr < high) {
3809 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
3810 		if (ht == NULL)
3811 			break;
3812 
3813 		level = ht->ht_level;
3814 		entry = htable_va2entry(vaddr, ht);
3815 		ASSERT(level <= mmu.max_page_level);
3816 		ASSERT(PTE_ISPAGE(pte, level));
3817 
3818 		/*
3819 		 * Unload the mapping from the page tables.
3820 		 */
3821 		(void) x86pte_inval(ht, entry, 0, NULL);
3822 		ASSERT(ht->ht_valid_cnt > 0);
3823 		HTABLE_DEC(ht->ht_valid_cnt);
3824 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
3825 
3826 		vaddr += LEVEL_SIZE(ht->ht_level);
3827 	}
3828 	if (ht)
3829 		htable_release(ht);
3830 }
3831 
3832 /*
3833  * Atomically update a new translation for a single page.  If the
3834  * currently installed PTE doesn't match the value we expect to find,
3835  * it's not updated and we return the PTE we found.
3836  *
3837  * If activating nosync or NOWRITE and the page was modified we need to sync
3838  * with the page_t. Also sync with page_t if clearing ref/mod bits.
3839  */
3840 static x86pte_t
3841 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
3842 {
3843 	page_t		*pp;
3844 	uint_t		rm = 0;
3845 	x86pte_t	replaced;
3846 
3847 	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
3848 	    PTE_GET(expected, PT_MOD | PT_REF) &&
3849 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
3850 	    !PTE_GET(new, PT_MOD | PT_REF))) {
3851 
3852 		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
3853 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
3854 		ASSERT(pp != NULL);
3855 		if (PTE_GET(expected, PT_MOD))
3856 			rm |= P_MOD;
3857 		if (PTE_GET(expected, PT_REF))
3858 			rm |= P_REF;
3859 		PTE_CLR(new, PT_MOD | PT_REF);
3860 	}
3861 
3862 	replaced = x86pte_update(ht, entry, expected, new);
3863 	if (replaced != expected)
3864 		return (replaced);
3865 
3866 	if (rm) {
3867 		/*
3868 		 * sync to all constituent pages of a large page
3869 		 */
3870 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
3871 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
3872 		while (pgcnt-- > 0) {
3873 			/*
3874 			 * hat_page_demote() can't decrease
3875 			 * pszc below this mapping size
3876 			 * since large mapping existed after we
3877 			 * took mlist lock.
3878 			 */
3879 			ASSERT(pp->p_szc >= ht->ht_level);
3880 			hat_page_setattr(pp, rm);
3881 			++pp;
3882 		}
3883 	}
3884 
3885 	return (0);
3886 }
3887 
3888 /* ARGSUSED */
3889 void
3890 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
3891 {
3892 }
3893 
3894 /* ARGSUSED */
3895 hat_region_cookie_t
3896 hat_join_region(struct hat *sfmmup,
3897     caddr_t r_saddr,
3898     size_t r_size,
3899     void *r_obj,
3900     u_offset_t r_objoff,
3901     uchar_t r_perm,
3902     uchar_t r_pgszc,
3903     hat_rgn_cb_func_t r_cb_function,
3904     uint_t flags)
3905 {
3906 	panic("No shared region support on x86");
3907 	return (HAT_INVALID_REGION_COOKIE);
3908 }
3909 
3910 /* ARGSUSED */
3911 void
3912 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
3913 {
3914 	panic("No shared region support on x86");
3915 }
3916 
3917 /* ARGSUSED */
3918 void
3919 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
3920 {
3921 	panic("No shared region support on x86");
3922 }
3923 
3924 
3925 /*
3926  * Kernel Physical Mapping (kpm) facility
3927  *
3928  * Most of the routines needed to support segkpm are almost no-ops on the
3929  * x86 platform.  We map in the entire segment when it is created and leave
3930  * it mapped in, so there is no additional work required to set up and tear
3931  * down individual mappings.  All of these routines were created to support
3932  * SPARC platforms that have to avoid aliasing in their virtually indexed
3933  * caches.
3934  *
3935  * Most of the routines have sanity checks in them (e.g. verifying that the
3936  * passed-in page is locked).  We don't actually care about most of these
3937  * checks on x86, but we leave them in place to identify problems in the
3938  * upper levels.
3939  */
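
/*
 * Because segkpm is permanently mapped on x86, typical use reduces to
 * address arithmetic (a sketch; callers commonly pass a NULL kpme):
 *
 *	caddr_t va = hat_kpm_mapin(pp, NULL);
 *	... access the page contents through va ...
 *	hat_kpm_mapout(pp, NULL, va);
 *
 * hat_kpm_mapin() simply returns kpm_vbase + mmu_ptob(pp->p_pagenum) and
 * hat_kpm_mapout() does nothing beyond the DEBUG sanity checks.
 */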
3940 
3941 /*
3942  * Map in a locked page and return the vaddr.
3943  */
3944 /*ARGSUSED*/
3945 caddr_t
3946 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
3947 {
3948 	caddr_t		vaddr;
3949 
3950 #ifdef DEBUG
3951 	if (kpm_enable == 0) {
3952 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
3953 		return ((caddr_t)NULL);
3954 	}
3955 
3956 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
3957 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
3958 		return ((caddr_t)NULL);
3959 	}
3960 #endif
3961 
3962 	vaddr = hat_kpm_page2va(pp, 1);
3963 
3964 	return (vaddr);
3965 }
3966 
3967 /*
3968  * Mapout a locked page.
3969  */
3970 /*ARGSUSED*/
3971 void
3972 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
3973 {
3974 #ifdef DEBUG
3975 	if (kpm_enable == 0) {
3976 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
3977 		return;
3978 	}
3979 
3980 	if (IS_KPM_ADDR(vaddr) == 0) {
3981 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
3982 		return;
3983 	}
3984 
3985 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
3986 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
3987 		return;
3988 	}
3989 #endif
3990 }
3991 
3992 /*
3993  * Return the kpm virtual address for a specific pfn.
3994  */
3995 caddr_t
3996 hat_kpm_pfn2va(pfn_t pfn)
3997 {
3998 	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
3999 
4000 	return ((caddr_t)vaddr);
4001 }
4002 
4003 /*
4004  * Return the kpm virtual address for the page at pp.
4005  */
4006 /*ARGSUSED*/
4007 caddr_t
4008 hat_kpm_page2va(struct page *pp, int checkswap)
4009 {
4010 	return (hat_kpm_pfn2va(pp->p_pagenum));
4011 }
4012 
4013 /*
4014  * Return the page frame number for the kpm virtual address vaddr.
4015  */
4016 pfn_t
4017 hat_kpm_va2pfn(caddr_t vaddr)
4018 {
4019 	pfn_t		pfn;
4020 
4021 	ASSERT(IS_KPM_ADDR(vaddr));
4022 
4023 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
4024 
4025 	return (pfn);
4026 }
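
/*
 * For example, with 4K base pages (mmu_ptob()/btop() shift by 12), pfn
 * 0x1234 yields the kpm address kpm_vbase + 0x1234000, and feeding that
 * address back through hat_kpm_va2pfn() returns 0x1234.
 */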
4027 
4028 
4029 /*
4030  * Return the page for the kpm virtual address vaddr.
4031  */
4032 page_t *
4033 hat_kpm_vaddr2page(caddr_t vaddr)
4034 {
4035 	pfn_t		pfn;
4036 
4037 	ASSERT(IS_KPM_ADDR(vaddr));
4038 
4039 	pfn = hat_kpm_va2pfn(vaddr);
4040 
4041 	return (page_numtopp_nolock(pfn));
4042 }
4043 
4044 /*
4045  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4046  * KPM page.  This should never happen on x86
4047  * KPM page.  This should never happen on x86.
4048 int
4049 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4050 {
4051 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
	    (void *)hat, (void *)vaddr);
4052 
4053 	return (0);
4054 }
4055 
4056 /*ARGSUSED*/
4057 void
4058 hat_kpm_mseghash_clear(int nentries)
4059 {}
4060 
4061 /*ARGSUSED*/
4062 void
4063 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4064 {}
4065