xref: /titanic_51/usr/src/uts/i86pc/vm/hat_i86.c (revision 2c20fda2feee4850b7ec4297e4120bc72c2bac09)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * VM - Hardware Address Translation management for i386 and amd64
30  *
31  * Implementation of the interfaces described in <common/vm/hat.h>
32  *
33  * Nearly all the details of how the hardware is managed should not be
34  * visible outside this layer except for misc. machine specific functions
35  * that work in conjunction with this code.
36  *
37  * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
38  */
39 
40 #include <sys/machparam.h>
41 #include <sys/machsystm.h>
42 #include <sys/mman.h>
43 #include <sys/types.h>
44 #include <sys/systm.h>
45 #include <sys/cpuvar.h>
46 #include <sys/thread.h>
47 #include <sys/proc.h>
48 #include <sys/cpu.h>
49 #include <sys/kmem.h>
50 #include <sys/disp.h>
51 #include <sys/shm.h>
52 #include <sys/sysmacros.h>
53 #include <sys/machparam.h>
54 #include <sys/vmem.h>
55 #include <sys/vmsystm.h>
56 #include <sys/promif.h>
57 #include <sys/var.h>
58 #include <sys/x86_archext.h>
59 #include <sys/atomic.h>
60 #include <sys/bitmap.h>
61 #include <sys/controlregs.h>
62 #include <sys/bootconf.h>
63 #include <sys/bootsvcs.h>
64 #include <sys/bootinfo.h>
65 #include <sys/archsystm.h>
66 
67 #include <vm/seg_kmem.h>
68 #include <vm/hat_i86.h>
69 #include <vm/as.h>
70 #include <vm/seg.h>
71 #include <vm/page.h>
72 #include <vm/seg_kp.h>
73 #include <vm/seg_kpm.h>
74 #include <vm/vm_dep.h>
75 #ifdef __xpv
76 #include <sys/hypervisor.h>
77 #endif
78 #include <vm/kboot_mmu.h>
79 #include <vm/seg_spt.h>
80 
81 #include <sys/cmn_err.h>
82 
83 /*
84  * Basic parameters for hat operation.
85  */
86 struct hat_mmu_info mmu;
87 
88 /*
89  * The page that is the kernel's top level pagetable.
90  *
91  * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
92  * on this 4K page for its top level page table. The remaining groups of
93  * 4 entries are used for per processor copies of user VLP pagetables for
94  * running threads.  See hat_switch() and reload_pae32() for details.
95  *
96  * vlp_page[0..3] - level==2 PTEs for kernel HAT
97  * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
98  * vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
99  * etc...
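 *
 * For example, the 4 PTEs for the user thread running on cpu N live at
 * &vlp_page[(N + 1) * 4]; reload_pae32() copies that thread's hat_vlp_ptes
 * there and hat_switch() then points cr3 at that group of entries.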
100  */
101 static x86pte_t *vlp_page;
102 
103 /*
104  * forward declaration of internal utility routines
105  */
106 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
107 	x86pte_t new);
108 
109 /*
110  * The kernel address space exists in all HATs. To implement this the
111  * kernel reserves a fixed number of entries in the topmost level(s) of page
112  * tables. The values are setup during startup and then copied to every user
113  * hat created by hat_alloc(). This means that kernelbase must be:
114  *
115  *	  4Meg aligned for 32 bit kernels
116  *	512Gig aligned for x86_64 64 bit kernel
117  *
118  * The hat_kernel_range_ts describe what needs to be copied from kernel hat
119  * to each user hat.
120  */
121 typedef struct hat_kernel_range {
122 	level_t		hkr_level;
123 	uintptr_t	hkr_start_va;
124 	uintptr_t	hkr_end_va;	/* zero means to end of memory */
125 } hat_kernel_range_t;
126 #define	NUM_KERNEL_RANGE 2
127 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
128 static int num_kernel_ranges;
129 
130 uint_t use_boot_reserve = 1;	/* cleared after early boot process */
131 uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */
132 
133 /*
134  * enable_1gpg: controls 1g page support for user applications.
135  * By default, 1g pages are exported to user applications. enable_1gpg can
136  * be set to 0 to not export.
137  */
138 int	enable_1gpg = 1;
139 
140 /*
141  * AMD Shanghai processors provide better management of 1gb PTEs in their TLB.
142  * By default, 1g page support will be disabled for pre-Shanghai AMD
143  * processors that don't have optimal tlb support for the 1g page size.
144  * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
145  * processors.
146  */
147 int	chk_optimal_1gtlb = 1;
148 
149 
150 #ifdef DEBUG
151 uint_t	map1gcnt;
152 #endif
153 
154 
155 /*
156  * A cpuset for all cpus. This is used for kernel address cross calls, since
157  * the kernel addresses apply to all cpus.
158  */
159 cpuset_t khat_cpuset;
160 
161 /*
162  * management stuff for hat structures
163  */
164 kmutex_t	hat_list_lock;
165 kcondvar_t	hat_list_cv;
166 kmem_cache_t	*hat_cache;
167 kmem_cache_t	*hat_hash_cache;
168 kmem_cache_t	*vlp_hash_cache;
169 
170 /*
171  * Simple statistics
172  */
173 struct hatstats hatstat;
174 
175 /*
176  * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
177  * correctly.  For such hypervisors we must set PT_USER for kernel
178  * entries ourselves (normally the emulation would set PT_USER for
179  * kernel entries and PT_USER|PT_GLOBAL for user entries).  pt_kern is
180  * thus set appropriately.  Note that dboot/kbm is OK, as only the full
181  * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
182  * incorrect.
183  */
184 int pt_kern;
185 
186 /*
187  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
188  */
189 extern void atomic_orb(uchar_t *addr, uchar_t val);
190 extern void atomic_andb(uchar_t *addr, uchar_t val);
191 
192 #define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
193 #define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
194 #define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
195 #define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)
196 
197 #define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
198 #define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
199 #define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
200 #define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)
201 
202 #define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
203 #define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
204 #define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
205 #define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
206 #define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
207 
208 /*
209  * kmem cache constructor for struct hat
210  */
211 /*ARGSUSED*/
212 static int
213 hati_constructor(void *buf, void *handle, int kmflags)
214 {
215 	hat_t	*hat = buf;
216 
217 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
218 	bzero(hat->hat_pages_mapped,
219 	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
220 	hat->hat_ism_pgcnt = 0;
221 	hat->hat_stats = 0;
222 	hat->hat_flags = 0;
223 	CPUSET_ZERO(hat->hat_cpus);
224 	hat->hat_htable = NULL;
225 	hat->hat_ht_hash = NULL;
226 	return (0);
227 }
228 
229 /*
230  * Allocate a hat structure for as. We also create the top level
231  * htable and initialize it to contain the kernel hat entries.
232  */
233 hat_t *
234 hat_alloc(struct as *as)
235 {
236 	hat_t			*hat;
237 	htable_t		*ht;	/* top level htable */
238 	uint_t			use_vlp;
239 	uint_t			r;
240 	hat_kernel_range_t	*rp;
241 	uintptr_t		va;
242 	uintptr_t		eva;
243 	uint_t			start;
244 	uint_t			cnt;
245 	htable_t		*src;
246 
247 	/*
248 	 * Once we start creating user process HATs we can enable
249 	 * the htable_steal() code.
250 	 */
251 	if (can_steal_post_boot == 0)
252 		can_steal_post_boot = 1;
253 
254 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
255 	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
256 	hat->hat_as = as;
257 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
258 	ASSERT(hat->hat_flags == 0);
259 
260 #if defined(__xpv)
261 	/*
262 	 * No VLP stuff on the hypervisor due to the 64-bit split top level
263 	 * page tables.  On 32-bit it's not needed as the hypervisor takes
264 	 * care of copying the top level PTEs to a below 4Gig page.
265 	 */
266 	use_vlp = 0;
267 #else	/* __xpv */
268 	/* 32 bit processes use a VLP style hat when running with PAE */
269 #if defined(__amd64)
270 	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
271 #elif defined(__i386)
272 	use_vlp = mmu.pae_hat;
273 #endif
274 #endif	/* __xpv */
275 	if (use_vlp) {
276 		hat->hat_flags = HAT_VLP;
277 		bzero(hat->hat_vlp_ptes, VLP_SIZE);
278 	}
279 
280 	/*
281 	 * Allocate the htable hash
282 	 */
283 	if ((hat->hat_flags & HAT_VLP)) {
284 		hat->hat_num_hash = mmu.vlp_hash_cnt;
285 		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
286 	} else {
287 		hat->hat_num_hash = mmu.hash_cnt;
288 		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
289 	}
290 	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
291 
292 	/*
293 	 * Initialize Kernel HAT entries at the top of the top level page
294 	 * tables for the new hat.
295 	 */
296 	hat->hat_htable = NULL;
297 	hat->hat_ht_cached = NULL;
298 	XPV_DISALLOW_MIGRATE();
299 	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
300 	hat->hat_htable = ht;
301 
302 #if defined(__amd64)
303 	if (hat->hat_flags & HAT_VLP)
304 		goto init_done;
305 #endif
306 
307 	for (r = 0; r < num_kernel_ranges; ++r) {
308 		rp = &kernel_ranges[r];
309 		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
310 		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {
311 
312 			if (rp->hkr_level == TOP_LEVEL(hat))
313 				ht = hat->hat_htable;
314 			else
315 				ht = htable_create(hat, va, rp->hkr_level,
316 				    NULL);
317 
318 			start = htable_va2entry(va, ht);
319 			cnt = HTABLE_NUM_PTES(ht) - start;
320 			eva = va +
321 			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
322 			if (rp->hkr_end_va != 0 &&
323 			    (eva > rp->hkr_end_va || eva == 0))
324 				cnt = htable_va2entry(rp->hkr_end_va, ht) -
325 				    start;
326 
327 #if defined(__i386) && !defined(__xpv)
328 			if (ht->ht_flags & HTABLE_VLP) {
329 				bcopy(&vlp_page[start],
330 				    &hat->hat_vlp_ptes[start],
331 				    cnt * sizeof (x86pte_t));
332 				continue;
333 			}
334 #endif
335 			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
336 			ASSERT(src != NULL);
337 			x86pte_copy(src, ht, start, cnt);
338 			htable_release(src);
339 		}
340 	}
341 
342 init_done:
343 
344 #if defined(__xpv)
345 	/*
346 	 * Pin top level page tables after initializing them
347 	 */
348 	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
349 #if defined(__amd64)
350 	xen_pin(hat->hat_user_ptable, mmu.max_level);
351 #endif
352 #endif
353 	XPV_ALLOW_MIGRATE();
354 
355 	/*
356 	 * Put it at the start of the global list of all hats (used by stealing)
357 	 *
358 	 * kas.a_hat is not in the list but is instead used to find the
359 	 * first and last items in the list.
360 	 *
361 	 * - kas.a_hat->hat_next points to the start of the user hats.
362 	 *   The list ends where hat->hat_next == NULL
363 	 *
364 	 * - kas.a_hat->hat_prev points to the last of the user hats.
365 	 *   The list begins where hat->hat_prev == NULL
366 	 */
367 	mutex_enter(&hat_list_lock);
368 	hat->hat_prev = NULL;
369 	hat->hat_next = kas.a_hat->hat_next;
370 	if (hat->hat_next)
371 		hat->hat_next->hat_prev = hat;
372 	else
373 		kas.a_hat->hat_prev = hat;
374 	kas.a_hat->hat_next = hat;
375 	mutex_exit(&hat_list_lock);
376 
377 	return (hat);
378 }
379 
380 /*
381  * The process has finished executing but the as has not been cleaned up yet.
382  */
383 /*ARGSUSED*/
384 void
385 hat_free_start(hat_t *hat)
386 {
387 	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
388 
389 	/*
390 	 * If the hat is currently a stealing victim, wait for the stealing
391 	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
392 	 * won't look at its pagetables anymore.
393 	 */
394 	mutex_enter(&hat_list_lock);
395 	while (hat->hat_flags & HAT_VICTIM)
396 		cv_wait(&hat_list_cv, &hat_list_lock);
397 	hat->hat_flags |= HAT_FREEING;
398 	mutex_exit(&hat_list_lock);
399 }
400 
401 /*
402  * An address space is being destroyed, so we destroy the associated hat.
403  */
404 void
405 hat_free_end(hat_t *hat)
406 {
407 	kmem_cache_t *cache;
408 
409 	ASSERT(hat->hat_flags & HAT_FREEING);
410 
411 	/*
412 	 * must not be running on the given hat
413 	 */
414 	ASSERT(CPU->cpu_current_hat != hat);
415 
416 	/*
417 	 * Remove it from the list of HATs
418 	 */
419 	mutex_enter(&hat_list_lock);
420 	if (hat->hat_prev)
421 		hat->hat_prev->hat_next = hat->hat_next;
422 	else
423 		kas.a_hat->hat_next = hat->hat_next;
424 	if (hat->hat_next)
425 		hat->hat_next->hat_prev = hat->hat_prev;
426 	else
427 		kas.a_hat->hat_prev = hat->hat_prev;
428 	mutex_exit(&hat_list_lock);
429 	hat->hat_next = hat->hat_prev = NULL;
430 
431 #if defined(__xpv)
432 	/*
433 	 * On the hypervisor, unpin top level page table(s)
434 	 */
435 	xen_unpin(hat->hat_htable->ht_pfn);
436 #if defined(__amd64)
437 	xen_unpin(hat->hat_user_ptable);
438 #endif
439 #endif
440 
441 	/*
442 	 * Make a pass through the htables freeing them all up.
443 	 */
444 	htable_purge_hat(hat);
445 
446 	/*
447 	 * Decide which kmem cache the hash table came from, then free it.
448 	 */
449 	if (hat->hat_flags & HAT_VLP)
450 		cache = vlp_hash_cache;
451 	else
452 		cache = hat_hash_cache;
453 	kmem_cache_free(cache, hat->hat_ht_hash);
454 	hat->hat_ht_hash = NULL;
455 
456 	hat->hat_flags = 0;
457 	kmem_cache_free(hat_cache, hat);
458 }
459 
460 /*
461  * round kernelbase down to a supported value to use for _userlimit
462  *
463  * userlimit must be aligned down to an entry in the top level htable.
464  * The one exception is for 32 bit HATs running PAE.
465  */
466 uintptr_t
467 hat_kernelbase(uintptr_t va)
468 {
469 #if defined(__i386)
470 	va &= LEVEL_MASK(1);
471 #endif
472 	if (IN_VA_HOLE(va))
473 		panic("_userlimit %p will fall in VA hole\n", (void *)va);
474 	return (va);
475 }
476 
477 /*
478  * Set mmu.max_page_level and mmu.umax_page_level from hardware and tunables.
479  */
480 static void
481 set_max_page_level()
482 {
483 	level_t lvl;
484 
485 	if (!kbm_largepage_support) {
486 		lvl = 0;
487 	} else {
488 		if (x86_feature & X86_1GPG) {
489 			lvl = 2;
490 			if (chk_optimal_1gtlb &&
491 			    cpuid_opteron_erratum(CPU, 6671130)) {
492 				lvl = 1;
493 			}
494 			if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
495 			    LEVEL_SHIFT(0))) {
496 				lvl = 1;
497 			}
498 		} else {
499 			lvl = 1;
500 		}
501 	}
502 	mmu.max_page_level = lvl;
503 
504 	if ((lvl == 2) && (enable_1gpg == 0))
505 		mmu.umax_page_level = 1;
506 	else
507 		mmu.umax_page_level = lvl;
508 }
509 
510 /*
511  * Initialize hat data structures based on processor MMU information.
512  */
513 void
514 mmu_init(void)
515 {
516 	uint_t max_htables;
517 	uint_t pa_bits;
518 	uint_t va_bits;
519 	int i;
520 
521 	/*
522 	 * If the CPU enabled the page table global bit, use it for the kernel.
523 	 * This is bit 7 in CR4 (PGE - Page Global Enable).
524 	 */
525 	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
526 		mmu.pt_global = PT_GLOBAL;
527 
528 	/*
529 	 * Detect NX and PAE usage.
530 	 */
531 	mmu.pae_hat = kbm_pae_support;
532 	if (kbm_nx_support)
533 		mmu.pt_nx = PT_NX;
534 	else
535 		mmu.pt_nx = 0;
536 
537 	/*
538 	 * Use CPU info to set various MMU parameters
539 	 */
540 	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
541 
542 	if (va_bits < sizeof (void *) * NBBY) {
543 		mmu.hole_start = (1ul << (va_bits - 1));
544 		mmu.hole_end = 0ul - mmu.hole_start - 1;
545 	} else {
546 		mmu.hole_end = 0;
547 		mmu.hole_start = mmu.hole_end - 1;
548 	}
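	/*
	 * For example, with the usual amd64 case of va_bits == 48 this yields
	 * the canonical address hole 0x0000800000000000 .. 0xffff7fffffffffff.
	 */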
549 #if defined(OPTERON_ERRATUM_121)
550 	/*
551 	 * If erratum 121 has already been detected at this time, hole_start
552 	 * contains the value to be subtracted from mmu.hole_start.
553 	 */
554 	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
555 	hole_start = mmu.hole_start - hole_start;
556 #else
557 	hole_start = mmu.hole_start;
558 #endif
559 	hole_end = mmu.hole_end;
560 
561 	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
562 	if (mmu.pae_hat == 0 && pa_bits > 32)
563 		mmu.highest_pfn = PFN_4G - 1;
564 
565 	if (mmu.pae_hat) {
566 		mmu.pte_size = 8;	/* 8 byte PTEs */
567 		mmu.pte_size_shift = 3;
568 	} else {
569 		mmu.pte_size = 4;	/* 4 byte PTEs */
570 		mmu.pte_size_shift = 2;
571 	}
572 
573 	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
574 		panic("Processor does not support PAE");
575 
576 	if ((x86_feature & X86_CX8) == 0)
577 		panic("Processor does not support cmpxchg8b instruction");
578 
579 #if defined(__amd64)
580 
581 	mmu.num_level = 4;
582 	mmu.max_level = 3;
583 	mmu.ptes_per_table = 512;
584 	mmu.top_level_count = 512;
585 
586 	mmu.level_shift[0] = 12;
587 	mmu.level_shift[1] = 21;
588 	mmu.level_shift[2] = 30;
589 	mmu.level_shift[3] = 39;
590 
591 #elif defined(__i386)
592 
593 	if (mmu.pae_hat) {
594 		mmu.num_level = 3;
595 		mmu.max_level = 2;
596 		mmu.ptes_per_table = 512;
597 		mmu.top_level_count = 4;
598 
599 		mmu.level_shift[0] = 12;
600 		mmu.level_shift[1] = 21;
601 		mmu.level_shift[2] = 30;
602 
603 	} else {
604 		mmu.num_level = 2;
605 		mmu.max_level = 1;
606 		mmu.ptes_per_table = 1024;
607 		mmu.top_level_count = 1024;
608 
609 		mmu.level_shift[0] = 12;
610 		mmu.level_shift[1] = 22;
611 	}
612 
613 #endif	/* __i386 */
614 
615 	for (i = 0; i < mmu.num_level; ++i) {
616 		mmu.level_size[i] = 1UL << mmu.level_shift[i];
617 		mmu.level_offset[i] = mmu.level_size[i] - 1;
618 		mmu.level_mask[i] = ~mmu.level_offset[i];
619 	}
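	/*
	 * With the shifts above, amd64 gets level sizes of 4K, 2Meg, 1Gig and
	 * 512Gig; 32 bit PAE gets 4K, 2Meg and 1Gig; non-PAE gets 4K and 4Meg.
	 */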
620 
621 	set_max_page_level();
622 
623 	mmu_page_sizes = mmu.max_page_level + 1;
624 	mmu_exported_page_sizes = mmu.umax_page_level + 1;
625 
626 	/* restrict legacy applications from using pagesizes 1g and above */
627 	mmu_legacy_page_sizes =
628 	    (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
629 
630 
631 	for (i = 0; i <= mmu.max_page_level; ++i) {
632 		mmu.pte_bits[i] = PT_VALID | pt_kern;
633 		if (i > 0)
634 			mmu.pte_bits[i] |= PT_PAGESIZE;
635 	}
636 
637 	/*
638 	 * NOTE: Legacy 32 bit PAE mode only has the PT_VALID bit at the top level.
639 	 */
640 	for (i = 1; i < mmu.num_level; ++i)
641 		mmu.ptp_bits[i] = PT_PTPBITS;
642 
643 #if defined(__i386)
644 	mmu.ptp_bits[2] = PT_VALID;
645 #endif
646 
647 	/*
648 	 * Compute how many hash table entries to have per process for htables.
649 	 * We start with 1 page's worth of entries.
650 	 *
651 	 * If physical memory is small, reduce the amount needed to cover it.
652 	 */
653 	max_htables = physmax / mmu.ptes_per_table;
654 	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
655 	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
656 		mmu.hash_cnt >>= 1;
657 	mmu.vlp_hash_cnt = mmu.hash_cnt;
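	/*
	 * (One page of bucket pointers is 4096 / 8 == 512 entries on amd64,
	 * or 1024 with 4 byte pointers on a 32 bit kernel, before the
	 * adjustments above and below.)
	 */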
658 
659 #if defined(__amd64)
660 	/*
661 	 * If running in 64 bits and physical memory is large,
662 	 * increase the size of the cache to cover all of memory for
663 	 * a 64 bit process.
664 	 */
665 #define	HASH_MAX_LENGTH 4
666 	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
667 		mmu.hash_cnt <<= 1;
668 #endif
669 }
670 
671 
672 /*
673  * initialize hat data structures
674  */
675 void
676 hat_init()
677 {
678 #if defined(__i386)
679 	/*
680 	 * _userlimit must be aligned correctly
681 	 */
682 	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
683 		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
684 		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
685 		halt("hat_init(): Unable to continue");
686 	}
687 #endif
688 
689 	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
690 
691 	/*
692 	 * initialize kmem caches
693 	 */
694 	htable_init();
695 	hment_init();
696 
697 	hat_cache = kmem_cache_create("hat_t",
698 	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
699 	    NULL, 0, 0);
700 
701 	hat_hash_cache = kmem_cache_create("HatHash",
702 	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
703 	    NULL, 0, 0);
704 
705 	/*
706  * VLP hats can use a smaller hash table size on large memory machines.
707 	 */
708 	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
709 		vlp_hash_cache = hat_hash_cache;
710 	} else {
711 		vlp_hash_cache = kmem_cache_create("HatVlpHash",
712 		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
713 		    NULL, 0, 0);
714 	}
715 
716 	/*
717 	 * Set up the kernel's hat
718 	 */
719 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
720 	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
721 	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
722 	kas.a_hat->hat_as = &kas;
723 	kas.a_hat->hat_flags = 0;
724 	AS_LOCK_EXIT(&kas, &kas.a_lock);
725 
726 	CPUSET_ZERO(khat_cpuset);
727 	CPUSET_ADD(khat_cpuset, CPU->cpu_id);
728 
729 	/*
730 	 * The kernel hat's next pointer serves as the head of the hat list.
731 	 * The kernel hat's prev pointer tracks the last hat on the list for
732 	 * htable_steal() to use.
733 	 */
734 	kas.a_hat->hat_next = NULL;
735 	kas.a_hat->hat_prev = NULL;
736 
737 	/*
738 	 * Allocate an htable hash bucket for the kernel
739 	 * XX64 - tune for 64 bit procs
740 	 */
741 	kas.a_hat->hat_num_hash = mmu.hash_cnt;
742 	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
743 	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
744 
745 	/*
746 	 * zero out the top level and cached htable pointers
747 	 */
748 	kas.a_hat->hat_ht_cached = NULL;
749 	kas.a_hat->hat_htable = NULL;
750 
751 	/*
752 	 * Pre-allocate hrm_hashtab before enabling the collection of
753 	 * refmod statistics.  Allocating on the fly would mean us
754 	 * running the risk of suffering recursive mutex enters or
755 	 * deadlocks.
756 	 */
757 	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
758 	    KM_SLEEP);
759 }
760 
761 /*
762  * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
763  *
764  * Each CPU has a set of 2 pagetables that are reused for any 32 bit
765  * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
766  * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
767  */
768 /*ARGSUSED*/
769 static void
770 hat_vlp_setup(struct cpu *cpu)
771 {
772 #if defined(__amd64) && !defined(__xpv)
773 	struct hat_cpu_info *hci = cpu->cpu_hat_info;
774 	pfn_t pfn;
775 
776 	/*
777 	 * allocate the level==2 page table for the bottom most
778 	 * 512Gig of address space (this is where 32 bit apps live)
779 	 */
780 	ASSERT(hci != NULL);
781 	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
782 
783 	/*
784 	 * Allocate a top level pagetable and copy the kernel's
785 	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
786 	 */
787 	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
788 	hci->hci_vlp_pfn =
789 	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
790 	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
791 	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
792 
793 	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
794 	ASSERT(pfn != PFN_INVALID);
795 	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
796 #endif /* __amd64 && !__xpv */
797 }
798 
799 /*ARGSUSED*/
800 static void
801 hat_vlp_teardown(cpu_t *cpu)
802 {
803 #if defined(__amd64) && !defined(__xpv)
804 	struct hat_cpu_info *hci;
805 
806 	if ((hci = cpu->cpu_hat_info) == NULL)
807 		return;
808 	if (hci->hci_vlp_l2ptes)
809 		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
810 	if (hci->hci_vlp_l3ptes)
811 		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
812 #endif
813 }
814 
815 #define	NEXT_HKR(r, l, s, e) {			\
816 	kernel_ranges[r].hkr_level = l;		\
817 	kernel_ranges[r].hkr_start_va = s;	\
818 	kernel_ranges[r].hkr_end_va = e;	\
819 	++r;					\
820 }
821 
822 /*
823  * Finish filling in the kernel hat.
824  * Pre-fill all top level kernel page table entries for the kernel's
825  * part of the address range.  From this point on we can't use any new
826  * kernel large pages if they need PTEs at max_level.
827  *
828  * Also create the kmap mappings.
829  */
830 void
831 hat_init_finish(void)
832 {
833 	size_t		size;
834 	uint_t		r = 0;
835 	uintptr_t	va;
836 	hat_kernel_range_t *rp;
837 
838 
839 	/*
840 	 * We are now effectively running on the kernel hat.
841 	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
842 	 * reserve for all HAT allocations.  From here on, the reserves are
843 	 * only used when avoiding recursion in kmem_alloc().
844 	 */
845 	use_boot_reserve = 0;
846 	htable_adjust_reserve();
847 
848 	/*
849 	 * User HATs are initialized with copies of all kernel mappings in
850 	 * higher level page tables. Ensure that those entries exist.
851 	 */
852 #if defined(__amd64)
853 
854 	NEXT_HKR(r, 3, kernelbase, 0);
855 #if defined(__xpv)
856 	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
857 #endif
858 
859 #elif defined(__i386)
860 
861 #if !defined(__xpv)
862 	if (mmu.pae_hat) {
863 		va = kernelbase;
864 		if ((va & LEVEL_MASK(2)) != va) {
865 			va = P2ROUNDUP(va, LEVEL_SIZE(2));
866 			NEXT_HKR(r, 1, kernelbase, va);
867 		}
868 		if (va != 0)
869 			NEXT_HKR(r, 2, va, 0);
870 	} else
871 #endif /* __xpv */
872 		NEXT_HKR(r, 1, kernelbase, 0);
873 
874 #endif /* __i386 */
875 
876 	num_kernel_ranges = r;
877 
878 	/*
879 	 * Create all the kernel pagetables that will have entries
880 	 * shared to user HATs.
881 	 */
882 	for (r = 0; r < num_kernel_ranges; ++r) {
883 		rp = &kernel_ranges[r];
884 		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
885 		    va += LEVEL_SIZE(rp->hkr_level)) {
886 			htable_t *ht;
887 
888 			if (IN_HYPERVISOR_VA(va))
889 				continue;
890 
891 			/* can/must skip if a page mapping already exists */
892 			if (rp->hkr_level <= mmu.max_page_level &&
893 			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
894 			    NULL) {
895 				htable_release(ht);
896 				continue;
897 			}
898 
899 			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
900 			    NULL);
901 		}
902 	}
903 
904 	/*
905 	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
906 	 * page holding the top level pagetable. We use the remainder for
907 	 * the "per CPU" page tables for VLP processes.
908 	 * Map the top level kernel pagetable into the kernel to make
909 	 * it easy to use bcopy to access these tables.
910 	 */
911 	if (mmu.pae_hat) {
912 		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
913 		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
914 		    kas.a_hat->hat_htable->ht_pfn,
915 #if !defined(__xpv)
916 		    PROT_WRITE |
917 #endif
918 		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
919 		    HAT_LOAD | HAT_LOAD_NOCONSIST);
920 	}
921 	hat_vlp_setup(CPU);
922 
923 	/*
924 	 * Create kmap (cached mappings of kernel PTEs)
925 	 * for 32 bit we map from segmap_start .. ekernelheap
926 	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
927 	 */
928 #if defined(__i386)
929 	size = (uintptr_t)ekernelheap - segmap_start;
930 #elif defined(__amd64)
931 	size = segmapsize;
932 #endif
933 	hat_kmap_init((uintptr_t)segmap_start, size);
934 }
935 
936 /*
937  * In 32 bit PAE mode, PTEs are 64 bits, but ordinary atomic memory references
938  * are 32 bit, so for safety we must use cas64() to install these.
939  */
940 #ifdef __i386
941 static void
942 reload_pae32(hat_t *hat, cpu_t *cpu)
943 {
944 	x86pte_t *src;
945 	x86pte_t *dest;
946 	x86pte_t pte;
947 	int i;
948 
949 	/*
950 	 * Load the 4 entries of the level 2 page table into this
951 	 * cpu's range of the vlp_page and point cr3 at them.
952 	 */
953 	ASSERT(mmu.pae_hat);
954 	src = hat->hat_vlp_ptes;
955 	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
956 	for (i = 0; i < VLP_NUM_PTES; ++i) {
957 		for (;;) {
958 			pte = dest[i];
959 			if (pte == src[i])
960 				break;
961 			if (cas64(dest + i, pte, src[i]) != src[i])
962 				break;
963 		}
964 	}
965 }
966 #endif
967 
968 /*
969  * Switch to a new active hat, maintaining bit masks to track active CPUs.
970  *
971  * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value; on metal it
972  * remains a 32-bit value.
973  */
974 void
975 hat_switch(hat_t *hat)
976 {
977 	uint64_t	newcr3;
978 	cpu_t		*cpu = CPU;
979 	hat_t		*old = cpu->cpu_current_hat;
980 
981 	/*
982 	 * set up this information first, so we don't miss any cross calls
983 	 */
984 	if (old != NULL) {
985 		if (old == hat)
986 			return;
987 		if (old != kas.a_hat)
988 			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
989 	}
990 
991 	/*
992 	 * Add this CPU to the active set for this HAT.
993 	 */
994 	if (hat != kas.a_hat) {
995 		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
996 	}
997 	cpu->cpu_current_hat = hat;
998 
999 	/*
1000 	 * now go ahead and load cr3
1001 	 */
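	/*
	 * For a VLP hat on amd64, cr3 will point at this CPU's private copy
	 * of the top level pagetable (hci_vlp_pfn).  For i386 PAE it points
	 * into the kernel's top level page at this CPU's group of 4 vlp_page
	 * entries, hence the (cpu_id + 1) * VLP_SIZE offset below.
	 */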
1002 	if (hat->hat_flags & HAT_VLP) {
1003 #if defined(__amd64)
1004 		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1005 
1006 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1007 		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1008 #elif defined(__i386)
1009 		reload_pae32(hat, cpu);
1010 		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1011 		    (cpu->cpu_id + 1) * VLP_SIZE;
1012 #endif
1013 	} else {
1014 		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1015 	}
1016 #ifdef __xpv
1017 	{
1018 		struct mmuext_op t[2];
1019 		uint_t retcnt;
1020 		uint_t opcnt = 1;
1021 
1022 		t[0].cmd = MMUEXT_NEW_BASEPTR;
1023 		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1024 #if defined(__amd64)
1025 		/*
1026 		 * There's an interesting problem here, as to what to
1027 		 * actually specify when switching to the kernel hat.
1028 		 * For now we'll reuse the kernel hat again.
1029 		 */
1030 		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1031 		if (hat == kas.a_hat)
1032 			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1033 		else
1034 			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1035 		++opcnt;
1036 #endif	/* __amd64 */
1037 		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1038 			panic("HYPERVISOR_mmu_update() failed");
1039 		ASSERT(retcnt == opcnt);
1040 
1041 	}
1042 #else
1043 	setcr3(newcr3);
1044 #endif
1045 	ASSERT(cpu == CPU);
1046 }
1047 
1048 /*
1049  * Utility to return a valid x86pte_t from protections, pfn, and level number
1050  */
1051 static x86pte_t
1052 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1053 {
1054 	x86pte_t	pte;
1055 	uint_t		cache_attr = attr & HAT_ORDER_MASK;
1056 
1057 	pte = MAKEPTE(pfn, level);
1058 
1059 	if (attr & PROT_WRITE)
1060 		PTE_SET(pte, PT_WRITABLE);
1061 
1062 	if (attr & PROT_USER)
1063 		PTE_SET(pte, PT_USER);
1064 
1065 	if (!(attr & PROT_EXEC))
1066 		PTE_SET(pte, mmu.pt_nx);
1067 
1068 	/*
1069 	 * Set the software bits used to track ref/mod syncs and hments.
1070 	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1071 	 */
1072 	if (flags & HAT_LOAD_NOCONSIST)
1073 		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1074 	else if (attr & HAT_NOSYNC)
1075 		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1076 
1077 	/*
1078 	 * Set the caching attributes in the PTE. The combination
1079 	 * of attributes are poorly defined, so we pay attention
1080 	 * to them in the given order.
1081 	 *
1082 	 * The test for HAT_STRICTORDER is different because it's defined
1083 	 * as "0" - which was a stupid thing to do, but is too late to change!
1084 	 */
1085 	if (cache_attr == HAT_STRICTORDER) {
1086 		PTE_SET(pte, PT_NOCACHE);
1087 	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1088 	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1089 		/* nothing to set */;
1090 	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1091 		PTE_SET(pte, PT_NOCACHE);
1092 		if (x86_feature & X86_PAT)
1093 			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1094 		else
1095 			PTE_SET(pte, PT_WRITETHRU);
1096 	} else {
1097 		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1098 	}
1099 
1100 	return (pte);
1101 }
1102 
1103 /*
1104  * Duplicate address translations of the parent to the child.
1105  * This function really isn't used anymore.
1106  */
1107 /*ARGSUSED*/
1108 int
1109 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1110 {
1111 	ASSERT((uintptr_t)addr < kernelbase);
1112 	ASSERT(new != kas.a_hat);
1113 	ASSERT(old != kas.a_hat);
1114 	return (0);
1115 }
1116 
1117 /*
1118  * Allocate any hat resources required for a process being swapped in.
1119  */
1120 /*ARGSUSED*/
1121 void
1122 hat_swapin(hat_t *hat)
1123 {
1124 	/* do nothing - we let everything fault back in */
1125 }
1126 
1127 /*
1128  * Unload all translations associated with an address space of a process
1129  * that is being swapped out.
1130  */
1131 void
1132 hat_swapout(hat_t *hat)
1133 {
1134 	uintptr_t	vaddr = (uintptr_t)0;
1135 	uintptr_t	eaddr = _userlimit;
1136 	htable_t	*ht = NULL;
1137 	level_t		l;
1138 
1139 	XPV_DISALLOW_MIGRATE();
1140 	/*
1141 	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1142 	 * seg_spt and shared pagetables can't be swapped out.
1143 	 * Take a look at segspt_shmswapout() - it's a big no-op.
1144 	 *
1145 	 * Instead we'll walk through all the address space and unload
1146 	 * any mappings which we are sure are not shared, not locked.
1147 	 */
1148 	ASSERT(IS_PAGEALIGNED(vaddr));
1149 	ASSERT(IS_PAGEALIGNED(eaddr));
1150 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1151 	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1152 		eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1153 
1154 	while (vaddr < eaddr) {
1155 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
1156 		if (ht == NULL)
1157 			break;
1158 
1159 		ASSERT(!IN_VA_HOLE(vaddr));
1160 
1161 		/*
1162 		 * If the page table is shared skip its entire range.
1163 		 */
1164 		l = ht->ht_level;
1165 		if (ht->ht_flags & HTABLE_SHARED_PFN) {
1166 			vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1167 			htable_release(ht);
1168 			ht = NULL;
1169 			continue;
1170 		}
1171 
1172 		/*
1173 		 * If the page table has no locked entries, unload this one.
1174 		 */
1175 		if (ht->ht_lock_cnt == 0)
1176 			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1177 			    HAT_UNLOAD_UNMAP);
1178 
1179 		/*
1180 		 * If we have a level 0 page table with locked entries,
1181 		 * skip the entire page table, otherwise skip just one entry.
1182 		 */
1183 		if (ht->ht_lock_cnt > 0 && l == 0)
1184 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1185 		else
1186 			vaddr += LEVEL_SIZE(l);
1187 	}
1188 	if (ht)
1189 		htable_release(ht);
1190 
1191 	/*
1192 	 * We're in swapout because the system is low on memory, so
1193 	 * go back and flush all the htables off the cached list.
1194 	 */
1195 	htable_purge_hat(hat);
1196 	XPV_ALLOW_MIGRATE();
1197 }
1198 
1199 /*
1200  * returns number of bytes that have valid mappings in hat.
1201  */
1202 size_t
1203 hat_get_mapped_size(hat_t *hat)
1204 {
1205 	size_t total = 0;
1206 	int l;
1207 
1208 	for (l = 0; l <= mmu.max_page_level; l++)
1209 		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1210 	total += hat->hat_ism_pgcnt;
1211 
1212 	return (total);
1213 }
1214 
1215 /*
1216  * enable/disable collection of stats for hat.
1217  */
1218 int
1219 hat_stats_enable(hat_t *hat)
1220 {
1221 	atomic_add_32(&hat->hat_stats, 1);
1222 	return (1);
1223 }
1224 
1225 void
1226 hat_stats_disable(hat_t *hat)
1227 {
1228 	atomic_add_32(&hat->hat_stats, -1);
1229 }
1230 
1231 /*
1232  * Utility to sync the ref/mod bits from a page table entry to the page_t
1233  * We must be holding the mapping list lock when this is called.
1234  */
1235 static void
1236 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1237 {
1238 	uint_t	rm = 0;
1239 	pgcnt_t	pgcnt;
1240 
1241 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1242 		return;
1243 
1244 	if (PTE_GET(pte, PT_REF))
1245 		rm |= P_REF;
1246 
1247 	if (PTE_GET(pte, PT_MOD))
1248 		rm |= P_MOD;
1249 
1250 	if (rm == 0)
1251 		return;
1252 
1253 	/*
1254 	 * sync to all constituent pages of a large page
1255 	 */
1256 	ASSERT(x86_hm_held(pp));
1257 	pgcnt = page_get_pagecnt(level);
1258 	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1259 	for (; pgcnt > 0; --pgcnt) {
1260 		/*
1261 		 * hat_page_demote() can't decrease
1262 		 * pszc below this mapping size
1263 		 * since this large mapping existed after we
1264 		 * took mlist lock.
1265 		 */
1266 		ASSERT(pp->p_szc >= level);
1267 		hat_page_setattr(pp, rm);
1268 		++pp;
1269 	}
1270 }
1271 
1272 /*
1273  * This the set of PTE bits for PFN, permissions and caching
1274  * that are allowed to change on a HAT_LOAD_REMAP
1275  */
1276 #define	PT_REMAP_BITS							\
1277 	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
1278 	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
1279 
1280 #define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
1281 /*
1282  * Do the low-level work to get a mapping entered into a HAT's pagetables
1283  * and in the mapping list of the associated page_t.
1284  */
1285 static int
1286 hati_pte_map(
1287 	htable_t	*ht,
1288 	uint_t		entry,
1289 	page_t		*pp,
1290 	x86pte_t	pte,
1291 	int		flags,
1292 	void		*pte_ptr)
1293 {
1294 	hat_t		*hat = ht->ht_hat;
1295 	x86pte_t	old_pte;
1296 	level_t		l = ht->ht_level;
1297 	hment_t		*hm;
1298 	uint_t		is_consist;
1299 	int		rv = 0;
1300 
1301 	/*
1302 	 * Is this a consistent (i.e. needs the mapping list lock) mapping?
1303 	 */
1304 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1305 
1306 	/*
1307 	 * Track locked mapping count in the htable.  Do this first,
1308 	 * as we track locking even if there already is a mapping present.
1309 	 */
1310 	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
1311 		HTABLE_LOCK_INC(ht);
1312 
1313 	/*
1314 	 * Acquire the page's mapping list lock and get an hment to use.
1315 	 * Note that hment_prepare() might return NULL.
1316 	 */
1317 	if (is_consist) {
1318 		x86_hm_enter(pp);
1319 		hm = hment_prepare(ht, entry, pp);
1320 	}
1321 
1322 	/*
1323 	 * Set the new pte, retrieving the old one at the same time.
1324 	 */
1325 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1326 
1327 	/*
1328 	 * did we get a large page / page table collision?
1329 	 */
1330 	if (old_pte == LPAGE_ERROR) {
1331 		rv = -1;
1332 		goto done;
1333 	}
1334 
1335 	/*
1336 	 * If the mapping didn't change there is nothing more to do.
1337 	 */
1338 	if (PTE_EQUIV(pte, old_pte))
1339 		goto done;
1340 
1341 	/*
1342 	 * Install a new mapping in the page's mapping list
1343 	 */
1344 	if (!PTE_ISVALID(old_pte)) {
1345 		if (is_consist) {
1346 			hment_assign(ht, entry, pp, hm);
1347 			x86_hm_exit(pp);
1348 		} else {
1349 			ASSERT(flags & HAT_LOAD_NOCONSIST);
1350 		}
1351 #if defined(__amd64)
1352 		if (ht->ht_flags & HTABLE_VLP) {
1353 			cpu_t *cpu = CPU;
1354 			x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1355 			VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1356 		}
1357 #endif
1358 		HTABLE_INC(ht->ht_valid_cnt);
1359 		PGCNT_INC(hat, l);
1360 		return (rv);
1361 	}
1362 
1363 	/*
1364 	 * Remap's are more complicated:
1365 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
1366 	 *    We also require that NOCONSIST be specified.
1367 	 *  - Otherwise only permission or caching bits may change.
1368 	 */
1369 	if (!PTE_ISPAGE(old_pte, l))
1370 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1371 
1372 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1373 		REMAPASSERT(flags & HAT_LOAD_REMAP);
1374 		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1375 		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1376 		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1377 		    pf_is_memory(PTE2PFN(pte, l)));
1378 		REMAPASSERT(!is_consist);
1379 	}
1380 
1381 	/*
1382 	 * We only let remaps change certain bits in the PTE.
1383 	 */
1384 	if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1385 		panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1386 		    old_pte, pte);
1387 
1388 	/*
1389 	 * We don't create any mapping list entries on a remap, so release
1390 	 * any allocated hment after we drop the mapping list lock.
1391 	 */
1392 done:
1393 	if (is_consist) {
1394 		x86_hm_exit(pp);
1395 		if (hm != NULL)
1396 			hment_free(hm);
1397 	}
1398 	return (rv);
1399 }
1400 
1401 /*
1402  * Internal routine to load a single page table entry. This only fails if
1403  * we attempt to overwrite a page table link with a large page.
1404  */
1405 static int
1406 hati_load_common(
1407 	hat_t		*hat,
1408 	uintptr_t	va,
1409 	page_t		*pp,
1410 	uint_t		attr,
1411 	uint_t		flags,
1412 	level_t		level,
1413 	pfn_t		pfn)
1414 {
1415 	htable_t	*ht;
1416 	uint_t		entry;
1417 	x86pte_t	pte;
1418 	int		rv = 0;
1419 
1420 	/*
1421 	 * The number 16 is arbitrary and here to catch a recursion problem
1422 	 * early before we blow out the kernel stack.
1423 	 */
1424 	++curthread->t_hatdepth;
1425 	ASSERT(curthread->t_hatdepth < 16);
1426 
1427 	ASSERT(hat == kas.a_hat ||
1428 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1429 
1430 	if (flags & HAT_LOAD_SHARE)
1431 		hat->hat_flags |= HAT_SHARED;
1432 
1433 	/*
1434 	 * Find the page table that maps this page if it already exists.
1435 	 */
1436 	ht = htable_lookup(hat, va, level);
1437 
1438 	/*
1439 	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1440 	 */
1441 	if (pp == NULL)
1442 		flags |= HAT_LOAD_NOCONSIST;
1443 
1444 	if (ht == NULL) {
1445 		ht = htable_create(hat, va, level, NULL);
1446 		ASSERT(ht != NULL);
1447 	}
1448 	entry = htable_va2entry(va, ht);
1449 
1450 	/*
1451 	 * a bunch of paranoid error checking
1452 	 */
1453 	ASSERT(ht->ht_busy > 0);
1454 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1455 		panic("hati_load_common: bad htable %p, va %p",
1456 		    (void *)ht, (void *)va);
1457 	ASSERT(ht->ht_level == level);
1458 
1459 	/*
1460 	 * construct the new PTE
1461 	 */
1462 	if (hat == kas.a_hat)
1463 		attr &= ~PROT_USER;
1464 	pte = hati_mkpte(pfn, attr, level, flags);
1465 	if (hat == kas.a_hat && va >= kernelbase)
1466 		PTE_SET(pte, mmu.pt_global);
1467 
1468 	/*
1469 	 * establish the mapping
1470 	 */
1471 	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1472 
1473 	/*
1474 	 * release the htable and any reserves
1475 	 */
1476 	htable_release(ht);
1477 	--curthread->t_hatdepth;
1478 	return (rv);
1479 }
1480 
1481 /*
1482  * special case of hat_memload to deal with some kernel addrs for performance
1483  */
1484 static void
1485 hat_kmap_load(
1486 	caddr_t		addr,
1487 	page_t		*pp,
1488 	uint_t		attr,
1489 	uint_t		flags)
1490 {
1491 	uintptr_t	va = (uintptr_t)addr;
1492 	x86pte_t	pte;
1493 	pfn_t		pfn = page_pptonum(pp);
1494 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
1495 	htable_t	*ht;
1496 	uint_t		entry;
1497 	void		*pte_ptr;
1498 
1499 	/*
1500 	 * construct the requested PTE
1501 	 */
1502 	attr &= ~PROT_USER;
1503 	attr |= HAT_STORECACHING_OK;
1504 	pte = hati_mkpte(pfn, attr, 0, flags);
1505 	PTE_SET(pte, mmu.pt_global);
1506 
1507 	/*
1508 	 * Figure out the pte_ptr and htable and use common code to finish up
1509 	 */
1510 	if (mmu.pae_hat)
1511 		pte_ptr = mmu.kmap_ptes + pg_off;
1512 	else
1513 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1514 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1515 	    LEVEL_SHIFT(1)];
1516 	entry = htable_va2entry(va, ht);
1517 	++curthread->t_hatdepth;
1518 	ASSERT(curthread->t_hatdepth < 16);
1519 	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1520 	--curthread->t_hatdepth;
1521 }
1522 
1523 /*
1524  * hat_memload() - load a translation to the given page struct
1525  *
1526  * Flags for hat_memload/hat_devload/hat_*attr.
1527  *
1528  * 	HAT_LOAD	Default flags to load a translation to the page.
1529  *
1530  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
1531  *			and hat_devload().
1532  *
1533  *	HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1534  *			sets PT_NOCONSIST
1535  *
1536  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
1537  *			that map some user pages (not kas) are shared by more
1538  *			than one process (eg. ISM).
1539  *
1540  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
1541  *
1542  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
1543  *			point, it's setting up mapping to allocate internal
1544  *			hat layer data structures.  This flag forces hat layer
1545  *			to tap its reserves in order to prevent infinite
1546  *			recursion.
1547  *
1548  * The following is a protection attribute (like PROT_READ, etc.)
1549  *
1550  *	HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
1551  *			are never cleared.
1552  *
1553  * Installing new valid PTEs and creating the mapping list
1554  * entry are controlled under the same lock. It's derived from the
1555  * page_t being mapped.
1556  */
1557 static uint_t supported_memload_flags =
1558 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1559 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
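
/*
 * Illustrative only (not a call made from this file): mapping one page,
 * writable and locked, into the kernel hat might look like
 *
 *	hat_memload(kas.a_hat, addr, pp,
 *	    PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
 */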
1560 
1561 void
1562 hat_memload(
1563 	hat_t		*hat,
1564 	caddr_t		addr,
1565 	page_t		*pp,
1566 	uint_t		attr,
1567 	uint_t		flags)
1568 {
1569 	uintptr_t	va = (uintptr_t)addr;
1570 	level_t		level = 0;
1571 	pfn_t		pfn = page_pptonum(pp);
1572 
1573 	XPV_DISALLOW_MIGRATE();
1574 	ASSERT(IS_PAGEALIGNED(va));
1575 	ASSERT(hat == kas.a_hat || va < _userlimit);
1576 	ASSERT(hat == kas.a_hat ||
1577 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1578 	ASSERT((flags & supported_memload_flags) == flags);
1579 
1580 	ASSERT(!IN_VA_HOLE(va));
1581 	ASSERT(!PP_ISFREE(pp));
1582 
1583 	/*
1584 	 * kernel address special case for performance.
1585 	 */
1586 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1587 		ASSERT(hat == kas.a_hat);
1588 		hat_kmap_load(addr, pp, attr, flags);
1589 		XPV_ALLOW_MIGRATE();
1590 		return;
1591 	}
1592 
1593 	/*
1594 	 * This is used for memory with normal caching enabled, so
1595 	 * always set HAT_STORECACHING_OK.
1596 	 */
1597 	attr |= HAT_STORECACHING_OK;
1598 	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1599 		panic("unexpected hati_load_common() failure");
1600 	XPV_ALLOW_MIGRATE();
1601 }
1602 
1603 /* ARGSUSED */
1604 void
1605 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1606     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1607 {
1608 	hat_memload(hat, addr, pp, attr, flags);
1609 }
1610 
1611 /*
1612  * Load the given array of page structs using large pages when possible
1613  */
1614 void
1615 hat_memload_array(
1616 	hat_t		*hat,
1617 	caddr_t		addr,
1618 	size_t		len,
1619 	page_t		**pages,
1620 	uint_t		attr,
1621 	uint_t		flags)
1622 {
1623 	uintptr_t	va = (uintptr_t)addr;
1624 	uintptr_t	eaddr = va + len;
1625 	level_t		level;
1626 	size_t		pgsize;
1627 	pgcnt_t		pgindx = 0;
1628 	pfn_t		pfn;
1629 	pgcnt_t		i;
1630 
1631 	XPV_DISALLOW_MIGRATE();
1632 	ASSERT(IS_PAGEALIGNED(va));
1633 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1634 	ASSERT(hat == kas.a_hat ||
1635 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1636 	ASSERT((flags & supported_memload_flags) == flags);
1637 
1638 	/*
1639 	 * memload is used for memory with full caching enabled, so
1640 	 * set HAT_STORECACHING_OK.
1641 	 */
1642 	attr |= HAT_STORECACHING_OK;
1643 
1644 	/*
1645 	 * handle all pages using largest possible pagesize
1646 	 */
1647 	while (va < eaddr) {
1648 		/*
1649 		 * decide what level mapping to use (ie. pagesize)
1650 		 */
1651 		pfn = page_pptonum(pages[pgindx]);
1652 		for (level = mmu.max_page_level; ; --level) {
1653 			pgsize = LEVEL_SIZE(level);
1654 			if (level == 0)
1655 				break;
1656 
1657 			if (!IS_P2ALIGNED(va, pgsize) ||
1658 			    (eaddr - va) < pgsize ||
1659 			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1660 				continue;
1661 
1662 			/*
1663 			 * To use a large mapping of this size, all the
1664 			 * pages we are passed must be sequential subpages
1665 			 * of the large page.
1666 			 * hat_page_demote() can't change p_szc because
1667 			 * all pages are locked.
1668 			 */
1669 			if (pages[pgindx]->p_szc >= level) {
1670 				for (i = 0; i < mmu_btop(pgsize); ++i) {
1671 					if (pfn + i !=
1672 					    page_pptonum(pages[pgindx + i]))
1673 						break;
1674 					ASSERT(pages[pgindx + i]->p_szc >=
1675 					    level);
1676 					ASSERT(pages[pgindx] + i ==
1677 					    pages[pgindx + i]);
1678 				}
1679 				if (i == mmu_btop(pgsize)) {
1680 #ifdef DEBUG
1681 					if (level == 2)
1682 						map1gcnt++;
1683 #endif
1684 					break;
1685 				}
1686 			}
1687 		}
1688 
1689 		/*
1690 		 * Load this page mapping. If the load fails, try a smaller
1691 		 * pagesize.
1692 		 */
1693 		ASSERT(!IN_VA_HOLE(va));
1694 		while (hati_load_common(hat, va, pages[pgindx], attr,
1695 		    flags, level, pfn) != 0) {
1696 			if (level == 0)
1697 				panic("unexpected hati_load_common() failure");
1698 			--level;
1699 			pgsize = LEVEL_SIZE(level);
1700 		}
1701 
1702 		/*
1703 		 * move to next page
1704 		 */
1705 		va += pgsize;
1706 		pgindx += mmu_btop(pgsize);
1707 	}
1708 	XPV_ALLOW_MIGRATE();
1709 }
1710 
1711 /* ARGSUSED */
1712 void
1713 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1714     struct page **pps, uint_t attr, uint_t flags,
1715     hat_region_cookie_t rcookie)
1716 {
1717 	hat_memload_array(hat, addr, len, pps, attr, flags);
1718 }
1719 
1720 /*
1721  * void hat_devload(hat, addr, len, pf, attr, flags)
1722  *	load/lock the given page frame number
1723  *
1724  * Advisory ordering attributes. Apply only to device mappings.
1725  *
1726  * HAT_STRICTORDER: the CPU must issue the references in order, as the
1727  *	programmer specified.  This is the default.
1728  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1729  *	of reordering; store or load with store or load).
1730  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1731  *	to consecutive locations (for example, turn two consecutive byte
1732  *	stores into one halfword store), and it may batch individual loads
1733  *	(for example, turn two consecutive byte loads into one halfword load).
1734  *	This also implies re-ordering.
1735  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1736  *	until another store occurs.  The default is to fetch new data
1737  *	on every load.  This also implies merging.
1738  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1739  *	the device (perhaps with other data) at a later time.  The default is
1740  *	to push the data right away.  This also implies load caching.
1741  *
1742  * Equivalent of hat_memload(), but can be used for device memory where
1743  * there are no page_t's and we support additional flags (write merging, etc).
1744  * Note that we can have large page mappings with this interface.
1745  */
1746 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1747 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1748 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
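
/*
 * Illustrative only: a driver mapping a page of device registers uncached
 * and strictly ordered might use something like
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 */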
1749 
1750 void
1751 hat_devload(
1752 	hat_t		*hat,
1753 	caddr_t		addr,
1754 	size_t		len,
1755 	pfn_t		pfn,
1756 	uint_t		attr,
1757 	int		flags)
1758 {
1759 	uintptr_t	va = ALIGN2PAGE(addr);
1760 	uintptr_t	eva = va + len;
1761 	level_t		level;
1762 	size_t		pgsize;
1763 	page_t		*pp;
1764 	int		f;	/* per PTE copy of flags  - maybe modified */
1765 	uint_t		a;	/* per PTE copy of attr */
1766 
1767 	XPV_DISALLOW_MIGRATE();
1768 	ASSERT(IS_PAGEALIGNED(va));
1769 	ASSERT(hat == kas.a_hat || eva <= _userlimit);
1770 	ASSERT(hat == kas.a_hat ||
1771 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1772 	ASSERT((flags & supported_devload_flags) == flags);
1773 
1774 	/*
1775 	 * handle all pages
1776 	 */
1777 	while (va < eva) {
1778 
1779 		/*
1780 		 * decide what level mapping to use (ie. pagesize)
1781 		 */
1782 		for (level = mmu.max_page_level; ; --level) {
1783 			pgsize = LEVEL_SIZE(level);
1784 			if (level == 0)
1785 				break;
1786 			if (IS_P2ALIGNED(va, pgsize) &&
1787 			    (eva - va) >= pgsize &&
1788 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1789 #ifdef DEBUG
1790 				if (level == 2)
1791 					map1gcnt++;
1792 #endif
1793 				break;
1794 			}
1795 		}
1796 
1797 		/*
1798 		 * If this is just memory then allow caching (this happens
1799 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1800 		 * to override that. If we don't have a page_t then make sure
1801 		 * NOCONSIST is set.
1802 		 */
1803 		a = attr;
1804 		f = flags;
1805 		if (!pf_is_memory(pfn))
1806 			f |= HAT_LOAD_NOCONSIST;
1807 		else if (!(a & HAT_PLAT_NOCACHE))
1808 			a |= HAT_STORECACHING_OK;
1809 
1810 		if (f & HAT_LOAD_NOCONSIST)
1811 			pp = NULL;
1812 		else
1813 			pp = page_numtopp_nolock(pfn);
1814 
1815 		/*
1816 		 * load this page mapping
1817 		 */
1818 		ASSERT(!IN_VA_HOLE(va));
1819 		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1820 			if (level == 0)
1821 				panic("unexpected hati_load_common() failure");
1822 			--level;
1823 			pgsize = LEVEL_SIZE(level);
1824 		}
1825 
1826 		/*
1827 		 * move to next page
1828 		 */
1829 		va += pgsize;
1830 		pfn += mmu_btop(pgsize);
1831 	}
1832 	XPV_ALLOW_MIGRATE();
1833 }
1834 
1835 /*
1836  * void hat_unlock(hat, addr, len)
1837  *	unlock the mappings to a given range of addresses
1838  *
1839  * Locks are tracked by ht_lock_cnt in the htable.
1840  */
1841 void
1842 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1843 {
1844 	uintptr_t	vaddr = (uintptr_t)addr;
1845 	uintptr_t	eaddr = vaddr + len;
1846 	htable_t	*ht = NULL;
1847 
1848 	/*
1849 	 * Kernel entries are always locked; we don't track lock counts.
1850 	 */
1851 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1852 	ASSERT(IS_PAGEALIGNED(vaddr));
1853 	ASSERT(IS_PAGEALIGNED(eaddr));
1854 	if (hat == kas.a_hat)
1855 		return;
1856 	if (eaddr > _userlimit)
1857 		panic("hat_unlock() address out of range - above _userlimit");
1858 
1859 	XPV_DISALLOW_MIGRATE();
1860 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1861 	while (vaddr < eaddr) {
1862 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
1863 		if (ht == NULL)
1864 			break;
1865 
1866 		ASSERT(!IN_VA_HOLE(vaddr));
1867 
1868 		if (ht->ht_lock_cnt < 1)
1869 			panic("hat_unlock(): lock_cnt < 1, "
1870 			    "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1871 		HTABLE_LOCK_DEC(ht);
1872 
1873 		vaddr += LEVEL_SIZE(ht->ht_level);
1874 	}
1875 	if (ht)
1876 		htable_release(ht);
1877 	XPV_ALLOW_MIGRATE();
1878 }
1879 
1880 /* ARGSUSED */
1881 void
1882 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1883     hat_region_cookie_t rcookie)
1884 {
1885 	panic("No shared region support on x86");
1886 }
1887 
1888 #if !defined(__xpv)
1889 /*
1890  * Cross call service routine to demap a virtual page on
1891  * the current CPU or flush all mappings in TLB.
1892  */
1893 /*ARGSUSED*/
1894 static int
1895 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1896 {
1897 	hat_t	*hat = (hat_t *)a1;
1898 	caddr_t	addr = (caddr_t)a2;
1899 
1900 	/*
1901 	 * If the target hat isn't the kernel and this CPU isn't operating
1902 	 * in the target hat, we can ignore the cross call.
1903 	 */
1904 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1905 		return (0);
1906 
1907 	/*
1908 	 * For a normal address, we just flush one page mapping
1909 	 */
1910 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1911 		mmu_tlbflush_entry(addr);
1912 		return (0);
1913 	}
1914 
1915 	/*
1916 	 * Otherwise we reload cr3 to effect a complete TLB flush.
1917 	 *
1918 	 * A reload of cr3 on a VLP process also means we must recopy
1919 	 * the pte values from the struct hat.
1920 	 */
1921 	if (hat->hat_flags & HAT_VLP) {
1922 #if defined(__amd64)
1923 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1924 
1925 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1926 #elif defined(__i386)
1927 		reload_pae32(hat, CPU);
1928 #endif
1929 	}
1930 	reload_cr3();
1931 	return (0);
1932 }
1933 
1934 /*
1935  * Flush all TLB entries, including global (ie. kernel) ones.
1936  */
1937 static void
1938 flush_all_tlb_entries(void)
1939 {
1940 	ulong_t cr4 = getcr4();
1941 
1942 	if (cr4 & CR4_PGE) {
1943 		setcr4(cr4 & ~(ulong_t)CR4_PGE);
1944 		setcr4(cr4);
1945 
1946 		/*
1947 		 * 32 bit PAE also needs to always reload_cr3()
1948 		 */
1949 		if (mmu.max_level == 2)
1950 			reload_cr3();
1951 	} else {
1952 		reload_cr3();
1953 	}
1954 }
1955 
1956 #define	TLB_CPU_HALTED	(01ul)
1957 #define	TLB_INVAL_ALL	(02ul)
1958 #define	CAS_TLB_INFO(cpu, old, new)	\
1959 	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
1960 
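/*
 * Per-CPU mcpu_tlb_info protocol (see tlb_going_idle(), tlb_service() and
 * hat_tlb_inval()):
 *
 *	TLB_CPU_HALTED			the CPU has gone idle
 *	TLB_CPU_HALTED|TLB_INVAL_ALL	a full TLB flush was requested while
 *					the CPU was idle; tlb_service() will
 *					perform it when the CPU wakes up
 */
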
1961 /*
1962  * Record that a CPU is going idle
1963  */
1964 void
1965 tlb_going_idle(void)
1966 {
1967 	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
1968 }
1969 
1970 /*
1971  * Service a delayed TLB flush if coming out of being idle.
1972  */
1973 void
1974 tlb_service(void)
1975 {
1976 	ulong_t flags = getflags();
1977 	ulong_t tlb_info;
1978 	ulong_t found;
1979 
1980 	/*
1981 	 * Be sure interrupts are off while doing this so that
1982 	 * higher level interrupts correctly wait for flushes to finish.
1983 	 */
1984 	if (flags & PS_IE)
1985 		flags = intr_clear();
1986 
1987 	/*
1988 	 * We only have to do something if coming out of being idle.
1989 	 */
1990 	tlb_info = CPU->cpu_m.mcpu_tlb_info;
1991 	if (tlb_info & TLB_CPU_HALTED) {
1992 		ASSERT(CPU->cpu_current_hat == kas.a_hat);
1993 
1994 		/*
1995 		 * Atomic clear and fetch of old state.
1996 		 */
1997 		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
1998 			ASSERT(found & TLB_CPU_HALTED);
1999 			tlb_info = found;
2000 			SMT_PAUSE();
2001 		}
2002 		if (tlb_info & TLB_INVAL_ALL)
2003 			flush_all_tlb_entries();
2004 	}
2005 
2006 	/*
2007 	 * Restore interrupt enable control bit.
2008 	 */
2009 	if (flags & PS_IE)
2010 		sti();
2011 }
2012 #endif /* !__xpv */
2013 
2014 /*
2015  * Internal routine to do cross calls to invalidate a single page (or,
2016  * with DEMAP_ALL_ADDR, the whole TLB) on all CPUs using a given hat.
2017  */
2018 void
2019 hat_tlb_inval(hat_t *hat, uintptr_t va)
2020 {
2021 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
2022 	cpuset_t	justme;
2023 	cpuset_t	cpus_to_shootdown;
2024 #ifndef __xpv
2025 	cpuset_t	check_cpus;
2026 	cpu_t		*cpup;
2027 	int		c;
2028 #endif
2029 
2030 	/*
2031 	 * If the hat is being destroyed, there are no more users, so
2032 	 * demap need not do anything.
2033 	 */
2034 	if (hat->hat_flags & HAT_FREEING)
2035 		return;
2036 
2037 	/*
2038 	 * If demapping from a shared pagetable, we best demap the
2039 	 * entire set of user TLBs, since we don't know what addresses
2040 	 * these were shared at.
2041 	 */
2042 	if (hat->hat_flags & HAT_SHARED) {
2043 		hat = kas.a_hat;
2044 		va = DEMAP_ALL_ADDR;
2045 	}
2046 
2047 	/*
2048 	 * if not running with multiple CPUs, don't use cross calls
2049 	 */
2050 	if (panicstr || !flushes_require_xcalls) {
2051 #ifdef __xpv
2052 		if (va == DEMAP_ALL_ADDR)
2053 			xen_flush_tlb();
2054 		else
2055 			xen_flush_va((caddr_t)va);
2056 #else
2057 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
2058 #endif
2059 		return;
2060 	}
2061 
2062 
2063 	/*
2064 	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2065 	 * Otherwise it's just CPUs currently executing in this hat.
2066 	 */
2067 	kpreempt_disable();
2068 	CPUSET_ONLY(justme, CPU->cpu_id);
2069 	if (hat == kas.a_hat)
2070 		cpus_to_shootdown = khat_cpuset;
2071 	else
2072 		cpus_to_shootdown = hat->hat_cpus;
2073 
2074 #ifndef __xpv
2075 	/*
2076 	 * If any CPUs in the set are idle, just request a delayed flush
2077 	 * and avoid waking them up.
2078 	 */
2079 	check_cpus = cpus_to_shootdown;
2080 	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2081 		ulong_t tlb_info;
2082 
2083 		if (!CPU_IN_SET(check_cpus, c))
2084 			continue;
2085 		CPUSET_DEL(check_cpus, c);
2086 		cpup = cpu[c];
2087 		if (cpup == NULL)
2088 			continue;
2089 
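		/*
		 * If the CPU is (and stays) idle, mark it for a deferred
		 * full flush instead of cross calling it.  If the CAS fails
		 * because the CPU woke up, fall through and leave it in
		 * cpus_to_shootdown.
		 */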
2090 		tlb_info = cpup->cpu_m.mcpu_tlb_info;
2091 		while (tlb_info == TLB_CPU_HALTED) {
2092 			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2093 			    TLB_CPU_HALTED | TLB_INVAL_ALL);
2094 			SMT_PAUSE();
2095 			tlb_info = cpup->cpu_m.mcpu_tlb_info;
2096 		}
2097 		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2098 			HATSTAT_INC(hs_tlb_inval_delayed);
2099 			CPUSET_DEL(cpus_to_shootdown, c);
2100 		}
2101 	}
2102 #endif
2103 
2104 	if (CPUSET_ISNULL(cpus_to_shootdown) ||
2105 	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2106 
2107 #ifdef __xpv
2108 		if (va == DEMAP_ALL_ADDR)
2109 			xen_flush_tlb();
2110 		else
2111 			xen_flush_va((caddr_t)va);
2112 #else
2113 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
2114 #endif
2115 
2116 	} else {
2117 
2118 		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2119 #ifdef __xpv
2120 		if (va == DEMAP_ALL_ADDR)
2121 			xen_gflush_tlb(cpus_to_shootdown);
2122 		else
2123 			xen_gflush_va((caddr_t)va, cpus_to_shootdown);
2124 #else
2125 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI,
2126 		    cpus_to_shootdown, hati_demap_func);
2127 #endif
2128 
2129 	}
2130 	kpreempt_enable();
2131 }
2132 
2133 /*
2134  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2135  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
2136  * handle releasing of the htables.
2137  */
2138 void
2139 hat_pte_unmap(
2140 	htable_t	*ht,
2141 	uint_t		entry,
2142 	uint_t		flags,
2143 	x86pte_t	old_pte,
2144 	void		*pte_ptr)
2145 {
2146 	hat_t		*hat = ht->ht_hat;
2147 	hment_t		*hm = NULL;
2148 	page_t		*pp = NULL;
2149 	level_t		l = ht->ht_level;
2150 	pfn_t		pfn;
2151 
2152 	/*
2153 	 * We always track the locking counts, even if nothing is unmapped
2154 	 */
2155 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2156 		ASSERT(ht->ht_lock_cnt > 0);
2157 		HTABLE_LOCK_DEC(ht);
2158 	}
2159 
2160 	/*
2161 	 * Figure out which page's mapping list lock to acquire using the PFN
2162 	 * passed in "old" PTE. We then attempt to invalidate the PTE.
2163 	 * If another thread, probably a hat_pageunload, has asynchronously
2164 	 * unmapped/remapped this address we'll loop here.
2165 	 */
2166 	ASSERT(ht->ht_busy > 0);
2167 	while (PTE_ISVALID(old_pte)) {
2168 		pfn = PTE2PFN(old_pte, l);
2169 		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2170 			pp = NULL;
2171 		} else {
2172 #ifdef __xpv
2173 			if (pfn == PFN_INVALID)
2174 				panic("Invalid PFN, but not PT_NOCONSIST");
2175 #endif
2176 			pp = page_numtopp_nolock(pfn);
2177 			if (pp == NULL) {
2178 				panic("no page_t, not NOCONSIST: old_pte="
2179 				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2180 				    old_pte, (uintptr_t)ht, entry,
2181 				    (uintptr_t)pte_ptr);
2182 			}
2183 			x86_hm_enter(pp);
2184 		}
2185 
2186 		/*
2187 		 * If freeing the address space, just check that the PTE
2188 		 * hasn't changed; since the mappings are no longer in use by
2189 		 * any thread, invalidation is unnecessary.
2190 		 * If not freeing, do a full invalidate.
2191 		 *
2192 		 * On the hypervisor we must always remove mappings, as a
2193 		 * writable mapping left behind could cause a page table
2194 		 * allocation to fail.
2195 		 */
2196 #if !defined(__xpv)
2197 		if (hat->hat_flags & HAT_FREEING)
2198 			old_pte = x86pte_get(ht, entry);
2199 		else
2200 #endif
2201 			old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
2202 
2203 		/*
2204 		 * If the page hadn't changed we've unmapped it and can proceed
2205 		 */
2206 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2207 			break;
2208 
2209 		/*
2210 		 * Otherwise, we'll have to retry with the current old_pte.
2211 		 * Drop the hment lock, since the pfn may have changed.
2212 		 */
2213 		if (pp != NULL) {
2214 			x86_hm_exit(pp);
2215 			pp = NULL;
2216 		} else {
2217 			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2218 		}
2219 	}
2220 
2221 	/*
2222 	 * If the old mapping wasn't valid, there's nothing more to do
2223 	 */
2224 	if (!PTE_ISVALID(old_pte)) {
2225 		if (pp != NULL)
2226 			x86_hm_exit(pp);
2227 		return;
2228 	}
2229 
2230 	/*
2231 	 * Take care of syncing any MOD/REF bits and removing the hment.
2232 	 */
2233 	if (pp != NULL) {
2234 		if (!(flags & HAT_UNLOAD_NOSYNC))
2235 			hati_sync_pte_to_page(pp, old_pte, l);
2236 		hm = hment_remove(pp, ht, entry);
2237 		x86_hm_exit(pp);
2238 		if (hm != NULL)
2239 			hment_free(hm);
2240 	}
2241 
2242 	/*
2243 	 * Handle book keeping in the htable and hat
2244 	 */
2245 	ASSERT(ht->ht_valid_cnt > 0);
2246 	HTABLE_DEC(ht->ht_valid_cnt);
2247 	PGCNT_DEC(hat, l);
2248 }
2249 
2250 /*
2251  * very cheap unload implementation to special case some kernel addresses
2252  */
2253 static void
2254 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2255 {
2256 	uintptr_t	va = (uintptr_t)addr;
2257 	uintptr_t	eva = va + len;
2258 	pgcnt_t		pg_index;
2259 	htable_t	*ht;
2260 	uint_t		entry;
2261 	x86pte_t	*pte_ptr;
2262 	x86pte_t	old_pte;
2263 
2264 	for (; va < eva; va += MMU_PAGESIZE) {
2265 		/*
2266 		 * Get the PTE
2267 		 */
2268 		pg_index = mmu_btop(va - mmu.kmap_addr);
2269 		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2270 		old_pte = GET_PTE(pte_ptr);
2271 
2272 		/*
2273 		 * get the htable / entry
2274 		 */
2275 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2276 		    >> LEVEL_SHIFT(1)];
2277 		entry = htable_va2entry(va, ht);
2278 
2279 		/*
2280 		 * use mostly common code to unmap it.
2281 		 */
2282 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
2283 	}
2284 }
2285 
2286 
2287 /*
2288  * unload a range of virtual address space (no callback)
2289  */
2290 void
2291 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2292 {
2293 	uintptr_t va = (uintptr_t)addr;
2294 
2295 	XPV_DISALLOW_MIGRATE();
2296 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2297 
2298 	/*
2299 	 * special case for performance.
2300 	 */
2301 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2302 		ASSERT(hat == kas.a_hat);
2303 		hat_kmap_unload(addr, len, flags);
2304 	} else {
2305 		hat_unload_callback(hat, addr, len, flags, NULL);
2306 	}
2307 	XPV_ALLOW_MIGRATE();
2308 }
2309 
2310 /*
2311  * Do the callbacks for ranges being unloaded.
2312  */
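/*
 * Each range_info_t describes rng_cnt contiguous mappings of size
 * LEVEL_SIZE(rng_level) starting at rng_va; handle_ranges() turns one into a
 * callback covering [rng_va, rng_va + (rng_cnt << LEVEL_SHIFT(rng_level))).
 */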
2313 typedef struct range_info {
2314 	uintptr_t	rng_va;
2315 	ulong_t		rng_cnt;
2316 	level_t		rng_level;
2317 } range_info_t;
2318 
2319 static void
2320 handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
2321 {
2322 	/*
2323 	 * do callbacks to upper level VM system
2324 	 */
2325 	while (cb != NULL && cnt > 0) {
2326 		--cnt;
2327 		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2328 		cb->hcb_end_addr = cb->hcb_start_addr;
2329 		cb->hcb_end_addr +=
2330 		    range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2331 		cb->hcb_function(cb);
2332 	}
2333 }
2334 
2335 /*
2336  * Unload a given range of addresses (has optional callback)
2337  *
2338  * Flags:
2339  * define	HAT_UNLOAD		0x00
2340  * define	HAT_UNLOAD_NOSYNC	0x02
2341  * define	HAT_UNLOAD_UNLOCK	0x04
2342  * define	HAT_UNLOAD_OTHER	0x08 - not used
2343  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
2344  */
2345 #define	MAX_UNLOAD_CNT (8)
2346 void
2347 hat_unload_callback(
2348 	hat_t		*hat,
2349 	caddr_t		addr,
2350 	size_t		len,
2351 	uint_t		flags,
2352 	hat_callback_t	*cb)
2353 {
2354 	uintptr_t	vaddr = (uintptr_t)addr;
2355 	uintptr_t	eaddr = vaddr + len;
2356 	htable_t	*ht = NULL;
2357 	uint_t		entry;
2358 	uintptr_t	contig_va = (uintptr_t)-1L;
2359 	range_info_t	r[MAX_UNLOAD_CNT];
2360 	uint_t		r_cnt = 0;
2361 	x86pte_t	old_pte;
2362 
2363 	XPV_DISALLOW_MIGRATE();
2364 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2365 	ASSERT(IS_PAGEALIGNED(vaddr));
2366 	ASSERT(IS_PAGEALIGNED(eaddr));
2367 
2368 	/*
2369 	 * Special case a single page being unloaded for speed. This happens
2370 	 * quite frequently, COW faults after a fork() for example.
2371 	 */
2372 	if (cb == NULL && len == MMU_PAGESIZE) {
2373 		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2374 		if (ht != NULL) {
2375 			if (PTE_ISVALID(old_pte))
2376 				hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2377 			htable_release(ht);
2378 		}
2379 		XPV_ALLOW_MIGRATE();
2380 		return;
2381 	}
2382 
2383 	while (vaddr < eaddr) {
2384 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2385 		if (ht == NULL)
2386 			break;
2387 
2388 		ASSERT(!IN_VA_HOLE(vaddr));
2389 
2390 		if (vaddr < (uintptr_t)addr)
2391 			panic("hat_unload_callback(): unmap inside large page");
2392 
2393 		/*
2394 		 * We'll do the call backs for contiguous ranges
2395 		 */
2396 		if (vaddr != contig_va ||
2397 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2398 			if (r_cnt == MAX_UNLOAD_CNT) {
2399 				handle_ranges(cb, r_cnt, r);
2400 				r_cnt = 0;
2401 			}
2402 			r[r_cnt].rng_va = vaddr;
2403 			r[r_cnt].rng_cnt = 0;
2404 			r[r_cnt].rng_level = ht->ht_level;
2405 			++r_cnt;
2406 		}
2407 
2408 		/*
2409 		 * Unload one mapping from the page tables.
2410 		 */
2411 		entry = htable_va2entry(vaddr, ht);
2412 		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2413 		ASSERT(ht->ht_level <= mmu.max_page_level);
2414 		vaddr += LEVEL_SIZE(ht->ht_level);
2415 		contig_va = vaddr;
2416 		++r[r_cnt - 1].rng_cnt;
2417 	}
2418 	if (ht)
2419 		htable_release(ht);
2420 
2421 	/*
2422 	 * handle last range for callbacks
2423 	 */
2424 	if (r_cnt > 0)
2425 		handle_ranges(cb, r_cnt, r);
2426 	XPV_ALLOW_MIGRATE();
2427 }
2428 
2429 /*
2430  * synchronize mapping with software data structures
2431  *
2432  * This interface is currently only used by the working set monitor
2433  * driver.
2434  */
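/*
 * For each valid mapping in the range, either the ref/mod bits are copied
 * into the page_t, or, with HAT_SYNC_ZERORM, they are atomically cleared
 * from the PTE instead (retrying if the hardware sets them again first).
 */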
2435 /*ARGSUSED*/
2436 void
2437 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2438 {
2439 	uintptr_t	vaddr = (uintptr_t)addr;
2440 	uintptr_t	eaddr = vaddr + len;
2441 	htable_t	*ht = NULL;
2442 	uint_t		entry;
2443 	x86pte_t	pte;
2444 	x86pte_t	save_pte;
2445 	x86pte_t	new;
2446 	page_t		*pp;
2447 
2448 	ASSERT(!IN_VA_HOLE(vaddr));
2449 	ASSERT(IS_PAGEALIGNED(vaddr));
2450 	ASSERT(IS_PAGEALIGNED(eaddr));
2451 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2452 
2453 	XPV_DISALLOW_MIGRATE();
2454 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2455 try_again:
2456 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
2457 		if (ht == NULL)
2458 			break;
2459 		entry = htable_va2entry(vaddr, ht);
2460 
2461 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2462 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
2463 			continue;
2464 
2465 		/*
2466 		 * We need to acquire the mapping list lock to protect
2467 		 * against hat_pageunload(), hat_unload(), etc.
2468 		 */
2469 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2470 		if (pp == NULL)
2471 			break;
2472 		x86_hm_enter(pp);
2473 		save_pte = pte;
2474 		pte = x86pte_get(ht, entry);
2475 		if (pte != save_pte) {
2476 			x86_hm_exit(pp);
2477 			goto try_again;
2478 		}
2479 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2480 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2481 			x86_hm_exit(pp);
2482 			continue;
2483 		}
2484 
2485 		/*
2486 		 * Need to clear ref or mod bits. We may compete with
2487 		 * hardware updating the R/M bits and have to try again.
2488 		 */
2489 		if (flags == HAT_SYNC_ZERORM) {
2490 			new = pte;
2491 			PTE_CLR(new, PT_REF | PT_MOD);
2492 			pte = hati_update_pte(ht, entry, pte, new);
2493 			if (pte != 0) {
2494 				x86_hm_exit(pp);
2495 				goto try_again;
2496 			}
2497 		} else {
2498 			/*
2499 			 * sync the PTE to the page_t
2500 			 */
2501 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2502 		}
2503 		x86_hm_exit(pp);
2504 	}
2505 	if (ht)
2506 		htable_release(ht);
2507 	XPV_ALLOW_MIGRATE();
2508 }
2509 
2510 /*
2511  * void	hat_map(hat, addr, len, flags)
2512  */
2513 /*ARGSUSED*/
2514 void
2515 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2516 {
2517 	/* does nothing */
2518 }
2519 
2520 /*
2521  * uint_t hat_getattr(hat, addr, *attr)
2522  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
2523  *	mapping and *attr is valid, nonzero if there was no mapping and
2524  *	*attr is not valid.
2525  */
2526 uint_t
2527 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2528 {
2529 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2530 	htable_t	*ht = NULL;
2531 	x86pte_t	pte;
2532 
2533 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2534 
2535 	if (IN_VA_HOLE(vaddr))
2536 		return ((uint_t)-1);
2537 
2538 	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2539 	if (ht == NULL)
2540 		return ((uint_t)-1);
2541 
2542 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2543 		htable_release(ht);
2544 		return ((uint_t)-1);
2545 	}
2546 
2547 	*attr = PROT_READ;
2548 	if (PTE_GET(pte, PT_WRITABLE))
2549 		*attr |= PROT_WRITE;
2550 	if (PTE_GET(pte, PT_USER))
2551 		*attr |= PROT_USER;
2552 	if (!PTE_GET(pte, mmu.pt_nx))
2553 		*attr |= PROT_EXEC;
2554 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2555 		*attr |= HAT_NOSYNC;
2556 	htable_release(ht);
2557 	return (0);
2558 }
2559 
2560 /*
2561  * hat_updateattr() applies the given attribute change to an existing mapping
2562  */
2563 #define	HAT_LOAD_ATTR		1
2564 #define	HAT_SET_ATTR		2
2565 #define	HAT_CLR_ATTR		3
2566 
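/*
 * HAT_SET_ATTR turns the given attributes on, HAT_CLR_ATTR turns them off,
 * and HAT_LOAD_ATTR makes the mapping's attributes match exactly those given.
 */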
2567 static void
2568 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2569 {
2570 	uintptr_t	vaddr = (uintptr_t)addr;
2571 	uintptr_t	eaddr = (uintptr_t)addr + len;
2572 	htable_t	*ht = NULL;
2573 	uint_t		entry;
2574 	x86pte_t	oldpte, newpte;
2575 	page_t		*pp;
2576 
2577 	XPV_DISALLOW_MIGRATE();
2578 	ASSERT(IS_PAGEALIGNED(vaddr));
2579 	ASSERT(IS_PAGEALIGNED(eaddr));
2580 	ASSERT(hat == kas.a_hat ||
2581 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2582 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2583 try_again:
2584 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2585 		if (ht == NULL)
2586 			break;
2587 		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2588 			continue;
2589 
2590 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2591 		if (pp == NULL)
2592 			continue;
2593 		x86_hm_enter(pp);
2594 
2595 		newpte = oldpte;
2596 		/*
2597 		 * We found a page table entry in the desired range,
2598 		 * figure out the new attributes.
2599 		 */
2600 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2601 			if ((attr & PROT_WRITE) &&
2602 			    !PTE_GET(oldpte, PT_WRITABLE))
2603 				newpte |= PT_WRITABLE;
2604 
2605 			if ((attr & HAT_NOSYNC) &&
2606 			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2607 				newpte |= PT_NOSYNC;
2608 
2609 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2610 				newpte &= ~mmu.pt_nx;
2611 		}
2612 
2613 		if (what == HAT_LOAD_ATTR) {
2614 			if (!(attr & PROT_WRITE) &&
2615 			    PTE_GET(oldpte, PT_WRITABLE))
2616 				newpte &= ~PT_WRITABLE;
2617 
2618 			if (!(attr & HAT_NOSYNC) &&
2619 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2620 				newpte &= ~PT_SOFTWARE;
2621 
2622 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2623 				newpte |= mmu.pt_nx;
2624 		}
2625 
2626 		if (what == HAT_CLR_ATTR) {
2627 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2628 				newpte &= ~PT_WRITABLE;
2629 
2630 			if ((attr & HAT_NOSYNC) &&
2631 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2632 				newpte &= ~PT_SOFTWARE;
2633 
2634 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2635 				newpte |= mmu.pt_nx;
2636 		}
2637 
2638 		/*
2639 		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2640 		 * x86pte_set() depends on this.
2641 		 */
2642 		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2643 			newpte |= PT_REF | PT_MOD;
2644 
2645 		/*
2646 		 * what about PROT_READ or others? this code only handles:
2647 		 * EXEC, WRITE, NOSYNC
2648 		 */
2649 
2650 		/*
2651 		 * If new PTE really changed, update the table.
2652 		 */
2653 		if (newpte != oldpte) {
2654 			entry = htable_va2entry(vaddr, ht);
2655 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2656 			if (oldpte != 0) {
2657 				x86_hm_exit(pp);
2658 				goto try_again;
2659 			}
2660 		}
2661 		x86_hm_exit(pp);
2662 	}
2663 	if (ht)
2664 		htable_release(ht);
2665 	XPV_ALLOW_MIGRATE();
2666 }
2667 
2668 /*
2669  * Various wrappers for hat_updateattr()
2670  */
2671 void
2672 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2673 {
2674 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2675 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2676 }
2677 
2678 void
2679 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2680 {
2681 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2682 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2683 }
2684 
2685 void
2686 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2687 {
2688 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2689 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2690 }
2691 
2692 void
2693 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2694 {
2695 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2696 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2697 }
2698 
2699 /*
2700  * ssize_t hat_getpagesize(hat, addr)
2701  *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
2702  *	no mapping. This is an advisory call.
2703  */
2704 ssize_t
2705 hat_getpagesize(hat_t *hat, caddr_t addr)
2706 {
2707 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2708 	htable_t	*ht;
2709 	size_t		pagesize;
2710 
2711 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2712 	if (IN_VA_HOLE(vaddr))
2713 		return (-1);
2714 	ht = htable_getpage(hat, vaddr, NULL);
2715 	if (ht == NULL)
2716 		return (-1);
2717 	pagesize = LEVEL_SIZE(ht->ht_level);
2718 	htable_release(ht);
2719 	return (pagesize);
2720 }
2721 
2722 
2723 
2724 /*
2725  * pfn_t hat_getpfnum(hat, addr)
2726  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2727  */
2728 pfn_t
2729 hat_getpfnum(hat_t *hat, caddr_t addr)
2730 {
2731 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2732 	htable_t	*ht;
2733 	uint_t		entry;
2734 	pfn_t		pfn = PFN_INVALID;
2735 
2736 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2737 	if (khat_running == 0)
2738 		return (PFN_INVALID);
2739 
2740 	if (IN_VA_HOLE(vaddr))
2741 		return (PFN_INVALID);
2742 
2743 	XPV_DISALLOW_MIGRATE();
2744 	/*
2745 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2746 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2747 	 * this up.
2748 	 */
2749 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2750 		x86pte_t pte;
2751 		pgcnt_t pg_index;
2752 
2753 		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2754 		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2755 		if (PTE_ISVALID(pte))
2756 			/*LINTED [use of constant 0 causes a lint warning] */
2757 			pfn = PTE2PFN(pte, 0);
2758 		XPV_ALLOW_MIGRATE();
2759 		return (pfn);
2760 	}
2761 
2762 	ht = htable_getpage(hat, vaddr, &entry);
2763 	if (ht == NULL) {
2764 		XPV_ALLOW_MIGRATE();
2765 		return (PFN_INVALID);
2766 	}
2767 	ASSERT(vaddr >= ht->ht_vaddr);
2768 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2769 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2770 	if (ht->ht_level > 0)
2771 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2772 	htable_release(ht);
2773 	XPV_ALLOW_MIGRATE();
2774 	return (pfn);
2775 }
2776 
2777 /*
2778  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
2779  * Use hat_getpfnum(kas.a_hat, ...) instead.
2780  *
2781  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
2782  * but can't right now because some software has grown to use
2783  * this interface incorrectly. So for now when the interface is misused,
2784  * return a warning to the user that in the future it won't work in the
2785  * way they're abusing it, and carry on.
2786  *
2787  * Note that hat_getkpfnum() is never supported on amd64.
2788  */
2789 #if !defined(__amd64)
2790 pfn_t
2791 hat_getkpfnum(caddr_t addr)
2792 {
2793 	pfn_t	pfn;
2794 	int badcaller = 0;
2795 
2796 	if (khat_running == 0)
2797 		panic("hat_getkpfnum(): called too early\n");
2798 	if ((uintptr_t)addr < kernelbase)
2799 		return (PFN_INVALID);
2800 
2801 	XPV_DISALLOW_MIGRATE();
2802 	if (segkpm && IS_KPM_ADDR(addr)) {
2803 		badcaller = 1;
2804 		pfn = hat_kpm_va2pfn(addr);
2805 	} else {
2806 		pfn = hat_getpfnum(kas.a_hat, addr);
2807 		badcaller = pf_is_memory(pfn);
2808 	}
2809 
2810 	if (badcaller)
2811 		hat_getkpfnum_badcall(caller());
2812 	XPV_ALLOW_MIGRATE();
2813 	return (pfn);
2814 }
2815 #endif /* __amd64 */
2816 
2817 /*
2818  * int hat_probe(hat, addr)
2819  *	return 0 if no valid mapping is present.  Faster version
2820  *	of hat_getattr in certain architectures.
2821  */
2822 int
2823 hat_probe(hat_t *hat, caddr_t addr)
2824 {
2825 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2826 	uint_t		entry;
2827 	htable_t	*ht;
2828 	pgcnt_t		pg_off;
2829 
2830 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2831 	ASSERT(hat == kas.a_hat ||
2832 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2833 	if (IN_VA_HOLE(vaddr))
2834 		return (0);
2835 
2836 	/*
2837 	 * Most common use of hat_probe is from segmap. We special case it
2838 	 * for performance.
2839 	 */
2840 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2841 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2842 		if (mmu.pae_hat)
2843 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2844 		else
2845 			return (PTE_ISVALID(
2846 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2847 	}
2848 
2849 	ht = htable_getpage(hat, vaddr, &entry);
2850 	htable_release(ht);
2851 	return (ht != NULL);
2852 }
2853 
2854 /*
2855  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2856  */
2857 static int
2858 is_it_dism(hat_t *hat, caddr_t va)
2859 {
2860 	struct seg *seg;
2861 	struct shm_data *shmd;
2862 	struct spt_data *sptd;
2863 
2864 	seg = as_findseg(hat->hat_as, va, 0);
2865 	ASSERT(seg != NULL);
2866 	ASSERT(seg->s_base <= va);
2867 	shmd = (struct shm_data *)seg->s_data;
2868 	ASSERT(shmd != NULL);
2869 	sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2870 	ASSERT(sptd != NULL);
2871 	if (sptd->spt_flags & SHM_PAGEABLE)
2872 		return (1);
2873 	return (0);
2874 }
2875 
2876 /*
2877  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2878  * except that we use the ism_hat's existing mappings to determine the pages
2879  * and protections to use for this hat. If we find a full properly aligned
2880  * and sized pagetable, we will attempt to share the pagetable itself.
2881  */
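/*
 * For instance, on a pagetable layout where a level 0 table maps a 2MB
 * region of 4K pages, sharing that table requires both addr and src_addr to
 * be 2MB aligned and at least 2MB of the segment to remain; otherwise we
 * fall back to copying the individual PTE values (the not_shared: path).
 */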
2882 /*ARGSUSED*/
2883 int
2884 hat_share(
2885 	hat_t		*hat,
2886 	caddr_t		addr,
2887 	hat_t		*ism_hat,
2888 	caddr_t		src_addr,
2889 	size_t		len,	/* almost useless value, see below.. */
2890 	uint_t		ismszc)
2891 {
2892 	uintptr_t	vaddr_start = (uintptr_t)addr;
2893 	uintptr_t	vaddr;
2894 	uintptr_t	eaddr = vaddr_start + len;
2895 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
2896 	uintptr_t	ism_addr = ism_addr_start;
2897 	uintptr_t	e_ism_addr = ism_addr + len;
2898 	htable_t	*ism_ht = NULL;
2899 	htable_t	*ht;
2900 	x86pte_t	pte;
2901 	page_t		*pp;
2902 	pfn_t		pfn;
2903 	level_t		l;
2904 	pgcnt_t		pgcnt;
2905 	uint_t		prot;
2906 	int		is_dism;
2907 	int		flags;
2908 
2909 	/*
2910 	 * We might be asked to share an empty DISM hat by as_dup()
2911 	 */
2912 	ASSERT(hat != kas.a_hat);
2913 	ASSERT(eaddr <= _userlimit);
2914 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
2915 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
2916 		return (0);
2917 	}
2918 	XPV_DISALLOW_MIGRATE();
2919 
2920 	/*
2921 	 * The SPT segment driver often passes us a size larger than there are
2922 	 * valid mappings. That's because it rounds the segment size up to a
2923 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
2924 	 */
2925 	ASSERT(IS_PAGEALIGNED(vaddr_start));
2926 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
2927 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
2928 	is_dism = is_it_dism(hat, addr);
2929 	while (ism_addr < e_ism_addr) {
2930 		/*
2931 		 * use htable_walk to get the next valid ISM mapping
2932 		 */
2933 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2934 		if (ism_ht == NULL)
2935 			break;
2936 
2937 		/*
2938 		 * First check to see if we already share the page table.
2939 		 */
2940 		l = ism_ht->ht_level;
2941 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
2942 		ht = htable_lookup(hat, vaddr, l);
2943 		if (ht != NULL) {
2944 			if (ht->ht_flags & HTABLE_SHARED_PFN)
2945 				goto shared;
2946 			htable_release(ht);
2947 			goto not_shared;
2948 		}
2949 
2950 		/*
2951 		 * Can't ever share top table.
2952 		 */
2953 		if (l == mmu.max_level)
2954 			goto not_shared;
2955 
2956 		/*
2957 		 * Avoid level mismatches later due to DISM faults.
2958 		 */
2959 		if (is_dism && l > 0)
2960 			goto not_shared;
2961 
2962 		/*
2963 		 * addresses and lengths must align
2964 		 * table must be fully populated
2965 		 * no lower level page tables
2966 		 */
2967 		if (ism_addr != ism_ht->ht_vaddr ||
2968 		    (vaddr & LEVEL_OFFSET(l + 1)) != 0)
2969 			goto not_shared;
2970 
2971 		/*
2972 		 * The range of address space must cover a full table.
2973 		 */
2974 		if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
2975 			goto not_shared;
2976 
2977 		/*
2978 		 * All entries in the ISM page table must be leaf PTEs.
2979 		 */
2980 		if (l > 0) {
2981 			int e;
2982 
2983 			/*
2984 			 * We know the 0th is from htable_walk() above.
2985 			 */
2986 			for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
2987 				x86pte_t pte;
2988 				pte = x86pte_get(ism_ht, e);
2989 				if (!PTE_ISPAGE(pte, l))
2990 					goto not_shared;
2991 			}
2992 		}
2993 
2994 		/*
2995 		 * share the page table
2996 		 */
2997 		ht = htable_create(hat, vaddr, l, ism_ht);
2998 shared:
2999 		ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
3000 		ASSERT(ht->ht_shares == ism_ht);
3001 		hat->hat_ism_pgcnt +=
3002 		    (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
3003 		    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3004 		ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
3005 		htable_release(ht);
3006 		ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3007 		htable_release(ism_ht);
3008 		ism_ht = NULL;
3009 		continue;
3010 
3011 not_shared:
3012 		/*
3013 		 * Unable to share the page table. Instead we will
3014 		 * create new mappings from the values in the ISM mappings.
3015 		 * Figure out what level size mappings to use;
3016 		 */
3017 		for (l = ism_ht->ht_level; l > 0; --l) {
3018 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3019 			    (vaddr & LEVEL_OFFSET(l)) == 0)
3020 				break;
3021 		}
3022 
3023 		/*
3024 		 * The ISM mapping might be larger than the share area,
3025 		 * be careful to truncate it if needed.
3026 		 */
3027 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3028 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3029 		} else {
3030 			pgcnt = mmu_btop(eaddr - vaddr);
3031 			l = 0;
3032 		}
3033 
3034 		pfn = PTE2PFN(pte, ism_ht->ht_level);
3035 		ASSERT(pfn != PFN_INVALID);
3036 		while (pgcnt > 0) {
3037 			/*
3038 			 * Make a new pte for the PFN for this level.
3039 			 * Copy protections for the pte from the ISM pte.
3040 			 */
3041 			pp = page_numtopp_nolock(pfn);
3042 			ASSERT(pp != NULL);
3043 
3044 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3045 			if (PTE_GET(pte, PT_WRITABLE))
3046 				prot |= PROT_WRITE;
3047 			if (!PTE_GET(pte, PT_NX))
3048 				prot |= PROT_EXEC;
3049 
3050 			flags = HAT_LOAD;
3051 			if (!is_dism)
3052 				flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3053 			while (hati_load_common(hat, vaddr, pp, prot, flags,
3054 			    l, pfn) != 0) {
3055 				if (l == 0)
3056 					panic("hati_load_common() failure");
3057 				--l;
3058 			}
3059 
3060 			vaddr += LEVEL_SIZE(l);
3061 			ism_addr += LEVEL_SIZE(l);
3062 			pfn += mmu_btop(LEVEL_SIZE(l));
3063 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
3064 		}
3065 	}
3066 	if (ism_ht != NULL)
3067 		htable_release(ism_ht);
3068 	XPV_ALLOW_MIGRATE();
3069 	return (0);
3070 }
3071 
3072 
3073 /*
3074  * hat_unshare() is similar to hat_unload_callback(), but
3075  * we have to look for empty shared pagetables. Note that
3076  * hat_unshare() is always invoked against an entire segment.
3077  */
3078 /*ARGSUSED*/
3079 void
3080 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3081 {
3082 	uint64_t	vaddr = (uintptr_t)addr;
3083 	uintptr_t	eaddr = vaddr + len;
3084 	htable_t	*ht = NULL;
3085 	uint_t		need_demaps = 0;
3086 	int		flags = HAT_UNLOAD_UNMAP;
3087 	level_t		l;
3088 
3089 	ASSERT(hat != kas.a_hat);
3090 	ASSERT(eaddr <= _userlimit);
3091 	ASSERT(IS_PAGEALIGNED(vaddr));
3092 	ASSERT(IS_PAGEALIGNED(eaddr));
3093 	XPV_DISALLOW_MIGRATE();
3094 
3095 	/*
3096 	 * First go through and remove any shared pagetables.
3097 	 *
3098 	 * Note that it's ok to delay the TLB shootdown till the entire range is
3099 	 * finished, because if hat_pageunload() were to unload a shared
3100 	 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
3101 	 */
3102 	l = mmu.max_page_level;
3103 	if (l == mmu.max_level)
3104 		--l;
3105 	for (; l >= 0; --l) {
3106 		for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3107 		    vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3108 			ASSERT(!IN_VA_HOLE(vaddr));
3109 			/*
3110 			 * find a pagetable that maps the current address
3111 			 */
3112 			ht = htable_lookup(hat, vaddr, l);
3113 			if (ht == NULL)
3114 				continue;
3115 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
3116 				/*
3117 				 * clear page count, set valid_cnt to 0,
3118 				 * let htable_release() finish the job
3119 				 */
3120 				hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3121 				    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3122 				ht->ht_valid_cnt = 0;
3123 				need_demaps = 1;
3124 			}
3125 			htable_release(ht);
3126 		}
3127 	}
3128 
3129 	/*
3130 	 * flush the TLBs - since we're probably dealing with MANY mappings
3131 	 * we do just one CR3 reload.
3132 	 */
3133 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3134 		hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3135 
3136 	/*
3137 	 * Now go back and clean up any unaligned mappings that
3138 	 * couldn't share pagetables.
3139 	 */
3140 	if (!is_it_dism(hat, addr))
3141 		flags |= HAT_UNLOAD_UNLOCK;
3142 	hat_unload(hat, addr, len, flags);
3143 	XPV_ALLOW_MIGRATE();
3144 }
3145 
3146 
3147 /*
3148  * hat_reserve() does nothing
3149  */
3150 /*ARGSUSED*/
3151 void
3152 hat_reserve(struct as *as, caddr_t addr, size_t len)
3153 {
3154 }
3155 
3156 
3157 /*
3158  * Called when all mappings to a page should have write permission removed.
3159  * Mostly stolen from hat_pagesync()
3160  */
3161 static void
3162 hati_page_clrwrt(struct page *pp)
3163 {
3164 	hment_t		*hm = NULL;
3165 	htable_t	*ht;
3166 	uint_t		entry;
3167 	x86pte_t	old;
3168 	x86pte_t	new;
3169 	uint_t		pszc = 0;
3170 
3171 	XPV_DISALLOW_MIGRATE();
3172 next_size:
3173 	/*
3174 	 * walk thru the mapping list clearing write permission
3175 	 */
3176 	x86_hm_enter(pp);
3177 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3178 		if (ht->ht_level < pszc)
3179 			continue;
3180 		old = x86pte_get(ht, entry);
3181 
3182 		for (;;) {
3183 			/*
3184 			 * Is this mapping of interest?
3185 			 */
3186 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3187 			    PTE_GET(old, PT_WRITABLE) == 0)
3188 				break;
3189 
3190 			/*
3191 			 * Clear ref/mod writable bits. This requires cross
3192 			 * calls to ensure any executing TLBs see cleared bits.
3193 			 */
3194 			new = old;
3195 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3196 			old = hati_update_pte(ht, entry, old, new);
3197 			if (old != 0)
3198 				continue;
3199 
3200 			break;
3201 		}
3202 	}
3203 	x86_hm_exit(pp);
3204 	while (pszc < pp->p_szc) {
3205 		page_t *tpp;
3206 		pszc++;
3207 		tpp = PP_GROUPLEADER(pp, pszc);
3208 		if (pp != tpp) {
3209 			pp = tpp;
3210 			goto next_size;
3211 		}
3212 	}
3213 	XPV_ALLOW_MIGRATE();
3214 }
3215 
3216 /*
3217  * void hat_page_setattr(pp, flag)
3218  * void hat_page_clrattr(pp, flag)
3219  *	used to set/clr ref/mod bits.
3220  */
3221 void
3222 hat_page_setattr(struct page *pp, uint_t flag)
3223 {
3224 	vnode_t		*vp = pp->p_vnode;
3225 	kmutex_t	*vphm = NULL;
3226 	page_t		**listp;
3227 	int		noshuffle;
3228 
3229 	noshuffle = flag & P_NSH;
3230 	flag &= ~P_NSH;
3231 
3232 	if (PP_GETRM(pp, flag) == flag)
3233 		return;
3234 
3235 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3236 	    !noshuffle) {
3237 		vphm = page_vnode_mutex(vp);
3238 		mutex_enter(vphm);
3239 	}
3240 
3241 	PP_SETRM(pp, flag);
3242 
3243 	if (vphm != NULL) {
3244 
3245 		/*
3246 		 * Some File Systems examine v_pages for NULL w/o
3247 		 * grabbing the vphm mutex. Must not let it become NULL when
3248 		 * pp is the only page on the list.
3249 		 */
3250 		if (pp->p_vpnext != pp) {
3251 			page_vpsub(&vp->v_pages, pp);
3252 			if (vp->v_pages != NULL)
3253 				listp = &vp->v_pages->p_vpprev->p_vpnext;
3254 			else
3255 				listp = &vp->v_pages;
3256 			page_vpadd(listp, pp);
3257 		}
3258 		mutex_exit(vphm);
3259 	}
3260 }
3261 
3262 void
3263 hat_page_clrattr(struct page *pp, uint_t flag)
3264 {
3265 	vnode_t		*vp = pp->p_vnode;
3266 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3267 
3268 	/*
3269 	 * Caller is expected to hold page's io lock for VMODSORT to work
3270 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3271 	 * bit is cleared.
3272 	 * We don't have assert to avoid tripping some existing third party
3273 	 * code. The dirty page is moved back to top of the v_page list
3274 	 * after IO is done in pvn_write_done().
3275 	 */
3276 	PP_CLRRM(pp, flag);
3277 
3278 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3279 
3280 		/*
3281 		 * VMODSORT works by removing write permissions and getting
3282 		 * a fault when a page is made dirty. At this point
3283 		 * we need to remove write permission from all mappings
3284 		 * to this page.
3285 		 */
3286 		hati_page_clrwrt(pp);
3287 	}
3288 }
3289 
3290 /*
3291  *	If flag is specified, returns 0 if attribute is disabled
3292  *	and nonzero if enabled.  If flag specifies multiple attributes,
3293  *	then returns 0 if ALL attributes are disabled.  This is an advisory
3294  *	call.
3295  */
3296 uint_t
3297 hat_page_getattr(struct page *pp, uint_t flag)
3298 {
3299 	return (PP_GETRM(pp, flag));
3300 }
3301 
3302 
3303 /*
3304  * common code used by hat_pageunload() and hment_steal()
3305  */
3306 hment_t *
3307 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3308 {
3309 	x86pte_t old_pte;
3310 	pfn_t pfn = pp->p_pagenum;
3311 	hment_t *hm;
3312 
3313 	/*
3314 	 * We need to acquire a hold on the htable in order to
3315 	 * do the invalidate. We know the htable must exist, since
3316 	 * unmaps don't release the htable until after removing any
3317 	 * hment. Having x86_hm_enter() keeps that from proceeding.
3318 	 */
3319 	htable_acquire(ht);
3320 
3321 	/*
3322 	 * Invalidate the PTE and remove the hment.
3323 	 */
3324 	old_pte = x86pte_inval(ht, entry, 0, NULL);
3325 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3326 		panic("x86pte_inval() failure found PTE = " FMT_PTE
3327 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3328 		    old_pte, pfn, (uintptr_t)ht, entry);
3329 	}
3330 
3331 	/*
3332 	 * Clean up all the htable information for this mapping
3333 	 */
3334 	ASSERT(ht->ht_valid_cnt > 0);
3335 	HTABLE_DEC(ht->ht_valid_cnt);
3336 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
3337 
3338 	/*
3339 	 * sync ref/mod bits to the page_t
3340 	 */
3341 	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3342 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3343 
3344 	/*
3345 	 * Remove the mapping list entry for this page.
3346 	 */
3347 	hm = hment_remove(pp, ht, entry);
3348 
3349 	/*
3350 	 * drop the mapping list lock so that we might free the
3351 	 * hment and htable.
3352 	 */
3353 	x86_hm_exit(pp);
3354 	htable_release(ht);
3355 	return (hm);
3356 }
3357 
3358 extern int	vpm_enable;
3359 /*
3360  * Unload all translations to a page. If the page is a subpage of a large
3361  * page, the large page mappings are also removed.
3362  *
3363  * The forceflags are unused.
3364  */
3365 
3366 /*ARGSUSED*/
3367 static int
3368 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3369 {
3370 	page_t		*cur_pp = pp;
3371 	hment_t		*hm;
3372 	hment_t		*prev;
3373 	htable_t	*ht;
3374 	uint_t		entry;
3375 	level_t		level;
3376 
3377 	XPV_DISALLOW_MIGRATE();
3378 #if defined(__amd64)
3379 	/*
3380 	 * clear the vpm ref.
3381 	 */
3382 	if (vpm_enable) {
3383 		pp->p_vpmref = 0;
3384 	}
3385 #endif
3386 	/*
3387 	 * The loop with next_size handles pages with multiple pagesize mappings
3388 	 */
3389 next_size:
3390 	for (;;) {
3391 
3392 		/*
3393 		 * Get a mapping list entry
3394 		 */
3395 		x86_hm_enter(cur_pp);
3396 		for (prev = NULL; ; prev = hm) {
3397 			hm = hment_walk(cur_pp, &ht, &entry, prev);
3398 			if (hm == NULL) {
3399 				x86_hm_exit(cur_pp);
3400 
3401 				/*
3402 				 * If not part of a larger page, we're done.
3403 				 */
3404 				if (cur_pp->p_szc <= pg_szcd) {
3405 					XPV_ALLOW_MIGRATE();
3406 					return (0);
3407 				}
3408 
3409 				/*
3410 				 * Else check the next larger page size.
3411 				 * hat_page_demote() may decrease p_szc
3412 				 * but that's ok; we'll just take an extra
3413 				 * trip, discover there are no larger mappings,
3414 				 * and return.
3415 				 */
3416 				++pg_szcd;
3417 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3418 				goto next_size;
3419 			}
3420 
3421 			/*
3422 			 * If this mapping size matches, remove it.
3423 			 */
3424 			level = ht->ht_level;
3425 			if (level == pg_szcd)
3426 				break;
3427 		}
3428 
3429 		/*
3430 		 * Remove the mapping list entry for this page.
3431 		 * Note this does the x86_hm_exit() for us.
3432 		 */
3433 		hm = hati_page_unmap(cur_pp, ht, entry);
3434 		if (hm != NULL)
3435 			hment_free(hm);
3436 	}
3437 }
3438 
3439 int
3440 hat_pageunload(struct page *pp, uint_t forceflag)
3441 {
3442 	ASSERT(PAGE_EXCL(pp));
3443 	return (hati_pageunload(pp, 0, forceflag));
3444 }
3445 
3446 /*
3447  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3448  * page level that included pp.
3449  *
3450  * pp must be locked EXCL. Even though no other constituent pages are locked
3451  * it's legal to unload large mappings to pp because all constituent pages of
3452  * large locked mappings have to be locked SHARED.  Therefore, if we have an
3453  * EXCL lock on one of the constituent pages, none of the large mappings to
3454  * pp are locked.
3455  *
3456  * Change (always decrease) p_szc field starting from the last constituent
3457  * page and ending with root constituent page so that root's pszc always shows
3458  * the area where hat_page_demote() may be active.
3459  *
3460  * This mechanism is only used for file system pages where it's not always
3461  * possible to get EXCL locks on all constituent pages to demote the size code
3462  * (as is done for anonymous or kernel large pages).
3463  */
3464 void
3465 hat_page_demote(page_t *pp)
3466 {
3467 	uint_t		pszc;
3468 	uint_t		rszc;
3469 	uint_t		szc;
3470 	page_t		*rootpp;
3471 	page_t		*firstpp;
3472 	page_t		*lastpp;
3473 	pgcnt_t		pgcnt;
3474 
3475 	ASSERT(PAGE_EXCL(pp));
3476 	ASSERT(!PP_ISFREE(pp));
3477 	ASSERT(page_szc_lock_assert(pp));
3478 
3479 	if (pp->p_szc == 0)
3480 		return;
3481 
3482 	rootpp = PP_GROUPLEADER(pp, 1);
3483 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3484 
3485 	/*
3486 	 * all large mappings to pp are gone
3487 	 * and no new ones can be set up since pp is locked exclusively.
3488 	 *
3489 	 * Lock the root to make sure there's only one hat_page_demote()
3490 	 * outstanding within the area of this root's pszc.
3491 	 *
3492 	 * Second potential hat_page_demote() is already eliminated by upper
3493 	 * VM layer via page_szc_lock() but we don't rely on it and use our
3494 	 * own locking (so that upper layer locking can be changed without
3495 	 * assumptions that hat depends on upper layer VM to prevent multiple
3496 	 * hat_page_demote() to be issued simultaneously to the same large
3497 	 * page).
3498 	 */
3499 again:
3500 	pszc = pp->p_szc;
3501 	if (pszc == 0)
3502 		return;
3503 	rootpp = PP_GROUPLEADER(pp, pszc);
3504 	x86_hm_enter(rootpp);
3505 	/*
3506 	 * If root's p_szc is different from pszc we raced with another
3507 	 * hat_page_demote().  Drop the lock and try to find the root again.
3508 	 * If root's p_szc is greater than pszc previous hat_page_demote() is
3509 	 * not done yet.  Take and release mlist lock of root's root to wait
3510 	 * for previous hat_page_demote() to complete.
3511 	 */
3512 	if ((rszc = rootpp->p_szc) != pszc) {
3513 		x86_hm_exit(rootpp);
3514 		if (rszc > pszc) {
3515 			/* p_szc of a locked non free page can't increase */
3516 			ASSERT(pp != rootpp);
3517 
3518 			rootpp = PP_GROUPLEADER(rootpp, rszc);
3519 			x86_hm_enter(rootpp);
3520 			x86_hm_exit(rootpp);
3521 		}
3522 		goto again;
3523 	}
3524 	ASSERT(pp->p_szc == pszc);
3525 
3526 	/*
3527 	 * Decrement by 1 p_szc of every constituent page of a region that
3528 	 * covered pp. For example if original szc is 3 it gets changed to 2
3529 	 * everywhere except in region 2 that covered pp. Region 2 that
3530 	 * covered pp gets demoted to 1 everywhere except in region 1 that
3531 	 * covered pp. The region 1 that covered pp is demoted to region
3532 	 * 0. It's done this way because from region 3 we removed level 3
3533 	 * mappings, from region 2 that covered pp we removed level 2 mappings
3534 	 * and from region 1 that covered pp we removed level 1 mappings.  All
3535 	 * changes are done from high pfns to low pfns so that roots
3536 	 * are changed last, allowing one to know the largest region where
3537 	 * hat_page_demote() is still active by looking only at the root page.
3538 	 *
3539 	 * This algorithm is implemented in 2 while loops. First loop changes
3540 	 * p_szc of pages to the right of pp's level 1 region and second
3541 	 * loop changes p_szc of pages of level 1 region that covers pp
3542 	 * and all pages to the left of level 1 region that covers pp.
3543 	 * In the first loop p_szc keeps dropping with every iteration
3544 	 * and in the second loop it keeps increasing with every iteration.
3545 	 *
3546 	 * First loop description: Demote pages to the right of pp outside of
3547 	 * level 1 region that covers pp.  In every iteration of the while
3548 	 * loop below find the last page of szc region and the first page of
3549 	 * (szc - 1) region that is immediately to the right of (szc - 1)
3550 	 * region that covers pp.  From last such page to first such page
3551 	 * change every page's szc to szc - 1. Decrement szc and continue
3552 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
3553 	 * of the szc region, skip to the next iteration.
3554 	 */
3555 	szc = pszc;
3556 	while (szc > 1) {
3557 		lastpp = PP_GROUPLEADER(pp, szc);
3558 		pgcnt = page_get_pagecnt(szc);
3559 		lastpp += pgcnt - 1;
3560 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
3561 		pgcnt = page_get_pagecnt(szc - 1);
3562 		if (lastpp - firstpp < pgcnt) {
3563 			szc--;
3564 			continue;
3565 		}
3566 		firstpp += pgcnt;
3567 		while (lastpp != firstpp) {
3568 			ASSERT(lastpp->p_szc == pszc);
3569 			lastpp->p_szc = szc - 1;
3570 			lastpp--;
3571 		}
3572 		firstpp->p_szc = szc - 1;
3573 		szc--;
3574 	}
3575 
3576 	/*
3577 	 * Second loop description:
3578 	 * First iteration changes p_szc to 0 of every
3579 	 * page of level 1 region that covers pp.
3580 	 * Subsequent iterations find last page of szc region
3581 	 * immediately to the left of szc region that covered pp
3582 	 * and first page of (szc + 1) region that covers pp.
3583 	 * From last to first page change p_szc of every page to szc.
3584 	 * Increment szc and continue looping until szc is pszc.
3585 	 * If pp belongs to the first szc region of the (szc + 1) region,
3586 	 * skip to the next iteration.
3587 	 *
3588 	 */
3589 	szc = 0;
3590 	while (szc < pszc) {
3591 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
3592 		if (szc == 0) {
3593 			pgcnt = page_get_pagecnt(1);
3594 			lastpp = firstpp + (pgcnt - 1);
3595 		} else {
3596 			lastpp = PP_GROUPLEADER(pp, szc);
3597 			if (firstpp == lastpp) {
3598 				szc++;
3599 				continue;
3600 			}
3601 			lastpp--;
3602 			pgcnt = page_get_pagecnt(szc);
3603 		}
3604 		while (lastpp != firstpp) {
3605 			ASSERT(lastpp->p_szc == pszc);
3606 			lastpp->p_szc = szc;
3607 			lastpp--;
3608 		}
3609 		firstpp->p_szc = szc;
3610 		if (firstpp == rootpp)
3611 			break;
3612 		szc++;
3613 	}
3614 	x86_hm_exit(rootpp);
3615 }
3616 
3617 /*
3618  * get hw stats from hardware into page struct and reset hw stats
3619  * returns attributes of page
3620  * Flags for hat_pagesync, hat_getstat, hat_sync
3621  *
3622  * define	HAT_SYNC_ZERORM		0x01
3623  *
3624  * Additional flags for hat_pagesync
3625  *
3626  * define	HAT_SYNC_STOPON_REF	0x02
3627  * define	HAT_SYNC_STOPON_MOD	0x04
3628  * define	HAT_SYNC_STOPON_RM	0x06
3629  * define	HAT_SYNC_STOPON_SHARED	0x08
3630  */
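/*
 * With HAT_SYNC_ZERORM the ref/mod bits are cleared from each PTE rather
 * than copied into the page_t.  The STOPON flags allow an early return once
 * the corresponding condition is observed on the page.
 */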
3631 uint_t
3632 hat_pagesync(struct page *pp, uint_t flags)
3633 {
3634 	hment_t		*hm = NULL;
3635 	htable_t	*ht;
3636 	uint_t		entry;
3637 	x86pte_t	old, save_old;
3638 	x86pte_t	new;
3639 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
3640 	extern ulong_t	po_share;
3641 	page_t		*save_pp = pp;
3642 	uint_t		pszc = 0;
3643 
3644 	ASSERT(PAGE_LOCKED(pp) || panicstr);
3645 
3646 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3647 		return (pp->p_nrm & nrmbits);
3648 
3649 	if ((flags & HAT_SYNC_ZERORM) == 0) {
3650 
3651 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3652 			return (pp->p_nrm & nrmbits);
3653 
3654 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3655 			return (pp->p_nrm & nrmbits);
3656 
3657 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3658 		    hat_page_getshare(pp) > po_share) {
3659 			if (PP_ISRO(pp))
3660 				PP_SETREF(pp);
3661 			return (pp->p_nrm & nrmbits);
3662 		}
3663 	}
3664 
3665 	XPV_DISALLOW_MIGRATE();
3666 next_size:
3667 	/*
3668 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
3669 	 */
3670 	x86_hm_enter(pp);
3671 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3672 		if (ht->ht_level < pszc)
3673 			continue;
3674 		old = x86pte_get(ht, entry);
3675 try_again:
3676 
3677 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3678 
3679 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3680 			continue;
3681 
3682 		save_old = old;
3683 		if ((flags & HAT_SYNC_ZERORM) != 0) {
3684 
3685 			/*
3686 			 * Need to clear ref or mod bits. Need to demap
3687 			 * to make sure any executing TLBs see cleared bits.
3688 			 */
3689 			new = old;
3690 			PTE_CLR(new, PT_REF | PT_MOD);
3691 			old = hati_update_pte(ht, entry, old, new);
3692 			if (old != 0)
3693 				goto try_again;
3694 
3695 			old = save_old;
3696 		}
3697 
3698 		/*
3699 		 * Sync the PTE
3700 		 */
3701 		if (!(flags & HAT_SYNC_ZERORM) &&
3702 		    PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3703 			hati_sync_pte_to_page(pp, old, ht->ht_level);
3704 
3705 		/*
3706 		 * can stop short if we found a ref'd or mod'd page
3707 		 */
3708 		if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3709 		    (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3710 			x86_hm_exit(pp);
3711 			goto done;
3712 		}
3713 	}
3714 	x86_hm_exit(pp);
3715 	while (pszc < pp->p_szc) {
3716 		page_t *tpp;
3717 		pszc++;
3718 		tpp = PP_GROUPLEADER(pp, pszc);
3719 		if (pp != tpp) {
3720 			pp = tpp;
3721 			goto next_size;
3722 		}
3723 	}
3724 done:
3725 	XPV_ALLOW_MIGRATE();
3726 	return (save_pp->p_nrm & nrmbits);
3727 }
3728 
3729 /*
3730  * returns approx number of mappings to this pp.  A return of 0 implies
3731  * there are no mappings to the page.
3732  */
3733 ulong_t
3734 hat_page_getshare(page_t *pp)
3735 {
3736 	uint_t cnt;
3737 	cnt = hment_mapcnt(pp);
3738 #if defined(__amd64)
3739 	if (vpm_enable && pp->p_vpmref) {
3740 		cnt += 1;
3741 	}
3742 #endif
3743 	return (cnt);
3744 }
3745 
3746 /*
3747  * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3748  * otherwise.
3749  */
3750 int
3751 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3752 {
3753 	return (hat_page_getshare(pp) > sh_thresh);
3754 }
3755 
3756 /*
3757  * hat_softlock isn't supported anymore
3758  */
3759 /*ARGSUSED*/
3760 faultcode_t
3761 hat_softlock(
3762 	hat_t *hat,
3763 	caddr_t addr,
3764 	size_t *len,
3765 	struct page **page_array,
3766 	uint_t flags)
3767 {
3768 	return (FC_NOSUPPORT);
3769 }
3770 
3771 
3772 
3773 /*
3774  * Routine to expose supported HAT features to platform independent code.
3775  */
3776 /*ARGSUSED*/
3777 int
3778 hat_supported(enum hat_features feature, void *arg)
3779 {
3780 	switch (feature) {
3781 
3782 	case HAT_SHARED_PT:	/* this is really ISM */
3783 		return (1);
3784 
3785 	case HAT_DYNAMIC_ISM_UNMAP:
3786 		return (0);
3787 
3788 	case HAT_VMODSORT:
3789 		return (1);
3790 
3791 	case HAT_SHARED_REGIONS:
3792 		return (0);
3793 
3794 	default:
3795 		panic("hat_supported() - unknown feature");
3796 	}
3797 	return (0);
3798 }
3799 
3800 /*
3801  * Called when a thread is exiting and has been switched to the kernel AS
3802  */
3803 void
3804 hat_thread_exit(kthread_t *thd)
3805 {
3806 	ASSERT(thd->t_procp->p_as == &kas);
3807 	XPV_DISALLOW_MIGRATE();
3808 	hat_switch(thd->t_procp->p_as->a_hat);
3809 	XPV_ALLOW_MIGRATE();
3810 }
3811 
3812 /*
3813  * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
3814  */
3815 /*ARGSUSED*/
3816 void
3817 hat_setup(hat_t *hat, int flags)
3818 {
3819 	XPV_DISALLOW_MIGRATE();
3820 	kpreempt_disable();
3821 
3822 	hat_switch(hat);
3823 
3824 	kpreempt_enable();
3825 	XPV_ALLOW_MIGRATE();
3826 }
3827 
3828 /*
3829  * Prepare for a CPU private mapping for the given address.
3830  *
3831  * The address can only be used from a single CPU and can be remapped
3832  * using hat_mempte_remap().  Return the address of the PTE.
3833  *
3834  * We do the htable_create() if necessary and increment the valid count so
3835  * the htable can't disappear.  We also hat_devload() the page table into
3836  * kernel so that the PTE is quickly accessed.
3837  */
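/*
 * A minimal usage sketch (illustrative only; the exact attr/flag choices
 * depend on the caller):
 *
 *	hat_mempte_t pte_pa = hat_mempte_setup(va);
 *	...
 *	kpreempt_disable();
 *	hat_mempte_remap(pfn, va, pte_pa, PROT_READ | PROT_WRITE,
 *	    HAT_LOAD_NOCONSIST);
 *	... access the page through va on this CPU only ...
 *	kpreempt_enable();
 *	...
 *	hat_mempte_release(va, pte_pa);
 */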
3838 hat_mempte_t
3839 hat_mempte_setup(caddr_t addr)
3840 {
3841 	uintptr_t	va = (uintptr_t)addr;
3842 	htable_t	*ht;
3843 	uint_t		entry;
3844 	x86pte_t	oldpte;
3845 	hat_mempte_t	p;
3846 
3847 	ASSERT(IS_PAGEALIGNED(va));
3848 	ASSERT(!IN_VA_HOLE(va));
3849 	++curthread->t_hatdepth;
3850 	XPV_DISALLOW_MIGRATE();
3851 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3852 	if (ht == NULL) {
3853 		ht = htable_create(kas.a_hat, va, 0, NULL);
3854 		entry = htable_va2entry(va, ht);
3855 		ASSERT(ht->ht_level == 0);
3856 		oldpte = x86pte_get(ht, entry);
3857 	}
3858 	if (PTE_ISVALID(oldpte))
3859 		panic("hat_mempte_setup(): address already mapped"
3860 		    "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3861 
3862 	/*
3863 	 * increment ht_valid_cnt so that the pagetable can't disappear
3864 	 */
3865 	HTABLE_INC(ht->ht_valid_cnt);
3866 
3867 	/*
3868 	 * return the PTE physical address to the caller.
3869 	 */
3870 	htable_release(ht);
3871 	XPV_ALLOW_MIGRATE();
3872 	p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3873 	--curthread->t_hatdepth;
3874 	return (p);
3875 }
3876 
3877 /*
3878  * Release a CPU private mapping for the given address.
3879  * We decrement the htable valid count so it might be destroyed.
3880  */
3881 /*ARGSUSED1*/
3882 void
3883 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3884 {
3885 	htable_t	*ht;
3886 
3887 	XPV_DISALLOW_MIGRATE();
3888 	/*
3889 	 * invalidate any left over mapping and decrement the htable valid count
3890 	 */
3891 #ifdef __xpv
3892 	if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3893 	    UVMF_INVLPG | UVMF_LOCAL))
3894 		panic("HYPERVISOR_update_va_mapping() failed");
3895 #else
3896 	{
3897 		x86pte_t *pteptr;
3898 
3899 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
3900 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3901 		if (mmu.pae_hat)
3902 			*pteptr = 0;
3903 		else
3904 			*(x86pte32_t *)pteptr = 0;
3905 		mmu_tlbflush_entry(addr);
3906 		x86pte_mapout();
3907 	}
3908 #endif
3909 
3910 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3911 	if (ht == NULL)
3912 		panic("hat_mempte_release(): invalid address");
3913 	ASSERT(ht->ht_level == 0);
3914 	HTABLE_DEC(ht->ht_valid_cnt);
3915 	htable_release(ht);
3916 	XPV_ALLOW_MIGRATE();
3917 }
3918 
3919 /*
3920  * Apply a temporary CPU private mapping to a page. We flush the TLB only
3921  * on this CPU, so this ought to have been called with preemption disabled.
3922  */
3923 void
3924 hat_mempte_remap(
3925 	pfn_t		pfn,
3926 	caddr_t		addr,
3927 	hat_mempte_t	pte_pa,
3928 	uint_t		attr,
3929 	uint_t		flags)
3930 {
3931 	uintptr_t	va = (uintptr_t)addr;
3932 	x86pte_t	pte;
3933 
3934 	/*
3935 	 * Remap the given PTE to the new page's PFN. Invalidate only
3936 	 * on this CPU.
3937 	 */
3938 #ifdef DEBUG
3939 	htable_t	*ht;
3940 	uint_t		entry;
3941 
3942 	ASSERT(IS_PAGEALIGNED(va));
3943 	ASSERT(!IN_VA_HOLE(va));
3944 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3945 	ASSERT(ht != NULL);
3946 	ASSERT(ht->ht_level == 0);
3947 	ASSERT(ht->ht_valid_cnt > 0);
3948 	ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3949 	htable_release(ht);
3950 #endif
3951 	XPV_DISALLOW_MIGRATE();
3952 	pte = hati_mkpte(pfn, attr, 0, flags);
3953 #ifdef __xpv
3954 	if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
3955 		panic("HYPERVISOR_update_va_mapping() failed");
3956 #else
3957 	{
3958 		x86pte_t *pteptr;
3959 
3960 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
3961 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3962 		if (mmu.pae_hat)
3963 			*(x86pte_t *)pteptr = pte;
3964 		else
3965 			*(x86pte32_t *)pteptr = (x86pte32_t)pte;
3966 		mmu_tlbflush_entry(addr);
3967 		x86pte_mapout();
3968 	}
3969 #endif
3970 	XPV_ALLOW_MIGRATE();
3971 }
3972 
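/*
 * Illustrative sketch (editorial addition, not compiled): the typical
 * lifecycle of a CPU private mapping.  The window address, page, and the
 * attr/flag combination shown here are assumptions for illustration;
 * real consumers pick their own.
 */
#if 0
static void
example_mempte_lifecycle(caddr_t win, page_t *pp)
{
	hat_mempte_t pte_pa;

	pte_pa = hat_mempte_setup(win);		/* reserve the PTE */

	kpreempt_disable();			/* remap/flush is CPU local */
	hat_mempte_remap(page_pptonum(pp), win, pte_pa,
	    PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
	/* ... access the page through win ... */
	kpreempt_enable();

	hat_mempte_release(win, pte_pa);	/* drop the PTE reservation */
}
#endif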
3973 
3974 
3975 /*
3976  * Hat locking functions
3977  * XXX - these two functions are currently used by hatstats;
3978  * 	they could be removed by switching hatstats to a per-as mutex.
3979  */
3980 void
3981 hat_enter(hat_t *hat)
3982 {
3983 	mutex_enter(&hat->hat_mutex);
3984 }
3985 
3986 void
3987 hat_exit(hat_t *hat)
3988 {
3989 	mutex_exit(&hat->hat_mutex);
3990 }
3991 
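/*
 * Illustrative sketch (editorial addition, not compiled): a hatstats-style
 * reader bracketing its snapshot of per-hat state with the hat mutex.  The
 * hat_pages_mapped field is used only as an example of what gets read.
 */
#if 0
static pgcnt_t
example_snapshot_pages(hat_t *hat)
{
	pgcnt_t pages;

	hat_enter(hat);				/* take hat_mutex */
	pages = hat->hat_pages_mapped[0];	/* 4K mappings in this hat */
	hat_exit(hat);				/* drop hat_mutex */
	return (pages);
}
#endif
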
3992 /*
3993  * HAT part of cpu initialization.
3994  */
3995 void
3996 hat_cpu_online(struct cpu *cpup)
3997 {
3998 	if (cpup != CPU) {
3999 		x86pte_cpu_init(cpup);
4000 		hat_vlp_setup(cpup);
4001 	}
4002 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4003 }
4004 
4005 /*
4006  * HAT part of cpu deletion.
4007  * (currently, we only call this after the cpu is safely passivated.)
4008  */
4009 void
4010 hat_cpu_offline(struct cpu *cpup)
4011 {
4012 	ASSERT(cpup != CPU);
4013 
4014 	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4015 	x86pte_cpu_fini(cpup);
4016 	hat_vlp_teardown(cpup);
4017 }
4018 
4019 /*
4020  * Function called after all CPUs are brought online.
4021  * Used to remove low address boot mappings.
4022  */
4023 void
4024 clear_boot_mappings(uintptr_t low, uintptr_t high)
4025 {
4026 	uintptr_t vaddr = low;
4027 	htable_t *ht = NULL;
4028 	level_t level;
4029 	uint_t entry;
4030 	x86pte_t pte;
4031 
4032 	/*
4033 	 * On the 1st CPU we can unload the prom mappings; basically we blow
4034 	 * away all virtual mappings under _userlimit.
4035 	 */
4036 	while (vaddr < high) {
4037 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4038 		if (ht == NULL)
4039 			break;
4040 
4041 		level = ht->ht_level;
4042 		entry = htable_va2entry(vaddr, ht);
4043 		ASSERT(level <= mmu.max_page_level);
4044 		ASSERT(PTE_ISPAGE(pte, level));
4045 
4046 		/*
4047 		 * Unload the mapping from the page tables.
4048 		 */
4049 		(void) x86pte_inval(ht, entry, 0, NULL);
4050 		ASSERT(ht->ht_valid_cnt > 0);
4051 		HTABLE_DEC(ht->ht_valid_cnt);
4052 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
4053 
4054 		vaddr += LEVEL_SIZE(ht->ht_level);
4055 	}
4056 	if (ht)
4057 		htable_release(ht);
4058 }
4059 
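/*
 * Illustrative sketch (editorial addition, not compiled): once the last
 * CPU has been brought online, startup code can drop every remaining low
 * mapping in one call.  The exact call site and bounds are assumptions.
 */
#if 0
	clear_boot_mappings(0, _userlimit);
#endif
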
4060 /*
4061  * Atomically install a new translation for a single page.  If the
4062  * currently installed PTE doesn't match the value we expect to find,
4063  * it's not updated and we return the PTE we found.
4064  *
4065  * If activating nosync or NOWRITE and the page was modified we need to sync
4066  * with the page_t. Also sync with page_t if clearing ref/mod bits.
4067  */
4068 static x86pte_t
4069 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4070 {
4071 	page_t		*pp;
4072 	uint_t		rm = 0;
4073 	x86pte_t	replaced;
4074 
4075 	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4076 	    PTE_GET(expected, PT_MOD | PT_REF) &&
4077 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4078 	    !PTE_GET(new, PT_MOD | PT_REF))) {
4079 
4080 		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4081 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4082 		ASSERT(pp != NULL);
4083 		if (PTE_GET(expected, PT_MOD))
4084 			rm |= P_MOD;
4085 		if (PTE_GET(expected, PT_REF))
4086 			rm |= P_REF;
4087 		PTE_CLR(new, PT_MOD | PT_REF);
4088 	}
4089 
4090 	replaced = x86pte_update(ht, entry, expected, new);
4091 	if (replaced != expected)
4092 		return (replaced);
4093 
4094 	if (rm) {
4095 		/*
4096 		 * sync to all constituent pages of a large page
4097 		 */
4098 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4099 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4100 		while (pgcnt-- > 0) {
4101 			/*
4102 			 * hat_page_demote() can't decrease
4103 			 * pszc below this mapping size
4104 			 * since large mapping existed after we
4105 			 * took mlist lock.
4106 			 */
4107 			ASSERT(pp->p_szc >= ht->ht_level);
4108 			hat_page_setattr(pp, rm);
4109 			++pp;
4110 		}
4111 	}
4112 
4113 	return (0);
4114 }
4115 
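/*
 * Illustrative sketch (editorial addition, not compiled): callers use
 * hati_update_pte() as a compare-and-swap, re-reading the installed PTE
 * and retrying whenever it changed underneath them.  compute_new_pte()
 * is a hypothetical helper; ht and entry come from an htable lookup.
 */
#if 0
	x86pte_t old, new;

	do {
		old = x86pte_get(ht, entry);
		new = compute_new_pte(old);	/* hypothetical */
	} while (hati_update_pte(ht, entry, old, new) != 0);
#endif
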
4116 /* ARGSUSED */
4117 void
4118 hat_join_srd(struct hat *hat, vnode_t *evp)
4119 {
4120 }
4121 
4122 /* ARGSUSED */
4123 hat_region_cookie_t
4124 hat_join_region(struct hat *hat,
4125     caddr_t r_saddr,
4126     size_t r_size,
4127     void *r_obj,
4128     u_offset_t r_objoff,
4129     uchar_t r_perm,
4130     uchar_t r_pgszc,
4131     hat_rgn_cb_func_t r_cb_function,
4132     uint_t flags)
4133 {
4134 	panic("No shared region support on x86");
4135 	return (HAT_INVALID_REGION_COOKIE);
4136 }
4137 
4138 /* ARGSUSED */
4139 void
4140 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4141 {
4142 	panic("No shared region support on x86");
4143 }
4144 
4145 /* ARGSUSED */
4146 void
4147 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4148 {
4149 	panic("No shared region support on x86");
4150 }
4151 
4152 
4153 /*
4154  * Kernel Physical Mapping (kpm) facility
4155  *
4156  * Most of the routines needed to support segkpm are almost no-ops on the
4157  * x86 platform.  We map in the entire segment when it is created and leave
4158  * it mapped in, so there is no additional work required to set up and tear
4159  * down individual mappings.  All of these routines were created to support
4160  * SPARC platforms that have to avoid aliasing in their virtually indexed
4161  * caches.
4162  *
4163  * Most of the routines have sanity checks in them (e.g. verifying that the
4164  * passed-in page is locked).  We don't actually care about most of these
4165  * checks on x86, but we leave them in place to identify problems in the
4166  * upper levels.
4167  */
4168 
4169 /*
4170  * Map in a locked page and return the vaddr.
4171  */
4172 /*ARGSUSED*/
4173 caddr_t
4174 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4175 {
4176 	caddr_t		vaddr;
4177 
4178 #ifdef DEBUG
4179 	if (kpm_enable == 0) {
4180 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4181 		return ((caddr_t)NULL);
4182 	}
4183 
4184 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4185 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4186 		return ((caddr_t)NULL);
4187 	}
4188 #endif
4189 
4190 	vaddr = hat_kpm_page2va(pp, 1);
4191 
4192 	return (vaddr);
4193 }
4194 
4195 /*
4196  * Mapout a locked page.
4197  */
4198 /*ARGSUSED*/
4199 void
4200 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4201 {
4202 #ifdef DEBUG
4203 	if (kpm_enable == 0) {
4204 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4205 		return;
4206 	}
4207 
4208 	if (IS_KPM_ADDR(vaddr) == 0) {
4209 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4210 		return;
4211 	}
4212 
4213 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4214 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4215 		return;
4216 	}
4217 #endif
4218 }
4219 
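/*
 * Illustrative sketch (editorial addition, not compiled): segkpm consumers
 * pair mapin/mapout around short-lived access to a locked page pp; on x86
 * both calls are little more than address arithmetic plus DEBUG checks.
 */
#if 0
	caddr_t va = hat_kpm_mapin(pp, NULL);

	/* ... read or write the page contents through va ... */
	hat_kpm_mapout(pp, NULL, va);
#endif
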
4220 /*
4221  * Return the kpm virtual address for a specific pfn
4222  */
4223 caddr_t
4224 hat_kpm_pfn2va(pfn_t pfn)
4225 {
4226 	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4227 
4228 	ASSERT(!pfn_is_foreign(pfn));
4229 	return ((caddr_t)vaddr);
4230 }
4231 
4232 /*
4233  * Return the kpm virtual address for the page at pp.
4234  */
4235 /*ARGSUSED*/
4236 caddr_t
4237 hat_kpm_page2va(struct page *pp, int checkswap)
4238 {
4239 	return (hat_kpm_pfn2va(pp->p_pagenum));
4240 }
4241 
4242 /*
4243  * Return the page frame number for the kpm virtual address vaddr.
4244  */
4245 pfn_t
4246 hat_kpm_va2pfn(caddr_t vaddr)
4247 {
4248 	pfn_t		pfn;
4249 
4250 	ASSERT(IS_KPM_ADDR(vaddr));
4251 
4252 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
4253 
4254 	return (pfn);
4255 }
4256 
4257 
4258 /*
4259  * Return the page for the kpm virtual address vaddr.
4260  */
4261 page_t *
4262 hat_kpm_vaddr2page(caddr_t vaddr)
4263 {
4264 	pfn_t		pfn;
4265 
4266 	ASSERT(IS_KPM_ADDR(vaddr));
4267 
4268 	pfn = hat_kpm_va2pfn(vaddr);
4269 
4270 	return (page_numtopp_nolock(pfn));
4271 }
4272 
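/*
 * Illustrative sketch (editorial addition, not compiled): because the kpm
 * translation is pure arithmetic on x86, the conversions above are exact
 * inverses for any pfn that lies within the kpm segment.
 */
#if 0
	caddr_t va = hat_kpm_pfn2va(pfn);

	ASSERT(hat_kpm_va2pfn(va) == pfn);
	ASSERT(hat_kpm_vaddr2page(va) == page_numtopp_nolock(pfn));
#endif
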
4273 /*
4274  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4275  * KPM page.  This should never happen on x86.
4276  */
4277 int
4278 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4279 {
4280 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
4281 	    (void *)hat, (void *)vaddr);
4282 
4283 	return (0);
4284 }
4285 
4286 /*ARGSUSED*/
4287 void
4288 hat_kpm_mseghash_clear(int nentries)
4289 {}
4290 
4291 /*ARGSUSED*/
4292 void
4293 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4294 {}
4295 
4296 #ifdef __xpv
4297 /*
4298  * There are specific hypervisor calls to establish and remove mappings
4299  * for grant table references and for the privcmd driver. We have to
4300  * ensure that a page table actually exists before those calls are used.
4301  */
4302 void
4303 hat_prepare_mapping(hat_t *hat, caddr_t addr)
4304 {
4305 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4306 	XPV_DISALLOW_MIGRATE();
4307 	(void) htable_create(hat, (uintptr_t)addr, 0, NULL);
4308 	XPV_ALLOW_MIGRATE();
4309 }
4310 
4311 void
4312 hat_release_mapping(hat_t *hat, caddr_t addr)
4313 {
4314 	htable_t *ht;
4315 
4316 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4317 	XPV_DISALLOW_MIGRATE();
4318 	ht = htable_lookup(hat, (uintptr_t)addr, 0);
4319 	ASSERT(ht != NULL);
4320 	ASSERT(ht->ht_busy >= 2);
4321 	htable_release(ht);
4322 	htable_release(ht);
4323 	XPV_ALLOW_MIGRATE();
4324 }
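
/*
 * Illustrative sketch (editorial addition, not compiled): a consumer that
 * asks the hypervisor to install a mapping at addr brackets the hypercall
 * with prepare/release so the level 0 pagetable is guaranteed to exist.
 * The hypercall itself is only hinted at here.
 */
#if 0
	hat_prepare_mapping(kas.a_hat, addr);
	/* ... hypervisor call that installs the PTE for addr ... */
	hat_release_mapping(kas.a_hat, addr);
#endif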
4325 #endif
4326