/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>
#include <sys/panic.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#include <sys/xpv_panic.h>
#endif

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur. The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable is so that we can tune this via /etc/system
 * Any value works, but a power of two <= mmu.ptes_per_table is best.
 */
uint_t htable_steal_passes = 8;
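
/*
 * For example (the value here is purely illustrative), the number of
 * steal passes could be lowered from /etc/system with a line like:
 *
 *	set htable_steal_passes = 4
 */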

/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

#ifdef __xpv
/*
 * Deal with hypervisor complications.
 */
void
xen_flush_va(caddr_t va)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
	} else {
		t.cmd = MMUEXT_INVLPG_LOCAL;
		t.arg1.linear_addr = (uintptr_t)va;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_va(caddr_t va, cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
		return;
	}

	t.cmd = MMUEXT_INVLPG_MULTI;
	t.arg1.linear_addr = (uintptr_t)va;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_flush_tlb()
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		xpv_panic_reload_cr3();
	} else {
		t.cmd = MMUEXT_TLB_FLUSH_LOCAL;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_tlb(cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	ASSERT(!IN_XPV_PANIC());
	t.cmd = MMUEXT_TLB_FLUSH_MULTI;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}
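
/*
 * Note that the four flush routines above share a pattern: when in an xpv
 * panic they fall back to direct TLB operations, otherwise they issue a
 * single HYPERVISOR_mmuext_op() and verify that exactly one op completed.
 */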

/*
 * Install/Adjust a kpm mapping under the hypervisor.
 * Value of "how" should be:
 *	PT_WRITABLE | PT_VALID - regular kpm mapping
 *	PT_VALID - make mapping read-only
 *	0	- remove mapping
 *
 * returns 0 on success. non-zero for failure.
 */
int
xen_kpm_page(pfn_t pfn, uint_t how)
{
	paddr_t pa = mmu_ptob((paddr_t)pfn);
	x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD;

	if (kpm_vbase == NULL)
		return (0);

	if (how)
		pte |= pa_to_ma(pa) | how;
	else
		pte = 0;
	return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa,
	    pte, UVMF_INVLPG | UVMF_ALL));
}
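
/*
 * For instance, xen_kpm_page(pfn, PT_VALID) downgrades the kpm mapping of
 * pfn to read-only, and xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) restores
 * it to read/write, as is done when releasing a pagetable page below.
 */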

void
xen_pin(pfn_t pfn, level_t lvl)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_PIN_L1_TABLE + lvl;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_unpin(pfn_t pfn)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_UNPIN_TABLE;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

static void
xen_map(uint64_t pte, caddr_t va)
{
	if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte,
	    UVMF_INVLPG | UVMF_LOCAL))
		panic("HYPERVISOR_update_va_mapping() failed");
}
#endif /* __xpv */

/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;
	atomic_add_32(&active_ptables, 1);

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	pfn = pp->p_pagenum;
	page_downgrade(pp);
	ASSERT(PAGE_SHARED(pp));

	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}
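
/*
 * The seed is simply a hint handed through to page_get_physical(); callers
 * below pass an htable related address, e.g. ptable_alloc((uintptr_t)ht).
 */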

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);
	ASSERT(!IN_XPV_PANIC());

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		page_unlock(pp);
		/*
		 * RFE: we could change this to not loop forever
		 * George Cameron had some idea on how to do that.
		 * For now looping works - it's just like sfmmu.
		 */
		while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM))
			continue;
	}
#ifdef __xpv
	if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0)
		panic("failure making kpm r/w pfn=0x%lx", pfn);
#endif
	page_free(pp, 1);
	page_unresv(1);
}

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}

/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount &&
	    !USE_HAT_RESERVES()) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}
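
/*
 * To recap, the reserve pool only shrinks here; it is stocked at boot by
 * htable_initial_reserve() and replenished by htable_put_reserve() calls
 * from htable_alloc() and htable_free() below.
 */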

/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
 */
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		h_start;
	static uint_t	h_seed = 0;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL) {
				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat) {

					/* unlink victim hat */
					if (hat->hat_prev)
						hat->hat_prev->hat_next =
						    hat->hat_next;
					else
						kas.a_hat->hat_next =
						    hat->hat_next;
					if (hat->hat_next)
						hat->hat_next->hat_prev =
						    hat->hat_prev;
					else
						kas.a_hat->hat_prev =
						    hat->hat_prev;


					/* relink at end of hat list */
					hat->hat_next = NULL;
					hat->hat_prev = kas.a_hat->hat_prev;
					if (hat->hat_prev)
						hat->hat_prev->hat_next = hat;
					else
						kas.a_hat->hat_next = hat;
					kas.a_hat->hat_prev = hat;

				}

				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Mark the HAT as a stealing victim.
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 0 || stolen == cnt)
				continue;

			/*
			 * Search the active htables for one to steal.
			 * Start at a different hash bucket every time to
			 * help spread the pain of stealing.
			 */
			h = h_start = h_seed++ % hat->hat_num_hash;
			do {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags & HTABLE_SHARED_PFN) ||
					    ht->ht_level > 0 ||
					    ht->ht_valid_cnt > threshold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
					for (e = 0, va = ht->ht_vaddr;
					    e < HTABLE_NUM_PTES(ht) &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new mapping
					 * behind us, give up on this table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}
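
/*
 * Note that the htables on the returned list are unlinked but not yet
 * freed; callers either reuse them directly (htable_alloc()) or dispose
 * of them with htable_free() (htable_reap()).
 */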

/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);
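
	/*
	 * For example (numbers purely illustrative): with physmem at
	 * 262144 pages and 1000 active pagetables, this computes
	 * MAX(MIN(13107, 50), 10), i.e. a reap target of 50.
	 */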

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	XPV_DISALLOW_MIGRATE();
	list = htable_steal(reap_cnt);
	XPV_ALLOW_MIGRATE();
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
		 */
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (USE_HAT_RESERVES() ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc((uintptr_t)ht);
			if (ht->ht_pfn == PFN_INVALID) {
				if (USE_HAT_RESERVES())
					htable_put_reserve(ht);
				else
					kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL) {
			if (is_bare) {
				ptable_free(ht->ht_pfn);
				ht->ht_pfn = PFN_INVALID;
#if defined(__xpv) && defined(__amd64)
			/*
			 * make stolen page table writable again in kpm
			 */
			} else if (kpm_vbase && xen_kpm_page(ht->ht_pfn,
			    PT_VALID | PT_WRITABLE) < 0) {
				panic("failure making kpm r/w pfn=0x%lx",
				    ht->ht_pfn);
#endif
			}
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

#if defined(__amd64) && defined(__xpv)
	/*
	 * Under the 64-bit hypervisor, we have 2 top level page tables.
	 * If this allocation fails, we'll resort to stealing.
	 * We use the stolen page indirectly, by freeing the
	 * stolen htable first.
	 */
	if (level == mmu.max_level) {
		for (;;) {
			htable_t *stolen;

			hat->hat_user_ptable = ptable_alloc((uintptr_t)ht + 1);
			if (hat->hat_user_ptable != PFN_INVALID)
				break;
			stolen = htable_steal(1);
			if (stolen == NULL)
				panic("2nd steal ptable failed\n");
			htable_free(stolen);
		}
		block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable),
		    MMU_PAGESIZE);
	}
#endif

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

#if defined(__amd64) && defined(__xpv)
	if (!is_bare && kpm_vbase) {
		(void) xen_kpm_page(ht->ht_pfn, PT_VALID);
		if (level == mmu.max_level)
			(void) xen_kpm_page(hat->hat_user_ptable, PT_VALID);
	}
#endif

	return (ht);
}
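
/*
 * To summarize the above: htable_alloc() tries the hat's cached list
 * first, then the reserve pool or kmem (plus a pagetable page), and
 * finally falls back to htable_steal().
 */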

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t *hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot time reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
#if defined(__amd64) && defined(__xpv)
		if (ht->ht_level == mmu.max_level) {
			ptable_free(hat->hat_user_ptable);
			hat->hat_user_ptable = PFN_INVALID;
		}
#endif
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free it or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}
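
/*
 * Note how the reaping path above bumps htable_dont_cache around the cache
 * purge, so that htable_free() sends each htable to the reserves or kmem
 * rather than straight back onto the cache being emptied.
 */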

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t	found;
	hat_t		*hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
#ifdef __xpv
	/*
	 * This is weird, but Xen apparently automatically unlinks empty
	 * pagetables from the upper page table. So allow PTP to be 0 already.
	 */
	if (found != expect && found != 0)
#else
	if (found != expect)
#endif
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When a top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors.
	 *
	 * If we don't need to do that, then we still have to INVLPG against
	 * an address covered by the inner page table, as the latest processors
	 * have TLB-like caches for non-leaf page table entries.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
		    DEMAP_ALL_ADDR : old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}
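
/*
 * Note that link_ptp() and unlink_ptp() mirror each other: both publish
 * the PTP change with an atomic x86pte_cas() and both force a cr3 reload
 * (hat_tlb_inval() with DEMAP_ALL_ADDR) when a VLP top level is involved.
 */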

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 *
 * On the metal, during process exit, we don't bother unlinking the tables from
 * upper level pagetables. They are instead handled in bulk by hat_free_end().
 * We can't do this on the hypervisor as we need the page table to be
 * implicitly unpinned before it goes to the free page lists. This can't
 * happen unless we fully unlink it from the page table hierarchy.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;
			ASSERT(ht->ht_lock_cnt == 0);

#if !defined(__xpv)
			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}
#endif /* __xpv */

			/*
			 * Remember if we destroy an htable that shares its PFN
			 * from elsewhere.
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);
			unlink_ptp(higher, ht, va);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d
 */
htable_t *
htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
{
	uintptr_t	base;
	uint_t		hashval;
	htable_t	*ht = NULL;

	ASSERT(level >= 0);
	ASSERT(level <= TOP_LEVEL(hat));

	if (level == TOP_LEVEL(hat)) {
#if defined(__amd64)
		/*
		 * 32 bit address spaces on 64 bit kernels need to check
		 * for overflow of the 32 bit address space
		 */
		if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32))
			return (NULL);
#endif
		base = 0;
	} else {
		base = vaddr & LEVEL_MASK(level + 1);
	}

	hashval = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(hashval);
	for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
		if (ht->ht_hat == hat &&
		    ht->ht_vaddr == base &&
		    ht->ht_level == level)
			break;
	}
	if (ht)
		++ht->ht_busy;

	HTABLE_EXIT(hashval);
	return (ht);
}
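
/*
 * A sketch of the typical (hypothetical) calling pattern: every successful
 * htable_lookup() must be balanced by an htable_release(), e.g.
 *
 *	htable_t *ht = htable_lookup(hat, vaddr, 0);
 *	if (ht != NULL) {
 *		x86pte_t pte = x86pte_get(ht, htable_va2entry(vaddr, ht));
 *		... examine pte ...
 *		htable_release(ht);
 *	}
 */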

/*
 * Acquires a hold on a known htable (from a locked hment entry).
 */
void
htable_acquire(htable_t *ht)
{
	hat_t		*hat = ht->ht_hat;
	level_t		level = ht->ht_level;
	uintptr_t	base = ht->ht_vaddr;
	uint_t		hashval = HTABLE_HASH(hat, base, level);

	HTABLE_ENTER(hashval);
#ifdef DEBUG
	/*
	 * make sure the htable is there
	 */
	{
		htable_t	*h;

		for (h = hat->hat_ht_hash[hashval];
		    h && h != ht;
		    h = h->ht_next)
			;
		ASSERT(h == ht);
	}
#endif /* DEBUG */
	++ht->ht_busy;
	HTABLE_EXIT(hashval);
}

/*
 * Find the htable for the pagetable at the given level for the given address.
 * If found acquires a hold that eventually needs to be htable_release()d.
 * If not found the table is created.
 *
 * Since we can't hold a hash table mutex during allocation, we have to
 * drop it and redo the search on a create. Then we may have to free the newly
 * allocated htable if another thread raced in and created it ahead of us.
 */
htable_t *
htable_create(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	uint_t		h;
	level_t		l;
	uintptr_t	base;
	htable_t	*ht;
	htable_t	*higher = NULL;
	htable_t	*new = NULL;

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_create(): level %d out of range\n", level);

	/*
	 * Create the page tables in top down order.
	 */
	for (l = TOP_LEVEL(hat); l >= level; --l) {
		new = NULL;
		if (l == TOP_LEVEL(hat))
			base = 0;
		else
			base = vaddr & LEVEL_MASK(l + 1);

		h = HTABLE_HASH(hat, base, l);
try_again:
		/*
		 * look up the htable at this level
		 */
		HTABLE_ENTER(h);
		if (l == TOP_LEVEL(hat)) {
			ht = hat->hat_htable;
		} else {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				ASSERT(ht->ht_hat == hat);
				if (ht->ht_vaddr == base &&
				    ht->ht_level == l)
					break;
			}
		}

		/*
		 * if we found the htable, increment its busy cnt
		 * and if we had allocated a new htable, free it.
		 */
		if (ht != NULL) {
			/*
			 * If we find a pre-existing shared table, it must
			 * share from the same place.
			 */
			if (l == level && shared && ht->ht_shares &&
			    ht->ht_shares != shared) {
				panic("htable shared from wrong place "
				    "found htable=%p shared=%p",
				    (void *)ht, (void *)shared);
			}
			++ht->ht_busy;
			HTABLE_EXIT(h);
			if (new)
				htable_free(new);
			if (higher != NULL)
				htable_release(higher);
			higher = ht;

		/*
		 * if we didn't find it on the first search
		 * allocate a new one and search again
		 */
		} else if (new == NULL) {
			HTABLE_EXIT(h);
			new = htable_alloc(hat, base, l,
			    l == level ? shared : NULL);
			goto try_again;

		/*
		 * 2nd search and still not there, use "new" table.
		 * Link new table into higher, when not at top level.
		 */
		} else {
			ht = new;
			if (higher != NULL) {
				link_ptp(higher, ht, base);
				ht->ht_parent = higher;
			}
			ht->ht_next = hat->hat_ht_hash[h];
			ASSERT(ht->ht_prev == NULL);
			if (hat->hat_ht_hash[h])
				hat->hat_ht_hash[h]->ht_prev = ht;
			hat->hat_ht_hash[h] = ht;
			HTABLE_EXIT(h);

			/*
			 * Note we don't do htable_release(higher).
			 * That happens recursively when "new" is removed by
			 * htable_release() or htable_steal().
			 */
			higher = ht;

			/*
			 * If we just created a new shared page table we
			 * increment the shared htable's busy count, so that
			 * it can't be the victim of a steal even if it's empty.
			 */
			if (l == level && shared) {
				(void) htable_lookup(shared->ht_hat,
				    shared->ht_vaddr, shared->ht_level);
				HATSTAT_INC(hs_htable_shared);
			}
		}
	}

	return (ht);
}
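
/*
 * A hypothetical use of htable_create(): find or build the level 0
 * pagetable covering vaddr, with all intermediate levels linked in:
 *
 *	htable_t *ht = htable_create(hat, vaddr, 0, NULL);
 *	... install PTEs via x86pte_set(ht, htable_va2entry(vaddr, ht), ...) ...
 *	htable_release(ht);
 *
 * Passing a non-NULL "shared" htable instead creates a table that shares
 * its physical pagetable (HTABLE_SHARED_PFN) with the one given.
 */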

/*
 * Inherit initial pagetables from the boot program. On the 64-bit
 * hypervisor we also temporarily mark the p_index field of page table
 * pages, so we know not to try making them writable in seg_kpm.
 */
void
htable_attach(
	hat_t *hat,
	uintptr_t base,
	level_t level,
	htable_t *parent,
	pfn_t pfn)
{
	htable_t	*ht;
	uint_t		h;
	uint_t		i;
	x86pte_t	pte;
	x86pte_t	*ptep;
	page_t		*pp;
	extern page_t	*boot_claim_page(pfn_t);

	ht = htable_get_reserve();
	if (level == mmu.max_level)
		kas.a_hat->hat_htable = ht;
	ht->ht_hat = hat;
	ht->ht_parent = parent;
	ht->ht_vaddr = base;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;
	ht->ht_flags = 0;
	ht->ht_pfn = pfn;
	ht->ht_lock_cnt = 0;
	ht->ht_valid_cnt = 0;
	if (parent != NULL)
		++parent->ht_busy;

	h = HTABLE_HASH(hat, base, level);
	HTABLE_ENTER(h);
	ht->ht_next = hat->hat_ht_hash[h];
	ASSERT(ht->ht_prev == NULL);
	if (hat->hat_ht_hash[h])
		hat->hat_ht_hash[h]->ht_prev = ht;
	hat->hat_ht_hash[h] = ht;
	HTABLE_EXIT(h);

	/*
	 * make sure the page table physical page is not FREE
	 */
	if (page_resv(1, KM_NOSLEEP) == 0)
		panic("page_resv() failed in ptable alloc");

	pp = boot_claim_page(pfn);
	ASSERT(pp != NULL);
	page_downgrade(pp);
#if defined(__xpv) && defined(__amd64)
	/*
	 * Record in the page_t that this is a pagetable, for segkpm setup.
	 */
	if (kpm_vbase)
		pp->p_index = 1;
#endif

	/*
	 * Count valid mappings and recursively attach lower level pagetables.
	 */
	ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
	for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) {
		if (mmu.pae_hat)
			pte = ptep[i];
		else
			pte = ((x86pte32_t *)ptep)[i];
		if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) {
			++ht->ht_valid_cnt;
			if (!PTE_ISPAGE(pte, level)) {
				htable_attach(hat, base, level - 1,
				    ht, PTE2PFN(pte, level));
				ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
			}
		}
		base += LEVEL_SIZE(level);
		if (base == mmu.hole_start)
			base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK;
	}

	/*
	 * As long as all the mappings we had were below kernel base
	 * we can release the htable.
	 */
	if (base < kernelbase)
		htable_release(ht);
}

/*
 * Walk through a given htable looking for the first valid entry.
 * This routine takes both a starting and ending address. The starting address
 * is required to be within the htable provided by the caller, but there is
 * no such restriction on the ending address.
 *
 * If the routine finds a valid entry in the htable (at or beyond the
 * starting address), the PTE (and its address) will be returned.
 * This PTE may correspond to either a page or a pagetable - it is the
 * caller's responsibility to determine which. If no valid entry is
 * found, 0 (an invalid PTE) and the next unexamined address will be
 * returned.
 *
 * The loop has been carefully coded for optimization.
 */
static x86pte_t
htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
{
	uint_t		e;
	x86pte_t	found_pte = (x86pte_t)0;
	caddr_t		pte_ptr;
	caddr_t		end_pte_ptr;
	int		l = ht->ht_level;
	uintptr_t	va = *vap & LEVEL_MASK(l);
	size_t		pgsize = LEVEL_SIZE(l);

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));

	/*
	 * Compute the starting index and ending virtual address
	 */
	e = htable_va2entry(va, ht);

	/*
	 * The following page table scan code knows that the valid
	 * bit of a PTE is in the lowest byte AND that x86 is little endian!!
	 */
	pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0);
	end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht));
	pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e);
	while (!PTE_ISVALID(*pte_ptr)) {
		va += pgsize;
		if (va >= eaddr)
			break;
		pte_ptr += mmu.pte_size;
		ASSERT(pte_ptr <= end_pte_ptr);
		if (pte_ptr == end_pte_ptr)
			break;
	}

	/*
	 * if we found a valid PTE, load the entire PTE
	 */
	if (va < eaddr && pte_ptr != end_pte_ptr)
		found_pte = GET_PTE((x86pte_t *)pte_ptr);
	x86pte_release_pagetable(ht);

#if defined(__amd64)
	/*
	 * deal with VA hole on amd64
	 */
	if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end)
		va = mmu.hole_end + va - mmu.hole_start;
#endif /* __amd64 */

	*vap = va;
	return (found_pte);
}

/*
 * Find the address and htable for the first populated translation at or
 * above the given virtual address. The caller may also specify an upper
 * limit to the address range to search. Uses level information to quickly
 * skip unpopulated sections of virtual address spaces.
 *
 * If not found returns NULL. When found, returns the htable and virt addr
 * and has a hold on the htable.
 */
x86pte_t
htable_walk(
	struct hat *hat,
	htable_t **htp,
	uintptr_t *vaddr,
	uintptr_t eaddr)
{
	uintptr_t va = *vaddr;
	htable_t *ht;
	htable_t *prev = *htp;
	level_t l;
	level_t max_mapped_level;
	x86pte_t pte;

	ASSERT(eaddr > va);

	/*
	 * If this is a user address, then we know we need not look beyond
	 * kernelbase.
	 */
	ASSERT(hat == kas.a_hat || eaddr <= kernelbase ||
	    eaddr == HTABLE_WALK_TO_END);
	if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END)
		eaddr = kernelbase;

	/*
	 * If we're coming in with a previous page table, search it first
	 * without doing an htable_lookup(), this should be frequent.
	 */
	if (prev) {
		ASSERT(prev->ht_busy > 0);
		ASSERT(prev->ht_vaddr <= va);
		l = prev->ht_level;
		if (va <= HTABLE_LAST_PAGE(prev)) {
			pte = htable_scan(prev, &va, eaddr);

			if (PTE_ISPAGE(pte, l)) {
				*vaddr = va;
				*htp = prev;
				return (pte);
			}
		}

		/*
		 * We found nothing in the htable provided by the caller,
		 * so fall through and do the full search
		 */
		htable_release(prev);
	}

	/*
	 * Find the level of the largest pagesize used by this HAT.
	 */
	if (hat->hat_ism_pgcnt > 0) {
		max_mapped_level = mmu.umax_page_level;
	} else {
		max_mapped_level = 0;
		for (l = 1; l <= mmu.max_page_level; ++l)
			if (hat->hat_pages_mapped[l] != 0)
				max_mapped_level = l;
	}

	while (va < eaddr && va >= *vaddr) {
		ASSERT(!IN_VA_HOLE(va));

		/*
		 * Find lowest table with any entry for given address.
		 */
		for (l = 0; l <= TOP_LEVEL(hat); ++l) {
			ht = htable_lookup(hat, va, l);
			if (ht != NULL) {
				pte = htable_scan(ht, &va, eaddr);
				if (PTE_ISPAGE(pte, l)) {
					*vaddr = va;
					*htp = ht;
					return (pte);
				}
				htable_release(ht);
				break;
			}

			/*
			 * No htable at this level for the address. If there
			 * is no larger page size that could cover it, we can
			 * skip right to the start of the next page table.
			 */
			ASSERT(l < TOP_LEVEL(hat));
			if (l >= max_mapped_level) {
				va = NEXT_ENTRY_VA(va, l + 1);
				if (va >= eaddr)
					break;
			}
		}
	}

	*vaddr = 0;
	*htp = NULL;
	return (0);
}

/*
 * Find the htable and page table entry index of the given virtual address
 * with pagesize at or below given level.
 * If not found returns NULL. When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpte(
	struct hat *hat,
	uintptr_t vaddr,
	uint_t *entry,
	x86pte_t *pte,
	level_t level)
{
	htable_t	*ht;
	level_t		l;
	uint_t		e;

	ASSERT(level <= mmu.max_page_level);

	for (l = 0; l <= level; ++l) {
		ht = htable_lookup(hat, vaddr, l);
		if (ht == NULL)
			continue;
		e = htable_va2entry(vaddr, ht);
		if (entry != NULL)
			*entry = e;
		if (pte != NULL)
			*pte = x86pte_get(ht, e);
		return (ht);
	}
	return (NULL);
}
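
/*
 * For example, a hypothetical caller wanting the PTE and entry index for
 * whatever pagesize maps vaddr might do:
 *
 *	uint_t entry;
 *	x86pte_t pte;
 *	htable_t *ht;
 *
 *	ht = htable_getpte(hat, vaddr, &entry, &pte, mmu.max_page_level);
 *	if (ht != NULL) {
 *		... use ht->ht_level, entry and pte ...
 *		htable_release(ht);
 *	}
 */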

/*
 * Find the htable and page table entry index of the given virtual address.
 * There must be a valid page mapped at the given address.
 * If not found returns NULL. When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry)
{
	htable_t	*ht;
	uint_t		e;
	x86pte_t	pte;

	ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level);
	if (ht == NULL)
		return (NULL);

	if (entry)
		*entry = e;

	if (PTE_ISPAGE(pte, ht->ht_level))
		return (ht);
	htable_release(ht);
	return (NULL);
}


void
htable_init()
{
	/*
	 * To save on kernel VA usage, we avoid debug information in 32 bit
	 * kernels.
	 */
#if defined(__amd64)
	int	kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int	kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t	l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
}
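
/*
 * A worked example of the arithmetic above: with 4K pages and 512 entries
 * per pagetable (the usual 64 bit / PAE geometry), a level 0 htable spans
 * 2MB and htable_va2entry() simply extracts VA bits 12..20:
 *
 *	va == ht->ht_vaddr + 0x5000  ==>  (va >> 12) & 511 == 5
 */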

/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t	l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < HTABLE_NUM_PTES(ht));
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}

/*
 * The code uses compare and swap instructions to read/write PTE's to
 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems;
 * on 64 bit systems an aligned load or store of a PTE is naturally atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * is used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */
void
x86pte_cpu_init(cpu_t *cpu)
{
	struct hat_cpu_info *hci;

	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
	cpu->cpu_hat_info = hci;
}

void
x86pte_cpu_fini(cpu_t *cpu)
{
	struct hat_cpu_info *hci = cpu->cpu_hat_info;

	kmem_free(hci, sizeof (*hci));
	cpu->cpu_hat_info = NULL;
}

#ifdef __i386
/*
 * On 32 bit kernels, loading a 64 bit PTE is a little tricky
 */
x86pte_t
get_pte64(x86pte_t *ptr)
{
	volatile uint32_t *p = (uint32_t *)ptr;
	x86pte_t t;

	ASSERT(mmu.pae_hat != 0);
	for (;;) {
		t = p[0];
		t |= (uint64_t)p[1] << 32;
		if ((t & 0xffffffff) == p[0])
			return (t);
	}
}
#endif /* __i386 */
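
/*
 * The loop above guards against a torn 64 bit read. If the low word of
 * the PTE changes between the two 32 bit loads, the final comparison
 * fails and the load is retried; a sketch of the sequence it catches:
 *
 *	t = p[0];			load old low word
 *	    <another CPU rewrites the PTE>
 *	t |= (uint64_t)p[1] << 32;	load new high word
 *	(t & 0xffffffff) != p[0]	low word moved, so retry
 */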

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same
 * pfn as we last referenced from this CPU.
 */
static x86pte_t *
x86pte_access_pagetable(htable_t *ht, uint_t index)
{
	/*
	 * VLP pagetables are contained in the hat_t
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
	return (x86pte_mapin(ht->ht_pfn, index, ht));
}

/*
 * map the given pfn into the page table window.
 */
/*ARGSUSED*/
x86pte_t *
x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
{
	x86pte_t *pteptr;
	x86pte_t pte = 0;
	x86pte_t newpte;
	int x;

	ASSERT(pfn != PFN_INVALID);

	if (!khat_running) {
		caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1);
		return (PT_INDEX_PTR(va, index));
	}

	/*
	 * If kpm is available, use it.
	 */
	if (kpm_vbase)
		return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index));

	/*
	 * Disable preemption and grab the CPU's hci_mutex
	 */
	kpreempt_disable();
	ASSERT(CPU->cpu_hat_info != NULL);
	mutex_enter(&CPU->cpu_hat_info->hci_mutex);
	x = PWIN_TABLE(CPU->cpu_id);
	pteptr = (x86pte_t *)PWIN_PTE_VA(x);
#ifndef __xpv
	if (mmu.pae_hat)
		pte = *pteptr;
	else
		pte = *(x86pte32_t *)pteptr;
#endif

	newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx;

	/*
	 * For hardware we can use a writable mapping.
	 */
#ifdef __xpv
	if (IN_XPV_PANIC())
#endif
		newpte |= PT_WRITABLE;

	if (!PTE_EQUIV(newpte, pte)) {

#ifdef __xpv
		if (!IN_XPV_PANIC()) {
			xen_map(newpte, PWIN_VA(x));
		} else
#endif
		{
			XPV_ALLOW_PAGETABLE_UPDATES();
			if (mmu.pae_hat)
				*pteptr = newpte;
			else
				*(x86pte32_t *)pteptr = newpte;
			XPV_DISALLOW_PAGETABLE_UPDATES();
			mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
		}
	}
	return (PT_INDEX_PTR(PWIN_VA(x), index));
}
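
/*
 * A sketch of a hypothetical direct use of the mapping window (normal
 * callers go through x86pte_access_pagetable()/x86pte_release_pagetable()):
 *
 *	x86pte_t *ptep = x86pte_mapin(pfn, entry, NULL);
 *	pte = GET_PTE(ptep);
 *	x86pte_mapout();
 *
 * Between mapin and mapout, preemption is disabled and the CPU's hci_mutex
 * is held (in the non-kpm case), so the per-CPU window can't be remapped
 * underneath the caller.
 */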

/*
 * Release access to a page table.
 */
static void
x86pte_release_pagetable(htable_t *ht)
{
	/*
	 * nothing to do for VLP htables
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return;

	x86pte_mapout();
}

void
x86pte_mapout(void)
{
	if (kpm_vbase != NULL || !khat_running)
		return;

	/*
	 * Drop the CPU's hci_mutex and restore preemption.
	 */
#ifdef __xpv
	if (!IN_XPV_PANIC()) {
		uintptr_t va;

		/*
		 * We need to always clear the mapping in case a page
		 * that was once a page table page is ballooned out.
		 */
		va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id));
		(void) HYPERVISOR_update_va_mapping(va, 0,
		    UVMF_INVLPG | UVMF_LOCAL);
	}
#endif
	mutex_exit(&CPU->cpu_hat_info->hci_mutex);
	kpreempt_enable();
}

/*
 * Atomic retrieval of a pagetable entry
 */
x86pte_t
x86pte_get(htable_t *ht, uint_t entry)
{
	x86pte_t	pte;
	x86pte_t	*ptep;

	/*
	 * Be careful that loading PAE entries in 32 bit kernel is atomic.
	 */
	ASSERT(entry < mmu.ptes_per_table);
	ptep = x86pte_access_pagetable(ht, entry);
	pte = GET_PTE(ptep);
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Atomic unconditional set of a page table entry, it returns the previous
 * value. For pre-existing mappings if the PFN changes, then we don't care
 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave
 * the MOD/REF bits unchanged.
 *
 * If asked to overwrite a link to a lower page table with a large page
 * mapping, this routine returns the special value of LPAGE_ERROR. This
 * allows the upper HAT layers to retry with a smaller mapping size.
 */
x86pte_t
x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
{
	x86pte_t	old;
	x86pte_t	prev;
	x86pte_t	*ptep;
	level_t		l = ht->ht_level;
	x86pte_t	pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR;
	x86pte_t	n;
	uintptr_t	addr = htable_e2va(ht, entry);
	hat_t		*hat = ht->ht_hat;

	ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	if (ptr == NULL)
		ptep = x86pte_access_pagetable(ht, entry);
	else
		ptep = ptr;

	/*
	 * Install the new PTE. If remapping the same PFN, then
	 * copy existing REF/MOD bits to new mapping.
	 */
	do {
		prev = GET_PTE(ptep);
		n = new;
		if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask))
			n |= prev & (PT_REF | PT_MOD);

		/*
		 * Another thread may have installed this mapping already,
		 * flush the local TLB and be done.
		 */
		if (prev == n) {
			old = new;
#ifdef __xpv
			if (!IN_XPV_PANIC())
				xen_flush_va((caddr_t)addr);
			else
#endif
				mmu_tlbflush_entry((caddr_t)addr);
			goto done;
		}

		/*
		 * Detect if we have a collision of installing a large
		 * page mapping where there already is a lower page table.
		 */
		if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) {
			old = LPAGE_ERROR;
			goto done;
		}

		XPV_ALLOW_PAGETABLE_UPDATES();
		old = CAS_PTE(ptep, prev, n);
		XPV_DISALLOW_PAGETABLE_UPDATES();
	} while (old != prev);

	/*
	 * Do a TLB demap if needed, ie. the old pte was valid.
	 *
	 * Note that a stale TLB writeback to the PTE here either can't happen
	 * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST
	 * mappings, but they were created with REF and MOD already set, so
	 * no stale writeback will happen.
	 *
	 * Segmap is the only place where remaps happen on the same pfn and
	 * for that we want to preserve the stale REF/MOD bits.
	 */
	if (old & PT_REF)
		hat_tlb_inval(hat, addr);

done:
	if (ptr == NULL)
		x86pte_release_pagetable(ht);
	return (old);
}
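
/*
 * To illustrate the REF/MOD handling above: re-installing a mapping for
 * the same pfn at the same entry, e.g. (hypothetically)
 *
 *	old = x86pte_set(ht, entry, MAKEPTE(pfn, ht->ht_level), NULL);
 *
 * carries any PT_REF/PT_MOD bits of the previous PTE over into the new
 * PTE, while installing a different pfn leaves them clear in the new PTE.
 */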

/*
 * Atomic compare and swap of a page table entry. No TLB invalidates are done.
 * This is used for links between pagetables of different levels.
 * Note we always create these links with dirty/access set, so they should
 * never change.
 */
x86pte_t
x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new)
{
	x86pte_t	pte;
	x86pte_t	*ptep;
#ifdef __xpv
	/*
	 * We can't use writable pagetables for upper level tables, so fake it.
	 */
	mmu_update_t t[2];
	int cnt = 1;
	int count;
	maddr_t ma;

	if (!IN_XPV_PANIC()) {
		ASSERT(!(ht->ht_flags & HTABLE_VLP));	/* no VLP yet */
		ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
		t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
		t[0].val = new;

#if defined(__amd64)
		/*
		 * On the 64-bit hypervisor we need to maintain the user mode
		 * top page table too.
		 */
		if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) {
			ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(
			    ht->ht_hat->hat_user_ptable), entry));
			t[1].ptr = ma | MMU_NORMAL_PT_UPDATE;
			t[1].val = new;
			++cnt;
		}
#endif	/* __amd64 */

		if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF))
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(count == cnt);
		return (old);
	}
#endif
	ptep = x86pte_access_pagetable(ht, entry);
	XPV_ALLOW_PAGETABLE_UPDATES();
	pte = CAS_PTE(ptep, old, new);
	XPV_DISALLOW_PAGETABLE_UPDATES();
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Invalidate a page table entry as long as it currently maps something that
 * matches the value determined by expect.
 *
 * Also invalidates any TLB entries and returns the previous value of the PTE.
 */
x86pte_t
x86pte_inval(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t *pte_ptr)
{
	x86pte_t	*ptep;
	x86pte_t	oldpte;
	x86pte_t	found;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level <= mmu.max_page_level);

	if (pte_ptr != NULL)
		ptep = pte_ptr;
	else
		ptep = x86pte_access_pagetable(ht, entry);

#if defined(__xpv)
	/*
	 * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing
	 * with anything else.
	 */
	if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) {
		int count;
		mmu_update_t t[1];
		maddr_t ma;

		oldpte = GET_PTE(ptep);
		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
			goto done;
		ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
		t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
		t[0].val = 0;
		if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF))
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(count == 1);
		goto done;
	}
#endif /* __xpv */

	/*
	 * Note that the loop is needed to handle changes due to h/w updating
	 * of PT_MOD/PT_REF.
	 */
	do {
		oldpte = GET_PTE(ptep);
		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
			goto done;
		XPV_ALLOW_PAGETABLE_UPDATES();
		found = CAS_PTE(ptep, oldpte, 0);
		XPV_DISALLOW_PAGETABLE_UPDATES();
	} while (found != oldpte);
	if (oldpte & (PT_REF | PT_MOD))
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

done:
	if (pte_ptr == NULL)
		x86pte_release_pagetable(ht);
	return (oldpte);
}

/*
 * Change a page table entry if it currently matches the value in expect.
 */
x86pte_t
x86pte_update(
	htable_t *ht,
	uint_t entry,
	x86pte_t expect,
	x86pte_t new)
{
	x86pte_t	*ptep;
	x86pte_t	found;

	ASSERT(new != 0);
	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(ht->ht_level <= mmu.max_page_level);

	ptep = x86pte_access_pagetable(ht, entry);
	XPV_ALLOW_PAGETABLE_UPDATES();
	found = CAS_PTE(ptep, expect, new);
	XPV_DISALLOW_PAGETABLE_UPDATES();
	if (found == expect) {
		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

		/*
		 * When removing write permission *and* clearing the
		 * MOD bit, check if a write happened via a stale
		 * TLB entry before the TLB shootdown finished.
		 *
		 * If it did happen, simply re-enable write permission and
		 * act like the original CAS failed.
		 */
		if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
		    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
		    (GET_PTE(ptep) & PT_MOD) != 0) {
			do {
				found = GET_PTE(ptep);
				XPV_ALLOW_PAGETABLE_UPDATES();
				found =
				    CAS_PTE(ptep, found, found | PT_WRITABLE);
				XPV_DISALLOW_PAGETABLE_UPDATES();
			} while ((found & PT_WRITABLE) == 0);
		}
	}
	x86pte_release_pagetable(ht);
	return (found);
}
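
/*
 * A hypothetical hat-layer use of x86pte_update() is downgrading a dirty
 * writable mapping to clean and read-only:
 *
 *	expect = x86pte_get(ht, entry);
 *	new = expect & ~(PT_WRITABLE | PT_MOD);
 *	if (x86pte_update(ht, entry, expect, new) != expect)
 *		... treat as a failed CAS and retry ...
 */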

#ifndef __xpv
/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It also is never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t	src_va;
	caddr_t dst_va;
	size_t size;
	x86pte_t *pteptr;
	x86pte_t pte;

	ASSERT(khat_running);
	ASSERT(!(dest->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_VLP));
	ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

	/*
	 * Acquire access to the CPU pagetable windows for the dest and source.
	 */
	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
	if (kpm_vbase) {
		src_va = (caddr_t)
		    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
	} else {
		uint_t x = PWIN_SRC(CPU->cpu_id);

		/*
		 * Finish defining the src pagetable mapping
		 */
		src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
		pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
		pteptr = (x86pte_t *)PWIN_PTE_VA(x);
		if (mmu.pae_hat)
			*pteptr = pte;
		else
			*(x86pte32_t *)pteptr = pte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}

	/*
	 * now do the copy
	 */
	size = count << mmu.pte_size_shift;
	bcopy(src_va, dst_va, size);

	x86pte_release_pagetable(dest);
}

#else /* __xpv */

/*
 * The hypervisor only supports writable pagetables at level 0, so we have
 * to install these 1 by 1 the slow way.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t	src_va;
	x86pte_t pte;

	ASSERT(!IN_XPV_PANIC());
	src_va = (caddr_t)x86pte_access_pagetable(src, entry);
	while (count) {
		if (mmu.pae_hat)
			pte = *(x86pte_t *)src_va;
		else
			pte = *(x86pte32_t *)src_va;
		if (pte != 0) {
			set_pteval(pfn_to_pa(dest->ht_pfn), entry,
			    dest->ht_level, pte);
#ifdef __amd64
			if (dest->ht_level == mmu.max_level &&
			    htable_e2va(dest, entry) < HYPERVISOR_VIRT_END)
				set_pteval(
				    pfn_to_pa(dest->ht_hat->hat_user_ptable),
				    entry, dest->ht_level, pte);
#endif
		}
		--count;
		++entry;
		src_va += mmu.pte_size;
	}
	x86pte_release_pagetable(src);
}
#endif /* __xpv */

/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t dst_va;
	size_t size;
#ifdef __xpv
	int x;
	x86pte_t newpte;
#endif

	/*
	 * Map in the page table to be zeroed.
	 */
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_VLP));

	/*
	 * On the hypervisor we don't use x86pte_access_pagetable() since
	 * in this case the page is not pinned yet.
	 */
#ifdef __xpv
	if (kpm_vbase == NULL) {
		kpreempt_disable();
		ASSERT(CPU->cpu_hat_info != NULL);
		mutex_enter(&CPU->cpu_hat_info->hci_mutex);
		x = PWIN_TABLE(CPU->cpu_id);
		newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE;
		xen_map(newpte, PWIN_VA(x));
		dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
	} else
#endif
		dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

	size = count << mmu.pte_size_shift;
	ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
	if ((x86_feature & X86_SSE2) == 0)
		bzero(dst_va, size);
	else
#endif
		block_zero_no_xmm(dst_va, size);

#ifdef __xpv
	if (kpm_vbase == NULL) {
		xen_map(0, PWIN_VA(x));
		mutex_exit(&CPU->cpu_hat_info->hci_mutex);
		kpreempt_enable();
	} else
#endif
		x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
	hat_t *hat;
	uint_t h;
	htable_t *ht;

	/*
	 * Dump all page tables
	 */
	for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
		for (h = 0; h < hat->hat_num_hash; ++h) {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				if ((ht->ht_flags & HTABLE_VLP) == 0)
					dump_page(ht->ht_pfn);
			}
		}
	}
}