/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>
#include <sys/panic.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#include <sys/xpv_panic.h>
#endif

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

kmem_cache_t *htable_cache;
/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur. The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable is so that we can tune this via /etc/system
 * Any value works, but a power of two <= mmu.ptes_per_table is best.
 */
uint_t htable_steal_passes = 8;
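/*
 * For example, since these are ordinary kernel variables, a test setup
 * might force the stealing path with /etc/system settings along the
 * lines of:
 *
 *	set htable_steal_passes = 4
 *	set htable_reserve_amount = 50
 *
 * (The values are only illustrative; any power of two <=
 * mmu.ptes_per_table works for htable_steal_passes, per the comment
 * above, and a lower htable_reserve_amount makes stealing more likely.)
 */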
/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

#ifdef __xpv
/*
 * Deal with hypervisor complications.
 */
void
xen_flush_va(caddr_t va)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
	} else {
		t.cmd = MMUEXT_INVLPG_LOCAL;
		t.arg1.linear_addr = (uintptr_t)va;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_va(caddr_t va, cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
		return;
	}

	t.cmd = MMUEXT_INVLPG_MULTI;
	t.arg1.linear_addr = (uintptr_t)va;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_flush_tlb()
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		xpv_panic_reload_cr3();
	} else {
		t.cmd = MMUEXT_TLB_FLUSH_LOCAL;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_tlb(cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	ASSERT(!IN_XPV_PANIC());
	t.cmd = MMUEXT_TLB_FLUSH_MULTI;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}
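/*
 * Each flush routine above is a variation on one single-op hypercall
 * pattern, roughly:
 *
 *	t.cmd = MMUEXT_...;		(local/multi, INVLPG/full flush)
 *	t.arg1.linear_addr = ...;	(for the INVLPG variants only)
 *	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
 *		panic("HYPERVISOR_mmuext_op() failed");
 *	ASSERT(count == 1);
 *
 * Only the command and arguments differ; any hypercall error is fatal.
 */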
/*
 * Install/Adjust a kpm mapping under the hypervisor.
 * Value of "how" should be:
 *	PT_WRITABLE | PT_VALID - regular kpm mapping
 *	PT_VALID - make mapping read-only
 *	0	- remove mapping
 *
 * returns 0 on success. non-zero for failure.
 */
int
xen_kpm_page(pfn_t pfn, uint_t how)
{
	paddr_t pa = mmu_ptob((paddr_t)pfn);
	x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD;

	if (kpm_vbase == NULL)
		return (0);

	if (how)
		pte |= pa_to_ma(pa) | how;
	else
		pte = 0;
	return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa,
	    pte, UVMF_INVLPG | UVMF_ALL));
}

void
xen_pin(pfn_t pfn, level_t lvl)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_PIN_L1_TABLE + lvl;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_unpin(pfn_t pfn)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_UNPIN_TABLE;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

static void
xen_map(uint64_t pte, caddr_t va)
{
	if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte,
	    UVMF_INVLPG | UVMF_LOCAL))
		panic("HYPERVISOR_update_va_mapping() failed");
}
#endif /* __xpv */
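/*
 * A sketch of the typical calling pattern for xen_kpm_page() (see
 * htable_alloc() and ptable_free() below for the real uses): a page
 * handed to the hypervisor as a pagetable is first made read-only in
 * the kpm range, and made writable again before the page is freed:
 *
 *	(void) xen_kpm_page(pfn, PT_VALID);			   r/o
 *	... page is in use as a pagetable ...
 *	(void) xen_kpm_page(pfn, PT_VALID | PT_WRITABLE);	   r/w
 */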
/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(void)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;
	atomic_add_32(&active_ptables, 1);

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(KM_NOSLEEP);
	if (pp == NULL)
		return (PFN_INVALID);
	ASSERT(PAGE_SHARED(pp));
	pfn = pp->p_pagenum;
	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_add_32(&active_ptables, -1);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(pfn == pp->p_pagenum);
	ASSERT(!IN_XPV_PANIC());
#ifdef __xpv
	if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0)
		panic("failure making kpm r/w pfn=0x%lx", pfn);
#endif
	page_free_physical(pp);
}

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}
/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t	*ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}

/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount &&
	    !USE_HAT_RESERVES()) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}
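/*
 * Taken together, the reserve routines above behave as a simple
 * mutex-guarded LIFO free list. A rough sketch of the lifecycle:
 *
 *	boot:	htable_initial_reserve(n)	pre-fills the pool
 *	alloc:	htable_get_reserve()		pops one (or returns NULL)
 *	free:	htable_put_reserve()		pushes one back
 *	later:	htable_adjust_reserve()		trims the pool back down
 *						to htable_reserve_amount
 */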
/*
 * This routine steals htables from user processes for htable_alloc() or
 * for htable_reap().
 */
static htable_t *
htable_steal(uint_t cnt)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	htable_t	*higher;
	uint_t		h;
	uint_t		h_start;
	static uint_t	h_seed = 0;
	uint_t		e;
	uintptr_t	va;
	x86pte_t	pte;
	uint_t		stolen = 0;
	uint_t		pass;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_add_32(&htable_dont_cache, 1);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;
		hat = kas.a_hat;
		for (;;) {

			/*
			 * Clear the victim flag and move to next hat
			 */
			mutex_enter(&hat_list_lock);
			if (hat != kas.a_hat) {
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}
			hat = hat->hat_next;

			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL) {
				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat) {

					/* unlink victim hat */
					if (hat->hat_prev)
						hat->hat_prev->hat_next =
						    hat->hat_next;
					else
						kas.a_hat->hat_next =
						    hat->hat_next;
					if (hat->hat_next)
						hat->hat_next->hat_prev =
						    hat->hat_prev;
					else
						kas.a_hat->hat_prev =
						    hat->hat_prev;


					/* relink at end of hat list */
					hat->hat_next = NULL;
					hat->hat_prev = kas.a_hat->hat_prev;
					if (hat->hat_prev)
						hat->hat_prev->hat_next = hat;
					else
						kas.a_hat->hat_next = hat;
					kas.a_hat->hat_prev = hat;

				}

				mutex_exit(&hat_list_lock);
				break;
			}

			/*
			 * Mark the HAT as a stealing victim.
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal on first pass.
			 */
			if (pass == 0 || stolen == cnt)
				continue;

			/*
			 * Search the active htables for one to steal.
			 * Start at a different hash bucket every time to
			 * help spread the pain of stealing.
			 */
			h = h_start = h_seed++ % hat->hat_num_hash;
			do {
				higher = NULL;
				HTABLE_ENTER(h);
				for (ht = hat->hat_ht_hash[h]; ht;
				    ht = ht->ht_next) {

					/*
					 * Can we rule out reaping?
					 */
					if (ht->ht_busy != 0 ||
					    (ht->ht_flags &
					    HTABLE_SHARED_PFN) ||
					    ht->ht_level > 0 ||
					    ht->ht_valid_cnt > threshold ||
					    ht->ht_lock_cnt != 0)
						continue;

					/*
					 * Increment busy so the htable can't
					 * disappear. We drop the htable mutex
					 * to avoid deadlocks with
					 * hat_pageunload() and the hment mutex
					 * while we call hat_pte_unmap()
					 */
					++ht->ht_busy;
					HTABLE_EXIT(h);

					/*
					 * Try stealing.
					 * - unload and invalidate all PTEs
					 */
					for (e = 0, va = ht->ht_vaddr;
					    e < HTABLE_NUM_PTES(ht) &&
					    ht->ht_valid_cnt > 0 &&
					    ht->ht_busy == 1 &&
					    ht->ht_lock_cnt == 0;
					    ++e, va += MMU_PAGESIZE) {
						pte = x86pte_get(ht, e);
						if (!PTE_ISVALID(pte))
							continue;
						hat_pte_unmap(ht, e,
						    HAT_UNLOAD, pte, NULL);
					}

					/*
					 * Reacquire htable lock. If we didn't
					 * remove all mappings in the table,
					 * or another thread added a new mapping
					 * behind us, give up on this table.
					 */
					HTABLE_ENTER(h);
					if (ht->ht_busy != 1 ||
					    ht->ht_valid_cnt != 0 ||
					    ht->ht_lock_cnt != 0) {
						--ht->ht_busy;
						continue;
					}

					/*
					 * Steal it and unlink the page table.
					 */
					higher = ht->ht_parent;
					unlink_ptp(higher, ht, ht->ht_vaddr);

					/*
					 * remove from the hash list
					 */
					if (ht->ht_next)
						ht->ht_next->ht_prev =
						    ht->ht_prev;

					if (ht->ht_prev) {
						ht->ht_prev->ht_next =
						    ht->ht_next;
					} else {
						ASSERT(hat->hat_ht_hash[h] ==
						    ht);
						hat->hat_ht_hash[h] =
						    ht->ht_next;
					}

					/*
					 * Break to outer loop to release the
					 * higher (ht_parent) pagetable. This
					 * spreads out the pain caused by
					 * pagefaults.
					 */
					ht->ht_next = list;
					list = ht;
					++stolen;
					break;
				}
				HTABLE_EXIT(h);
				if (higher != NULL)
					htable_release(higher);
				if (++h == hat->hat_num_hash)
					h = 0;
			} while (stolen < cnt && h != h_start);
		}
	}
	atomic_add_32(&htable_dont_cache, -1);
	return (list);
}
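/*
 * To make the pass/threshold behavior above concrete: with 512 PTEs per
 * table (the common value for mmu.ptes_per_table) and the default
 * htable_steal_passes of 8, threshold = pass * 512 / 8. Pass 0 only takes
 * cached htables, pass 1 steals unlocked tables with at most 64 valid
 * entries, pass 2 at most 128, and so on, until pass 8 can steal any
 * unlocked leaf table. (The 512 is illustrative; the code relies only
 * on the variables above.)
 */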
/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);

	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	XPV_DISALLOW_MIGRATE();
	list = htable_steal(reap_cnt);
	XPV_ALLOW_MIGRATE();
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
		 */
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (USE_HAT_RESERVES() ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc();
			if (ht->ht_pfn == PFN_INVALID) {
				if (USE_HAT_RESERVES())
					htable_put_reserve(ht);
				else
					kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL) {
			if (is_bare) {
				ptable_free(ht->ht_pfn);
				ht->ht_pfn = PFN_INVALID;
#if defined(__xpv) && defined(__amd64)
			/*
			 * make stolen page table writable again in kpm
			 */
			} else if (kpm_vbase && xen_kpm_page(ht->ht_pfn,
			    PT_VALID | PT_WRITABLE) < 0) {
				panic("failure making kpm r/w pfn=0x%lx",
				    ht->ht_pfn);
#endif
			}
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

#if defined(__amd64) && defined(__xpv)
	/*
	 * Under the 64-bit hypervisor, we have 2 top level page tables.
	 * If this allocation fails, we'll resort to stealing.
	 * We use the stolen page indirectly, by freeing the
	 * stolen htable first.
	 */
	if (level == mmu.max_level) {
		for (;;) {
			htable_t *stolen;

			hat->hat_user_ptable = ptable_alloc();
			if (hat->hat_user_ptable != PFN_INVALID)
				break;
			stolen = htable_steal(1);
			if (stolen == NULL)
				panic("2nd steal ptable failed\n");
			htable_free(stolen);
		}
		block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable),
		    MMU_PAGESIZE);
	}
#endif

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}
	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

#if defined(__amd64) && defined(__xpv)
	if (!is_bare && kpm_vbase) {
		(void) xen_kpm_page(ht->ht_pfn, PT_VALID);
		if (level == mmu.max_level)
			(void) xen_kpm_page(hat->hat_user_ptable, PT_VALID);
	}
#endif

	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t	*hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot time reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}
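	/*
	 * (The remaining cases below, summarized: a shared-PFN htable never
	 * frees its page, a VLP htable has no page to free, anything else
	 * returns its page via ptable_free(); the htable_t itself then
	 * goes to the reserve pool or back to kmem.)
	 */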
	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
#if defined(__amd64) && defined(__xpv)
		if (ht->ht_level == mmu.max_level) {
			ptable_free(hat->hat_user_ptable);
			hat->hat_user_ptable = PFN_INVALID;
		}
#endif
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free it or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t	*ht;
	int		h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t	found;
	hat_t		*hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
#ifdef __xpv
	/*
	 * This is weird, but Xen apparently automatically unlinks empty
	 * pagetables from the upper page table. So allow PTP to be 0 already.
	 */
	if (found != expect && found != 0)
#else
	if (found != expect)
#endif
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When a top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors.
	 *
	 * If we don't need to do that, then we still have to INVLPG against
	 * an address covered by the inner page table, as the latest processors
	 * have TLB-like caches for non-leaf page table entries.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
		    DEMAP_ALL_ADDR : old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}
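/*
 * Both unlink_ptp() above and link_ptp() below rely on the same atomic
 * contract: x86pte_cas(higher, entry, expect, new) swaps the PTP entry
 * only if it still holds "expect" and returns what was actually there.
 * A sketch of the two directions:
 *
 *	link:	x86pte_cas(higher, entry, 0, MAKEPTP(pfn, level))
 *	unlink:	x86pte_cas(higher, entry, MAKEPTP(pfn, level), 0)
 *
 * Any unexpected return value indicates a corrupted PTP and is fatal.
 */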
/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 *
 * On the metal, during process exit, we don't bother unlinking the tables from
 * upper level pagetables. They are instead handled in bulk by hat_free_end().
 * We can't do this on the hypervisor as we need the page table to be
 * implicitly unpinned before it goes to the free page lists. This can't
 * happen unless we fully unlink it from the page table hierarchy.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;
			ASSERT(ht->ht_lock_cnt == 0);

#if !defined(__xpv)
			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}
#endif /* __xpv */

			/*
			 * Remember if we destroy an htable that shares its PFN
			 * from elsewhere.
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
11807c478bd9Sstevel@tonic-gate */ 11817c478bd9Sstevel@tonic-gate unlink_ptp(higher, ht, va); 11827c478bd9Sstevel@tonic-gate 11837c478bd9Sstevel@tonic-gate /* 11847c478bd9Sstevel@tonic-gate * remove this htable from its hash list 11857c478bd9Sstevel@tonic-gate */ 11867c478bd9Sstevel@tonic-gate if (ht->ht_next) 11877c478bd9Sstevel@tonic-gate ht->ht_next->ht_prev = ht->ht_prev; 11887c478bd9Sstevel@tonic-gate 11897c478bd9Sstevel@tonic-gate if (ht->ht_prev) { 11907c478bd9Sstevel@tonic-gate ht->ht_prev->ht_next = ht->ht_next; 11917c478bd9Sstevel@tonic-gate } else { 11927c478bd9Sstevel@tonic-gate ASSERT(hat->hat_ht_hash[hashval] == ht); 11937c478bd9Sstevel@tonic-gate hat->hat_ht_hash[hashval] = ht->ht_next; 11947c478bd9Sstevel@tonic-gate } 11957c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 11967c478bd9Sstevel@tonic-gate htable_free(ht); 11977c478bd9Sstevel@tonic-gate ht = higher; 11987c478bd9Sstevel@tonic-gate } 11997c478bd9Sstevel@tonic-gate 12007c478bd9Sstevel@tonic-gate ASSERT(ht->ht_busy >= 1); 12017c478bd9Sstevel@tonic-gate --ht->ht_busy; 12027c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 12037c478bd9Sstevel@tonic-gate 12047c478bd9Sstevel@tonic-gate /* 12057c478bd9Sstevel@tonic-gate * If we released a shared htable, do a release on the htable 12067c478bd9Sstevel@tonic-gate * from which it shared 12077c478bd9Sstevel@tonic-gate */ 12087c478bd9Sstevel@tonic-gate ht = shared; 12097c478bd9Sstevel@tonic-gate } 12107c478bd9Sstevel@tonic-gate } 12117c478bd9Sstevel@tonic-gate 12127c478bd9Sstevel@tonic-gate /* 12137c478bd9Sstevel@tonic-gate * Find the htable for the pagetable at the given level for the given address. 12147c478bd9Sstevel@tonic-gate * If found acquires a hold that eventually needs to be htable_release()d 12157c478bd9Sstevel@tonic-gate */ 12167c478bd9Sstevel@tonic-gate htable_t * 12177c478bd9Sstevel@tonic-gate htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level) 12187c478bd9Sstevel@tonic-gate { 12197c478bd9Sstevel@tonic-gate uintptr_t base; 12207c478bd9Sstevel@tonic-gate uint_t hashval; 12217c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 12227c478bd9Sstevel@tonic-gate 12237c478bd9Sstevel@tonic-gate ASSERT(level >= 0); 12247c478bd9Sstevel@tonic-gate ASSERT(level <= TOP_LEVEL(hat)); 12257c478bd9Sstevel@tonic-gate 12267173d045Sjosephb if (level == TOP_LEVEL(hat)) { 12277173d045Sjosephb #if defined(__amd64) 12287173d045Sjosephb /* 12297173d045Sjosephb * 32 bit address spaces on 64 bit kernels need to check 12307173d045Sjosephb * for overflow of the 32 bit address space 12317173d045Sjosephb */ 12327173d045Sjosephb if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32)) 12337173d045Sjosephb return (NULL); 12347173d045Sjosephb #endif 12357c478bd9Sstevel@tonic-gate base = 0; 12367173d045Sjosephb } else { 12377c478bd9Sstevel@tonic-gate base = vaddr & LEVEL_MASK(level + 1); 12387173d045Sjosephb } 12397c478bd9Sstevel@tonic-gate 12407c478bd9Sstevel@tonic-gate hashval = HTABLE_HASH(hat, base, level); 12417c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 12427c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) { 12437c478bd9Sstevel@tonic-gate if (ht->ht_hat == hat && 12447c478bd9Sstevel@tonic-gate ht->ht_vaddr == base && 12457c478bd9Sstevel@tonic-gate ht->ht_level == level) 12467c478bd9Sstevel@tonic-gate break; 12477c478bd9Sstevel@tonic-gate } 12487c478bd9Sstevel@tonic-gate if (ht) 12497c478bd9Sstevel@tonic-gate ++ht->ht_busy; 12507c478bd9Sstevel@tonic-gate 12517c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 12527c478bd9Sstevel@tonic-gate return (ht); 
12537c478bd9Sstevel@tonic-gate } 12547c478bd9Sstevel@tonic-gate 12557c478bd9Sstevel@tonic-gate /* 12567c478bd9Sstevel@tonic-gate * Acquires a hold on a known htable (from a locked hment entry). 12577c478bd9Sstevel@tonic-gate */ 12587c478bd9Sstevel@tonic-gate void 12597c478bd9Sstevel@tonic-gate htable_acquire(htable_t *ht) 12607c478bd9Sstevel@tonic-gate { 12617c478bd9Sstevel@tonic-gate hat_t *hat = ht->ht_hat; 12627c478bd9Sstevel@tonic-gate level_t level = ht->ht_level; 12637c478bd9Sstevel@tonic-gate uintptr_t base = ht->ht_vaddr; 12647c478bd9Sstevel@tonic-gate uint_t hashval = HTABLE_HASH(hat, base, level); 12657c478bd9Sstevel@tonic-gate 12667c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 12677c478bd9Sstevel@tonic-gate #ifdef DEBUG 12687c478bd9Sstevel@tonic-gate /* 12697c478bd9Sstevel@tonic-gate * make sure the htable is there 12707c478bd9Sstevel@tonic-gate */ 12717c478bd9Sstevel@tonic-gate { 12727c478bd9Sstevel@tonic-gate htable_t *h; 12737c478bd9Sstevel@tonic-gate 12747c478bd9Sstevel@tonic-gate for (h = hat->hat_ht_hash[hashval]; 12757c478bd9Sstevel@tonic-gate h && h != ht; 12767c478bd9Sstevel@tonic-gate h = h->ht_next) 12777c478bd9Sstevel@tonic-gate ; 12787c478bd9Sstevel@tonic-gate ASSERT(h == ht); 12797c478bd9Sstevel@tonic-gate } 12807c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 12817c478bd9Sstevel@tonic-gate ++ht->ht_busy; 12827c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 12837c478bd9Sstevel@tonic-gate } 12847c478bd9Sstevel@tonic-gate 12857c478bd9Sstevel@tonic-gate /* 12867c478bd9Sstevel@tonic-gate * Find the htable for the pagetable at the given level for the given address. 12877c478bd9Sstevel@tonic-gate * If found acquires a hold that eventually needs to be htable_release()d 12887c478bd9Sstevel@tonic-gate * If not found the table is created. 12897c478bd9Sstevel@tonic-gate * 12907c478bd9Sstevel@tonic-gate * Since we can't hold a hash table mutex during allocation, we have to 12917c478bd9Sstevel@tonic-gate * drop it and redo the search on a create. Then we may have to free the newly 12927c478bd9Sstevel@tonic-gate * allocated htable if another thread raced in and created it ahead of us. 12937c478bd9Sstevel@tonic-gate */ 12947c478bd9Sstevel@tonic-gate htable_t * 12957c478bd9Sstevel@tonic-gate htable_create( 12967c478bd9Sstevel@tonic-gate hat_t *hat, 12977c478bd9Sstevel@tonic-gate uintptr_t vaddr, 12987c478bd9Sstevel@tonic-gate level_t level, 12997c478bd9Sstevel@tonic-gate htable_t *shared) 13007c478bd9Sstevel@tonic-gate { 13017c478bd9Sstevel@tonic-gate uint_t h; 13027c478bd9Sstevel@tonic-gate level_t l; 13037c478bd9Sstevel@tonic-gate uintptr_t base; 13047c478bd9Sstevel@tonic-gate htable_t *ht; 13057c478bd9Sstevel@tonic-gate htable_t *higher = NULL; 13067c478bd9Sstevel@tonic-gate htable_t *new = NULL; 13077c478bd9Sstevel@tonic-gate 13087c478bd9Sstevel@tonic-gate if (level < 0 || level > TOP_LEVEL(hat)) 13097c478bd9Sstevel@tonic-gate panic("htable_create(): level %d out of range\n", level); 13107c478bd9Sstevel@tonic-gate 13117c478bd9Sstevel@tonic-gate /* 13127c478bd9Sstevel@tonic-gate * Create the page tables in top down order. 
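 *
 * For example, creating a level 0 pagetable in a 4 level hat first
 * looks at the top table, then finds or creates the level 2 and
 * level 1 tables covering vaddr, linking each new table into its
 * parent, and only then creates the level 0 table itself.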
13137c478bd9Sstevel@tonic-gate */ 13147c478bd9Sstevel@tonic-gate for (l = TOP_LEVEL(hat); l >= level; --l) { 13157c478bd9Sstevel@tonic-gate new = NULL; 13167c478bd9Sstevel@tonic-gate if (l == TOP_LEVEL(hat)) 13177c478bd9Sstevel@tonic-gate base = 0; 13187c478bd9Sstevel@tonic-gate else 13197c478bd9Sstevel@tonic-gate base = vaddr & LEVEL_MASK(l + 1); 13207c478bd9Sstevel@tonic-gate 13217c478bd9Sstevel@tonic-gate h = HTABLE_HASH(hat, base, l); 13227c478bd9Sstevel@tonic-gate try_again: 13237c478bd9Sstevel@tonic-gate /* 13247c478bd9Sstevel@tonic-gate * look up the htable at this level 13257c478bd9Sstevel@tonic-gate */ 13267c478bd9Sstevel@tonic-gate HTABLE_ENTER(h); 13277c478bd9Sstevel@tonic-gate if (l == TOP_LEVEL(hat)) { 13287c478bd9Sstevel@tonic-gate ht = hat->hat_htable; 13297c478bd9Sstevel@tonic-gate } else { 13307c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 13317c478bd9Sstevel@tonic-gate ASSERT(ht->ht_hat == hat); 13327c478bd9Sstevel@tonic-gate if (ht->ht_vaddr == base && 13337c478bd9Sstevel@tonic-gate ht->ht_level == l) 13347c478bd9Sstevel@tonic-gate break; 13357c478bd9Sstevel@tonic-gate } 13367c478bd9Sstevel@tonic-gate } 13377c478bd9Sstevel@tonic-gate 13387c478bd9Sstevel@tonic-gate /* 13397c478bd9Sstevel@tonic-gate * if we found the htable, increment its busy cnt 13407c478bd9Sstevel@tonic-gate * and if we had allocated a new htable, free it. 13417c478bd9Sstevel@tonic-gate */ 13427c478bd9Sstevel@tonic-gate if (ht != NULL) { 13437c478bd9Sstevel@tonic-gate /* 13447c478bd9Sstevel@tonic-gate * If we find a pre-existing shared table, it must 13457c478bd9Sstevel@tonic-gate * share from the same place. 13467c478bd9Sstevel@tonic-gate */ 13477c478bd9Sstevel@tonic-gate if (l == level && shared && ht->ht_shares && 13487c478bd9Sstevel@tonic-gate ht->ht_shares != shared) { 13497c478bd9Sstevel@tonic-gate panic("htable shared from wrong place " 1350903a11ebSrh87107 "found htable=%p shared=%p", 1351903a11ebSrh87107 (void *)ht, (void *)shared); 13527c478bd9Sstevel@tonic-gate } 13537c478bd9Sstevel@tonic-gate ++ht->ht_busy; 13547c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 13557c478bd9Sstevel@tonic-gate if (new) 13567c478bd9Sstevel@tonic-gate htable_free(new); 13577c478bd9Sstevel@tonic-gate if (higher != NULL) 13587c478bd9Sstevel@tonic-gate htable_release(higher); 13597c478bd9Sstevel@tonic-gate higher = ht; 13607c478bd9Sstevel@tonic-gate 13617c478bd9Sstevel@tonic-gate /* 13627c478bd9Sstevel@tonic-gate * if we didn't find it on the first search 13637c478bd9Sstevel@tonic-gate * allocate a new one and search again 13647c478bd9Sstevel@tonic-gate */ 13657c478bd9Sstevel@tonic-gate } else if (new == NULL) { 13667c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 13677c478bd9Sstevel@tonic-gate new = htable_alloc(hat, base, l, 13687c478bd9Sstevel@tonic-gate l == level ? shared : NULL); 13697c478bd9Sstevel@tonic-gate goto try_again; 13707c478bd9Sstevel@tonic-gate 13717c478bd9Sstevel@tonic-gate /* 13727c478bd9Sstevel@tonic-gate * 2nd search and still not there, use "new" table 13737c478bd9Sstevel@tonic-gate * Link new table into higher, when not at top level. 
13747c478bd9Sstevel@tonic-gate */ 13757c478bd9Sstevel@tonic-gate } else { 13767c478bd9Sstevel@tonic-gate ht = new; 13777c478bd9Sstevel@tonic-gate if (higher != NULL) { 13787c478bd9Sstevel@tonic-gate link_ptp(higher, ht, base); 13797c478bd9Sstevel@tonic-gate ht->ht_parent = higher; 13807c478bd9Sstevel@tonic-gate } 13817c478bd9Sstevel@tonic-gate ht->ht_next = hat->hat_ht_hash[h]; 13827c478bd9Sstevel@tonic-gate ASSERT(ht->ht_prev == NULL); 13837c478bd9Sstevel@tonic-gate if (hat->hat_ht_hash[h]) 13847c478bd9Sstevel@tonic-gate hat->hat_ht_hash[h]->ht_prev = ht; 13857c478bd9Sstevel@tonic-gate hat->hat_ht_hash[h] = ht; 13867c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 13877c478bd9Sstevel@tonic-gate 13887c478bd9Sstevel@tonic-gate /* 13897c478bd9Sstevel@tonic-gate * Note we don't do htable_release(higher). 13907c478bd9Sstevel@tonic-gate * That happens recursively when "new" is removed by 13917c478bd9Sstevel@tonic-gate * htable_release() or htable_steal(). 13927c478bd9Sstevel@tonic-gate */ 13937c478bd9Sstevel@tonic-gate higher = ht; 13947c478bd9Sstevel@tonic-gate 13957c478bd9Sstevel@tonic-gate /* 13967c478bd9Sstevel@tonic-gate * If we just created a new shared page table we 13977c478bd9Sstevel@tonic-gate * increment the shared htable's busy count, so that 13987c478bd9Sstevel@tonic-gate * it can't be the victim of a steal even if it's empty. 13997c478bd9Sstevel@tonic-gate */ 14007c478bd9Sstevel@tonic-gate if (l == level && shared) { 14017c478bd9Sstevel@tonic-gate (void) htable_lookup(shared->ht_hat, 14027c478bd9Sstevel@tonic-gate shared->ht_vaddr, shared->ht_level); 14037c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_htable_shared); 14047c478bd9Sstevel@tonic-gate } 14057c478bd9Sstevel@tonic-gate } 14067c478bd9Sstevel@tonic-gate } 14077c478bd9Sstevel@tonic-gate 14087c478bd9Sstevel@tonic-gate return (ht); 14097c478bd9Sstevel@tonic-gate } 14107c478bd9Sstevel@tonic-gate 14117c478bd9Sstevel@tonic-gate /* 1412843e1988Sjohnlev * Inherit initial pagetables from the boot program. On the 64-bit 1413843e1988Sjohnlev * hypervisor we also temporarily mark the p_index field of page table 1414843e1988Sjohnlev * pages, so we know not to try making them writable in seg_kpm. 
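 *
 * For illustration, startup code would invoke this once on the top
 * level pagetable, along the lines of (top_pfn is a made up name for
 * the pfn of the top table):
 *
 *	htable_attach(kas.a_hat, 0, mmu.max_level, NULL, top_pfn);
 *
 * and the routine then recursively attaches every lower level
 * pagetable it finds.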
1415ae115bc7Smrj */ 1416ae115bc7Smrj void 1417ae115bc7Smrj htable_attach( 1418ae115bc7Smrj hat_t *hat, 1419ae115bc7Smrj uintptr_t base, 1420ae115bc7Smrj level_t level, 1421ae115bc7Smrj htable_t *parent, 1422ae115bc7Smrj pfn_t pfn) 1423ae115bc7Smrj { 1424ae115bc7Smrj htable_t *ht; 1425ae115bc7Smrj uint_t h; 1426ae115bc7Smrj uint_t i; 1427ae115bc7Smrj x86pte_t pte; 1428ae115bc7Smrj x86pte_t *ptep; 1429ae115bc7Smrj page_t *pp; 1430ae115bc7Smrj extern page_t *boot_claim_page(pfn_t); 1431ae115bc7Smrj 1432ae115bc7Smrj ht = htable_get_reserve(); 1433ae115bc7Smrj if (level == mmu.max_level) 1434ae115bc7Smrj kas.a_hat->hat_htable = ht; 1435ae115bc7Smrj ht->ht_hat = hat; 1436ae115bc7Smrj ht->ht_parent = parent; 1437ae115bc7Smrj ht->ht_vaddr = base; 1438ae115bc7Smrj ht->ht_level = level; 1439ae115bc7Smrj ht->ht_busy = 1; 1440ae115bc7Smrj ht->ht_next = NULL; 1441ae115bc7Smrj ht->ht_prev = NULL; 1442ae115bc7Smrj ht->ht_flags = 0; 1443ae115bc7Smrj ht->ht_pfn = pfn; 1444ae115bc7Smrj ht->ht_lock_cnt = 0; 1445ae115bc7Smrj ht->ht_valid_cnt = 0; 1446ae115bc7Smrj if (parent != NULL) 1447ae115bc7Smrj ++parent->ht_busy; 1448ae115bc7Smrj 1449ae115bc7Smrj h = HTABLE_HASH(hat, base, level); 1450ae115bc7Smrj HTABLE_ENTER(h); 1451ae115bc7Smrj ht->ht_next = hat->hat_ht_hash[h]; 1452ae115bc7Smrj ASSERT(ht->ht_prev == NULL); 1453ae115bc7Smrj if (hat->hat_ht_hash[h]) 1454ae115bc7Smrj hat->hat_ht_hash[h]->ht_prev = ht; 1455ae115bc7Smrj hat->hat_ht_hash[h] = ht; 1456ae115bc7Smrj HTABLE_EXIT(h); 1457ae115bc7Smrj 1458ae115bc7Smrj /* 1459ae115bc7Smrj * make sure the page table physical page is not FREE 1460ae115bc7Smrj */ 1461ae115bc7Smrj if (page_resv(1, KM_NOSLEEP) == 0) 1462ae115bc7Smrj panic("page_resv() failed in ptable alloc"); 1463ae115bc7Smrj 1464ae115bc7Smrj pp = boot_claim_page(pfn); 1465ae115bc7Smrj ASSERT(pp != NULL); 1466ae115bc7Smrj page_downgrade(pp); 1467843e1988Sjohnlev #if defined(__xpv) && defined(__amd64) 1468ae115bc7Smrj /* 1469ae115bc7Smrj * Record in the page_t that this is a pagetable, for segkpm setup. 1470ae115bc7Smrj */ 1471ae115bc7Smrj if (kpm_vbase) 1472ae115bc7Smrj pp->p_index = 1; 1473843e1988Sjohnlev #endif 1474ae115bc7Smrj 1475ae115bc7Smrj /* 1476ae115bc7Smrj * Count valid mappings and recursively attach lower level pagetables. 1477ae115bc7Smrj */ 1478ae115bc7Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1479ae115bc7Smrj for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) { 1480ae115bc7Smrj if (mmu.pae_hat) 1481ae115bc7Smrj pte = ptep[i]; 1482ae115bc7Smrj else 1483ae115bc7Smrj pte = ((x86pte32_t *)ptep)[i]; 1484ae115bc7Smrj if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) { 1485ae115bc7Smrj ++ht->ht_valid_cnt; 1486ae115bc7Smrj if (!PTE_ISPAGE(pte, level)) { 1487ae115bc7Smrj htable_attach(hat, base, level - 1, 1488ae115bc7Smrj ht, PTE2PFN(pte, level)); 1489ae115bc7Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1490ae115bc7Smrj } 1491ae115bc7Smrj } 1492ae115bc7Smrj base += LEVEL_SIZE(level); 1493ae115bc7Smrj if (base == mmu.hole_start) 1494ae115bc7Smrj base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK; 1495ae115bc7Smrj } 1496ae115bc7Smrj 1497ae115bc7Smrj /* 1498ae115bc7Smrj * As long as all the mappings we had were below kernel base 1499ae115bc7Smrj * we can release the htable. 1500ae115bc7Smrj */ 1501ae115bc7Smrj if (base < kernelbase) 1502ae115bc7Smrj htable_release(ht); 1503ae115bc7Smrj } 1504ae115bc7Smrj 1505ae115bc7Smrj /* 15067c478bd9Sstevel@tonic-gate * Walk through a given htable looking for the first valid entry.
This 15077c478bd9Sstevel@tonic-gate * routine takes both a starting and ending address. The starting address 15087c478bd9Sstevel@tonic-gate * is required to be within the htable provided by the caller, but there is 15097c478bd9Sstevel@tonic-gate * no such restriction on the ending address. 15107c478bd9Sstevel@tonic-gate * 15117c478bd9Sstevel@tonic-gate * If the routine finds a valid entry in the htable (at or beyond the 15127c478bd9Sstevel@tonic-gate * starting address), the PTE (and its address) will be returned. 15137c478bd9Sstevel@tonic-gate * This PTE may correspond to either a page or a pagetable - it is the 15147c478bd9Sstevel@tonic-gate * caller's responsibility to determine which. If no valid entry is 15157c478bd9Sstevel@tonic-gate * found, 0 (an invalid PTE) and the next unexamined address will be 15167c478bd9Sstevel@tonic-gate * returned. 15177c478bd9Sstevel@tonic-gate * 15187c478bd9Sstevel@tonic-gate * The loop has been carefully coded for optimization. 15197c478bd9Sstevel@tonic-gate */ 15207c478bd9Sstevel@tonic-gate static x86pte_t 15217c478bd9Sstevel@tonic-gate htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 15227c478bd9Sstevel@tonic-gate { 15237c478bd9Sstevel@tonic-gate uint_t e; 15247c478bd9Sstevel@tonic-gate x86pte_t found_pte = (x86pte_t)0; 1525ae115bc7Smrj caddr_t pte_ptr; 1526ae115bc7Smrj caddr_t end_pte_ptr; 15277c478bd9Sstevel@tonic-gate int l = ht->ht_level; 15287c478bd9Sstevel@tonic-gate uintptr_t va = *vap & LEVEL_MASK(l); 15297c478bd9Sstevel@tonic-gate size_t pgsize = LEVEL_SIZE(l); 15307c478bd9Sstevel@tonic-gate 15317c478bd9Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr); 15327c478bd9Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht)); 15337c478bd9Sstevel@tonic-gate 15347c478bd9Sstevel@tonic-gate /* 15357c478bd9Sstevel@tonic-gate * Compute the starting index and ending virtual address 15367c478bd9Sstevel@tonic-gate */ 15377c478bd9Sstevel@tonic-gate e = htable_va2entry(va, ht); 15387c478bd9Sstevel@tonic-gate 15397c478bd9Sstevel@tonic-gate /* 15407c478bd9Sstevel@tonic-gate * The following page table scan code knows that the valid 15417c478bd9Sstevel@tonic-gate * bit of a PTE is in the lowest byte AND that x86 is little endian!!
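 *
 * That is, PT_VALID is bit 0 of a PTE, so on a little endian machine
 * the first byte of each 4 or 8 byte PTE suffices to test validity.
 * For example, the 8 byte PTE value 0x1234067 is laid out in memory
 * starting with its 0x67 byte, which has the valid bit set.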
15427c478bd9Sstevel@tonic-gate */ 1543ae115bc7Smrj pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 1544ae115bc7Smrj end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 1545ae115bc7Smrj pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 154630f7a194Skchow while (!PTE_ISVALID(*pte_ptr)) { 15477c478bd9Sstevel@tonic-gate va += pgsize; 15487c478bd9Sstevel@tonic-gate if (va >= eaddr) 15497c478bd9Sstevel@tonic-gate break; 15507c478bd9Sstevel@tonic-gate pte_ptr += mmu.pte_size; 15517c478bd9Sstevel@tonic-gate ASSERT(pte_ptr <= end_pte_ptr); 15527c478bd9Sstevel@tonic-gate if (pte_ptr == end_pte_ptr) 15537c478bd9Sstevel@tonic-gate break; 15547c478bd9Sstevel@tonic-gate } 15557c478bd9Sstevel@tonic-gate 15567c478bd9Sstevel@tonic-gate /* 15577c478bd9Sstevel@tonic-gate * if we found a valid PTE, load the entire PTE 15587c478bd9Sstevel@tonic-gate */ 1559ae115bc7Smrj if (va < eaddr && pte_ptr != end_pte_ptr) 1560ae115bc7Smrj found_pte = GET_PTE((x86pte_t *)pte_ptr); 15617c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 15627c478bd9Sstevel@tonic-gate 15637c478bd9Sstevel@tonic-gate #if defined(__amd64) 15647c478bd9Sstevel@tonic-gate /* 15657c478bd9Sstevel@tonic-gate * deal with VA hole on amd64 15667c478bd9Sstevel@tonic-gate */ 15677c478bd9Sstevel@tonic-gate if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 15687c478bd9Sstevel@tonic-gate va = mmu.hole_end + va - mmu.hole_start; 15697c478bd9Sstevel@tonic-gate #endif /* __amd64 */ 15707c478bd9Sstevel@tonic-gate 15717c478bd9Sstevel@tonic-gate *vap = va; 15727c478bd9Sstevel@tonic-gate return (found_pte); 15737c478bd9Sstevel@tonic-gate } 15747c478bd9Sstevel@tonic-gate 15757c478bd9Sstevel@tonic-gate /* 15767c478bd9Sstevel@tonic-gate * Find the address and htable for the first populated translation at or 15777c478bd9Sstevel@tonic-gate * above the given virtual address. The caller may also specify an upper 15787c478bd9Sstevel@tonic-gate * limit to the address range to search. Uses level information to quickly 15797c478bd9Sstevel@tonic-gate * skip unpopulated sections of virtual address spaces. 15807c478bd9Sstevel@tonic-gate * 15817c478bd9Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable and virt addr 15827c478bd9Sstevel@tonic-gate * and has a hold on the htable. 15837c478bd9Sstevel@tonic-gate */ 15847c478bd9Sstevel@tonic-gate x86pte_t 15857c478bd9Sstevel@tonic-gate htable_walk( 15867c478bd9Sstevel@tonic-gate struct hat *hat, 15877c478bd9Sstevel@tonic-gate htable_t **htp, 15887c478bd9Sstevel@tonic-gate uintptr_t *vaddr, 15897c478bd9Sstevel@tonic-gate uintptr_t eaddr) 15907c478bd9Sstevel@tonic-gate { 15917c478bd9Sstevel@tonic-gate uintptr_t va = *vaddr; 15927c478bd9Sstevel@tonic-gate htable_t *ht; 15937c478bd9Sstevel@tonic-gate htable_t *prev = *htp; 15947c478bd9Sstevel@tonic-gate level_t l; 15957c478bd9Sstevel@tonic-gate level_t max_mapped_level; 15967c478bd9Sstevel@tonic-gate x86pte_t pte; 15977c478bd9Sstevel@tonic-gate 15987c478bd9Sstevel@tonic-gate ASSERT(eaddr > va); 15997c478bd9Sstevel@tonic-gate 16007c478bd9Sstevel@tonic-gate /* 16017c478bd9Sstevel@tonic-gate * If this is a user address, then we know we need not look beyond 16027c478bd9Sstevel@tonic-gate * kernelbase. 
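 *
 * For example, a caller passing HTABLE_WALK_TO_END for a user hat is
 * effectively asking to walk the range [*vaddr, kernelbase).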
16037c478bd9Sstevel@tonic-gate */ 16047c478bd9Sstevel@tonic-gate ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 16057c478bd9Sstevel@tonic-gate eaddr == HTABLE_WALK_TO_END); 16067c478bd9Sstevel@tonic-gate if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 16077c478bd9Sstevel@tonic-gate eaddr = kernelbase; 16087c478bd9Sstevel@tonic-gate 16097c478bd9Sstevel@tonic-gate /* 16107c478bd9Sstevel@tonic-gate * If we're coming in with a previous page table, search it first 16117c478bd9Sstevel@tonic-gate * without doing an htable_lookup(), this should be frequent. 16127c478bd9Sstevel@tonic-gate */ 16137c478bd9Sstevel@tonic-gate if (prev) { 16147c478bd9Sstevel@tonic-gate ASSERT(prev->ht_busy > 0); 16157c478bd9Sstevel@tonic-gate ASSERT(prev->ht_vaddr <= va); 16167c478bd9Sstevel@tonic-gate l = prev->ht_level; 16177c478bd9Sstevel@tonic-gate if (va <= HTABLE_LAST_PAGE(prev)) { 16187c478bd9Sstevel@tonic-gate pte = htable_scan(prev, &va, eaddr); 16197c478bd9Sstevel@tonic-gate 16207c478bd9Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 16217c478bd9Sstevel@tonic-gate *vaddr = va; 16227c478bd9Sstevel@tonic-gate *htp = prev; 16237c478bd9Sstevel@tonic-gate return (pte); 16247c478bd9Sstevel@tonic-gate } 16257c478bd9Sstevel@tonic-gate } 16267c478bd9Sstevel@tonic-gate 16277c478bd9Sstevel@tonic-gate /* 16287c478bd9Sstevel@tonic-gate * We found nothing in the htable provided by the caller, 16297c478bd9Sstevel@tonic-gate * so fall through and do the full search 16307c478bd9Sstevel@tonic-gate */ 16317c478bd9Sstevel@tonic-gate htable_release(prev); 16327c478bd9Sstevel@tonic-gate } 16337c478bd9Sstevel@tonic-gate 16347c478bd9Sstevel@tonic-gate /* 16357c478bd9Sstevel@tonic-gate * Find the level of the largest pagesize used by this HAT. 16367c478bd9Sstevel@tonic-gate */ 16377173d045Sjosephb if (hat->hat_ism_pgcnt > 0) { 163802bc52beSkchow max_mapped_level = mmu.umax_page_level; 16397173d045Sjosephb } else { 16407c478bd9Sstevel@tonic-gate max_mapped_level = 0; 16417c478bd9Sstevel@tonic-gate for (l = 1; l <= mmu.max_page_level; ++l) 16427c478bd9Sstevel@tonic-gate if (hat->hat_pages_mapped[l] != 0) 16437c478bd9Sstevel@tonic-gate max_mapped_level = l; 16447173d045Sjosephb } 16457c478bd9Sstevel@tonic-gate 16467c478bd9Sstevel@tonic-gate while (va < eaddr && va >= *vaddr) { 16477c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 16487c478bd9Sstevel@tonic-gate 16497c478bd9Sstevel@tonic-gate /* 16507c478bd9Sstevel@tonic-gate * Find lowest table with any entry for given address. 16517c478bd9Sstevel@tonic-gate */ 16527c478bd9Sstevel@tonic-gate for (l = 0; l <= TOP_LEVEL(hat); ++l) { 16537c478bd9Sstevel@tonic-gate ht = htable_lookup(hat, va, l); 16547c478bd9Sstevel@tonic-gate if (ht != NULL) { 16557c478bd9Sstevel@tonic-gate pte = htable_scan(ht, &va, eaddr); 16567c478bd9Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 16577c478bd9Sstevel@tonic-gate *vaddr = va; 16587c478bd9Sstevel@tonic-gate *htp = ht; 16597c478bd9Sstevel@tonic-gate return (pte); 16607c478bd9Sstevel@tonic-gate } 16617c478bd9Sstevel@tonic-gate htable_release(ht); 16627c478bd9Sstevel@tonic-gate break; 16637c478bd9Sstevel@tonic-gate } 16647c478bd9Sstevel@tonic-gate 16657c478bd9Sstevel@tonic-gate /* 16667173d045Sjosephb * No htable at this level for the address. If there 16677173d045Sjosephb * is no larger page size that could cover it, we can 16687173d045Sjosephb * skip right to the start of the next page table. 
16698b5842f9Sdm120769 */ 16708b5842f9Sdm120769 ASSERT(l < TOP_LEVEL(hat)); 16718b5842f9Sdm120769 if (l >= max_mapped_level) { 16727c478bd9Sstevel@tonic-gate va = NEXT_ENTRY_VA(va, l + 1); 16737173d045Sjosephb if (va >= eaddr) 16748b5842f9Sdm120769 break; 16758b5842f9Sdm120769 } 16767c478bd9Sstevel@tonic-gate } 16777c478bd9Sstevel@tonic-gate } 16787c478bd9Sstevel@tonic-gate 16797c478bd9Sstevel@tonic-gate *vaddr = 0; 16807c478bd9Sstevel@tonic-gate *htp = NULL; 16817c478bd9Sstevel@tonic-gate return (0); 16827c478bd9Sstevel@tonic-gate } 16837c478bd9Sstevel@tonic-gate 16847c478bd9Sstevel@tonic-gate /* 16857c478bd9Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address 16867c478bd9Sstevel@tonic-gate * with pagesize at or below given level. 16877c478bd9Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 16887c478bd9Sstevel@tonic-gate * entry, and has a hold on the htable. 16897c478bd9Sstevel@tonic-gate */ 16907c478bd9Sstevel@tonic-gate htable_t * 16917c478bd9Sstevel@tonic-gate htable_getpte( 16927c478bd9Sstevel@tonic-gate struct hat *hat, 16937c478bd9Sstevel@tonic-gate uintptr_t vaddr, 16947c478bd9Sstevel@tonic-gate uint_t *entry, 16957c478bd9Sstevel@tonic-gate x86pte_t *pte, 16967c478bd9Sstevel@tonic-gate level_t level) 16977c478bd9Sstevel@tonic-gate { 16987c478bd9Sstevel@tonic-gate htable_t *ht; 16997c478bd9Sstevel@tonic-gate level_t l; 17007c478bd9Sstevel@tonic-gate uint_t e; 17017c478bd9Sstevel@tonic-gate 17027c478bd9Sstevel@tonic-gate ASSERT(level <= mmu.max_page_level); 17037c478bd9Sstevel@tonic-gate 17047c478bd9Sstevel@tonic-gate for (l = 0; l <= level; ++l) { 17057c478bd9Sstevel@tonic-gate ht = htable_lookup(hat, vaddr, l); 17067c478bd9Sstevel@tonic-gate if (ht == NULL) 17077c478bd9Sstevel@tonic-gate continue; 17087c478bd9Sstevel@tonic-gate e = htable_va2entry(vaddr, ht); 17097c478bd9Sstevel@tonic-gate if (entry != NULL) 17107c478bd9Sstevel@tonic-gate *entry = e; 17117c478bd9Sstevel@tonic-gate if (pte != NULL) 17127c478bd9Sstevel@tonic-gate *pte = x86pte_get(ht, e); 17137c478bd9Sstevel@tonic-gate return (ht); 17147c478bd9Sstevel@tonic-gate } 17157c478bd9Sstevel@tonic-gate return (NULL); 17167c478bd9Sstevel@tonic-gate } 17177c478bd9Sstevel@tonic-gate 17187c478bd9Sstevel@tonic-gate /* 17197c478bd9Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address. 17207c478bd9Sstevel@tonic-gate * There must be a valid page mapped at the given address. 17217c478bd9Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 17227c478bd9Sstevel@tonic-gate * entry, and has a hold on the htable. 
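 *
 * A sketch of typical use (all names here are illustrative):
 *
 *	ht = htable_getpage(hat, va, &entry);
 *	if (ht != NULL) {
 *		pte = x86pte_get(ht, entry);
 *		... inspect the mapping ...
 *		htable_release(ht);
 *	}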
17237c478bd9Sstevel@tonic-gate */ 17247c478bd9Sstevel@tonic-gate htable_t * 17257c478bd9Sstevel@tonic-gate htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 17267c478bd9Sstevel@tonic-gate { 17277c478bd9Sstevel@tonic-gate htable_t *ht; 17287c478bd9Sstevel@tonic-gate uint_t e; 17297c478bd9Sstevel@tonic-gate x86pte_t pte; 17307c478bd9Sstevel@tonic-gate 17317c478bd9Sstevel@tonic-gate ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 17327c478bd9Sstevel@tonic-gate if (ht == NULL) 17337c478bd9Sstevel@tonic-gate return (NULL); 17347c478bd9Sstevel@tonic-gate 17357c478bd9Sstevel@tonic-gate if (entry) 17367c478bd9Sstevel@tonic-gate *entry = e; 17377c478bd9Sstevel@tonic-gate 17387c478bd9Sstevel@tonic-gate if (PTE_ISPAGE(pte, ht->ht_level)) 17397c478bd9Sstevel@tonic-gate return (ht); 17407c478bd9Sstevel@tonic-gate htable_release(ht); 17417c478bd9Sstevel@tonic-gate return (NULL); 17427c478bd9Sstevel@tonic-gate } 17437c478bd9Sstevel@tonic-gate 17447c478bd9Sstevel@tonic-gate 17457c478bd9Sstevel@tonic-gate void 17467c478bd9Sstevel@tonic-gate htable_init() 17477c478bd9Sstevel@tonic-gate { 17487c478bd9Sstevel@tonic-gate /* 17497c478bd9Sstevel@tonic-gate * To save on kernel VA usage, we avoid debug information in 32 bit 17507c478bd9Sstevel@tonic-gate * kernels. 17517c478bd9Sstevel@tonic-gate */ 17527c478bd9Sstevel@tonic-gate #if defined(__amd64) 17537c478bd9Sstevel@tonic-gate int kmem_flags = KMC_NOHASH; 17547c478bd9Sstevel@tonic-gate #elif defined(__i386) 17557c478bd9Sstevel@tonic-gate int kmem_flags = KMC_NOHASH | KMC_NODEBUG; 17567c478bd9Sstevel@tonic-gate #endif 17577c478bd9Sstevel@tonic-gate 17587c478bd9Sstevel@tonic-gate /* 17597c478bd9Sstevel@tonic-gate * initialize kmem caches 17607c478bd9Sstevel@tonic-gate */ 17617c478bd9Sstevel@tonic-gate htable_cache = kmem_cache_create("htable_t", 17627c478bd9Sstevel@tonic-gate sizeof (htable_t), 0, NULL, NULL, 17637c478bd9Sstevel@tonic-gate htable_reap, NULL, hat_memload_arena, kmem_flags); 17647c478bd9Sstevel@tonic-gate } 17657c478bd9Sstevel@tonic-gate 17667c478bd9Sstevel@tonic-gate /* 17677c478bd9Sstevel@tonic-gate * get the pte index for the virtual address in the given htable's pagetable 17687c478bd9Sstevel@tonic-gate */ 17697c478bd9Sstevel@tonic-gate uint_t 17707c478bd9Sstevel@tonic-gate htable_va2entry(uintptr_t va, htable_t *ht) 17717c478bd9Sstevel@tonic-gate { 17727c478bd9Sstevel@tonic-gate level_t l = ht->ht_level; 17737c478bd9Sstevel@tonic-gate 17747c478bd9Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr); 17757c478bd9Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht)); 1776ae115bc7Smrj return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1)); 17777c478bd9Sstevel@tonic-gate } 17787c478bd9Sstevel@tonic-gate 17797c478bd9Sstevel@tonic-gate /* 17807c478bd9Sstevel@tonic-gate * Given an htable and the index of a pte in it, return the virtual address 17817c478bd9Sstevel@tonic-gate * of the page. 
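 *
 * This is the inverse of htable_va2entry() above: for an address
 * within the htable,
 *
 *	htable_e2va(ht, htable_va2entry(va, ht))
 *
 * yields va rounded down to a LEVEL_SIZE(l) boundary, modulo the
 * amd64 VA hole adjustment below.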
17827c478bd9Sstevel@tonic-gate */ 17837c478bd9Sstevel@tonic-gate uintptr_t 17847c478bd9Sstevel@tonic-gate htable_e2va(htable_t *ht, uint_t entry) 17857c478bd9Sstevel@tonic-gate { 17867c478bd9Sstevel@tonic-gate level_t l = ht->ht_level; 17877c478bd9Sstevel@tonic-gate uintptr_t va; 17887c478bd9Sstevel@tonic-gate 1789ae115bc7Smrj ASSERT(entry < HTABLE_NUM_PTES(ht)); 17907c478bd9Sstevel@tonic-gate va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l)); 17917c478bd9Sstevel@tonic-gate 17927c478bd9Sstevel@tonic-gate /* 17937c478bd9Sstevel@tonic-gate * Need to skip over any VA hole in top level table 17947c478bd9Sstevel@tonic-gate */ 17957c478bd9Sstevel@tonic-gate #if defined(__amd64) 17967c478bd9Sstevel@tonic-gate if (ht->ht_level == mmu.max_level && va >= mmu.hole_start) 17977c478bd9Sstevel@tonic-gate va += ((mmu.hole_end - mmu.hole_start) + 1); 17987c478bd9Sstevel@tonic-gate #endif 17997c478bd9Sstevel@tonic-gate 18007c478bd9Sstevel@tonic-gate return (va); 18017c478bd9Sstevel@tonic-gate } 18027c478bd9Sstevel@tonic-gate 18037c478bd9Sstevel@tonic-gate /* 18047c478bd9Sstevel@tonic-gate * The code uses compare and swap instructions to read/write PTE's to 18057c478bd9Sstevel@tonic-gate * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems 18067c478bd9Sstevel@tonic-gate * where a plain load or store of a PTE will not naturally be atomic. 18077c478bd9Sstevel@tonic-gate * 18087c478bd9Sstevel@tonic-gate * The combination of using kpreempt_disable()/_enable() and the hci_mutex 18097c478bd9Sstevel@tonic-gate * are used to ensure that an interrupt won't overwrite a temporary mapping 18107c478bd9Sstevel@tonic-gate * while it's in use. If an interrupt thread tries to access a PTE, it will 18117c478bd9Sstevel@tonic-gate * yield briefly back to the pinned thread which holds the cpu's hci_mutex. 18127c478bd9Sstevel@tonic-gate */ 18137c478bd9Sstevel@tonic-gate void 1814ae115bc7Smrj x86pte_cpu_init(cpu_t *cpu) 18157c478bd9Sstevel@tonic-gate { 18167c478bd9Sstevel@tonic-gate struct hat_cpu_info *hci; 18177c478bd9Sstevel@tonic-gate 1818ae115bc7Smrj hci = kmem_zalloc(sizeof (*hci), KM_SLEEP); 18197c478bd9Sstevel@tonic-gate mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL); 18207c478bd9Sstevel@tonic-gate cpu->cpu_hat_info = hci; 18217c478bd9Sstevel@tonic-gate } 18227c478bd9Sstevel@tonic-gate 1823ae115bc7Smrj void 1824ae115bc7Smrj x86pte_cpu_fini(cpu_t *cpu) 1825ae115bc7Smrj { 1826ae115bc7Smrj struct hat_cpu_info *hci = cpu->cpu_hat_info; 1827ae115bc7Smrj 1828ae115bc7Smrj kmem_free(hci, sizeof (*hci)); 1829ae115bc7Smrj cpu->cpu_hat_info = NULL; 18307c478bd9Sstevel@tonic-gate } 18317c478bd9Sstevel@tonic-gate 1832ae115bc7Smrj #ifdef __i386 1833ae115bc7Smrj /* 1834ae115bc7Smrj * On 32 bit kernels, loading a 64 bit PTE is a little tricky 1835ae115bc7Smrj */ 1836ae115bc7Smrj x86pte_t 1837ae115bc7Smrj get_pte64(x86pte_t *ptr) 1838ae115bc7Smrj { 1839ae115bc7Smrj volatile uint32_t *p = (uint32_t *)ptr; 1840ae115bc7Smrj x86pte_t t; 1841ae115bc7Smrj 1842ae115bc7Smrj ASSERT(mmu.pae_hat != 0); 1843ae115bc7Smrj for (;;) { 1844ae115bc7Smrj t = p[0]; 1845ae115bc7Smrj t |= (uint64_t)p[1] << 32; 1846ae115bc7Smrj if ((t & 0xffffffff) == p[0]) 1847ae115bc7Smrj return (t); 1848ae115bc7Smrj } 1849ae115bc7Smrj } 1850ae115bc7Smrj #endif /* __i386 */ 1851ae115bc7Smrj 18527c478bd9Sstevel@tonic-gate /* 18537c478bd9Sstevel@tonic-gate * Disable preemption and establish a mapping to the pagetable with the 18547c478bd9Sstevel@tonic-gate * given pfn.
This is optimized for the case where it's the same 18557c478bd9Sstevel@tonic-gate * pfn as we last referenced from this CPU. 18567c478bd9Sstevel@tonic-gate */ 18577c478bd9Sstevel@tonic-gate static x86pte_t * 1858ae115bc7Smrj x86pte_access_pagetable(htable_t *ht, uint_t index) 18597c478bd9Sstevel@tonic-gate { 18607c478bd9Sstevel@tonic-gate /* 18617c478bd9Sstevel@tonic-gate * VLP pagetables are contained in the hat_t 18627c478bd9Sstevel@tonic-gate */ 18637c478bd9Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP) 1864ae115bc7Smrj return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index)); 1865ae115bc7Smrj return (x86pte_mapin(ht->ht_pfn, index, ht)); 1866ae115bc7Smrj } 18677c478bd9Sstevel@tonic-gate 18687c478bd9Sstevel@tonic-gate /* 1869ae115bc7Smrj * map the given pfn into the page table window. 18707c478bd9Sstevel@tonic-gate */ 1871ae115bc7Smrj /*ARGSUSED*/ 1872ae115bc7Smrj x86pte_t * 1873ae115bc7Smrj x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht) 1874ae115bc7Smrj { 1875ae115bc7Smrj x86pte_t *pteptr; 18768ea72728Sjosephb x86pte_t pte = 0; 1877ae115bc7Smrj x86pte_t newpte; 1878ae115bc7Smrj int x; 1879ae115bc7Smrj 18807c478bd9Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID); 18817c478bd9Sstevel@tonic-gate 18827c478bd9Sstevel@tonic-gate if (!khat_running) { 1883ae115bc7Smrj caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1); 1884ae115bc7Smrj return (PT_INDEX_PTR(va, index)); 18857c478bd9Sstevel@tonic-gate } 18867c478bd9Sstevel@tonic-gate 18877c478bd9Sstevel@tonic-gate /* 1888ae115bc7Smrj * If kpm is available, use it. 1889ae115bc7Smrj */ 1890ae115bc7Smrj if (kpm_vbase) 1891ae115bc7Smrj return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index)); 1892ae115bc7Smrj 1893ae115bc7Smrj /* 1894ae115bc7Smrj * Disable preemption and grab the CPU's hci_mutex 18957c478bd9Sstevel@tonic-gate */ 18967c478bd9Sstevel@tonic-gate kpreempt_disable(); 1897ae115bc7Smrj ASSERT(CPU->cpu_hat_info != NULL); 1898ae115bc7Smrj mutex_enter(&CPU->cpu_hat_info->hci_mutex); 1899ae115bc7Smrj x = PWIN_TABLE(CPU->cpu_id); 1900ae115bc7Smrj pteptr = (x86pte_t *)PWIN_PTE_VA(x); 19018ea72728Sjosephb #ifndef __xpv 1902ae115bc7Smrj if (mmu.pae_hat) 1903ae115bc7Smrj pte = *pteptr; 1904ae115bc7Smrj else 1905ae115bc7Smrj pte = *(x86pte32_t *)pteptr; 19068ea72728Sjosephb #endif 1907ae115bc7Smrj 1908ae115bc7Smrj newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx; 1909843e1988Sjohnlev 1910843e1988Sjohnlev /* 1911843e1988Sjohnlev * For hardware we can use a writable mapping. 1912843e1988Sjohnlev */ 1913843e1988Sjohnlev #ifdef __xpv 1914843e1988Sjohnlev if (IN_XPV_PANIC()) 1915843e1988Sjohnlev #endif 1916ae115bc7Smrj newpte |= PT_WRITABLE; 1917ae115bc7Smrj 1918ae115bc7Smrj if (!PTE_EQUIV(newpte, pte)) { 1919843e1988Sjohnlev 1920843e1988Sjohnlev #ifdef __xpv 1921843e1988Sjohnlev if (!IN_XPV_PANIC()) { 1922843e1988Sjohnlev xen_map(newpte, PWIN_VA(x)); 1923843e1988Sjohnlev } else 1924843e1988Sjohnlev #endif 1925843e1988Sjohnlev { 1926843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 1927ae115bc7Smrj if (mmu.pae_hat) 1928ae115bc7Smrj *pteptr = newpte; 1929ae115bc7Smrj else 1930ae115bc7Smrj *(x86pte32_t *)pteptr = newpte; 1931843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 1932ae115bc7Smrj mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 19337c478bd9Sstevel@tonic-gate } 1934843e1988Sjohnlev } 1935ae115bc7Smrj return (PT_INDEX_PTR(PWIN_VA(x), index)); 19367c478bd9Sstevel@tonic-gate } 19377c478bd9Sstevel@tonic-gate 19387c478bd9Sstevel@tonic-gate /* 19397c478bd9Sstevel@tonic-gate * Release access to a page table.
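 *
 * This pairs with x86pte_access_pagetable(); the usual pattern is:
 *
 *	ptep = x86pte_access_pagetable(ht, entry);
 *	pte = GET_PTE(ptep);
 *	x86pte_release_pagetable(ht);
 *
 * as in x86pte_get() below.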
19407c478bd9Sstevel@tonic-gate */ 19417c478bd9Sstevel@tonic-gate static void 19427c478bd9Sstevel@tonic-gate x86pte_release_pagetable(htable_t *ht) 19437c478bd9Sstevel@tonic-gate { 19447c478bd9Sstevel@tonic-gate /* 19457c478bd9Sstevel@tonic-gate * nothing to do for VLP htables 19467c478bd9Sstevel@tonic-gate */ 19477c478bd9Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP) 19487c478bd9Sstevel@tonic-gate return; 19497c478bd9Sstevel@tonic-gate 1950ae115bc7Smrj x86pte_mapout(); 19517c478bd9Sstevel@tonic-gate } 19527c478bd9Sstevel@tonic-gate 1953ae115bc7Smrj void 1954ae115bc7Smrj x86pte_mapout(void) 1955ae115bc7Smrj { 1956843e1988Sjohnlev if (kpm_vbase != NULL || !khat_running) 1957ae115bc7Smrj return; 1958ae115bc7Smrj 19597c478bd9Sstevel@tonic-gate /* 1960ae115bc7Smrj * Drop the CPU's hci_mutex and restore preemption. 19617c478bd9Sstevel@tonic-gate */ 19628ea72728Sjosephb #ifdef __xpv 19638ea72728Sjosephb if (!IN_XPV_PANIC()) { 19648ea72728Sjosephb uintptr_t va; 19658ea72728Sjosephb 19668ea72728Sjosephb /* 19678ea72728Sjosephb * We need to always clear the mapping in case a page 19688ea72728Sjosephb * that was once a page table page is ballooned out. 19698ea72728Sjosephb */ 19708ea72728Sjosephb va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id)); 19718ea72728Sjosephb (void) HYPERVISOR_update_va_mapping(va, 0, 19728ea72728Sjosephb UVMF_INVLPG | UVMF_LOCAL); 19738ea72728Sjosephb } 19748ea72728Sjosephb #endif 1975ae115bc7Smrj mutex_exit(&CPU->cpu_hat_info->hci_mutex); 19767c478bd9Sstevel@tonic-gate kpreempt_enable(); 19777c478bd9Sstevel@tonic-gate } 19787c478bd9Sstevel@tonic-gate 19797c478bd9Sstevel@tonic-gate /* 19807c478bd9Sstevel@tonic-gate * Atomic retrieval of a pagetable entry 19817c478bd9Sstevel@tonic-gate */ 19827c478bd9Sstevel@tonic-gate x86pte_t 19837c478bd9Sstevel@tonic-gate x86pte_get(htable_t *ht, uint_t entry) 19847c478bd9Sstevel@tonic-gate { 19857c478bd9Sstevel@tonic-gate x86pte_t pte; 1986aa2ed9e5Sjosephb x86pte_t *ptep; 19877c478bd9Sstevel@tonic-gate 19887c478bd9Sstevel@tonic-gate /* 1989aa2ed9e5Sjosephb * Be careful that loading PAE entries in 32 bit kernel is atomic. 19907c478bd9Sstevel@tonic-gate */ 1991ae115bc7Smrj ASSERT(entry < mmu.ptes_per_table); 1992ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 1993ae115bc7Smrj pte = GET_PTE(ptep); 19947c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 19957c478bd9Sstevel@tonic-gate return (pte); 19967c478bd9Sstevel@tonic-gate } 19977c478bd9Sstevel@tonic-gate 19987c478bd9Sstevel@tonic-gate /* 19997c478bd9Sstevel@tonic-gate * Atomic unconditional set of a page table entry, it returns the previous 2000ae115bc7Smrj * value. For pre-existing mappings if the PFN changes, then we don't care 2001ae115bc7Smrj * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 2002ae115bc7Smrj * the MOD/REF bits unchanged. 2003ae115bc7Smrj * 2004ae115bc7Smrj * If asked to overwrite a link to a lower page table with a large page 2005ae115bc7Smrj * mapping, this routine returns the special value of LPAGE_ERROR. This 2006ae115bc7Smrj * allows the upper HAT layers to retry with a smaller mapping size. 20077c478bd9Sstevel@tonic-gate */ 20087c478bd9Sstevel@tonic-gate x86pte_t 20097c478bd9Sstevel@tonic-gate x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 20107c478bd9Sstevel@tonic-gate { 20117c478bd9Sstevel@tonic-gate x86pte_t old; 2012ae115bc7Smrj x86pte_t prev; 20137c478bd9Sstevel@tonic-gate x86pte_t *ptep; 2014ae115bc7Smrj level_t l = ht->ht_level; 2015ae115bc7Smrj x86pte_t pfn_mask = (l != 0) ? 
PT_PADDR_LGPG : PT_PADDR; 2016ae115bc7Smrj x86pte_t n; 2017ae115bc7Smrj uintptr_t addr = htable_e2va(ht, entry); 2018ae115bc7Smrj hat_t *hat = ht->ht_hat; 20197c478bd9Sstevel@tonic-gate 2020ae115bc7Smrj ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 20217c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 2022ae115bc7Smrj if (ptr == NULL) 2023ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2024ae115bc7Smrj else 20257c478bd9Sstevel@tonic-gate ptep = ptr; 20267c478bd9Sstevel@tonic-gate 2027b193e412Skchow /* 2028ae115bc7Smrj * Install the new PTE. If remapping the same PFN, then 2029ae115bc7Smrj * copy existing REF/MOD bits to new mapping. 2030b193e412Skchow */ 2031ae115bc7Smrj do { 2032ae115bc7Smrj prev = GET_PTE(ptep); 2033ae115bc7Smrj n = new; 2034ae115bc7Smrj if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 2035b193e412Skchow n |= prev & (PT_REF | PT_MOD); 2036ae115bc7Smrj 2037ae115bc7Smrj /* 2038ae115bc7Smrj * Another thread may have installed this mapping already, 2039ae115bc7Smrj * flush the local TLB and be done. 2040ae115bc7Smrj */ 2041b193e412Skchow if (prev == n) { 20427c478bd9Sstevel@tonic-gate old = new; 2043843e1988Sjohnlev #ifdef __xpv 2044843e1988Sjohnlev if (!IN_XPV_PANIC()) 2045843e1988Sjohnlev xen_flush_va((caddr_t)addr); 2046843e1988Sjohnlev else 2047843e1988Sjohnlev #endif 2048ae115bc7Smrj mmu_tlbflush_entry((caddr_t)addr); 2049ae115bc7Smrj goto done; 20507c478bd9Sstevel@tonic-gate } 2051ae115bc7Smrj 2052ae115bc7Smrj /* 2053ae115bc7Smrj * Detect if we have a collision of installing a large 2054ae115bc7Smrj * page mapping where there already is a lower page table. 2055ae115bc7Smrj */ 205697704650Sjosephb if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 205797704650Sjosephb old = LPAGE_ERROR; 205897704650Sjosephb goto done; 205997704650Sjosephb } 2060ae115bc7Smrj 2061843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2062ae115bc7Smrj old = CAS_PTE(ptep, prev, n); 2063843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2064ae115bc7Smrj } while (old != prev); 2065ae115bc7Smrj 2066ae115bc7Smrj /* 2067ae115bc7Smrj * Do a TLB demap if needed, ie. the old pte was valid. 2068ae115bc7Smrj * 2069ae115bc7Smrj * Note that a stale TLB writeback to the PTE here either can't happen 2070ae115bc7Smrj * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 2071ae115bc7Smrj * mappings, but they were created with REF and MOD already set, so 2072ae115bc7Smrj * no stale writeback will happen. 2073ae115bc7Smrj * 2074ae115bc7Smrj * Segmap is the only place where remaps happen on the same pfn and for 2075ae115bc7Smrj * that we want to preserve the stale REF/MOD bits. 2076ae115bc7Smrj */ 2077ae115bc7Smrj if (old & PT_REF) 2078ae115bc7Smrj hat_tlb_inval(hat, addr); 2079ae115bc7Smrj 2080ae115bc7Smrj done: 20817c478bd9Sstevel@tonic-gate if (ptr == NULL) 20827c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 20837c478bd9Sstevel@tonic-gate return (old); 20847c478bd9Sstevel@tonic-gate } 20857c478bd9Sstevel@tonic-gate 20867c478bd9Sstevel@tonic-gate /* 2087ae115bc7Smrj * Atomic compare and swap of a page table entry. No TLB invalidates are done. 2088ae115bc7Smrj * This is used for links between pagetables of different levels. 2089ae115bc7Smrj * Note we always create these links with dirty/access set, so they should 2090ae115bc7Smrj * never change. 
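 *
 * For example, link_ptp() above installs a new link with:
 *
 *	found = x86pte_cas(higher, entry, 0, newptp);
 *
 * relying on the expected old value of 0 to detect a racing install.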
20917c478bd9Sstevel@tonic-gate */ 2092ae115bc7Smrj x86pte_t 20937c478bd9Sstevel@tonic-gate x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 20947c478bd9Sstevel@tonic-gate { 20957c478bd9Sstevel@tonic-gate x86pte_t pte; 20967c478bd9Sstevel@tonic-gate x86pte_t *ptep; 2097843e1988Sjohnlev #ifdef __xpv 2098843e1988Sjohnlev /* 2099843e1988Sjohnlev * We can't use writable pagetables for upper level tables, so fake it. 2100843e1988Sjohnlev */ 2101843e1988Sjohnlev mmu_update_t t[2]; 2102843e1988Sjohnlev int cnt = 1; 2103843e1988Sjohnlev int count; 2104843e1988Sjohnlev maddr_t ma; 21057c478bd9Sstevel@tonic-gate 2106843e1988Sjohnlev if (!IN_XPV_PANIC()) { 2107843e1988Sjohnlev ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */ 2108843e1988Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2109843e1988Sjohnlev t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2110843e1988Sjohnlev t[0].val = new; 2111843e1988Sjohnlev 2112843e1988Sjohnlev #if defined(__amd64) 2113843e1988Sjohnlev /* 2114843e1988Sjohnlev * On the 64-bit hypervisor we need to maintain the user mode 2115843e1988Sjohnlev * top page table too. 2116843e1988Sjohnlev */ 2117843e1988Sjohnlev if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) { 2118843e1988Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa( 2119843e1988Sjohnlev ht->ht_hat->hat_user_ptable), entry)); 2120843e1988Sjohnlev t[1].ptr = ma | MMU_NORMAL_PT_UPDATE; 2121843e1988Sjohnlev t[1].val = new; 2122843e1988Sjohnlev ++cnt; 2123843e1988Sjohnlev } 2124843e1988Sjohnlev #endif /* __amd64 */ 2125843e1988Sjohnlev 2126843e1988Sjohnlev if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF)) 2127843e1988Sjohnlev panic("HYPERVISOR_mmu_update() failed"); 2128843e1988Sjohnlev ASSERT(count == cnt); 2129843e1988Sjohnlev return (old); 2130843e1988Sjohnlev } 2131843e1988Sjohnlev #endif 2132ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2133843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2134ae115bc7Smrj pte = CAS_PTE(ptep, old, new); 2135843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 21367c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 21377c478bd9Sstevel@tonic-gate return (pte); 21387c478bd9Sstevel@tonic-gate } 21397c478bd9Sstevel@tonic-gate 21407c478bd9Sstevel@tonic-gate /* 2141ae115bc7Smrj * Invalidate a page table entry as long as it currently maps something that 2142ae115bc7Smrj * matches the value determined by expect. 21437c478bd9Sstevel@tonic-gate * 2144ae115bc7Smrj * Also invalidates any TLB entries and returns the previous value of the PTE. 21457c478bd9Sstevel@tonic-gate */ 21467c478bd9Sstevel@tonic-gate x86pte_t 2147ae115bc7Smrj x86pte_inval( 2148ae115bc7Smrj htable_t *ht, 2149ae115bc7Smrj uint_t entry, 2150ae115bc7Smrj x86pte_t expect, 2151ae115bc7Smrj x86pte_t *pte_ptr) 21527c478bd9Sstevel@tonic-gate { 21537c478bd9Sstevel@tonic-gate x86pte_t *ptep; 215495c0a3c8Sjosephb x86pte_t oldpte; 215595c0a3c8Sjosephb x86pte_t found; 21567c478bd9Sstevel@tonic-gate 21577c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 215802bc52beSkchow ASSERT(ht->ht_level <= mmu.max_page_level); 215997704650Sjosephb 2160ae115bc7Smrj if (pte_ptr != NULL) 21617c478bd9Sstevel@tonic-gate ptep = pte_ptr; 2162ae115bc7Smrj else 2163ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 21647c478bd9Sstevel@tonic-gate 2165843e1988Sjohnlev #if defined(__xpv) 2166843e1988Sjohnlev /* 2167843e1988Sjohnlev * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing 2168843e1988Sjohnlev * with anything else. 
2169843e1988Sjohnlev */ 2170843e1988Sjohnlev if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) { 2171843e1988Sjohnlev int count; 2172843e1988Sjohnlev mmu_update_t t[1]; 2173843e1988Sjohnlev maddr_t ma; 2174843e1988Sjohnlev 2175843e1988Sjohnlev oldpte = GET_PTE(ptep); 2176843e1988Sjohnlev if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR)) 2177843e1988Sjohnlev goto done; 2178843e1988Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2179843e1988Sjohnlev t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2180843e1988Sjohnlev t[0].val = 0; 2181843e1988Sjohnlev if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF)) 2182843e1988Sjohnlev panic("HYPERVISOR_mmu_update() failed"); 2183843e1988Sjohnlev ASSERT(count == 1); 2184843e1988Sjohnlev goto done; 2185843e1988Sjohnlev } 2186843e1988Sjohnlev #endif /* __xpv */ 2187843e1988Sjohnlev 21887c478bd9Sstevel@tonic-gate /* 218897704650Sjosephb * Note that the loop is needed to handle changes due to h/w updating 219097704650Sjosephb * of PT_MOD/PT_REF. 21917c478bd9Sstevel@tonic-gate */ 2192ae115bc7Smrj do { 219395c0a3c8Sjosephb oldpte = GET_PTE(ptep); 219495c0a3c8Sjosephb if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR)) 219595c0a3c8Sjosephb goto done; 2196843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 219795c0a3c8Sjosephb found = CAS_PTE(ptep, oldpte, 0); 2198843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 219995c0a3c8Sjosephb } while (found != oldpte); 220095c0a3c8Sjosephb if (oldpte & (PT_REF | PT_MOD)) 220195c0a3c8Sjosephb hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry)); 22027c478bd9Sstevel@tonic-gate 220395c0a3c8Sjosephb done: 22047c478bd9Sstevel@tonic-gate if (pte_ptr == NULL) 22057c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 220695c0a3c8Sjosephb return (oldpte); 22077c478bd9Sstevel@tonic-gate } 22087c478bd9Sstevel@tonic-gate 22097c478bd9Sstevel@tonic-gate /* 2210ae115bc7Smrj * Change a page table entry if it currently matches the value in expect. 22117c478bd9Sstevel@tonic-gate */ 22127c478bd9Sstevel@tonic-gate x86pte_t 2213ae115bc7Smrj x86pte_update( 2214ae115bc7Smrj htable_t *ht, 2215ae115bc7Smrj uint_t entry, 2216ae115bc7Smrj x86pte_t expect, 2217ae115bc7Smrj x86pte_t new) 22187c478bd9Sstevel@tonic-gate { 22197c478bd9Sstevel@tonic-gate x86pte_t *ptep; 2220ae115bc7Smrj x86pte_t found; 22217c478bd9Sstevel@tonic-gate 2222ae115bc7Smrj ASSERT(new != 0); 22237c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 222402bc52beSkchow ASSERT(ht->ht_level <= mmu.max_page_level); 2225ae115bc7Smrj 2226ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2227843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2228ae115bc7Smrj found = CAS_PTE(ptep, expect, new); 2229843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2230ae115bc7Smrj if (found == expect) { 2231ae115bc7Smrj hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry)); 22327c478bd9Sstevel@tonic-gate 22337c478bd9Sstevel@tonic-gate /* 2234ae115bc7Smrj * When removing write permission *and* clearing the 2235ae115bc7Smrj * MOD bit, check if a write happened via a stale 2236ae115bc7Smrj * TLB entry before the TLB shootdown finished. 2237ae115bc7Smrj * 2238ae115bc7Smrj * If it did happen, simply re-enable write permission and 2239ae115bc7Smrj * act like the original CAS failed.
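 *
 * One possible interleaving, for illustration:
 *
 *	this cpu:  CAS succeeds, clearing PT_WRITABLE and PT_MOD
 *	other cpu: writes through a stale writable TLB entry, so the
 *		   hardware turns PT_MOD back on in the PTE
 *	this cpu:  finishes the TLB shootdown
 *	this cpu:  sees PT_MOD set, restores PT_WRITABLE and returns
 *		   a value that won't match expect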
22407c478bd9Sstevel@tonic-gate */ 2241ae115bc7Smrj if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE && 2242ae115bc7Smrj (new & (PT_WRITABLE | PT_MOD)) == 0 && 2243ae115bc7Smrj (GET_PTE(ptep) & PT_MOD) != 0) { 2244ae115bc7Smrj do { 2245ae115bc7Smrj found = GET_PTE(ptep); 2246843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2247ae115bc7Smrj found = 2248ae115bc7Smrj CAS_PTE(ptep, found, found | PT_WRITABLE); 2249843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2250ae115bc7Smrj } while ((found & PT_WRITABLE) == 0); 2251ae115bc7Smrj } 2252ae115bc7Smrj } 22537c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 2254ae115bc7Smrj return (found); 22557c478bd9Sstevel@tonic-gate } 22567c478bd9Sstevel@tonic-gate 2257843e1988Sjohnlev #ifndef __xpv 22587c478bd9Sstevel@tonic-gate /* 22597c478bd9Sstevel@tonic-gate * Copy page tables - this is just a little more complicated than the 22607c478bd9Sstevel@tonic-gate * previous routines. Note that it's also not atomic! It also is never 22617c478bd9Sstevel@tonic-gate * used for VLP pagetables. 22627c478bd9Sstevel@tonic-gate */ 22637c478bd9Sstevel@tonic-gate void 22647c478bd9Sstevel@tonic-gate x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 22657c478bd9Sstevel@tonic-gate { 22667c478bd9Sstevel@tonic-gate caddr_t src_va; 22677c478bd9Sstevel@tonic-gate caddr_t dst_va; 22687c478bd9Sstevel@tonic-gate size_t size; 2269ae115bc7Smrj x86pte_t *pteptr; 2270ae115bc7Smrj x86pte_t pte; 22717c478bd9Sstevel@tonic-gate 22727c478bd9Sstevel@tonic-gate ASSERT(khat_running); 22737c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 22747c478bd9Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_VLP)); 22757c478bd9Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN)); 22767c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 22777c478bd9Sstevel@tonic-gate 22787c478bd9Sstevel@tonic-gate /* 2279ae115bc7Smrj * Acquire access to the CPU pagetable windows for the dest and source. 
22807c478bd9Sstevel@tonic-gate */ 2281ae115bc7Smrj dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 2282ae115bc7Smrj if (kpm_vbase) { 2283ae115bc7Smrj src_va = (caddr_t) 2284ae115bc7Smrj PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry); 22857c478bd9Sstevel@tonic-gate } else { 2286ae115bc7Smrj uint_t x = PWIN_SRC(CPU->cpu_id); 22877c478bd9Sstevel@tonic-gate 22887c478bd9Sstevel@tonic-gate /* 22897c478bd9Sstevel@tonic-gate * Finish defining the src pagetable mapping 22907c478bd9Sstevel@tonic-gate */ 2291ae115bc7Smrj src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry); 2292ae115bc7Smrj pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx; 2293ae115bc7Smrj pteptr = (x86pte_t *)PWIN_PTE_VA(x); 2294ae115bc7Smrj if (mmu.pae_hat) 2295ae115bc7Smrj *pteptr = pte; 2296ae115bc7Smrj else 2297ae115bc7Smrj *(x86pte32_t *)pteptr = pte; 2298ae115bc7Smrj mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 22997c478bd9Sstevel@tonic-gate } 23007c478bd9Sstevel@tonic-gate 23017c478bd9Sstevel@tonic-gate /* 23027c478bd9Sstevel@tonic-gate * now do the copy 23037c478bd9Sstevel@tonic-gate */ 23047c478bd9Sstevel@tonic-gate size = count << mmu.pte_size_shift; 23057c478bd9Sstevel@tonic-gate bcopy(src_va, dst_va, size); 23067c478bd9Sstevel@tonic-gate 23077c478bd9Sstevel@tonic-gate x86pte_release_pagetable(dest); 23087c478bd9Sstevel@tonic-gate } 23097c478bd9Sstevel@tonic-gate 2310843e1988Sjohnlev #else /* __xpv */ 2311843e1988Sjohnlev 2312843e1988Sjohnlev /* 2313843e1988Sjohnlev * The hypervisor only supports writable pagetables at level 0, so we have 2314843e1988Sjohnlev * to install these 1 by 1 the slow way. 2315843e1988Sjohnlev */ 2316843e1988Sjohnlev void 2317843e1988Sjohnlev x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 2318843e1988Sjohnlev { 2319843e1988Sjohnlev caddr_t src_va; 2320843e1988Sjohnlev x86pte_t pte; 2321843e1988Sjohnlev 2322843e1988Sjohnlev ASSERT(!IN_XPV_PANIC()); 2323843e1988Sjohnlev src_va = (caddr_t)x86pte_access_pagetable(src, entry); 2324843e1988Sjohnlev while (count) { 2325843e1988Sjohnlev if (mmu.pae_hat) 2326843e1988Sjohnlev pte = *(x86pte_t *)src_va; 2327843e1988Sjohnlev else 2328843e1988Sjohnlev pte = *(x86pte32_t *)src_va; 2329843e1988Sjohnlev if (pte != 0) { 2330843e1988Sjohnlev set_pteval(pfn_to_pa(dest->ht_pfn), entry, 2331843e1988Sjohnlev dest->ht_level, pte); 2332843e1988Sjohnlev #ifdef __amd64 2333843e1988Sjohnlev if (dest->ht_level == mmu.max_level && 2334843e1988Sjohnlev htable_e2va(dest, entry) < HYPERVISOR_VIRT_END) 2335843e1988Sjohnlev set_pteval( 2336843e1988Sjohnlev pfn_to_pa(dest->ht_hat->hat_user_ptable), 2337843e1988Sjohnlev entry, dest->ht_level, pte); 2338843e1988Sjohnlev #endif 2339843e1988Sjohnlev } 2340843e1988Sjohnlev --count; 2341843e1988Sjohnlev ++entry; 2342843e1988Sjohnlev src_va += mmu.pte_size; 2343843e1988Sjohnlev } 2344843e1988Sjohnlev x86pte_release_pagetable(src); 2345843e1988Sjohnlev } 2346843e1988Sjohnlev #endif /* __xpv */ 2347843e1988Sjohnlev 23487c478bd9Sstevel@tonic-gate /* 23497c478bd9Sstevel@tonic-gate * Zero page table entries - Note this doesn't use atomic stores! 
23507c478bd9Sstevel@tonic-gate */ 2351ae115bc7Smrj static void 23527c478bd9Sstevel@tonic-gate x86pte_zero(htable_t *dest, uint_t entry, uint_t count) 23537c478bd9Sstevel@tonic-gate { 23547c478bd9Sstevel@tonic-gate caddr_t dst_va; 23557c478bd9Sstevel@tonic-gate size_t size; 2356843e1988Sjohnlev #ifdef __xpv 2357843e1988Sjohnlev int x; 2358843e1988Sjohnlev x86pte_t newpte; 2359843e1988Sjohnlev #endif 23607c478bd9Sstevel@tonic-gate 23617c478bd9Sstevel@tonic-gate /* 23627c478bd9Sstevel@tonic-gate * Map in the page table to be zeroed. 23637c478bd9Sstevel@tonic-gate */ 23647c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 23657c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 2366ae115bc7Smrj 2367843e1988Sjohnlev /* 2368843e1988Sjohnlev * On the hypervisor we don't use x86pte_access_pagetable() since 2369843e1988Sjohnlev * in this case the page is not pinned yet. 2370843e1988Sjohnlev */ 2371843e1988Sjohnlev #ifdef __xpv 2372843e1988Sjohnlev if (kpm_vbase == NULL) { 2373843e1988Sjohnlev kpreempt_disable(); 2374843e1988Sjohnlev ASSERT(CPU->cpu_hat_info != NULL); 2375843e1988Sjohnlev mutex_enter(&CPU->cpu_hat_info->hci_mutex); 2376843e1988Sjohnlev x = PWIN_TABLE(CPU->cpu_id); 2377843e1988Sjohnlev newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE; 2378843e1988Sjohnlev xen_map(newpte, PWIN_VA(x)); 2379843e1988Sjohnlev dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry); 2380843e1988Sjohnlev } else 2381843e1988Sjohnlev #endif 2382ae115bc7Smrj dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 2383ae115bc7Smrj 23847c478bd9Sstevel@tonic-gate size = count << mmu.pte_size_shift; 2385ae115bc7Smrj ASSERT(size > BLOCKZEROALIGN); 2386ae115bc7Smrj #ifdef __i386 2387ae115bc7Smrj if ((x86_feature & X86_SSE2) == 0) 23887c478bd9Sstevel@tonic-gate bzero(dst_va, size); 2389ae115bc7Smrj else 2390ae115bc7Smrj #endif 2391ae115bc7Smrj block_zero_no_xmm(dst_va, size); 2392ae115bc7Smrj 2393843e1988Sjohnlev #ifdef __xpv 2394843e1988Sjohnlev if (kpm_vbase == NULL) { 2395843e1988Sjohnlev xen_map(0, PWIN_VA(x)); 2396843e1988Sjohnlev mutex_exit(&CPU->cpu_hat_info->hci_mutex); 2397843e1988Sjohnlev kpreempt_enable(); 2398843e1988Sjohnlev } else 2399843e1988Sjohnlev #endif 24007c478bd9Sstevel@tonic-gate x86pte_release_pagetable(dest); 24017c478bd9Sstevel@tonic-gate } 24027c478bd9Sstevel@tonic-gate 24037c478bd9Sstevel@tonic-gate /* 24047c478bd9Sstevel@tonic-gate * Called to ensure that all pagetables are in the system dump 24057c478bd9Sstevel@tonic-gate */ 24067c478bd9Sstevel@tonic-gate void 24077c478bd9Sstevel@tonic-gate hat_dump(void) 24087c478bd9Sstevel@tonic-gate { 24097c478bd9Sstevel@tonic-gate hat_t *hat; 24107c478bd9Sstevel@tonic-gate uint_t h; 24117c478bd9Sstevel@tonic-gate htable_t *ht; 24127c478bd9Sstevel@tonic-gate 24137c478bd9Sstevel@tonic-gate /* 2414a85a6733Sjosephb * Dump all page tables 24157c478bd9Sstevel@tonic-gate */ 2416a85a6733Sjosephb for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) { 24177c478bd9Sstevel@tonic-gate for (h = 0; h < hat->hat_num_hash; ++h) { 24187c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 2419a85a6733Sjosephb if ((ht->ht_flags & HTABLE_VLP) == 0) 24207c478bd9Sstevel@tonic-gate dump_page(ht->ht_pfn); 24217c478bd9Sstevel@tonic-gate } 24227c478bd9Sstevel@tonic-gate } 24237c478bd9Sstevel@tonic-gate } 24247c478bd9Sstevel@tonic-gate } 2425
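
#ifdef HTABLE_EXAMPLES
/*
 * Illustrative only, never compiled into the kernel: a sketch of how
 * the interfaces above compose, counting the valid mappings in a hat
 * with htable_walk(). Once htable_walk() returns 0 it holds no
 * htable, so no final htable_release() is needed on this path.
 */
static size_t
htable_example_count_mappings(hat_t *hat)
{
	htable_t *ht = NULL;
	uintptr_t va = 0;
	size_t cnt = 0;

	while (htable_walk(hat, &ht, &va, HTABLE_WALK_TO_END) != 0) {
		++cnt;
		va += LEVEL_SIZE(ht->ht_level);
	}
	return (cnt);
}
#endif /* HTABLE_EXAMPLES */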