/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>
#include <sys/panic.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#include <sys/xpv_panic.h>
#endif

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);

/*
 * kmem cache from which htable_t structures are allocated; the pages
 * backing the actual hardware pagetables come from ptable_alloc() below.
 */
kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used to facilitate testing of the htable_steal() code. By resetting
 * htable_reserve_amount to a lower value, we can force stealing to occur.
 * The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * The number of steal passes is a tunable that can be set via /etc/system,
 * e.g. "set htable_steal_passes = 16". Any value works, but a power of
 * two <= mmu.ptes_per_table is best.
 */
uint_t htable_steal_passes = 8;

/*
 * mutex stuff for access to the htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);

/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

#ifdef __xpv
/*
 * Deal with hypervisor complications.
 */
void
xen_flush_va(caddr_t va)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
	} else {
		t.cmd = MMUEXT_INVLPG_LOCAL;
		t.arg1.linear_addr = (uintptr_t)va;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_va(caddr_t va, cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
		return;
	}

	t.cmd = MMUEXT_INVLPG_MULTI;
	t.arg1.linear_addr = (uintptr_t)va;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_flush_tlb()
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		xpv_panic_reload_cr3();
	} else {
		t.cmd = MMUEXT_TLB_FLUSH_LOCAL;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_tlb(cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	ASSERT(!IN_XPV_PANIC());
	t.cmd = MMUEXT_TLB_FLUSH_MULTI;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}
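
/*
 * The four routines above are thin wrappers around the MMUEXT TLB-flush
 * hypercalls: single-address INVLPG vs. full flush, for the local CPU
 * vs. a set of CPUs. During an xpv panic, hypercalls are avoided and the
 * TLB is manipulated directly via mmu_tlbflush_entry() or
 * xpv_panic_reload_cr3().
 */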

/*
 * Install/Adjust a kpm mapping under the hypervisor.
 * Value of "how" should be:
 *	PT_WRITABLE | PT_VALID - regular kpm mapping
 *	PT_VALID - make mapping read-only
 *	0 - remove mapping
 *
 * returns 0 on success. non-zero for failure.
 */
int
xen_kpm_page(pfn_t pfn, uint_t how)
{
	paddr_t pa = mmu_ptob((paddr_t)pfn);
	x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD;

	if (kpm_vbase == NULL)
		return (0);

	if (how)
		pte |= pa_to_ma(pa) | how;
	else
		pte = 0;
	return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa,
	    pte, UVMF_INVLPG | UVMF_ALL));
}

void
xen_pin(pfn_t pfn, level_t lvl)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_PIN_L1_TABLE + lvl;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_unpin(pfn_t pfn)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_UNPIN_TABLE;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

static void
xen_map(uint64_t pte, caddr_t va)
{
	if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte,
	    UVMF_INVLPG | UVMF_LOCAL))
		panic("HYPERVISOR_update_va_mapping() failed");
}
#endif /* __xpv */

/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	ASSERT(PAGE_SHARED(pp));
	pfn = pp->p_pagenum;
	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	atomic_inc_32(&active_ptables);
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_dec_32(&active_ptables);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);
	ASSERT(!IN_XPV_PANIC());

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		u_offset_t off = pp->p_offset;
		page_unlock(pp);
		pp = page_lookup(&kvp, off, SE_EXCL);
		if (pp == NULL)
			panic("page not found");
	}
#ifdef __xpv
	if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0)
		panic("failure making kpm r/w pfn=0x%lx", pfn);
#endif
	page_hashout(pp, NULL);
	page_free(pp, 1);
	page_unresv(1);
}
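
/*
 * Every pfn handed out by ptable_alloc() eventually comes back through
 * ptable_free(), either from htable_free() or from the error and steal
 * paths in htable_alloc(). The two routines keep active_ptables and the
 * hs_ptable_* statistics in sync.
 */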

/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}
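
/*
 * The reserve list above exists so that htable_get_reserve() can satisfy
 * allocations in contexts where sleeping in kmem_cache_alloc() is not
 * possible; htable_initial_reserve() primes it to get through boot and
 * htable_adjust_reserve() below trims it back to htable_reserve_amount
 * afterwards.
 */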

/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount &&
	    !USE_HAT_RESERVES()) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}

/*
 * Search the active htables for one to steal. Start at a different hash
 * bucket every time to help spread the pain of stealing.
 */
static void
htable_steal_active(hat_t *hat, uint_t cnt, uint_t threshold,
    uint_t *stolen, htable_t **list)
{
	static uint_t h_seed = 0;
	htable_t *higher, *ht;
	uint_t h, e, h_start;
	uintptr_t va;
	x86pte_t pte;

	h = h_start = h_seed++ % hat->hat_num_hash;
	do {
		higher = NULL;
		HTABLE_ENTER(h);
		for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {

			/*
			 * Can we rule out reaping?
			 */
			if (ht->ht_busy != 0 ||
			    (ht->ht_flags & HTABLE_SHARED_PFN) ||
			    ht->ht_level > 0 || ht->ht_valid_cnt > threshold ||
			    ht->ht_lock_cnt != 0)
				continue;

			/*
			 * Increment busy so the htable can't disappear. We
			 * drop the htable mutex to avoid deadlocks with
			 * hat_pageunload() and the hment mutex while we
			 * call hat_pte_unmap()
			 */
			++ht->ht_busy;
			HTABLE_EXIT(h);

			/*
			 * Try stealing.
			 * - unload and invalidate all PTEs
			 */
			for (e = 0, va = ht->ht_vaddr;
			    e < HTABLE_NUM_PTES(ht) && ht->ht_valid_cnt > 0 &&
			    ht->ht_busy == 1 && ht->ht_lock_cnt == 0;
			    ++e, va += MMU_PAGESIZE) {
				pte = x86pte_get(ht, e);
				if (!PTE_ISVALID(pte))
					continue;
				hat_pte_unmap(ht, e, HAT_UNLOAD, pte, NULL);
			}

			/*
			 * Reacquire htable lock. If we didn't remove all
			 * mappings in the table, or another thread added a new
			 * mapping behind us, give up on this table.
			 */
			HTABLE_ENTER(h);
			if (ht->ht_busy != 1 || ht->ht_valid_cnt != 0 ||
			    ht->ht_lock_cnt != 0) {
				--ht->ht_busy;
				continue;
			}

			/*
			 * Steal it and unlink the page table.
			 */
			higher = ht->ht_parent;
			unlink_ptp(higher, ht, ht->ht_vaddr);

			/*
			 * remove from the hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}

			/*
			 * Break to outer loop to release the
			 * higher (ht_parent) pagetable. This
			 * spreads out the pain caused by
			 * pagefaults.
			 */
			ht->ht_next = *list;
			*list = ht;
			++*stolen;
			break;
		}
		HTABLE_EXIT(h);
		if (higher != NULL)
			htable_release(higher);
		if (++h == hat->hat_num_hash)
			h = 0;
	} while (*stolen < cnt && h != h_start);
}

/*
 * Move hat to the end of the kas list
 */
static void
move_victim(hat_t *hat)
{
	ASSERT(MUTEX_HELD(&hat_list_lock));

	/* unlink victim hat */
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;

	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	/* relink at end of hat list */
	hat->hat_next = NULL;
	hat->hat_prev = kas.a_hat->hat_prev;
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat;
	else
		kas.a_hat->hat_next = hat;

	kas.a_hat->hat_prev = hat;
}
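
/*
 * A worked example of the per-pass threshold used below: with 512 PTEs
 * per table (the usual amd64 value of mmu.ptes_per_table) and the default
 * htable_steal_passes of 8, pass 1 only steals pagetables with at most
 * 64 valid entries, pass 2 at most 128, and so on, so lightly used
 * tables are victimized first.
 */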

/*
 * This routine steals htables from user processes. Called by htable_reap
 * (reap=TRUE) or htable_alloc (reap=FALSE).
 */
static htable_t *
htable_steal(uint_t cnt, boolean_t reap)
{
	hat_t *hat = kas.a_hat;		/* list starts with khat */
	htable_t *list = NULL;
	htable_t *ht;
	uint_t stolen = 0;
	uint_t pass;
	uint_t threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_inc_32(&htable_dont_cache);
	for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;

		mutex_enter(&hat_list_lock);

		/* skip the first hat (kernel) */
		hat = kas.a_hat->hat_next;
		for (;;) {
			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL)
				break;

			/*
			 * Mark the HAT as a stealing victim so that it is
			 * not freed from under us, e.g. in as_free()
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal active htables on first pass.
			 */
			if (pass != 0 && (stolen < cnt))
				htable_steal_active(hat, cnt, threshold,
				    &stolen, &list);

			/*
			 * do synchronous teardown for the reap case so that
			 * we can forget hat; at this time, hat is
			 * guaranteed to be around because HAT_VICTIM is set
			 * (see htable_free() for similar code)
			 */
			for (ht = list; (ht) && (reap); ht = ht->ht_next) {
				if (ht->ht_hat == NULL)
					continue;
				ASSERT(ht->ht_hat == hat);
#if defined(__xpv) && defined(__amd64)
				if (!(ht->ht_flags & HTABLE_VLP) &&
				    ht->ht_level == mmu.max_level) {
					ptable_free(hat->hat_user_ptable);
					hat->hat_user_ptable = PFN_INVALID;
				}
#endif
				/*
				 * forget the hat
				 */
				ht->ht_hat = NULL;
			}

			mutex_enter(&hat_list_lock);

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat)
					move_victim(hat);
				/*
				 * We are finished
				 */
			}

			/*
			 * Clear the victim flag, hat can go away now (once
			 * the lock is dropped)
			 */
			if (hat->hat_flags & HAT_VICTIM) {
				ASSERT(hat != kas.a_hat);
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}

			/* move on to the next hat */
			hat = hat->hat_next;
		}

		mutex_exit(&hat_list_lock);
	}
	ASSERT(!MUTEX_HELD(&hat_list_lock));

	atomic_dec_32(&htable_dont_cache);
	return (list);
}

/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t reap_cnt;
	htable_t *list;
	htable_t *ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);

	/*
	 * Note: htable_dont_cache should be set at the time of
	 * invoking htable_free()
	 */
	atomic_inc_32(&htable_dont_cache);
	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	XPV_DISALLOW_MIGRATE();
	list = htable_steal(reap_cnt, B_TRUE);
	XPV_ALLOW_MIGRATE();
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}
	atomic_dec_32(&htable_dont_cache);

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}
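
/*
 * Example of the reap_cnt sizing above: with 4GB of physical memory
 * (physmem == 0x100000 4K pages) and 8000 active pagetables, reap_cnt is
 * MAX(MIN(52428, 400), 10) == 400, i.e. 5% of the active pagetables.
 */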

/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t *hat,
	uintptr_t vaddr,
	level_t level,
	htable_t *shared)
{
	htable_t *ht = NULL;
	uint_t is_vlp;
	uint_t is_bare = 0;
	uint_t need_to_zero = 1;
	int kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
		 */
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (USE_HAT_RESERVES() ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc((uintptr_t)ht);
			if (ht->ht_pfn == PFN_INVALID) {
				if (USE_HAT_RESERVES())
					htable_put_reserve(ht);
				else
					kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable_steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1, B_FALSE);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL) {
			if (is_bare) {
				ptable_free(ht->ht_pfn);
				ht->ht_pfn = PFN_INVALID;
#if defined(__xpv) && defined(__amd64)
			/*
			 * make stolen page table writable again in kpm
			 */
			} else if (kpm_vbase && xen_kpm_page(ht->ht_pfn,
			    PT_VALID | PT_WRITABLE) < 0) {
				panic("failure making kpm r/w pfn=0x%lx",
				    ht->ht_pfn);
#endif
			}
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

#if defined(__amd64) && defined(__xpv)
	/*
	 * Under the 64-bit hypervisor, we have 2 top level page tables.
	 * If this allocation fails, we'll resort to stealing.
	 * We use the stolen page indirectly, by freeing the
	 * stolen htable first.
	 */
	if (level == mmu.max_level) {
		for (;;) {
			htable_t *stolen;

			hat->hat_user_ptable = ptable_alloc((uintptr_t)ht + 1);
			if (hat->hat_user_ptable != PFN_INVALID)
				break;
			stolen = htable_steal(1, B_FALSE);
			if (stolen == NULL)
				panic("2nd steal ptable failed\n");
			htable_free(stolen);
		}
		block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable),
		    MMU_PAGESIZE);
	}
#endif

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

#if defined(__amd64) && defined(__xpv)
	if (!is_bare && kpm_vbase) {
		(void) xen_kpm_page(ht->ht_pfn, PT_VALID);
		if (level == mmu.max_level)
			(void) xen_kpm_page(hat->hat_user_ptable, PT_VALID);
	}
#endif

	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t *hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot time reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
#if defined(__amd64) && defined(__xpv)
		if (ht->ht_level == mmu.max_level && hat != NULL) {
			ptable_free(hat->hat_user_ptable);
			hat->hat_user_ptable = PFN_INVALID;
		}
#endif
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free it or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}

/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all leftover
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t *ht;
	int h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_inc_32(&htable_dont_cache);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_dec_32(&htable_dont_cache);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t found;
	hat_t *hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
#ifdef __xpv
	/*
	 * This is weird, but Xen apparently automatically unlinks empty
	 * pagetables from the upper page table. So allow PTP to be 0 already.
	 */
	if (found != expect && found != 0)
#else
	if (found != expect)
#endif
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When a top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors.
	 *
	 * If we don't need to do that, then we still have to INVLPG against
	 * an address covered by the inner page table, as the latest processors
	 * have TLB-like caches for non-leaf page table entries.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
		    DEMAP_ALL_ADDR : old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}

/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t entry = htable_va2entry(vaddr, higher);
	x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on a PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}
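
/*
 * link_ptp() and unlink_ptp() are the only places a PTP entry is created
 * or destroyed. Both use x86pte_cas(), so an unexpected concurrent change
 * to the upper level entry is detected and causes a panic rather than
 * silent corruption of the pagetable hierarchy.
 */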
1152843e1988Sjohnlev * We can't do this on the hypervisor as we need the page table to be 1153843e1988Sjohnlev * implicitly unpinned before it goes to the free page lists. This can't 1154843e1988Sjohnlev * happen unless we fully unlink it from the page table hierarchy. 11557c478bd9Sstevel@tonic-gate */ 11567c478bd9Sstevel@tonic-gate void 11577c478bd9Sstevel@tonic-gate htable_release(htable_t *ht) 11587c478bd9Sstevel@tonic-gate { 11597c478bd9Sstevel@tonic-gate uint_t hashval; 11607c478bd9Sstevel@tonic-gate htable_t *shared; 11617c478bd9Sstevel@tonic-gate htable_t *higher; 11627c478bd9Sstevel@tonic-gate hat_t *hat; 11637c478bd9Sstevel@tonic-gate uintptr_t va; 11647c478bd9Sstevel@tonic-gate level_t level; 11657c478bd9Sstevel@tonic-gate 11667c478bd9Sstevel@tonic-gate while (ht != NULL) { 11677c478bd9Sstevel@tonic-gate shared = NULL; 11687c478bd9Sstevel@tonic-gate for (;;) { 11697c478bd9Sstevel@tonic-gate hat = ht->ht_hat; 11707c478bd9Sstevel@tonic-gate va = ht->ht_vaddr; 11717c478bd9Sstevel@tonic-gate level = ht->ht_level; 11727c478bd9Sstevel@tonic-gate hashval = HTABLE_HASH(hat, va, level); 11737c478bd9Sstevel@tonic-gate 11747c478bd9Sstevel@tonic-gate /* 11757c478bd9Sstevel@tonic-gate * The common case is that this isn't the last use of 11767c478bd9Sstevel@tonic-gate * an htable so we don't want to free the htable. 11777c478bd9Sstevel@tonic-gate */ 11787c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 11797c478bd9Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt >= 0); 11807c478bd9Sstevel@tonic-gate ASSERT(ht->ht_busy > 0); 11817c478bd9Sstevel@tonic-gate if (ht->ht_valid_cnt > 0) 11827c478bd9Sstevel@tonic-gate break; 11837c478bd9Sstevel@tonic-gate if (ht->ht_busy > 1) 11847c478bd9Sstevel@tonic-gate break; 11852ba723d8Smec ASSERT(ht->ht_lock_cnt == 0); 11867c478bd9Sstevel@tonic-gate 1187843e1988Sjohnlev #if !defined(__xpv) 11887c478bd9Sstevel@tonic-gate /* 11897c478bd9Sstevel@tonic-gate * we always release empty shared htables 11907c478bd9Sstevel@tonic-gate */ 11917c478bd9Sstevel@tonic-gate if (!(ht->ht_flags & HTABLE_SHARED_PFN)) { 11927c478bd9Sstevel@tonic-gate 11937c478bd9Sstevel@tonic-gate /* 11947c478bd9Sstevel@tonic-gate * don't release if in address space tear down 11957c478bd9Sstevel@tonic-gate */ 11967c478bd9Sstevel@tonic-gate if (hat->hat_flags & HAT_FREEING) 11977c478bd9Sstevel@tonic-gate break; 11987c478bd9Sstevel@tonic-gate 11997c478bd9Sstevel@tonic-gate /* 12007c478bd9Sstevel@tonic-gate * At and above max_page_level, free if it's for 12017c478bd9Sstevel@tonic-gate * a boot-time kernel mapping below kernelbase. 12027c478bd9Sstevel@tonic-gate */ 12037c478bd9Sstevel@tonic-gate if (level >= mmu.max_page_level && 12047c478bd9Sstevel@tonic-gate (hat != kas.a_hat || va >= kernelbase)) 12057c478bd9Sstevel@tonic-gate break; 12067c478bd9Sstevel@tonic-gate } 1207843e1988Sjohnlev #endif /* __xpv */ 12087c478bd9Sstevel@tonic-gate 12097c478bd9Sstevel@tonic-gate /* 1210ae115bc7Smrj * Remember if we destroy an htable that shares its PFN 1211ae115bc7Smrj * from elsewhere. 12127c478bd9Sstevel@tonic-gate */ 12137c478bd9Sstevel@tonic-gate if (ht->ht_flags & HTABLE_SHARED_PFN) { 12147c478bd9Sstevel@tonic-gate ASSERT(shared == NULL); 12157c478bd9Sstevel@tonic-gate shared = ht->ht_shares; 12167c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_htable_unshared); 12177c478bd9Sstevel@tonic-gate } 12187c478bd9Sstevel@tonic-gate 12197c478bd9Sstevel@tonic-gate /* 12207c478bd9Sstevel@tonic-gate * Handle release of a table and freeing the htable_t. 12217c478bd9Sstevel@tonic-gate * Unlink it from the table higher (ie.
ht_parent). 12227c478bd9Sstevel@tonic-gate */ 12237c478bd9Sstevel@tonic-gate higher = ht->ht_parent; 12247c478bd9Sstevel@tonic-gate ASSERT(higher != NULL); 12257c478bd9Sstevel@tonic-gate 12267c478bd9Sstevel@tonic-gate /* 12277c478bd9Sstevel@tonic-gate * Unlink the pagetable. 12287c478bd9Sstevel@tonic-gate */ 12297c478bd9Sstevel@tonic-gate unlink_ptp(higher, ht, va); 12307c478bd9Sstevel@tonic-gate 12317c478bd9Sstevel@tonic-gate /* 12327c478bd9Sstevel@tonic-gate * remove this htable from its hash list 12337c478bd9Sstevel@tonic-gate */ 12347c478bd9Sstevel@tonic-gate if (ht->ht_next) 12357c478bd9Sstevel@tonic-gate ht->ht_next->ht_prev = ht->ht_prev; 12367c478bd9Sstevel@tonic-gate 12377c478bd9Sstevel@tonic-gate if (ht->ht_prev) { 12387c478bd9Sstevel@tonic-gate ht->ht_prev->ht_next = ht->ht_next; 12397c478bd9Sstevel@tonic-gate } else { 12407c478bd9Sstevel@tonic-gate ASSERT(hat->hat_ht_hash[hashval] == ht); 12417c478bd9Sstevel@tonic-gate hat->hat_ht_hash[hashval] = ht->ht_next; 12427c478bd9Sstevel@tonic-gate } 12437c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 12447c478bd9Sstevel@tonic-gate htable_free(ht); 12457c478bd9Sstevel@tonic-gate ht = higher; 12467c478bd9Sstevel@tonic-gate } 12477c478bd9Sstevel@tonic-gate 12487c478bd9Sstevel@tonic-gate ASSERT(ht->ht_busy >= 1); 12497c478bd9Sstevel@tonic-gate --ht->ht_busy; 12507c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 12517c478bd9Sstevel@tonic-gate 12527c478bd9Sstevel@tonic-gate /* 12537c478bd9Sstevel@tonic-gate * If we released a shared htable, do a release on the htable 12547c478bd9Sstevel@tonic-gate * from which it shared 12557c478bd9Sstevel@tonic-gate */ 12567c478bd9Sstevel@tonic-gate ht = shared; 12577c478bd9Sstevel@tonic-gate } 12587c478bd9Sstevel@tonic-gate } 12597c478bd9Sstevel@tonic-gate 12607c478bd9Sstevel@tonic-gate /* 12617c478bd9Sstevel@tonic-gate * Find the htable for the pagetable at the given level for the given address. 
12627c478bd9Sstevel@tonic-gate * If found acquires a hold that eventually needs to be htable_release()d 12637c478bd9Sstevel@tonic-gate */ 12647c478bd9Sstevel@tonic-gate htable_t * 12657c478bd9Sstevel@tonic-gate htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level) 12667c478bd9Sstevel@tonic-gate { 12677c478bd9Sstevel@tonic-gate uintptr_t base; 12687c478bd9Sstevel@tonic-gate uint_t hashval; 12697c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 12707c478bd9Sstevel@tonic-gate 12717c478bd9Sstevel@tonic-gate ASSERT(level >= 0); 12727c478bd9Sstevel@tonic-gate ASSERT(level <= TOP_LEVEL(hat)); 12737c478bd9Sstevel@tonic-gate 12747173d045Sjosephb if (level == TOP_LEVEL(hat)) { 12757173d045Sjosephb #if defined(__amd64) 12767173d045Sjosephb /* 12777173d045Sjosephb * 32 bit address spaces on 64 bit kernels need to check 12787173d045Sjosephb * for overflow of the 32 bit address space 12797173d045Sjosephb */ 12807173d045Sjosephb if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32)) 12817173d045Sjosephb return (NULL); 12827173d045Sjosephb #endif 12837c478bd9Sstevel@tonic-gate base = 0; 12847173d045Sjosephb } else { 12857c478bd9Sstevel@tonic-gate base = vaddr & LEVEL_MASK(level + 1); 12867173d045Sjosephb } 12877c478bd9Sstevel@tonic-gate 12887c478bd9Sstevel@tonic-gate hashval = HTABLE_HASH(hat, base, level); 12897c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 12907c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) { 12917c478bd9Sstevel@tonic-gate if (ht->ht_hat == hat && 12927c478bd9Sstevel@tonic-gate ht->ht_vaddr == base && 12937c478bd9Sstevel@tonic-gate ht->ht_level == level) 12947c478bd9Sstevel@tonic-gate break; 12957c478bd9Sstevel@tonic-gate } 12967c478bd9Sstevel@tonic-gate if (ht) 12977c478bd9Sstevel@tonic-gate ++ht->ht_busy; 12987c478bd9Sstevel@tonic-gate 12997c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 13007c478bd9Sstevel@tonic-gate return (ht); 13017c478bd9Sstevel@tonic-gate } 13027c478bd9Sstevel@tonic-gate 13037c478bd9Sstevel@tonic-gate /* 13047c478bd9Sstevel@tonic-gate * Acquires a hold on a known htable (from a locked hment entry). 13057c478bd9Sstevel@tonic-gate */ 13067c478bd9Sstevel@tonic-gate void 13077c478bd9Sstevel@tonic-gate htable_acquire(htable_t *ht) 13087c478bd9Sstevel@tonic-gate { 13097c478bd9Sstevel@tonic-gate hat_t *hat = ht->ht_hat; 13107c478bd9Sstevel@tonic-gate level_t level = ht->ht_level; 13117c478bd9Sstevel@tonic-gate uintptr_t base = ht->ht_vaddr; 13127c478bd9Sstevel@tonic-gate uint_t hashval = HTABLE_HASH(hat, base, level); 13137c478bd9Sstevel@tonic-gate 13147c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 13157c478bd9Sstevel@tonic-gate #ifdef DEBUG 13167c478bd9Sstevel@tonic-gate /* 13177c478bd9Sstevel@tonic-gate * make sure the htable is there 13187c478bd9Sstevel@tonic-gate */ 13197c478bd9Sstevel@tonic-gate { 13207c478bd9Sstevel@tonic-gate htable_t *h; 13217c478bd9Sstevel@tonic-gate 13227c478bd9Sstevel@tonic-gate for (h = hat->hat_ht_hash[hashval]; 13237c478bd9Sstevel@tonic-gate h && h != ht; 13247c478bd9Sstevel@tonic-gate h = h->ht_next) 13257c478bd9Sstevel@tonic-gate ; 13267c478bd9Sstevel@tonic-gate ASSERT(h == ht); 13277c478bd9Sstevel@tonic-gate } 13287c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 13297c478bd9Sstevel@tonic-gate ++ht->ht_busy; 13307c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 13317c478bd9Sstevel@tonic-gate } 13327c478bd9Sstevel@tonic-gate 13337c478bd9Sstevel@tonic-gate /* 13347c478bd9Sstevel@tonic-gate * Find the htable for the pagetable at the given level for the given address. 
13357c478bd9Sstevel@tonic-gate * If found acquires a hold that eventually needs to be htable_release()d 13367c478bd9Sstevel@tonic-gate * If not found the table is created. 13377c478bd9Sstevel@tonic-gate * 13387c478bd9Sstevel@tonic-gate * Since we can't hold a hash table mutex during allocation, we have to 13397c478bd9Sstevel@tonic-gate * drop it and redo the search on a create. Then we may have to free the newly 13407c478bd9Sstevel@tonic-gate * allocated htable if another thread raced in and created it ahead of us. 13417c478bd9Sstevel@tonic-gate */ 13427c478bd9Sstevel@tonic-gate htable_t * 13437c478bd9Sstevel@tonic-gate htable_create( 13447c478bd9Sstevel@tonic-gate hat_t *hat, 13457c478bd9Sstevel@tonic-gate uintptr_t vaddr, 13467c478bd9Sstevel@tonic-gate level_t level, 13477c478bd9Sstevel@tonic-gate htable_t *shared) 13487c478bd9Sstevel@tonic-gate { 13497c478bd9Sstevel@tonic-gate uint_t h; 13507c478bd9Sstevel@tonic-gate level_t l; 13517c478bd9Sstevel@tonic-gate uintptr_t base; 13527c478bd9Sstevel@tonic-gate htable_t *ht; 13537c478bd9Sstevel@tonic-gate htable_t *higher = NULL; 13547c478bd9Sstevel@tonic-gate htable_t *new = NULL; 13557c478bd9Sstevel@tonic-gate 13567c478bd9Sstevel@tonic-gate if (level < 0 || level > TOP_LEVEL(hat)) 13577c478bd9Sstevel@tonic-gate panic("htable_create(): level %d out of range\n", level); 13587c478bd9Sstevel@tonic-gate 13597c478bd9Sstevel@tonic-gate /* 13607c478bd9Sstevel@tonic-gate * Create the page tables in top down order. 13617c478bd9Sstevel@tonic-gate */ 13627c478bd9Sstevel@tonic-gate for (l = TOP_LEVEL(hat); l >= level; --l) { 13637c478bd9Sstevel@tonic-gate new = NULL; 13647c478bd9Sstevel@tonic-gate if (l == TOP_LEVEL(hat)) 13657c478bd9Sstevel@tonic-gate base = 0; 13667c478bd9Sstevel@tonic-gate else 13677c478bd9Sstevel@tonic-gate base = vaddr & LEVEL_MASK(l + 1); 13687c478bd9Sstevel@tonic-gate 13697c478bd9Sstevel@tonic-gate h = HTABLE_HASH(hat, base, l); 13707c478bd9Sstevel@tonic-gate try_again: 13717c478bd9Sstevel@tonic-gate /* 13727c478bd9Sstevel@tonic-gate * look up the htable at this level 13737c478bd9Sstevel@tonic-gate */ 13747c478bd9Sstevel@tonic-gate HTABLE_ENTER(h); 13757c478bd9Sstevel@tonic-gate if (l == TOP_LEVEL(hat)) { 13767c478bd9Sstevel@tonic-gate ht = hat->hat_htable; 13777c478bd9Sstevel@tonic-gate } else { 13787c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 13797c478bd9Sstevel@tonic-gate ASSERT(ht->ht_hat == hat); 13807c478bd9Sstevel@tonic-gate if (ht->ht_vaddr == base && 13817c478bd9Sstevel@tonic-gate ht->ht_level == l) 13827c478bd9Sstevel@tonic-gate break; 13837c478bd9Sstevel@tonic-gate } 13847c478bd9Sstevel@tonic-gate } 13857c478bd9Sstevel@tonic-gate 13867c478bd9Sstevel@tonic-gate /* 13877c478bd9Sstevel@tonic-gate * if we found the htable, increment its busy cnt 13887c478bd9Sstevel@tonic-gate * and if we had allocated a new htable, free it. 13897c478bd9Sstevel@tonic-gate */ 13907c478bd9Sstevel@tonic-gate if (ht != NULL) { 13917c478bd9Sstevel@tonic-gate /* 13927c478bd9Sstevel@tonic-gate * If we find a pre-existing shared table, it must 13937c478bd9Sstevel@tonic-gate * share from the same place. 
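 * (Allowing one pagetable to be shared from two different sources would * leave the ht_shares back pointer ambiguous, hence the panic below.)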
13947c478bd9Sstevel@tonic-gate */ 13957c478bd9Sstevel@tonic-gate if (l == level && shared && ht->ht_shares && 13967c478bd9Sstevel@tonic-gate ht->ht_shares != shared) { 13977c478bd9Sstevel@tonic-gate panic("htable shared from wrong place " 1398903a11ebSrh87107 "found htable=%p shared=%p", 1399903a11ebSrh87107 (void *)ht, (void *)shared); 14007c478bd9Sstevel@tonic-gate } 14017c478bd9Sstevel@tonic-gate ++ht->ht_busy; 14027c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 14037c478bd9Sstevel@tonic-gate if (new) 14047c478bd9Sstevel@tonic-gate htable_free(new); 14057c478bd9Sstevel@tonic-gate if (higher != NULL) 14067c478bd9Sstevel@tonic-gate htable_release(higher); 14077c478bd9Sstevel@tonic-gate higher = ht; 14087c478bd9Sstevel@tonic-gate 14097c478bd9Sstevel@tonic-gate /* 14107c478bd9Sstevel@tonic-gate * if we didn't find it on the first search 14117c478bd9Sstevel@tonic-gate * allocate a new one and search again 14127c478bd9Sstevel@tonic-gate */ 14137c478bd9Sstevel@tonic-gate } else if (new == NULL) { 14147c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 14157c478bd9Sstevel@tonic-gate new = htable_alloc(hat, base, l, 14167c478bd9Sstevel@tonic-gate l == level ? shared : NULL); 14177c478bd9Sstevel@tonic-gate goto try_again; 14187c478bd9Sstevel@tonic-gate 14197c478bd9Sstevel@tonic-gate /* 14207c478bd9Sstevel@tonic-gate * 2nd search and still not there, use "new" table 14217c478bd9Sstevel@tonic-gate * Link new table into higher, when not at top level. 14227c478bd9Sstevel@tonic-gate */ 14237c478bd9Sstevel@tonic-gate } else { 14247c478bd9Sstevel@tonic-gate ht = new; 14257c478bd9Sstevel@tonic-gate if (higher != NULL) { 14267c478bd9Sstevel@tonic-gate link_ptp(higher, ht, base); 14277c478bd9Sstevel@tonic-gate ht->ht_parent = higher; 14287c478bd9Sstevel@tonic-gate } 14297c478bd9Sstevel@tonic-gate ht->ht_next = hat->hat_ht_hash[h]; 14307c478bd9Sstevel@tonic-gate ASSERT(ht->ht_prev == NULL); 14317c478bd9Sstevel@tonic-gate if (hat->hat_ht_hash[h]) 14327c478bd9Sstevel@tonic-gate hat->hat_ht_hash[h]->ht_prev = ht; 14337c478bd9Sstevel@tonic-gate hat->hat_ht_hash[h] = ht; 14347c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 14357c478bd9Sstevel@tonic-gate 14367c478bd9Sstevel@tonic-gate /* 14377c478bd9Sstevel@tonic-gate * Note we don't do htable_release(higher). 14387c478bd9Sstevel@tonic-gate * That happens recursively when "new" is removed by 14397c478bd9Sstevel@tonic-gate * htable_release() or htable_steal(). 14407c478bd9Sstevel@tonic-gate */ 14417c478bd9Sstevel@tonic-gate higher = ht; 14427c478bd9Sstevel@tonic-gate 14437c478bd9Sstevel@tonic-gate /* 14447c478bd9Sstevel@tonic-gate * If we just created a new shared page table we 14457c478bd9Sstevel@tonic-gate * increment the shared htable's busy count, so that 14467c478bd9Sstevel@tonic-gate * it can't be the victim of a steal even if it's empty. 14477c478bd9Sstevel@tonic-gate */ 14487c478bd9Sstevel@tonic-gate if (l == level && shared) { 14497c478bd9Sstevel@tonic-gate (void) htable_lookup(shared->ht_hat, 14507c478bd9Sstevel@tonic-gate shared->ht_vaddr, shared->ht_level); 14517c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_htable_shared); 14527c478bd9Sstevel@tonic-gate } 14537c478bd9Sstevel@tonic-gate } 14547c478bd9Sstevel@tonic-gate } 14557c478bd9Sstevel@tonic-gate 14567c478bd9Sstevel@tonic-gate return (ht); 14577c478bd9Sstevel@tonic-gate } 14587c478bd9Sstevel@tonic-gate 14597c478bd9Sstevel@tonic-gate /* 1460843e1988Sjohnlev * Inherit initial pagetables from the boot program. 
On the 64-bit 1461843e1988Sjohnlev * hypervisor we also temporarily mark the p_index field of page table 1462843e1988Sjohnlev * pages, so we know not to try making them writable in seg_kpm. 1463ae115bc7Smrj */ 1464ae115bc7Smrj void 1465ae115bc7Smrj htable_attach( 1466ae115bc7Smrj hat_t *hat, 1467ae115bc7Smrj uintptr_t base, 1468ae115bc7Smrj level_t level, 1469ae115bc7Smrj htable_t *parent, 1470ae115bc7Smrj pfn_t pfn) 1471ae115bc7Smrj { 1472ae115bc7Smrj htable_t *ht; 1473ae115bc7Smrj uint_t h; 1474ae115bc7Smrj uint_t i; 1475ae115bc7Smrj x86pte_t pte; 1476ae115bc7Smrj x86pte_t *ptep; 1477ae115bc7Smrj page_t *pp; 1478ae115bc7Smrj extern page_t *boot_claim_page(pfn_t); 1479ae115bc7Smrj 1480ae115bc7Smrj ht = htable_get_reserve(); 1481ae115bc7Smrj if (level == mmu.max_level) 1482ae115bc7Smrj kas.a_hat->hat_htable = ht; 1483ae115bc7Smrj ht->ht_hat = hat; 1484ae115bc7Smrj ht->ht_parent = parent; 1485ae115bc7Smrj ht->ht_vaddr = base; 1486ae115bc7Smrj ht->ht_level = level; 1487ae115bc7Smrj ht->ht_busy = 1; 1488ae115bc7Smrj ht->ht_next = NULL; 1489ae115bc7Smrj ht->ht_prev = NULL; 1490ae115bc7Smrj ht->ht_flags = 0; 1491ae115bc7Smrj ht->ht_pfn = pfn; 1492ae115bc7Smrj ht->ht_lock_cnt = 0; 1493ae115bc7Smrj ht->ht_valid_cnt = 0; 1494ae115bc7Smrj if (parent != NULL) 1495ae115bc7Smrj ++parent->ht_busy; 1496ae115bc7Smrj 1497ae115bc7Smrj h = HTABLE_HASH(hat, base, level); 1498ae115bc7Smrj HTABLE_ENTER(h); 1499ae115bc7Smrj ht->ht_next = hat->hat_ht_hash[h]; 1500ae115bc7Smrj ASSERT(ht->ht_prev == NULL); 1501ae115bc7Smrj if (hat->hat_ht_hash[h]) 1502ae115bc7Smrj hat->hat_ht_hash[h]->ht_prev = ht; 1503ae115bc7Smrj hat->hat_ht_hash[h] = ht; 1504ae115bc7Smrj HTABLE_EXIT(h); 1505ae115bc7Smrj 1506ae115bc7Smrj /* 1507ae115bc7Smrj * make sure the page table physical page is not FREE 1508ae115bc7Smrj */ 1509ae115bc7Smrj if (page_resv(1, KM_NOSLEEP) == 0) 1510ae115bc7Smrj panic("page_resv() failed in ptable alloc"); 1511ae115bc7Smrj 1512ae115bc7Smrj pp = boot_claim_page(pfn); 1513ae115bc7Smrj ASSERT(pp != NULL); 15142d44e974SJoe Bonasera 15152d44e974SJoe Bonasera /* 15162d44e974SJoe Bonasera * Page table pages that were allocated by dboot or 15172d44e974SJoe Bonasera * in very early startup didn't go through boot_mapin() 15182d44e974SJoe Bonasera * and so won't have vnode/offsets. Fix that here. 15192d44e974SJoe Bonasera */ 15202d44e974SJoe Bonasera if (pp->p_vnode == NULL) { 15212d44e974SJoe Bonasera /* match offset calculation in page_get_physical() */ 15222d44e974SJoe Bonasera u_offset_t offset = (uintptr_t)ht; 15232d44e974SJoe Bonasera if (offset > kernelbase) 15242d44e974SJoe Bonasera offset -= kernelbase; 15252d44e974SJoe Bonasera offset <<= MMU_PAGESHIFT; 15262d44e974SJoe Bonasera #if defined(__amd64) 15272d44e974SJoe Bonasera offset += mmu.hole_start; /* something in VA hole */ 15282d44e974SJoe Bonasera #else 15292d44e974SJoe Bonasera offset += 1ULL << 40; /* something > 4 Gig */ 15302d44e974SJoe Bonasera #endif 15312d44e974SJoe Bonasera ASSERT(page_exists(&kvp, offset) == NULL); 15322d44e974SJoe Bonasera (void) page_hashin(pp, &kvp, offset, NULL); 15332d44e974SJoe Bonasera } 1534ae115bc7Smrj page_downgrade(pp); 1535843e1988Sjohnlev #if defined(__xpv) && defined(__amd64) 1536ae115bc7Smrj /* 1537ae115bc7Smrj * Record in the page_t that this is a pagetable for segkpm setup. 1538ae115bc7Smrj */ 1539ae115bc7Smrj if (kpm_vbase) 1540ae115bc7Smrj pp->p_index = 1; 1541843e1988Sjohnlev #endif 1542ae115bc7Smrj 1543ae115bc7Smrj /* 1544ae115bc7Smrj * Count valid mappings and recursively attach lower level pagetables.
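 * Note that the recursion below reuses the one boot remap window, which * is why the mapping of this table is re-established via * kbm_remap_window() after each child htable_attach() call returns.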
1545ae115bc7Smrj */ 1546ae115bc7Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1547ae115bc7Smrj for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) { 1548ae115bc7Smrj if (mmu.pae_hat) 1549ae115bc7Smrj pte = ptep[i]; 1550ae115bc7Smrj else 1551ae115bc7Smrj pte = ((x86pte32_t *)ptep)[i]; 1552ae115bc7Smrj if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) { 1553ae115bc7Smrj ++ht->ht_valid_cnt; 1554ae115bc7Smrj if (!PTE_ISPAGE(pte, level)) { 1555ae115bc7Smrj htable_attach(hat, base, level - 1, 1556ae115bc7Smrj ht, PTE2PFN(pte, level)); 1557ae115bc7Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1558ae115bc7Smrj } 1559ae115bc7Smrj } 1560ae115bc7Smrj base += LEVEL_SIZE(level); 1561ae115bc7Smrj if (base == mmu.hole_start) 1562ae115bc7Smrj base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK; 1563ae115bc7Smrj } 1564ae115bc7Smrj 1565ae115bc7Smrj /* 1566ae115bc7Smrj * As long as all the mappings we had were below kernel base 1567ae115bc7Smrj * we can release the htable. 1568ae115bc7Smrj */ 1569ae115bc7Smrj if (base < kernelbase) 1570ae115bc7Smrj htable_release(ht); 1571ae115bc7Smrj } 1572ae115bc7Smrj 1573ae115bc7Smrj /* 15747c478bd9Sstevel@tonic-gate * Walk through a given htable looking for the first valid entry. This 15757c478bd9Sstevel@tonic-gate * routine takes both a starting and ending address. The starting address 15767c478bd9Sstevel@tonic-gate * is required to be within the htable provided by the caller, but there is 15777c478bd9Sstevel@tonic-gate * no such restriction on the ending address. 15787c478bd9Sstevel@tonic-gate * 15797c478bd9Sstevel@tonic-gate * If the routine finds a valid entry in the htable (at or beyond the 15807c478bd9Sstevel@tonic-gate * starting address), the PTE (and its address) will be returned. 15817c478bd9Sstevel@tonic-gate * This PTE may correspond to either a page or a pagetable - it is the 15827c478bd9Sstevel@tonic-gate * caller's responsibility to determine which. If no valid entry is 15837c478bd9Sstevel@tonic-gate * found, 0 (and invalid PTE) and the next unexamined address will be 15847c478bd9Sstevel@tonic-gate * returned. 15857c478bd9Sstevel@tonic-gate * 15867c478bd9Sstevel@tonic-gate * The loop has been carefully coded for optimization. 15877c478bd9Sstevel@tonic-gate */ 15887c478bd9Sstevel@tonic-gate static x86pte_t 15897c478bd9Sstevel@tonic-gate htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 15907c478bd9Sstevel@tonic-gate { 15917c478bd9Sstevel@tonic-gate uint_t e; 15927c478bd9Sstevel@tonic-gate x86pte_t found_pte = (x86pte_t)0; 1593ae115bc7Smrj caddr_t pte_ptr; 1594ae115bc7Smrj caddr_t end_pte_ptr; 15957c478bd9Sstevel@tonic-gate int l = ht->ht_level; 15967c478bd9Sstevel@tonic-gate uintptr_t va = *vap & LEVEL_MASK(l); 15977c478bd9Sstevel@tonic-gate size_t pgsize = LEVEL_SIZE(l); 15987c478bd9Sstevel@tonic-gate 15997c478bd9Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr); 16007c478bd9Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht)); 16017c478bd9Sstevel@tonic-gate 16027c478bd9Sstevel@tonic-gate /* 16037c478bd9Sstevel@tonic-gate * Compute the starting index and ending virtual address 16047c478bd9Sstevel@tonic-gate */ 16057c478bd9Sstevel@tonic-gate e = htable_va2entry(va, ht); 16067c478bd9Sstevel@tonic-gate 16077c478bd9Sstevel@tonic-gate /* 16087c478bd9Sstevel@tonic-gate * The following page table scan code knows that the valid 16097c478bd9Sstevel@tonic-gate * bit of a PTE is in the lowest byte AND that x86 is little endian!! 
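 * For example, PT_VALID is bit 0 of a PTE, and little endian byte order * puts that bit in the first byte stored in memory, so PTE_ISVALID(*pte_ptr) * below can test a single byte rather than loading an entire (possibly * 8 byte PAE) entry.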
16107c478bd9Sstevel@tonic-gate */ 1611ae115bc7Smrj pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 1612ae115bc7Smrj end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 1613ae115bc7Smrj pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 161430f7a194Skchow while (!PTE_ISVALID(*pte_ptr)) { 16157c478bd9Sstevel@tonic-gate va += pgsize; 16167c478bd9Sstevel@tonic-gate if (va >= eaddr) 16177c478bd9Sstevel@tonic-gate break; 16187c478bd9Sstevel@tonic-gate pte_ptr += mmu.pte_size; 16197c478bd9Sstevel@tonic-gate ASSERT(pte_ptr <= end_pte_ptr); 16207c478bd9Sstevel@tonic-gate if (pte_ptr == end_pte_ptr) 16217c478bd9Sstevel@tonic-gate break; 16227c478bd9Sstevel@tonic-gate } 16237c478bd9Sstevel@tonic-gate 16247c478bd9Sstevel@tonic-gate /* 16257c478bd9Sstevel@tonic-gate * if we found a valid PTE, load the entire PTE 16267c478bd9Sstevel@tonic-gate */ 1627ae115bc7Smrj if (va < eaddr && pte_ptr != end_pte_ptr) 1628ae115bc7Smrj found_pte = GET_PTE((x86pte_t *)pte_ptr); 16297c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 16307c478bd9Sstevel@tonic-gate 16317c478bd9Sstevel@tonic-gate #if defined(__amd64) 16327c478bd9Sstevel@tonic-gate /* 16337c478bd9Sstevel@tonic-gate * deal with VA hole on amd64 16347c478bd9Sstevel@tonic-gate */ 16357c478bd9Sstevel@tonic-gate if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 16367c478bd9Sstevel@tonic-gate va = mmu.hole_end + va - mmu.hole_start; 16377c478bd9Sstevel@tonic-gate #endif /* __amd64 */ 16387c478bd9Sstevel@tonic-gate 16397c478bd9Sstevel@tonic-gate *vap = va; 16407c478bd9Sstevel@tonic-gate return (found_pte); 16417c478bd9Sstevel@tonic-gate } 16427c478bd9Sstevel@tonic-gate 16437c478bd9Sstevel@tonic-gate /* 16447c478bd9Sstevel@tonic-gate * Find the address and htable for the first populated translation at or 16457c478bd9Sstevel@tonic-gate * above the given virtual address. The caller may also specify an upper 16467c478bd9Sstevel@tonic-gate * limit to the address range to search. Uses level information to quickly 16477c478bd9Sstevel@tonic-gate * skip unpopulated sections of virtual address spaces. 16487c478bd9Sstevel@tonic-gate * 16497c478bd9Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable and virt addr 16507c478bd9Sstevel@tonic-gate * and has a hold on the htable. 16517c478bd9Sstevel@tonic-gate */ 16527c478bd9Sstevel@tonic-gate x86pte_t 16537c478bd9Sstevel@tonic-gate htable_walk( 16547c478bd9Sstevel@tonic-gate struct hat *hat, 16557c478bd9Sstevel@tonic-gate htable_t **htp, 16567c478bd9Sstevel@tonic-gate uintptr_t *vaddr, 16577c478bd9Sstevel@tonic-gate uintptr_t eaddr) 16587c478bd9Sstevel@tonic-gate { 16597c478bd9Sstevel@tonic-gate uintptr_t va = *vaddr; 16607c478bd9Sstevel@tonic-gate htable_t *ht; 16617c478bd9Sstevel@tonic-gate htable_t *prev = *htp; 16627c478bd9Sstevel@tonic-gate level_t l; 16637c478bd9Sstevel@tonic-gate level_t max_mapped_level; 16647c478bd9Sstevel@tonic-gate x86pte_t pte; 16657c478bd9Sstevel@tonic-gate 16667c478bd9Sstevel@tonic-gate ASSERT(eaddr > va); 16677c478bd9Sstevel@tonic-gate 16687c478bd9Sstevel@tonic-gate /* 16697c478bd9Sstevel@tonic-gate * If this is a user address, then we know we need not look beyond 16707c478bd9Sstevel@tonic-gate * kernelbase. 
16717c478bd9Sstevel@tonic-gate */ 16727c478bd9Sstevel@tonic-gate ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 16737c478bd9Sstevel@tonic-gate eaddr == HTABLE_WALK_TO_END); 16747c478bd9Sstevel@tonic-gate if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 16757c478bd9Sstevel@tonic-gate eaddr = kernelbase; 16767c478bd9Sstevel@tonic-gate 16777c478bd9Sstevel@tonic-gate /* 16787c478bd9Sstevel@tonic-gate * If we're coming in with a previous page table, search it first 16797c478bd9Sstevel@tonic-gate * without doing an htable_lookup(), this should be frequent. 16807c478bd9Sstevel@tonic-gate */ 16817c478bd9Sstevel@tonic-gate if (prev) { 16827c478bd9Sstevel@tonic-gate ASSERT(prev->ht_busy > 0); 16837c478bd9Sstevel@tonic-gate ASSERT(prev->ht_vaddr <= va); 16847c478bd9Sstevel@tonic-gate l = prev->ht_level; 16857c478bd9Sstevel@tonic-gate if (va <= HTABLE_LAST_PAGE(prev)) { 16867c478bd9Sstevel@tonic-gate pte = htable_scan(prev, &va, eaddr); 16877c478bd9Sstevel@tonic-gate 16887c478bd9Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 16897c478bd9Sstevel@tonic-gate *vaddr = va; 16907c478bd9Sstevel@tonic-gate *htp = prev; 16917c478bd9Sstevel@tonic-gate return (pte); 16927c478bd9Sstevel@tonic-gate } 16937c478bd9Sstevel@tonic-gate } 16947c478bd9Sstevel@tonic-gate 16957c478bd9Sstevel@tonic-gate /* 16967c478bd9Sstevel@tonic-gate * We found nothing in the htable provided by the caller, 16977c478bd9Sstevel@tonic-gate * so fall through and do the full search 16987c478bd9Sstevel@tonic-gate */ 16997c478bd9Sstevel@tonic-gate htable_release(prev); 17007c478bd9Sstevel@tonic-gate } 17017c478bd9Sstevel@tonic-gate 17027c478bd9Sstevel@tonic-gate /* 17037c478bd9Sstevel@tonic-gate * Find the level of the largest pagesize used by this HAT. 17047c478bd9Sstevel@tonic-gate */ 17057173d045Sjosephb if (hat->hat_ism_pgcnt > 0) { 170602bc52beSkchow max_mapped_level = mmu.umax_page_level; 17077173d045Sjosephb } else { 17087c478bd9Sstevel@tonic-gate max_mapped_level = 0; 17097c478bd9Sstevel@tonic-gate for (l = 1; l <= mmu.max_page_level; ++l) 17107c478bd9Sstevel@tonic-gate if (hat->hat_pages_mapped[l] != 0) 17117c478bd9Sstevel@tonic-gate max_mapped_level = l; 17127173d045Sjosephb } 17137c478bd9Sstevel@tonic-gate 17147c478bd9Sstevel@tonic-gate while (va < eaddr && va >= *vaddr) { 17157c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 17167c478bd9Sstevel@tonic-gate 17177c478bd9Sstevel@tonic-gate /* 17187c478bd9Sstevel@tonic-gate * Find lowest table with any entry for given address. 17197c478bd9Sstevel@tonic-gate */ 17207c478bd9Sstevel@tonic-gate for (l = 0; l <= TOP_LEVEL(hat); ++l) { 17217c478bd9Sstevel@tonic-gate ht = htable_lookup(hat, va, l); 17227c478bd9Sstevel@tonic-gate if (ht != NULL) { 17237c478bd9Sstevel@tonic-gate pte = htable_scan(ht, &va, eaddr); 17247c478bd9Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 17257c478bd9Sstevel@tonic-gate *vaddr = va; 17267c478bd9Sstevel@tonic-gate *htp = ht; 17277c478bd9Sstevel@tonic-gate return (pte); 17287c478bd9Sstevel@tonic-gate } 17297c478bd9Sstevel@tonic-gate htable_release(ht); 17307c478bd9Sstevel@tonic-gate break; 17317c478bd9Sstevel@tonic-gate } 17327c478bd9Sstevel@tonic-gate 17337c478bd9Sstevel@tonic-gate /* 17347173d045Sjosephb * No htable at this level for the address. If there 17357173d045Sjosephb * is no larger page size that could cover it, we can 17367173d045Sjosephb * skip right to the start of the next page table. 
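 * For example, when no large pages are in use (max_mapped_level == 0), * a miss at level 0 advances va to NEXT_ENTRY_VA(va, 1), skipping the * entire VA range that a single lower pagetable would cover.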
17378b5842f9Sdm120769 */ 17388b5842f9Sdm120769 ASSERT(l < TOP_LEVEL(hat)); 17398b5842f9Sdm120769 if (l >= max_mapped_level) { 17407c478bd9Sstevel@tonic-gate va = NEXT_ENTRY_VA(va, l + 1); 17417173d045Sjosephb if (va >= eaddr) 17428b5842f9Sdm120769 break; 17438b5842f9Sdm120769 } 17447c478bd9Sstevel@tonic-gate } 17457c478bd9Sstevel@tonic-gate } 17467c478bd9Sstevel@tonic-gate 17477c478bd9Sstevel@tonic-gate *vaddr = 0; 17487c478bd9Sstevel@tonic-gate *htp = NULL; 17497c478bd9Sstevel@tonic-gate return (0); 17507c478bd9Sstevel@tonic-gate } 17517c478bd9Sstevel@tonic-gate 17527c478bd9Sstevel@tonic-gate /* 17537c478bd9Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address 17547c478bd9Sstevel@tonic-gate * with pagesize at or below given level. 17557c478bd9Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 17567c478bd9Sstevel@tonic-gate * entry, and has a hold on the htable. 17577c478bd9Sstevel@tonic-gate */ 17587c478bd9Sstevel@tonic-gate htable_t * 17597c478bd9Sstevel@tonic-gate htable_getpte( 17607c478bd9Sstevel@tonic-gate struct hat *hat, 17617c478bd9Sstevel@tonic-gate uintptr_t vaddr, 17627c478bd9Sstevel@tonic-gate uint_t *entry, 17637c478bd9Sstevel@tonic-gate x86pte_t *pte, 17647c478bd9Sstevel@tonic-gate level_t level) 17657c478bd9Sstevel@tonic-gate { 17667c478bd9Sstevel@tonic-gate htable_t *ht; 17677c478bd9Sstevel@tonic-gate level_t l; 17687c478bd9Sstevel@tonic-gate uint_t e; 17697c478bd9Sstevel@tonic-gate 17707c478bd9Sstevel@tonic-gate ASSERT(level <= mmu.max_page_level); 17717c478bd9Sstevel@tonic-gate 17727c478bd9Sstevel@tonic-gate for (l = 0; l <= level; ++l) { 17737c478bd9Sstevel@tonic-gate ht = htable_lookup(hat, vaddr, l); 17747c478bd9Sstevel@tonic-gate if (ht == NULL) 17757c478bd9Sstevel@tonic-gate continue; 17767c478bd9Sstevel@tonic-gate e = htable_va2entry(vaddr, ht); 17777c478bd9Sstevel@tonic-gate if (entry != NULL) 17787c478bd9Sstevel@tonic-gate *entry = e; 17797c478bd9Sstevel@tonic-gate if (pte != NULL) 17807c478bd9Sstevel@tonic-gate *pte = x86pte_get(ht, e); 17817c478bd9Sstevel@tonic-gate return (ht); 17827c478bd9Sstevel@tonic-gate } 17837c478bd9Sstevel@tonic-gate return (NULL); 17847c478bd9Sstevel@tonic-gate } 17857c478bd9Sstevel@tonic-gate 17867c478bd9Sstevel@tonic-gate /* 17877c478bd9Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address. 17887c478bd9Sstevel@tonic-gate * There must be a valid page mapped at the given address. 17897c478bd9Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 17907c478bd9Sstevel@tonic-gate * entry, and has a hold on the htable. 
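 * A minimal sketch of a typical caller (hypothetical, for illustration * only): * * uint_t entry; * htable_t *ht = htable_getpage(hat, vaddr, &entry); * * if (ht != NULL) { * pfn_t pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level); * ... use pfn ... * htable_release(ht); * }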
17917c478bd9Sstevel@tonic-gate */ 17927c478bd9Sstevel@tonic-gate htable_t * 17937c478bd9Sstevel@tonic-gate htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 17947c478bd9Sstevel@tonic-gate { 17957c478bd9Sstevel@tonic-gate htable_t *ht; 17967c478bd9Sstevel@tonic-gate uint_t e; 17977c478bd9Sstevel@tonic-gate x86pte_t pte; 17987c478bd9Sstevel@tonic-gate 17997c478bd9Sstevel@tonic-gate ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 18007c478bd9Sstevel@tonic-gate if (ht == NULL) 18017c478bd9Sstevel@tonic-gate return (NULL); 18027c478bd9Sstevel@tonic-gate 18037c478bd9Sstevel@tonic-gate if (entry) 18047c478bd9Sstevel@tonic-gate *entry = e; 18057c478bd9Sstevel@tonic-gate 18067c478bd9Sstevel@tonic-gate if (PTE_ISPAGE(pte, ht->ht_level)) 18077c478bd9Sstevel@tonic-gate return (ht); 18087c478bd9Sstevel@tonic-gate htable_release(ht); 18097c478bd9Sstevel@tonic-gate return (NULL); 18107c478bd9Sstevel@tonic-gate } 18117c478bd9Sstevel@tonic-gate 18127c478bd9Sstevel@tonic-gate 18137c478bd9Sstevel@tonic-gate void 18147c478bd9Sstevel@tonic-gate htable_init() 18157c478bd9Sstevel@tonic-gate { 18167c478bd9Sstevel@tonic-gate /* 18177c478bd9Sstevel@tonic-gate * To save on kernel VA usage, we avoid debug information in 32 bit 18187c478bd9Sstevel@tonic-gate * kernels. 18197c478bd9Sstevel@tonic-gate */ 18207c478bd9Sstevel@tonic-gate #if defined(__amd64) 18217c478bd9Sstevel@tonic-gate int kmem_flags = KMC_NOHASH; 18227c478bd9Sstevel@tonic-gate #elif defined(__i386) 18237c478bd9Sstevel@tonic-gate int kmem_flags = KMC_NOHASH | KMC_NODEBUG; 18247c478bd9Sstevel@tonic-gate #endif 18257c478bd9Sstevel@tonic-gate 18267c478bd9Sstevel@tonic-gate /* 18277c478bd9Sstevel@tonic-gate * initialize kmem caches 18287c478bd9Sstevel@tonic-gate */ 18297c478bd9Sstevel@tonic-gate htable_cache = kmem_cache_create("htable_t", 18307c478bd9Sstevel@tonic-gate sizeof (htable_t), 0, NULL, NULL, 18317c478bd9Sstevel@tonic-gate htable_reap, NULL, hat_memload_arena, kmem_flags); 18327c478bd9Sstevel@tonic-gate } 18337c478bd9Sstevel@tonic-gate 18347c478bd9Sstevel@tonic-gate /* 18357c478bd9Sstevel@tonic-gate * get the pte index for the virtual address in the given htable's pagetable 18367c478bd9Sstevel@tonic-gate */ 18377c478bd9Sstevel@tonic-gate uint_t 18387c478bd9Sstevel@tonic-gate htable_va2entry(uintptr_t va, htable_t *ht) 18397c478bd9Sstevel@tonic-gate { 18407c478bd9Sstevel@tonic-gate level_t l = ht->ht_level; 18417c478bd9Sstevel@tonic-gate 18427c478bd9Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr); 18437c478bd9Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht)); 1844ae115bc7Smrj return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1)); 18457c478bd9Sstevel@tonic-gate } 18467c478bd9Sstevel@tonic-gate 18477c478bd9Sstevel@tonic-gate /* 18487c478bd9Sstevel@tonic-gate * Given an htable and the index of a pte in it, return the virtual address 18497c478bd9Sstevel@tonic-gate * of the page. 
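 * This is the inverse of htable_va2entry() above; for any in-range entry * the round trip should hold, e.g.: * * ASSERT(htable_va2entry(htable_e2va(ht, entry), ht) == entry);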
18507c478bd9Sstevel@tonic-gate */ 18517c478bd9Sstevel@tonic-gate uintptr_t 18527c478bd9Sstevel@tonic-gate htable_e2va(htable_t *ht, uint_t entry) 18537c478bd9Sstevel@tonic-gate { 18547c478bd9Sstevel@tonic-gate level_t l = ht->ht_level; 18557c478bd9Sstevel@tonic-gate uintptr_t va; 18567c478bd9Sstevel@tonic-gate 1857ae115bc7Smrj ASSERT(entry < HTABLE_NUM_PTES(ht)); 18587c478bd9Sstevel@tonic-gate va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l)); 18597c478bd9Sstevel@tonic-gate 18607c478bd9Sstevel@tonic-gate /* 18617c478bd9Sstevel@tonic-gate * Need to skip over any VA hole in top level table 18627c478bd9Sstevel@tonic-gate */ 18637c478bd9Sstevel@tonic-gate #if defined(__amd64) 18647c478bd9Sstevel@tonic-gate if (ht->ht_level == mmu.max_level && va >= mmu.hole_start) 18657c478bd9Sstevel@tonic-gate va += ((mmu.hole_end - mmu.hole_start) + 1); 18667c478bd9Sstevel@tonic-gate #endif 18677c478bd9Sstevel@tonic-gate 18687c478bd9Sstevel@tonic-gate return (va); 18697c478bd9Sstevel@tonic-gate } 18707c478bd9Sstevel@tonic-gate 18717c478bd9Sstevel@tonic-gate /* 18727c478bd9Sstevel@tonic-gate * The code uses compare and swap instructions to read/write PTE's to 18737c478bd9Sstevel@tonic-gate * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems; 18747c478bd9Sstevel@tonic-gate * on 64 bit systems plain loads and stores will naturally be atomic. 18757c478bd9Sstevel@tonic-gate * 18767c478bd9Sstevel@tonic-gate * The combination of using kpreempt_disable()/_enable() and the hci_mutex 18777c478bd9Sstevel@tonic-gate * is used to ensure that an interrupt won't overwrite a temporary mapping 18787c478bd9Sstevel@tonic-gate * while it's in use. If an interrupt thread tries to access a PTE, it will 18797c478bd9Sstevel@tonic-gate * yield briefly back to the pinned thread which holds the cpu's hci_mutex. 18807c478bd9Sstevel@tonic-gate */ 18817c478bd9Sstevel@tonic-gate void 1882ae115bc7Smrj x86pte_cpu_init(cpu_t *cpu) 18837c478bd9Sstevel@tonic-gate { 18847c478bd9Sstevel@tonic-gate struct hat_cpu_info *hci; 18857c478bd9Sstevel@tonic-gate 1886ae115bc7Smrj hci = kmem_zalloc(sizeof (*hci), KM_SLEEP); 18877c478bd9Sstevel@tonic-gate mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL); 18887c478bd9Sstevel@tonic-gate cpu->cpu_hat_info = hci; 18897c478bd9Sstevel@tonic-gate } 18907c478bd9Sstevel@tonic-gate 1891ae115bc7Smrj void 1892ae115bc7Smrj x86pte_cpu_fini(cpu_t *cpu) 1893ae115bc7Smrj { 1894ae115bc7Smrj struct hat_cpu_info *hci = cpu->cpu_hat_info; 1895ae115bc7Smrj 1896ae115bc7Smrj kmem_free(hci, sizeof (*hci)); 1897ae115bc7Smrj cpu->cpu_hat_info = NULL; 18987c478bd9Sstevel@tonic-gate } 18997c478bd9Sstevel@tonic-gate 1900ae115bc7Smrj #ifdef __i386 1901ae115bc7Smrj /* 1902ae115bc7Smrj * On 32 bit kernels, loading a 64 bit PTE is a little tricky 1903ae115bc7Smrj */ 1904ae115bc7Smrj x86pte_t 1905ae115bc7Smrj get_pte64(x86pte_t *ptr) 1906ae115bc7Smrj { 1907ae115bc7Smrj volatile uint32_t *p = (uint32_t *)ptr; 1908ae115bc7Smrj x86pte_t t; 1909ae115bc7Smrj 1910ae115bc7Smrj ASSERT(mmu.pae_hat != 0); 1911ae115bc7Smrj for (;;) { 1912ae115bc7Smrj t = p[0]; 1913ae115bc7Smrj t |= (uint64_t)p[1] << 32; 1914ae115bc7Smrj if ((t & 0xffffffff) == p[0]) 1915ae115bc7Smrj return (t); 1916ae115bc7Smrj } 1917ae115bc7Smrj } 1918ae115bc7Smrj #endif /* __i386 */ 1919ae115bc7Smrj 19207c478bd9Sstevel@tonic-gate /* 19217c478bd9Sstevel@tonic-gate * Disable preemption and establish a mapping to the pagetable with the 19227c478bd9Sstevel@tonic-gate * given pfn.
This is optimized for the case where it's the same 19237c478bd9Sstevel@tonic-gate * pfn as we last referenced from this CPU. 19247c478bd9Sstevel@tonic-gate */ 19257c478bd9Sstevel@tonic-gate static x86pte_t * 1926ae115bc7Smrj x86pte_access_pagetable(htable_t *ht, uint_t index) 19277c478bd9Sstevel@tonic-gate { 19287c478bd9Sstevel@tonic-gate /* 19297c478bd9Sstevel@tonic-gate * VLP pagetables are contained in the hat_t 19307c478bd9Sstevel@tonic-gate */ 19317c478bd9Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP) 1932ae115bc7Smrj return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index)); 1933ae115bc7Smrj return (x86pte_mapin(ht->ht_pfn, index, ht)); 1934ae115bc7Smrj } 19357c478bd9Sstevel@tonic-gate 19367c478bd9Sstevel@tonic-gate /* 1937ae115bc7Smrj * map the given pfn into the page table window. 19387c478bd9Sstevel@tonic-gate */ 1939ae115bc7Smrj /*ARGSUSED*/ 1940ae115bc7Smrj x86pte_t * 1941ae115bc7Smrj x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht) 1942ae115bc7Smrj { 1943ae115bc7Smrj x86pte_t *pteptr; 19448ea72728Sjosephb x86pte_t pte = 0; 1945ae115bc7Smrj x86pte_t newpte; 1946ae115bc7Smrj int x; 1947ae115bc7Smrj 19487c478bd9Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID); 19497c478bd9Sstevel@tonic-gate 19507c478bd9Sstevel@tonic-gate if (!khat_running) { 1951ae115bc7Smrj caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1); 1952ae115bc7Smrj return (PT_INDEX_PTR(va, index)); 19537c478bd9Sstevel@tonic-gate } 19547c478bd9Sstevel@tonic-gate 19557c478bd9Sstevel@tonic-gate /* 1956ae115bc7Smrj * If kpm is available, use it. 1957ae115bc7Smrj */ 1958ae115bc7Smrj if (kpm_vbase) 1959ae115bc7Smrj return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index)); 1960ae115bc7Smrj 1961ae115bc7Smrj /* 1962ae115bc7Smrj * Disable preemption and grab the CPU's hci_mutex 19637c478bd9Sstevel@tonic-gate */ 19647c478bd9Sstevel@tonic-gate kpreempt_disable(); 1965ae115bc7Smrj ASSERT(CPU->cpu_hat_info != NULL); 1966ae115bc7Smrj mutex_enter(&CPU->cpu_hat_info->hci_mutex); 1967ae115bc7Smrj x = PWIN_TABLE(CPU->cpu_id); 1968ae115bc7Smrj pteptr = (x86pte_t *)PWIN_PTE_VA(x); 19698ea72728Sjosephb #ifndef __xpv 1970ae115bc7Smrj if (mmu.pae_hat) 1971ae115bc7Smrj pte = *pteptr; 1972ae115bc7Smrj else 1973ae115bc7Smrj pte = *(x86pte32_t *)pteptr; 19748ea72728Sjosephb #endif 1975ae115bc7Smrj 1976ae115bc7Smrj newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx; 1977843e1988Sjohnlev 1978843e1988Sjohnlev /* 1979843e1988Sjohnlev * For hardware we can use a writable mapping. 1980843e1988Sjohnlev */ 1981843e1988Sjohnlev #ifdef __xpv 1982843e1988Sjohnlev if (IN_XPV_PANIC()) 1983843e1988Sjohnlev #endif 1984ae115bc7Smrj newpte |= PT_WRITABLE; 1985ae115bc7Smrj 1986ae115bc7Smrj if (!PTE_EQUIV(newpte, pte)) { 1987843e1988Sjohnlev 1988843e1988Sjohnlev #ifdef __xpv 1989843e1988Sjohnlev if (!IN_XPV_PANIC()) { 1990843e1988Sjohnlev xen_map(newpte, PWIN_VA(x)); 1991843e1988Sjohnlev } else 1992843e1988Sjohnlev #endif 1993843e1988Sjohnlev { 1994843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 1995ae115bc7Smrj if (mmu.pae_hat) 1996ae115bc7Smrj *pteptr = newpte; 1997ae115bc7Smrj else 1998ae115bc7Smrj *(x86pte32_t *)pteptr = newpte; 1999843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2000ae115bc7Smrj mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 20017c478bd9Sstevel@tonic-gate } 2002843e1988Sjohnlev } 2003ae115bc7Smrj return (PT_INDEX_PTR(PWIN_VA(x), index)); 20047c478bd9Sstevel@tonic-gate } 20057c478bd9Sstevel@tonic-gate 20067c478bd9Sstevel@tonic-gate /* 20077c478bd9Sstevel@tonic-gate * Release access to a page table.
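 * Every x86pte_access_pagetable() must be paired with this call (or with * x86pte_mapout()) so the CPU's hci_mutex is dropped and preemption is * re-enabled; x86pte_get() below shows the usual access/use/release pattern.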
20087c478bd9Sstevel@tonic-gate */ 20097c478bd9Sstevel@tonic-gate static void 20107c478bd9Sstevel@tonic-gate x86pte_release_pagetable(htable_t *ht) 20117c478bd9Sstevel@tonic-gate { 20127c478bd9Sstevel@tonic-gate /* 20137c478bd9Sstevel@tonic-gate * nothing to do for VLP htables 20147c478bd9Sstevel@tonic-gate */ 20157c478bd9Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP) 20167c478bd9Sstevel@tonic-gate return; 20177c478bd9Sstevel@tonic-gate 2018ae115bc7Smrj x86pte_mapout(); 20197c478bd9Sstevel@tonic-gate } 20207c478bd9Sstevel@tonic-gate 2021ae115bc7Smrj void 2022ae115bc7Smrj x86pte_mapout(void) 2023ae115bc7Smrj { 2024843e1988Sjohnlev if (kpm_vbase != NULL || !khat_running) 2025ae115bc7Smrj return; 2026ae115bc7Smrj 20277c478bd9Sstevel@tonic-gate /* 2028ae115bc7Smrj * Drop the CPU's hci_mutex and restore preemption. 20297c478bd9Sstevel@tonic-gate */ 20308ea72728Sjosephb #ifdef __xpv 20318ea72728Sjosephb if (!IN_XPV_PANIC()) { 20328ea72728Sjosephb uintptr_t va; 20338ea72728Sjosephb 20348ea72728Sjosephb /* 20358ea72728Sjosephb * We need to always clear the mapping in case a page 20368ea72728Sjosephb * that was once a page table page is ballooned out. 20378ea72728Sjosephb */ 20388ea72728Sjosephb va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id)); 20398ea72728Sjosephb (void) HYPERVISOR_update_va_mapping(va, 0, 20408ea72728Sjosephb UVMF_INVLPG | UVMF_LOCAL); 20418ea72728Sjosephb } 20428ea72728Sjosephb #endif 2043ae115bc7Smrj mutex_exit(&CPU->cpu_hat_info->hci_mutex); 20447c478bd9Sstevel@tonic-gate kpreempt_enable(); 20457c478bd9Sstevel@tonic-gate } 20467c478bd9Sstevel@tonic-gate 20477c478bd9Sstevel@tonic-gate /* 20487c478bd9Sstevel@tonic-gate * Atomic retrieval of a pagetable entry 20497c478bd9Sstevel@tonic-gate */ 20507c478bd9Sstevel@tonic-gate x86pte_t 20517c478bd9Sstevel@tonic-gate x86pte_get(htable_t *ht, uint_t entry) 20527c478bd9Sstevel@tonic-gate { 20537c478bd9Sstevel@tonic-gate x86pte_t pte; 2054aa2ed9e5Sjosephb x86pte_t *ptep; 20557c478bd9Sstevel@tonic-gate 20567c478bd9Sstevel@tonic-gate /* 2057aa2ed9e5Sjosephb * Be careful that loading PAE entries in 32 bit kernel is atomic. 20587c478bd9Sstevel@tonic-gate */ 2059ae115bc7Smrj ASSERT(entry < mmu.ptes_per_table); 2060ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2061ae115bc7Smrj pte = GET_PTE(ptep); 20627c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 20637c478bd9Sstevel@tonic-gate return (pte); 20647c478bd9Sstevel@tonic-gate } 20657c478bd9Sstevel@tonic-gate 20667c478bd9Sstevel@tonic-gate /* 20677c478bd9Sstevel@tonic-gate * Atomic unconditional set of a page table entry, it returns the previous 2068ae115bc7Smrj * value. For pre-existing mappings if the PFN changes, then we don't care 2069ae115bc7Smrj * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 2070ae115bc7Smrj * the MOD/REF bits unchanged. 2071ae115bc7Smrj * 2072ae115bc7Smrj * If asked to overwrite a link to a lower page table with a large page 2073ae115bc7Smrj * mapping, this routine returns the special value of LPAGE_ERROR. This 2074ae115bc7Smrj * allows the upper HAT layers to retry with a smaller mapping size. 20757c478bd9Sstevel@tonic-gate */ 20767c478bd9Sstevel@tonic-gate x86pte_t 20777c478bd9Sstevel@tonic-gate x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 20787c478bd9Sstevel@tonic-gate { 20797c478bd9Sstevel@tonic-gate x86pte_t old; 2080ae115bc7Smrj x86pte_t prev; 20817c478bd9Sstevel@tonic-gate x86pte_t *ptep; 2082ae115bc7Smrj level_t l = ht->ht_level; 2083ae115bc7Smrj x86pte_t pfn_mask = (l != 0) ? 
PT_PADDR_LGPG : PT_PADDR; 2084ae115bc7Smrj x86pte_t n; 2085ae115bc7Smrj uintptr_t addr = htable_e2va(ht, entry); 2086ae115bc7Smrj hat_t *hat = ht->ht_hat; 20877c478bd9Sstevel@tonic-gate 2088ae115bc7Smrj ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 20897c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 2090ae115bc7Smrj if (ptr == NULL) 2091ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2092ae115bc7Smrj else 20937c478bd9Sstevel@tonic-gate ptep = ptr; 20947c478bd9Sstevel@tonic-gate 2095b193e412Skchow /* 2096ae115bc7Smrj * Install the new PTE. If remapping the same PFN, then 2097ae115bc7Smrj * copy existing REF/MOD bits to new mapping. 2098b193e412Skchow */ 2099ae115bc7Smrj do { 2100ae115bc7Smrj prev = GET_PTE(ptep); 2101ae115bc7Smrj n = new; 2102ae115bc7Smrj if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 2103b193e412Skchow n |= prev & (PT_REF | PT_MOD); 2104ae115bc7Smrj 2105ae115bc7Smrj /* 2106ae115bc7Smrj * Another thread may have installed this mapping already, 2107ae115bc7Smrj * flush the local TLB and be done. 2108ae115bc7Smrj */ 2109b193e412Skchow if (prev == n) { 21107c478bd9Sstevel@tonic-gate old = new; 2111843e1988Sjohnlev #ifdef __xpv 2112843e1988Sjohnlev if (!IN_XPV_PANIC()) 2113843e1988Sjohnlev xen_flush_va((caddr_t)addr); 2114843e1988Sjohnlev else 2115843e1988Sjohnlev #endif 2116ae115bc7Smrj mmu_tlbflush_entry((caddr_t)addr); 2117ae115bc7Smrj goto done; 21187c478bd9Sstevel@tonic-gate } 2119ae115bc7Smrj 2120ae115bc7Smrj /* 2121ae115bc7Smrj * Detect if we have a collision of installing a large 2122ae115bc7Smrj * page mapping where there already is a lower page table. 2123ae115bc7Smrj */ 212497704650Sjosephb if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 212597704650Sjosephb old = LPAGE_ERROR; 212697704650Sjosephb goto done; 212797704650Sjosephb } 2128ae115bc7Smrj 2129843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2130ae115bc7Smrj old = CAS_PTE(ptep, prev, n); 2131843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2132ae115bc7Smrj } while (old != prev); 2133ae115bc7Smrj 2134ae115bc7Smrj /* 2135ae115bc7Smrj * Do a TLB demap if needed, ie. the old pte was valid. 2136ae115bc7Smrj * 2137ae115bc7Smrj * Note that a stale TLB writeback to the PTE here either can't happen 2138ae115bc7Smrj * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 2139ae115bc7Smrj * mappings, but they were created with REF and MOD already set, so 2140ae115bc7Smrj * no stale writeback will happen. 2141ae115bc7Smrj * 2142ae115bc7Smrj * Segmap is the only place where remaps happen on the same pfn and for 2143ae115bc7Smrj * that we want to preserve the stale REF/MOD bits. 2144ae115bc7Smrj */ 2145ae115bc7Smrj if (old & PT_REF) 2146ae115bc7Smrj hat_tlb_inval(hat, addr); 2147ae115bc7Smrj 2148ae115bc7Smrj done: 21497c478bd9Sstevel@tonic-gate if (ptr == NULL) 21507c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 21517c478bd9Sstevel@tonic-gate return (old); 21527c478bd9Sstevel@tonic-gate } 21537c478bd9Sstevel@tonic-gate 21547c478bd9Sstevel@tonic-gate /* 2155ae115bc7Smrj * Atomic compare and swap of a page table entry. No TLB invalidates are done. 2156ae115bc7Smrj * This is used for links between pagetables of different levels. 2157ae115bc7Smrj * Note we always create these links with dirty/access set, so they should 2158ae115bc7Smrj * never change. 
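 * For example, link_ptp() above installs a link with * x86pte_cas(higher, entry, 0, newptp) and unlink_ptp() clears one with * x86pte_cas(higher, entry, expect, 0); since REF/MOD are preset in PTP * entries, hardware never needs to update them behind our back.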
21597c478bd9Sstevel@tonic-gate */ 2160ae115bc7Smrj x86pte_t 21617c478bd9Sstevel@tonic-gate x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 21627c478bd9Sstevel@tonic-gate { 21637c478bd9Sstevel@tonic-gate x86pte_t pte; 21647c478bd9Sstevel@tonic-gate x86pte_t *ptep; 2165843e1988Sjohnlev #ifdef __xpv 2166843e1988Sjohnlev /* 2167843e1988Sjohnlev * We can't use writable pagetables for upper level tables, so fake it. 2168843e1988Sjohnlev */ 2169843e1988Sjohnlev mmu_update_t t[2]; 2170843e1988Sjohnlev int cnt = 1; 2171843e1988Sjohnlev int count; 2172843e1988Sjohnlev maddr_t ma; 21737c478bd9Sstevel@tonic-gate 2174843e1988Sjohnlev if (!IN_XPV_PANIC()) { 2175843e1988Sjohnlev ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */ 2176843e1988Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2177843e1988Sjohnlev t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2178843e1988Sjohnlev t[0].val = new; 2179843e1988Sjohnlev 2180843e1988Sjohnlev #if defined(__amd64) 2181843e1988Sjohnlev /* 2182843e1988Sjohnlev * On the 64-bit hypervisor we need to maintain the user mode 2183843e1988Sjohnlev * top page table too. 2184843e1988Sjohnlev */ 2185843e1988Sjohnlev if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) { 2186843e1988Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa( 2187843e1988Sjohnlev ht->ht_hat->hat_user_ptable), entry)); 2188843e1988Sjohnlev t[1].ptr = ma | MMU_NORMAL_PT_UPDATE; 2189843e1988Sjohnlev t[1].val = new; 2190843e1988Sjohnlev ++cnt; 2191843e1988Sjohnlev } 2192843e1988Sjohnlev #endif /* __amd64 */ 2193843e1988Sjohnlev 2194843e1988Sjohnlev if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF)) 2195843e1988Sjohnlev panic("HYPERVISOR_mmu_update() failed"); 2196843e1988Sjohnlev ASSERT(count == cnt); 2197843e1988Sjohnlev return (old); 2198843e1988Sjohnlev } 2199843e1988Sjohnlev #endif 2200ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2201843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2202ae115bc7Smrj pte = CAS_PTE(ptep, old, new); 2203843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 22047c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 22057c478bd9Sstevel@tonic-gate return (pte); 22067c478bd9Sstevel@tonic-gate } 22077c478bd9Sstevel@tonic-gate 22087c478bd9Sstevel@tonic-gate /* 2209ae115bc7Smrj * Invalidate a page table entry as long as it currently maps something that 2210ae115bc7Smrj * matches the value determined by expect. 22117c478bd9Sstevel@tonic-gate * 2212ae115bc7Smrj * Also invalidates any TLB entries and returns the previous value of the PTE. 22137c478bd9Sstevel@tonic-gate */ 22147c478bd9Sstevel@tonic-gate x86pte_t 2215ae115bc7Smrj x86pte_inval( 2216ae115bc7Smrj htable_t *ht, 2217ae115bc7Smrj uint_t entry, 2218ae115bc7Smrj x86pte_t expect, 2219ae115bc7Smrj x86pte_t *pte_ptr) 22207c478bd9Sstevel@tonic-gate { 22217c478bd9Sstevel@tonic-gate x86pte_t *ptep; 222295c0a3c8Sjosephb x86pte_t oldpte; 222395c0a3c8Sjosephb x86pte_t found; 22247c478bd9Sstevel@tonic-gate 22257c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 222602bc52beSkchow ASSERT(ht->ht_level <= mmu.max_page_level); 222797704650Sjosephb 2228ae115bc7Smrj if (pte_ptr != NULL) 22297c478bd9Sstevel@tonic-gate ptep = pte_ptr; 2230ae115bc7Smrj else 2231ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 22327c478bd9Sstevel@tonic-gate 2233843e1988Sjohnlev #if defined(__xpv) 2234843e1988Sjohnlev /* 2235843e1988Sjohnlev * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing 2236843e1988Sjohnlev * with anything else. 
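 * (With HAT_FREEING set no other thread can be updating this address * space, so the CAS retry loop below would be pointless overhead here.)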
2237843e1988Sjohnlev */ 2238843e1988Sjohnlev if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) { 2239843e1988Sjohnlev int count; 2240843e1988Sjohnlev mmu_update_t t[1]; 2241843e1988Sjohnlev maddr_t ma; 2242843e1988Sjohnlev 2243843e1988Sjohnlev oldpte = GET_PTE(ptep); 2244843e1988Sjohnlev if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR)) 2245843e1988Sjohnlev goto done; 2246843e1988Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2247843e1988Sjohnlev t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2248843e1988Sjohnlev t[0].val = 0; 2249843e1988Sjohnlev if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF)) 2250843e1988Sjohnlev panic("HYPERVISOR_mmu_update() failed"); 2251843e1988Sjohnlev ASSERT(count == 1); 2252843e1988Sjohnlev goto done; 2253843e1988Sjohnlev } 2254843e1988Sjohnlev #endif /* __xpv */ 2255843e1988Sjohnlev 22567c478bd9Sstevel@tonic-gate /* 225797704650Sjosephb * Note that the loop is needed to handle changes due to h/w updating 225897704650Sjosephb * of PT_MOD/PT_REF. 22597c478bd9Sstevel@tonic-gate */ 2260ae115bc7Smrj do { 226195c0a3c8Sjosephb oldpte = GET_PTE(ptep); 226295c0a3c8Sjosephb if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR)) 226395c0a3c8Sjosephb goto done; 2264843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 226595c0a3c8Sjosephb found = CAS_PTE(ptep, oldpte, 0); 2266843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 226795c0a3c8Sjosephb } while (found != oldpte); 226895c0a3c8Sjosephb if (oldpte & (PT_REF | PT_MOD)) 226995c0a3c8Sjosephb hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry)); 22707c478bd9Sstevel@tonic-gate 227195c0a3c8Sjosephb done: 22727c478bd9Sstevel@tonic-gate if (pte_ptr == NULL) 22737c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 227495c0a3c8Sjosephb return (oldpte); 22757c478bd9Sstevel@tonic-gate } 22767c478bd9Sstevel@tonic-gate 22777c478bd9Sstevel@tonic-gate /* 2278ae115bc7Smrj * Change a page table entry if it currently matches the value in expect. 22797c478bd9Sstevel@tonic-gate */ 22807c478bd9Sstevel@tonic-gate x86pte_t 2281ae115bc7Smrj x86pte_update( 2282ae115bc7Smrj htable_t *ht, 2283ae115bc7Smrj uint_t entry, 2284ae115bc7Smrj x86pte_t expect, 2285ae115bc7Smrj x86pte_t new) 22867c478bd9Sstevel@tonic-gate { 22877c478bd9Sstevel@tonic-gate x86pte_t *ptep; 2288ae115bc7Smrj x86pte_t found; 22897c478bd9Sstevel@tonic-gate 2290ae115bc7Smrj ASSERT(new != 0); 22917c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 229202bc52beSkchow ASSERT(ht->ht_level <= mmu.max_page_level); 2293ae115bc7Smrj 2294ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2295843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2296ae115bc7Smrj found = CAS_PTE(ptep, expect, new); 2297843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2298ae115bc7Smrj if (found == expect) { 2299ae115bc7Smrj hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry)); 23007c478bd9Sstevel@tonic-gate 23017c478bd9Sstevel@tonic-gate /* 2302ae115bc7Smrj * When removing write permission *and* clearing the 2303ae115bc7Smrj * MOD bit, check if a write happened via a stale 2304ae115bc7Smrj * TLB entry before the TLB shootdown finished. 2305ae115bc7Smrj * 2306ae115bc7Smrj * If it did happen, simply re-enable write permission and 2307ae115bc7Smrj * act like the original CAS failed.
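 * A sketch of the race being closed, with CPU A running this code and * CPU B holding a stale writable TLB entry: * * A: CAS_PTE() succeeds, clearing PT_WRITABLE and PT_MOD * B: writes through the stale TLB entry; h/w sets PT_MOD in the PTE * A: hat_tlb_inval() completes, then sees PT_MOD set * A: restores PT_WRITABLE and returns found != expect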
23087c478bd9Sstevel@tonic-gate */ 2309ae115bc7Smrj if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE && 2310ae115bc7Smrj (new & (PT_WRITABLE | PT_MOD)) == 0 && 2311ae115bc7Smrj (GET_PTE(ptep) & PT_MOD) != 0) { 2312ae115bc7Smrj do { 2313ae115bc7Smrj found = GET_PTE(ptep); 2314843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2315ae115bc7Smrj found = 2316ae115bc7Smrj CAS_PTE(ptep, found, found | PT_WRITABLE); 2317843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2318ae115bc7Smrj } while ((found & PT_WRITABLE) == 0); 2319ae115bc7Smrj } 2320ae115bc7Smrj } 23217c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 2322ae115bc7Smrj return (found); 23237c478bd9Sstevel@tonic-gate } 23247c478bd9Sstevel@tonic-gate 2325843e1988Sjohnlev #ifndef __xpv 23267c478bd9Sstevel@tonic-gate /* 23277c478bd9Sstevel@tonic-gate * Copy page tables - this is just a little more complicated than the 23287c478bd9Sstevel@tonic-gate * previous routines. Note that it's also not atomic! It also is never 23297c478bd9Sstevel@tonic-gate * used for VLP pagetables. 23307c478bd9Sstevel@tonic-gate */ 23317c478bd9Sstevel@tonic-gate void 23327c478bd9Sstevel@tonic-gate x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 23337c478bd9Sstevel@tonic-gate { 23347c478bd9Sstevel@tonic-gate caddr_t src_va; 23357c478bd9Sstevel@tonic-gate caddr_t dst_va; 23367c478bd9Sstevel@tonic-gate size_t size; 2337ae115bc7Smrj x86pte_t *pteptr; 2338ae115bc7Smrj x86pte_t pte; 23397c478bd9Sstevel@tonic-gate 23407c478bd9Sstevel@tonic-gate ASSERT(khat_running); 23417c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 23427c478bd9Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_VLP)); 23437c478bd9Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN)); 23447c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 23457c478bd9Sstevel@tonic-gate 23467c478bd9Sstevel@tonic-gate /* 2347ae115bc7Smrj * Acquire access to the CPU pagetable windows for the dest and source. 
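 * The dest table is mapped through the normal per-CPU window by * x86pte_access_pagetable(); the source uses kpm when available or else * the separate PWIN_SRC window, so both can be mapped in at once.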
	 */
	dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
	if (kpm_vbase) {
		src_va = (caddr_t)
		    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
	} else {
		uint_t x = PWIN_SRC(CPU->cpu_id);

		/*
		 * Finish defining the src pagetable mapping
		 */
		src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
		pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
		pteptr = (x86pte_t *)PWIN_PTE_VA(x);
		if (mmu.pae_hat)
			*pteptr = pte;
		else
			*(x86pte32_t *)pteptr = pte;
		mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
	}

	/*
	 * now do the copy
	 */
	size = count << mmu.pte_size_shift;
	bcopy(src_va, dst_va, size);

	x86pte_release_pagetable(dest);
}

#else /* __xpv */

/*
 * The hypervisor only supports writable pagetables at level 0, so we have
 * to install these one at a time, the slow way.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t src_va;
	x86pte_t pte;

	ASSERT(!IN_XPV_PANIC());
	src_va = (caddr_t)x86pte_access_pagetable(src, entry);
	while (count) {
		if (mmu.pae_hat)
			pte = *(x86pte_t *)src_va;
		else
			pte = *(x86pte32_t *)src_va;
		if (pte != 0) {
			set_pteval(pfn_to_pa(dest->ht_pfn), entry,
			    dest->ht_level, pte);
#ifdef __amd64
			if (dest->ht_level == mmu.max_level &&
			    htable_e2va(dest, entry) < HYPERVISOR_VIRT_END)
				set_pteval(
				    pfn_to_pa(dest->ht_hat->hat_user_ptable),
				    entry, dest->ht_level, pte);
#endif
		}
		--count;
		++entry;
		src_va += mmu.pte_size;
	}
	x86pte_release_pagetable(src);
}
#endif /* __xpv */

/*
 * Zero page table entries - Note this doesn't use atomic stores!
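 * The caller must ensure the pagetable being zeroed is not concurrently
 * in use; typically it is either brand new or already unlinked.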
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
	caddr_t dst_va;
	size_t size;
#ifdef __xpv
	int x;
	x86pte_t newpte;
#endif

	/*
	 * Map in the page table to be zeroed.
	 */
	ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
	ASSERT(!(dest->ht_flags & HTABLE_VLP));

	/*
	 * On the hypervisor we don't use x86pte_access_pagetable() since
	 * in this case the page is not pinned yet.
	 */
#ifdef __xpv
	if (kpm_vbase == NULL) {
		kpreempt_disable();
		ASSERT(CPU->cpu_hat_info != NULL);
		mutex_enter(&CPU->cpu_hat_info->hci_mutex);
		x = PWIN_TABLE(CPU->cpu_id);
		newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE;
		xen_map(newpte, PWIN_VA(x));
		dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
	} else
#endif
		dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

	size = count << mmu.pte_size_shift;
	ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
	if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
		bzero(dst_va, size);
	else
#endif
		block_zero_no_xmm(dst_va, size);

#ifdef __xpv
	if (kpm_vbase == NULL) {
		xen_map(0, PWIN_VA(x));
		mutex_exit(&CPU->cpu_hat_info->hci_mutex);
		kpreempt_enable();
	} else
#endif
		x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
	hat_t *hat;
	uint_t h;
	htable_t *ht;

	/*
	 * Dump all page tables
	 */
	for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
		for (h = 0; h < hat->hat_num_hash; ++h) {
			for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
				if ((ht->ht_flags & HTABLE_VLP) == 0)
					dump_page(ht->ht_pfn);
			}
		}
	}
}
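
/*
 * For illustration only: a minimal userland sketch of the CAS-and-retry
 * pattern that x86pte_update() relies on, written with C11 <stdatomic.h>.
 * The ex_* names are hypothetical stand-ins, and a real PTE update also
 * needs the TLB shootdown and hypervisor handling shown above.
 */
#if 0	/* sketch, not part of the build */
#include <stdatomic.h>
#include <stdint.h>

/*
 * Swap in "new" only if *ptep still holds "expect".  Return the value
 * actually found, so (return value == expect) signals success and a
 * mismatched return hands the caller the current PTE to retry against.
 */
static uint64_t
ex_pte_update(_Atomic uint64_t *ptep, uint64_t expect, uint64_t new)
{
	uint64_t found = expect;

	if (atomic_compare_exchange_strong(ptep, &found, new))
		return (expect);	/* success: old value matched */
	return (found);			/* failure: found holds current PTE */
}
#endif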