/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014 by Delphix. All rights reserved.
 * Copyright 2015 Joyent, Inc.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/disp.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/hat_i86.h>
#include <sys/cmn_err.h>
#include <sys/panic.h>

#ifdef __xpv
#include <sys/hypervisor.h>
#include <sys/xpv_panic.h>
#endif

#include <sys/bootinfo.h>
#include <vm/kboot_mmu.h>

static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);
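/*
 * kmem cache used to allocate htable_t structures
 */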
kmem_cache_t *htable_cache;

/*
 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
 * is used in order to facilitate testing of the htable_steal() code.
 * By resetting htable_reserve_amount to a lower value, we can force
 * stealing to occur. The reserve amount is a guess to get us through boot.
 */
#define	HTABLE_RESERVE_AMOUNT	(200)
uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
kmutex_t htable_reserve_mutex;
uint_t htable_reserve_cnt;
htable_t *htable_reserve_pool;

/*
 * Used to hand test htable_steal().
 */
#ifdef DEBUG
ulong_t force_steal = 0;
ulong_t ptable_cnt = 0;
#endif

/*
 * This variable is so that we can tune this via /etc/system
 * Any value works, but a power of two <= mmu.ptes_per_table is best.
 */
uint_t htable_steal_passes = 8;

/*
 * mutex stuff for access to htable hash
 */
#define	NUM_HTABLE_MUTEX 128
kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
#define	HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1))

#define	HTABLE_ENTER(h)	mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
#define	HTABLE_EXIT(h)	mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
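/*
 * Note that HTABLE_MUTEX_HASH() simply masks off the low order bits of the
 * hash value, so NUM_HTABLE_MUTEX must remain a power of two for it to work.
 */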
/*
 * forward declarations
 */
static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
static void htable_free(htable_t *ht);
static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
static void x86pte_release_pagetable(htable_t *ht);
static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
	x86pte_t new);

/*
 * A counter to track if we are stealing or reaping htables. When non-zero
 * htable_free() will directly free htables (either to the reserve or kmem)
 * instead of putting them in a hat's htable cache.
 */
uint32_t htable_dont_cache = 0;

/*
 * Track the number of active pagetables, so we can know how many to reap
 */
static uint32_t active_ptables = 0;

#ifdef __xpv
/*
 * Deal with hypervisor complications.
 */
void
xen_flush_va(caddr_t va)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
	} else {
		t.cmd = MMUEXT_INVLPG_LOCAL;
		t.arg1.linear_addr = (uintptr_t)va;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_va(caddr_t va, cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		mmu_tlbflush_entry((caddr_t)va);
		return;
	}

	t.cmd = MMUEXT_INVLPG_MULTI;
	t.arg1.linear_addr = (uintptr_t)va;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_flush_tlb()
{
	struct mmuext_op t;
	uint_t count;

	if (IN_XPV_PANIC()) {
		xpv_panic_reload_cr3();
	} else {
		t.cmd = MMUEXT_TLB_FLUSH_LOCAL;
		if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
		ASSERT(count == 1);
	}
}

void
xen_gflush_tlb(cpuset_t cpus)
{
	struct mmuext_op t;
	uint_t count;

	ASSERT(!IN_XPV_PANIC());
	t.cmd = MMUEXT_TLB_FLUSH_MULTI;
	/*LINTED: constant in conditional context*/
	set_xen_guest_handle(t.arg2.vcpumask, &cpus);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}
/*
 * Install/Adjust a kpm mapping under the hypervisor.
 * Value of "how" should be:
 *	PT_WRITABLE | PT_VALID - regular kpm mapping
 *	PT_VALID - make mapping read-only
 *	0 - remove mapping
 *
 * returns 0 on success. non-zero for failure.
 */
int
xen_kpm_page(pfn_t pfn, uint_t how)
{
	paddr_t pa = mmu_ptob((paddr_t)pfn);
	x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD;

	if (kpm_vbase == NULL)
		return (0);

	if (how)
		pte |= pa_to_ma(pa) | how;
	else
		pte = 0;
	return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa,
	    pte, UVMF_INVLPG | UVMF_ALL));
}

void
xen_pin(pfn_t pfn, level_t lvl)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_PIN_L1_TABLE + lvl;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

void
xen_unpin(pfn_t pfn)
{
	struct mmuext_op t;
	uint_t count;

	t.cmd = MMUEXT_UNPIN_TABLE;
	t.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
		panic("HYPERVISOR_mmuext_op() failed");
	ASSERT(count == 1);
}

static void
xen_map(uint64_t pte, caddr_t va)
{
	if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte,
	    UVMF_INVLPG | UVMF_LOCAL))
		panic("HYPERVISOR_update_va_mapping() failed");
}
#endif /* __xpv */
/*
 * Allocate a memory page for a hardware page table.
 *
 * A wrapper around page_get_physical(), with some extra checks.
 */
static pfn_t
ptable_alloc(uintptr_t seed)
{
	pfn_t pfn;
	page_t *pp;

	pfn = PFN_INVALID;

	/*
	 * The first check is to see if there is memory in the system. If we
	 * drop to throttlefree, then fail the ptable_alloc() and let the
	 * stealing code kick in. Note that we have to do this test here,
	 * since the test in page_create_throttle() would let the NOSLEEP
	 * allocation go through and deplete the page reserves.
	 *
	 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
	 */
	if (!NOMEMWAIT() && freemem <= throttlefree + 1)
		return (PFN_INVALID);

#ifdef DEBUG
	/*
	 * This code makes htable_steal() easier to test. By setting
	 * force_steal we force pagetable allocations to fall
	 * into the stealing code. Roughly 1 in every "force_steal"
	 * page table allocations will fail.
	 */
	if (proc_pageout != NULL && force_steal > 1 &&
	    ++ptable_cnt > force_steal) {
		ptable_cnt = 0;
		return (PFN_INVALID);
	}
#endif /* DEBUG */

	pp = page_get_physical(seed);
	if (pp == NULL)
		return (PFN_INVALID);
	ASSERT(PAGE_SHARED(pp));
	pfn = pp->p_pagenum;
	if (pfn == PFN_INVALID)
		panic("ptable_alloc(): Invalid PFN!!");
	atomic_inc_32(&active_ptables);
	HATSTAT_INC(hs_ptable_allocs);
	return (pfn);
}

/*
 * Free an htable's associated page table page. See the comments
 * for ptable_alloc().
 */
static void
ptable_free(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);

	/*
	 * need to destroy the page used for the pagetable
	 */
	ASSERT(pfn != PFN_INVALID);
	HATSTAT_INC(hs_ptable_frees);
	atomic_dec_32(&active_ptables);
	if (pp == NULL)
		panic("ptable_free(): no page for pfn!");
	ASSERT(PAGE_SHARED(pp));
	ASSERT(pfn == pp->p_pagenum);
	ASSERT(!IN_XPV_PANIC());

	/*
	 * Get an exclusive lock, might have to wait for a kmem reader.
	 */
	if (!page_tryupgrade(pp)) {
		u_offset_t off = pp->p_offset;
		page_unlock(pp);
		pp = page_lookup(&kvp, off, SE_EXCL);
		if (pp == NULL)
			panic("page not found");
	}
#ifdef __xpv
	if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0)
		panic("failure making kpm r/w pfn=0x%lx", pfn);
#endif
	page_hashout(pp, NULL);
	page_free(pp, 1);
	page_unresv(1);
}
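/*
 * Note the locking contract between the two routines above: ptable_alloc()
 * returns with the pagetable page still held SE_SHARED, and ptable_free()
 * upgrades that hold to SE_EXCL before returning the page to the free list.
 */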
/*
 * Put one htable on the reserve list.
 */
static void
htable_put_reserve(htable_t *ht)
{
	ht->ht_hat = NULL;		/* no longer tied to a hat */
	ASSERT(ht->ht_pfn == PFN_INVALID);
	HATSTAT_INC(hs_htable_rputs);
	mutex_enter(&htable_reserve_mutex);
	ht->ht_next = htable_reserve_pool;
	htable_reserve_pool = ht;
	++htable_reserve_cnt;
	mutex_exit(&htable_reserve_mutex);
}

/*
 * Take one htable from the reserve.
 */
static htable_t *
htable_get_reserve(void)
{
	htable_t *ht = NULL;

	mutex_enter(&htable_reserve_mutex);
	if (htable_reserve_cnt != 0) {
		ht = htable_reserve_pool;
		ASSERT(ht != NULL);
		ASSERT(ht->ht_pfn == PFN_INVALID);
		htable_reserve_pool = ht->ht_next;
		--htable_reserve_cnt;
		HATSTAT_INC(hs_htable_rgets);
	}
	mutex_exit(&htable_reserve_mutex);
	return (ht);
}

/*
 * Allocate initial htables and put them on the reserve list
 */
void
htable_initial_reserve(uint_t count)
{
	htable_t *ht;

	count += HTABLE_RESERVE_AMOUNT;
	while (count > 0) {
		ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
		ASSERT(ht != NULL);

		ASSERT(use_boot_reserve);
		ht->ht_pfn = PFN_INVALID;
		htable_put_reserve(ht);
		--count;
	}
}
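/*
 * htable_initial_reserve() is only called while use_boot_reserve is set,
 * i.e. during early startup; the KM_NOSLEEP allocation above is expected
 * to always succeed at that point, hence the ASSERT()s.
 */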
/*
 * Readjust the reserves after a thread finishes using them.
 */
void
htable_adjust_reserve()
{
	htable_t *ht;

	/*
	 * Free any excess htables in the reserve list
	 */
	while (htable_reserve_cnt > htable_reserve_amount &&
	    !USE_HAT_RESERVES()) {
		ht = htable_get_reserve();
		if (ht == NULL)
			return;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		kmem_cache_free(htable_cache, ht);
	}
}

/*
 * Search the active htables for one to steal. Start at a different hash
 * bucket every time to help spread the pain of stealing
 */
static void
htable_steal_active(hat_t *hat, uint_t cnt, uint_t threshold,
    uint_t *stolen, htable_t **list)
{
	static uint_t h_seed = 0;
	htable_t *higher, *ht;
	uint_t h, e, h_start;
	uintptr_t va;
	x86pte_t pte;

	h = h_start = h_seed++ % hat->hat_num_hash;
	do {
		higher = NULL;
		HTABLE_ENTER(h);
		for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {

			/*
			 * Can we rule out reaping?
			 */
			if (ht->ht_busy != 0 ||
			    (ht->ht_flags & HTABLE_SHARED_PFN) ||
			    ht->ht_level > 0 || ht->ht_valid_cnt > threshold ||
			    ht->ht_lock_cnt != 0)
				continue;

			/*
			 * Increment busy so the htable can't disappear. We
			 * drop the htable mutex to avoid deadlocks with
			 * hat_pageunload() and the hment mutex while we
			 * call hat_pte_unmap()
			 */
			++ht->ht_busy;
			HTABLE_EXIT(h);

			/*
			 * Try stealing.
			 * - unload and invalidate all PTEs
			 */
			for (e = 0, va = ht->ht_vaddr;
			    e < HTABLE_NUM_PTES(ht) && ht->ht_valid_cnt > 0 &&
			    ht->ht_busy == 1 && ht->ht_lock_cnt == 0;
			    ++e, va += MMU_PAGESIZE) {
				pte = x86pte_get(ht, e);
				if (!PTE_ISVALID(pte))
					continue;
				hat_pte_unmap(ht, e, HAT_UNLOAD, pte, NULL,
				    B_TRUE);
			}

			/*
			 * Reacquire htable lock. If we didn't remove all
			 * mappings in the table, or another thread added a new
			 * mapping behind us, give up on this table.
			 */
			HTABLE_ENTER(h);
			if (ht->ht_busy != 1 || ht->ht_valid_cnt != 0 ||
			    ht->ht_lock_cnt != 0) {
				--ht->ht_busy;
				continue;
			}

			/*
			 * Steal it and unlink the page table.
			 */
			higher = ht->ht_parent;
			unlink_ptp(higher, ht, ht->ht_vaddr);

			/*
			 * remove from the hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}

			/*
			 * Break to outer loop to release the
			 * higher (ht_parent) pagetable. This
			 * spreads out the pain caused by
			 * pagefaults.
			 */
			ht->ht_next = *list;
			*list = ht;
			++*stolen;
			break;
		}
		HTABLE_EXIT(h);
		if (higher != NULL)
			htable_release(higher);
		if (++h == hat->hat_num_hash)
			h = 0;
	} while (*stolen < cnt && h != h_start);
}

/*
 * Move hat to the end of the kas list
 */
static void
move_victim(hat_t *hat)
{
	ASSERT(MUTEX_HELD(&hat_list_lock));

	/* unlink victim hat */
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;

	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	/* relink at end of hat list */
	hat->hat_next = NULL;
	hat->hat_prev = kas.a_hat->hat_prev;
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat;
	else
		kas.a_hat->hat_next = hat;

	kas.a_hat->hat_prev = hat;
}
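/*
 * A note on the pass structure of htable_steal() below: pass 0 takes only
 * cached htables that are not in use; each later pass raises the
 * ht_valid_cnt threshold handed to htable_steal_active(), so progressively
 * busier pagetables become eligible for stealing.
 */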
/*
 * This routine steals htables from user processes. Called by htable_reap
 * (reap=TRUE) or htable_alloc (reap=FALSE).
 */
static htable_t *
htable_steal(uint_t cnt, boolean_t reap)
{
	hat_t		*hat = kas.a_hat;	/* list starts with khat */
	htable_t	*list = NULL;
	htable_t	*ht;
	uint_t		stolen = 0;
	uint_t		pass, passes;
	uint_t		threshold;

	/*
	 * Limit htable_steal_passes to something reasonable
	 */
	if (htable_steal_passes == 0)
		htable_steal_passes = 1;
	if (htable_steal_passes > mmu.ptes_per_table)
		htable_steal_passes = mmu.ptes_per_table;

	/*
	 * If we're stealing merely as part of kmem reaping (versus stealing
	 * to assure forward progress), we don't want to actually steal any
	 * active htables. (Stealing active htables merely to give memory
	 * back to the system can inadvertently kick off an htable crime wave
	 * as active processes repeatedly steal htables from one another,
	 * plummeting the system into a kind of HAT lawlessness that can
	 * become so violent as to impede the one thing that can end it: the
	 * freeing of memory via ARC reclaim and other means.) So if we're
	 * reaping, we limit ourselves to the first pass that steals cached
	 * htables that aren't in use -- which gives memory back, but averts
	 * the entire breakdown of social order.
	 */
	passes = reap ? 0 : htable_steal_passes;

	/*
	 * Loop through all user hats. The 1st pass takes cached htables that
	 * aren't in use. The later passes steal by removing mappings, too.
	 */
	atomic_inc_32(&htable_dont_cache);
	for (pass = 0; pass <= passes && stolen < cnt; ++pass) {
		threshold = pass * mmu.ptes_per_table / htable_steal_passes;

		mutex_enter(&hat_list_lock);

		/* skip the first hat (kernel) */
		hat = kas.a_hat->hat_next;
		for (;;) {
			/*
			 * Skip any hat that is already being stolen from.
			 *
			 * We skip SHARED hats, as these are dummy
			 * hats that host ISM shared page tables.
			 *
			 * We also skip if HAT_FREEING because hat_pte_unmap()
			 * won't zero out the PTE's. That would lead to hitting
			 * stale PTEs either here or under hat_unload() when we
			 * steal and unload the same page table in competing
			 * threads.
			 */
			while (hat != NULL &&
			    (hat->hat_flags &
			    (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
				hat = hat->hat_next;

			if (hat == NULL)
				break;

			/*
			 * Mark the HAT as a stealing victim so that it is
			 * not freed from under us, e.g. in as_free()
			 */
			hat->hat_flags |= HAT_VICTIM;
			mutex_exit(&hat_list_lock);

			/*
			 * Take any htables from the hat's cached "free" list.
			 */
			hat_enter(hat);
			while ((ht = hat->hat_ht_cached) != NULL &&
			    stolen < cnt) {
				hat->hat_ht_cached = ht->ht_next;
				ht->ht_next = list;
				list = ht;
				++stolen;
			}
			hat_exit(hat);

			/*
			 * Don't steal active htables on first pass.
			 */
			if (pass != 0 && (stolen < cnt))
				htable_steal_active(hat, cnt, threshold,
				    &stolen, &list);

			/*
			 * do synchronous teardown for the reap case so that
			 * we can forget hat; at this time, hat is
			 * guaranteed to be around because HAT_VICTIM is set
			 * (see htable_free() for similar code)
			 */
			for (ht = list; (ht) && (reap); ht = ht->ht_next) {
				if (ht->ht_hat == NULL)
					continue;
				ASSERT(ht->ht_hat == hat);
#if defined(__xpv) && defined(__amd64)
				if (!(ht->ht_flags & HTABLE_VLP) &&
				    ht->ht_level == mmu.max_level) {
					ptable_free(hat->hat_user_ptable);
					hat->hat_user_ptable = PFN_INVALID;
				}
#endif
				/*
				 * forget the hat
				 */
				ht->ht_hat = NULL;
			}

			mutex_enter(&hat_list_lock);

			/*
			 * Are we finished?
			 */
			if (stolen == cnt) {
				/*
				 * Try to spread the pain of stealing,
				 * move victim HAT to the end of the HAT list.
				 */
				if (pass >= 1 && cnt == 1 &&
				    kas.a_hat->hat_prev != hat)
					move_victim(hat);
				/*
				 * We are finished
				 */
			}

			/*
			 * Clear the victim flag, hat can go away now (once
			 * the lock is dropped)
			 */
			if (hat->hat_flags & HAT_VICTIM) {
				ASSERT(hat != kas.a_hat);
				hat->hat_flags &= ~HAT_VICTIM;
				cv_broadcast(&hat_list_cv);
			}

			/* move on to the next hat */
			hat = hat->hat_next;
		}

		mutex_exit(&hat_list_lock);

	}
	ASSERT(!MUTEX_HELD(&hat_list_lock));

	atomic_dec_32(&htable_dont_cache);
	return (list);
}
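/*
 * An illustration of the reap_cnt computation in htable_reap() below:
 * with a physmem of 1M pages and 200000 active pagetables, reap_cnt is
 * MAX(MIN(1048576 / 20, 200000 / 20), 10) == 10000, i.e. the reap is
 * capped by the number of active pagetables rather than by physmem.
 */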
/*
 * This is invoked from kmem when the system is low on memory.  We try
 * to free hments, htables, and ptables to improve the memory situation.
 */
/*ARGSUSED*/
static void
htable_reap(void *handle)
{
	uint_t		reap_cnt;
	htable_t	*list;
	htable_t	*ht;

	HATSTAT_INC(hs_reap_attempts);
	if (!can_steal_post_boot)
		return;

	/*
	 * Try to reap 5% of the page tables bounded by a maximum of
	 * 5% of physmem and a minimum of 10.
	 */
	reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);

	/*
	 * Note: htable_dont_cache should be set at the time of
	 * invoking htable_free()
	 */
	atomic_inc_32(&htable_dont_cache);
	/*
	 * Let htable_steal() do the work, we just call htable_free()
	 */
	XPV_DISALLOW_MIGRATE();
	list = htable_steal(reap_cnt, B_TRUE);
	XPV_ALLOW_MIGRATE();
	while ((ht = list) != NULL) {
		list = ht->ht_next;
		HATSTAT_INC(hs_reaped);
		htable_free(ht);
	}
	atomic_dec_32(&htable_dont_cache);

	/*
	 * Free up excess reserves
	 */
	htable_adjust_reserve();
	hment_adjust_reserve();
}
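/*
 * Note on kmflags in htable_alloc() below: once stealing is possible
 * (can_steal_post_boot), kmem allocations use KM_NOSLEEP so that a failed
 * allocation falls through to kmem_reap()/htable_steal() rather than
 * blocking.
 */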
/*
 * Allocate an htable, stealing one or using the reserve if necessary
 */
static htable_t *
htable_alloc(
	hat_t		*hat,
	uintptr_t	vaddr,
	level_t		level,
	htable_t	*shared)
{
	htable_t	*ht = NULL;
	uint_t		is_vlp;
	uint_t		is_bare = 0;
	uint_t		need_to_zero = 1;
	int		kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		/*
		 * Allocate an htable, possibly refilling the reserves.
		 */
		if (USE_HAT_RESERVES()) {
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (USE_HAT_RESERVES() ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ht->ht_pfn = ptable_alloc((uintptr_t)ht);
			if (ht->ht_pfn == PFN_INVALID) {
				if (USE_HAT_RESERVES())
					htable_put_reserve(ht);
				else
					kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1, B_FALSE);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL) {
			if (is_bare) {
				ptable_free(ht->ht_pfn);
				ht->ht_pfn = PFN_INVALID;
#if defined(__xpv) && defined(__amd64)
			/*
			 * make stolen page table writable again in kpm
			 */
			} else if (kpm_vbase && xen_kpm_page(ht->ht_pfn,
			    PT_VALID | PT_WRITABLE) < 0) {
				panic("failure making kpm r/w pfn=0x%lx",
				    ht->ht_pfn);
#endif
			}
		}
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

#if defined(__amd64) && defined(__xpv)
	/*
	 * Under the 64-bit hypervisor, we have 2 top level page tables.
	 * If this allocation fails, we'll resort to stealing.
	 * We use the stolen page indirectly, by freeing the
	 * stolen htable first.
	 */
	if (level == mmu.max_level) {
		for (;;) {
			htable_t *stolen;

			hat->hat_user_ptable = ptable_alloc((uintptr_t)ht + 1);
			if (hat->hat_user_ptable != PFN_INVALID)
				break;
			stolen = htable_steal(1, B_FALSE);
			if (stolen == NULL)
				panic("2nd steal ptable failed\n");
			htable_free(stolen);
		}
		block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable),
		    MMU_PAGESIZE);
	}
#endif

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);

#if defined(__amd64) && defined(__xpv)
	if (!is_bare && kpm_vbase) {
		(void) xen_kpm_page(ht->ht_pfn, PT_VALID);
		if (level == mmu.max_level)
			(void) xen_kpm_page(hat->hat_user_ptable, PT_VALID);
	}
#endif

	return (ht);
}
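/*
 * On successful return from htable_alloc() the htable has ht_busy == 1
 * and, unless it is a bare (VLP or shared) htable, owns a zeroed
 * pagetable page.
 */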
/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t	*hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot time reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht->ht_pfn);
#if defined(__amd64) && defined(__xpv)
		if (ht->ht_level == mmu.max_level && hat != NULL) {
			ptable_free(hat->hat_user_ptable);
			hat->hat_user_ptable = PFN_INVALID;
		}
#endif
	}
	ht->ht_pfn = PFN_INVALID;

	/*
	 * Free it or put into reserves.
	 */
	if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
		htable_put_reserve(ht);
	} else {
		kmem_cache_free(htable_cache, ht);
		htable_adjust_reserve();
	}
}

/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t	*ht;
	int		h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		atomic_inc_32(&htable_dont_cache);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_dec_32(&htable_dont_cache);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	expect = MAKEPTP(old->ht_pfn, old->ht_level);
	x86pte_t	found;
	hat_t		*hat = old->ht_hat;

	ASSERT(higher->ht_busy > 0);
	ASSERT(higher->ht_valid_cnt > 0);
	ASSERT(old->ht_valid_cnt == 0);
	found = x86pte_cas(higher, entry, expect, 0);
#ifdef __xpv
	/*
	 * This is weird, but Xen apparently automatically unlinks empty
	 * pagetables from the upper page table. So allow PTP to be 0 already.
	 */
	if (found != expect && found != 0)
#else
	if (found != expect)
#endif
		panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
		    found, expect);

	/*
	 * When a top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors.
	 *
	 * If we don't need to do that, then we still have to INVLPG against
	 * an address covered by the inner page table, as the latest processors
	 * have TLB-like caches for non-leaf page table entries.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
		    DEMAP_ALL_ADDR : old->ht_vaddr);
	}

	HTABLE_DEC(higher->ht_valid_cnt);
}
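/*
 * link_ptp() below is the inverse of unlink_ptp(); between them they keep
 * ht_valid_cnt of the higher level htable in sync with the number of PTP
 * entries pointing at lower level pagetables.
 */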
/*
 * Link an entry for a new table at vaddr and level into the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
 */
static void
link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
{
	uint_t		entry = htable_va2entry(vaddr, higher);
	x86pte_t	newptp = MAKEPTP(new->ht_pfn, new->ht_level);
	x86pte_t	found;

	ASSERT(higher->ht_busy > 0);

	ASSERT(new->ht_level != mmu.max_level);

	HTABLE_INC(higher->ht_valid_cnt);

	found = x86pte_cas(higher, entry, 0, newptp);
	if ((found & ~PT_REF) != 0)
		panic("HAT: ptp not 0, found=" FMT_PTE, found);

	/*
	 * When any top level VLP page table entry changes, we must issue
	 * a reload of cr3 on all processors using it.
	 * We also need to do this for the kernel hat on PAE 32 bit kernel.
	 */
	if (
#ifdef __i386
	    (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
#endif
	    (higher->ht_flags & HTABLE_VLP))
		hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
}

/*
 * Release of hold on an htable. If this is the last use and the pagetable
 * is empty we may want to free it, then recursively look at the pagetable
 * above it. The recursion is handled by the outer while() loop.
 *
 * On the metal, during process exit, we don't bother unlinking the tables from
 * upper level pagetables. They are instead handled in bulk by hat_free_end().
 * We can't do this on the hypervisor as we need the page table to be
 * implicitly unpinned before it goes to the free page lists. This can't
 * happen unless we fully unlink it from the page table hierarchy.
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
11957c478bd9Sstevel@tonic-gate */ 11967c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 11977c478bd9Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt >= 0); 11987c478bd9Sstevel@tonic-gate ASSERT(ht->ht_busy > 0); 11997c478bd9Sstevel@tonic-gate if (ht->ht_valid_cnt > 0) 12007c478bd9Sstevel@tonic-gate break; 12017c478bd9Sstevel@tonic-gate if (ht->ht_busy > 1) 12027c478bd9Sstevel@tonic-gate break; 12032ba723d8Smec ASSERT(ht->ht_lock_cnt == 0); 12047c478bd9Sstevel@tonic-gate 1205843e1988Sjohnlev #if !defined(__xpv) 12067c478bd9Sstevel@tonic-gate /* 12077c478bd9Sstevel@tonic-gate * we always release empty shared htables 12087c478bd9Sstevel@tonic-gate */ 12097c478bd9Sstevel@tonic-gate if (!(ht->ht_flags & HTABLE_SHARED_PFN)) { 12107c478bd9Sstevel@tonic-gate 12117c478bd9Sstevel@tonic-gate /* 12127c478bd9Sstevel@tonic-gate * don't release if in address space tear down 12137c478bd9Sstevel@tonic-gate */ 12147c478bd9Sstevel@tonic-gate if (hat->hat_flags & HAT_FREEING) 12157c478bd9Sstevel@tonic-gate break; 12167c478bd9Sstevel@tonic-gate 12177c478bd9Sstevel@tonic-gate /* 12187c478bd9Sstevel@tonic-gate * At and above max_page_level, free if it's for 12197c478bd9Sstevel@tonic-gate * a boot-time kernel mapping below kernelbase. 12207c478bd9Sstevel@tonic-gate */ 12217c478bd9Sstevel@tonic-gate if (level >= mmu.max_page_level && 12227c478bd9Sstevel@tonic-gate (hat != kas.a_hat || va >= kernelbase)) 12237c478bd9Sstevel@tonic-gate break; 12247c478bd9Sstevel@tonic-gate } 1225843e1988Sjohnlev #endif /* __xpv */ 12267c478bd9Sstevel@tonic-gate 12277c478bd9Sstevel@tonic-gate /* 1228ae115bc7Smrj * Remember if we destroy an htable that shares its PFN 1229ae115bc7Smrj * from elsewhere. 12307c478bd9Sstevel@tonic-gate */ 12317c478bd9Sstevel@tonic-gate if (ht->ht_flags & HTABLE_SHARED_PFN) { 12327c478bd9Sstevel@tonic-gate ASSERT(shared == NULL); 12337c478bd9Sstevel@tonic-gate shared = ht->ht_shares; 12347c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_htable_unshared); 12357c478bd9Sstevel@tonic-gate } 12367c478bd9Sstevel@tonic-gate 12377c478bd9Sstevel@tonic-gate /* 12387c478bd9Sstevel@tonic-gate * Handle release of a table and freeing the htable_t. 12397c478bd9Sstevel@tonic-gate * Unlink it from the table higher (ie. ht_parent). 12407c478bd9Sstevel@tonic-gate */ 12417c478bd9Sstevel@tonic-gate higher = ht->ht_parent; 12427c478bd9Sstevel@tonic-gate ASSERT(higher != NULL); 12437c478bd9Sstevel@tonic-gate 12447c478bd9Sstevel@tonic-gate /* 12457c478bd9Sstevel@tonic-gate * Unlink the pagetable. 
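			 * This clears the PTP entry in the parent table and
			 * performs any needed TLB shootdown before the
			 * htable_t itself is taken off the hash list and
			 * freed.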
12467c478bd9Sstevel@tonic-gate */ 12477c478bd9Sstevel@tonic-gate unlink_ptp(higher, ht, va); 12487c478bd9Sstevel@tonic-gate 12497c478bd9Sstevel@tonic-gate /* 12507c478bd9Sstevel@tonic-gate * remove this htable from its hash list 12517c478bd9Sstevel@tonic-gate */ 12527c478bd9Sstevel@tonic-gate if (ht->ht_next) 12537c478bd9Sstevel@tonic-gate ht->ht_next->ht_prev = ht->ht_prev; 12547c478bd9Sstevel@tonic-gate 12557c478bd9Sstevel@tonic-gate if (ht->ht_prev) { 12567c478bd9Sstevel@tonic-gate ht->ht_prev->ht_next = ht->ht_next; 12577c478bd9Sstevel@tonic-gate } else { 12587c478bd9Sstevel@tonic-gate ASSERT(hat->hat_ht_hash[hashval] == ht); 12597c478bd9Sstevel@tonic-gate hat->hat_ht_hash[hashval] = ht->ht_next; 12607c478bd9Sstevel@tonic-gate } 12617c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 12627c478bd9Sstevel@tonic-gate htable_free(ht); 12637c478bd9Sstevel@tonic-gate ht = higher; 12647c478bd9Sstevel@tonic-gate } 12657c478bd9Sstevel@tonic-gate 12667c478bd9Sstevel@tonic-gate ASSERT(ht->ht_busy >= 1); 12677c478bd9Sstevel@tonic-gate --ht->ht_busy; 12687c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 12697c478bd9Sstevel@tonic-gate 12707c478bd9Sstevel@tonic-gate /* 12717c478bd9Sstevel@tonic-gate * If we released a shared htable, do a release on the htable 12727c478bd9Sstevel@tonic-gate * from which it shared 12737c478bd9Sstevel@tonic-gate */ 12747c478bd9Sstevel@tonic-gate ht = shared; 12757c478bd9Sstevel@tonic-gate } 12767c478bd9Sstevel@tonic-gate } 12777c478bd9Sstevel@tonic-gate 12787c478bd9Sstevel@tonic-gate /* 12797c478bd9Sstevel@tonic-gate * Find the htable for the pagetable at the given level for the given address. 12807c478bd9Sstevel@tonic-gate * If found acquires a hold that eventually needs to be htable_release()d 12817c478bd9Sstevel@tonic-gate */ 12827c478bd9Sstevel@tonic-gate htable_t * 12837c478bd9Sstevel@tonic-gate htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level) 12847c478bd9Sstevel@tonic-gate { 12857c478bd9Sstevel@tonic-gate uintptr_t base; 12867c478bd9Sstevel@tonic-gate uint_t hashval; 12877c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 12887c478bd9Sstevel@tonic-gate 12897c478bd9Sstevel@tonic-gate ASSERT(level >= 0); 12907c478bd9Sstevel@tonic-gate ASSERT(level <= TOP_LEVEL(hat)); 12917c478bd9Sstevel@tonic-gate 12927173d045Sjosephb if (level == TOP_LEVEL(hat)) { 12937173d045Sjosephb #if defined(__amd64) 12947173d045Sjosephb /* 12957173d045Sjosephb * 32 bit address spaces on 64 bit kernels need to check 12967173d045Sjosephb * for overflow of the 32 bit address space 12977173d045Sjosephb */ 12987173d045Sjosephb if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32)) 12997173d045Sjosephb return (NULL); 13007173d045Sjosephb #endif 13017c478bd9Sstevel@tonic-gate base = 0; 13027173d045Sjosephb } else { 13037c478bd9Sstevel@tonic-gate base = vaddr & LEVEL_MASK(level + 1); 13047173d045Sjosephb } 13057c478bd9Sstevel@tonic-gate 13067c478bd9Sstevel@tonic-gate hashval = HTABLE_HASH(hat, base, level); 13077c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 13087c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) { 13097c478bd9Sstevel@tonic-gate if (ht->ht_hat == hat && 13107c478bd9Sstevel@tonic-gate ht->ht_vaddr == base && 13117c478bd9Sstevel@tonic-gate ht->ht_level == level) 13127c478bd9Sstevel@tonic-gate break; 13137c478bd9Sstevel@tonic-gate } 13147c478bd9Sstevel@tonic-gate if (ht) 13157c478bd9Sstevel@tonic-gate ++ht->ht_busy; 13167c478bd9Sstevel@tonic-gate 13177c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 13187c478bd9Sstevel@tonic-gate return (ht); 
13197c478bd9Sstevel@tonic-gate } 13207c478bd9Sstevel@tonic-gate 13217c478bd9Sstevel@tonic-gate /* 13227c478bd9Sstevel@tonic-gate * Acquires a hold on a known htable (from a locked hment entry). 13237c478bd9Sstevel@tonic-gate */ 13247c478bd9Sstevel@tonic-gate void 13257c478bd9Sstevel@tonic-gate htable_acquire(htable_t *ht) 13267c478bd9Sstevel@tonic-gate { 13277c478bd9Sstevel@tonic-gate hat_t *hat = ht->ht_hat; 13287c478bd9Sstevel@tonic-gate level_t level = ht->ht_level; 13297c478bd9Sstevel@tonic-gate uintptr_t base = ht->ht_vaddr; 13307c478bd9Sstevel@tonic-gate uint_t hashval = HTABLE_HASH(hat, base, level); 13317c478bd9Sstevel@tonic-gate 13327c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 13337c478bd9Sstevel@tonic-gate #ifdef DEBUG 13347c478bd9Sstevel@tonic-gate /* 13357c478bd9Sstevel@tonic-gate * make sure the htable is there 13367c478bd9Sstevel@tonic-gate */ 13377c478bd9Sstevel@tonic-gate { 13387c478bd9Sstevel@tonic-gate htable_t *h; 13397c478bd9Sstevel@tonic-gate 13407c478bd9Sstevel@tonic-gate for (h = hat->hat_ht_hash[hashval]; 13417c478bd9Sstevel@tonic-gate h && h != ht; 13427c478bd9Sstevel@tonic-gate h = h->ht_next) 13437c478bd9Sstevel@tonic-gate ; 13447c478bd9Sstevel@tonic-gate ASSERT(h == ht); 13457c478bd9Sstevel@tonic-gate } 13467c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 13477c478bd9Sstevel@tonic-gate ++ht->ht_busy; 13487c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 13497c478bd9Sstevel@tonic-gate } 13507c478bd9Sstevel@tonic-gate 13517c478bd9Sstevel@tonic-gate /* 13527c478bd9Sstevel@tonic-gate * Find the htable for the pagetable at the given level for the given address. 13537c478bd9Sstevel@tonic-gate * If found acquires a hold that eventually needs to be htable_release()d 13547c478bd9Sstevel@tonic-gate * If not found the table is created. 13557c478bd9Sstevel@tonic-gate * 13567c478bd9Sstevel@tonic-gate * Since we can't hold a hash table mutex during allocation, we have to 13577c478bd9Sstevel@tonic-gate * drop it and redo the search on a create. Then we may have to free the newly 13587c478bd9Sstevel@tonic-gate * allocated htable if another thread raced in and created it ahead of us. 13597c478bd9Sstevel@tonic-gate */ 13607c478bd9Sstevel@tonic-gate htable_t * 13617c478bd9Sstevel@tonic-gate htable_create( 13627c478bd9Sstevel@tonic-gate hat_t *hat, 13637c478bd9Sstevel@tonic-gate uintptr_t vaddr, 13647c478bd9Sstevel@tonic-gate level_t level, 13657c478bd9Sstevel@tonic-gate htable_t *shared) 13667c478bd9Sstevel@tonic-gate { 13677c478bd9Sstevel@tonic-gate uint_t h; 13687c478bd9Sstevel@tonic-gate level_t l; 13697c478bd9Sstevel@tonic-gate uintptr_t base; 13707c478bd9Sstevel@tonic-gate htable_t *ht; 13717c478bd9Sstevel@tonic-gate htable_t *higher = NULL; 13727c478bd9Sstevel@tonic-gate htable_t *new = NULL; 13737c478bd9Sstevel@tonic-gate 13747c478bd9Sstevel@tonic-gate if (level < 0 || level > TOP_LEVEL(hat)) 13757c478bd9Sstevel@tonic-gate panic("htable_create(): level %d out of range\n", level); 13767c478bd9Sstevel@tonic-gate 13777c478bd9Sstevel@tonic-gate /* 13787c478bd9Sstevel@tonic-gate * Create the page tables in top down order. 
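	 * For example, creating a level 0 htable for vaddr on an amd64 hat
	 * (TOP_LEVEL(hat) == 3) visits l = 3, 2, 1, 0, computing
	 * base = vaddr & LEVEL_MASK(l + 1) at each step and creating any
	 * missing intermediate pagetable before descending.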
13797c478bd9Sstevel@tonic-gate */ 13807c478bd9Sstevel@tonic-gate for (l = TOP_LEVEL(hat); l >= level; --l) { 13817c478bd9Sstevel@tonic-gate new = NULL; 13827c478bd9Sstevel@tonic-gate if (l == TOP_LEVEL(hat)) 13837c478bd9Sstevel@tonic-gate base = 0; 13847c478bd9Sstevel@tonic-gate else 13857c478bd9Sstevel@tonic-gate base = vaddr & LEVEL_MASK(l + 1); 13867c478bd9Sstevel@tonic-gate 13877c478bd9Sstevel@tonic-gate h = HTABLE_HASH(hat, base, l); 13887c478bd9Sstevel@tonic-gate try_again: 13897c478bd9Sstevel@tonic-gate /* 13907c478bd9Sstevel@tonic-gate * look up the htable at this level 13917c478bd9Sstevel@tonic-gate */ 13927c478bd9Sstevel@tonic-gate HTABLE_ENTER(h); 13937c478bd9Sstevel@tonic-gate if (l == TOP_LEVEL(hat)) { 13947c478bd9Sstevel@tonic-gate ht = hat->hat_htable; 13957c478bd9Sstevel@tonic-gate } else { 13967c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 13977c478bd9Sstevel@tonic-gate ASSERT(ht->ht_hat == hat); 13987c478bd9Sstevel@tonic-gate if (ht->ht_vaddr == base && 13997c478bd9Sstevel@tonic-gate ht->ht_level == l) 14007c478bd9Sstevel@tonic-gate break; 14017c478bd9Sstevel@tonic-gate } 14027c478bd9Sstevel@tonic-gate } 14037c478bd9Sstevel@tonic-gate 14047c478bd9Sstevel@tonic-gate /* 14057c478bd9Sstevel@tonic-gate * if we found the htable, increment its busy cnt 14067c478bd9Sstevel@tonic-gate * and if we had allocated a new htable, free it. 14077c478bd9Sstevel@tonic-gate */ 14087c478bd9Sstevel@tonic-gate if (ht != NULL) { 14097c478bd9Sstevel@tonic-gate /* 14107c478bd9Sstevel@tonic-gate * If we find a pre-existing shared table, it must 14117c478bd9Sstevel@tonic-gate * share from the same place. 14127c478bd9Sstevel@tonic-gate */ 14137c478bd9Sstevel@tonic-gate if (l == level && shared && ht->ht_shares && 14147c478bd9Sstevel@tonic-gate ht->ht_shares != shared) { 14157c478bd9Sstevel@tonic-gate panic("htable shared from wrong place " 1416903a11ebSrh87107 "found htable=%p shared=%p", 1417903a11ebSrh87107 (void *)ht, (void *)shared); 14187c478bd9Sstevel@tonic-gate } 14197c478bd9Sstevel@tonic-gate ++ht->ht_busy; 14207c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 14217c478bd9Sstevel@tonic-gate if (new) 14227c478bd9Sstevel@tonic-gate htable_free(new); 14237c478bd9Sstevel@tonic-gate if (higher != NULL) 14247c478bd9Sstevel@tonic-gate htable_release(higher); 14257c478bd9Sstevel@tonic-gate higher = ht; 14267c478bd9Sstevel@tonic-gate 14277c478bd9Sstevel@tonic-gate /* 14287c478bd9Sstevel@tonic-gate * if we didn't find it on the first search 14297c478bd9Sstevel@tonic-gate * allocate a new one and search again 14307c478bd9Sstevel@tonic-gate */ 14317c478bd9Sstevel@tonic-gate } else if (new == NULL) { 14327c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 14337c478bd9Sstevel@tonic-gate new = htable_alloc(hat, base, l, 14347c478bd9Sstevel@tonic-gate l == level ? shared : NULL); 14357c478bd9Sstevel@tonic-gate goto try_again; 14367c478bd9Sstevel@tonic-gate 14377c478bd9Sstevel@tonic-gate /* 14387c478bd9Sstevel@tonic-gate * 2nd search and still not there, use "new" table 14397c478bd9Sstevel@tonic-gate * Link new table into higher, when not at top level. 
14407c478bd9Sstevel@tonic-gate */ 14417c478bd9Sstevel@tonic-gate } else { 14427c478bd9Sstevel@tonic-gate ht = new; 14437c478bd9Sstevel@tonic-gate if (higher != NULL) { 14447c478bd9Sstevel@tonic-gate link_ptp(higher, ht, base); 14457c478bd9Sstevel@tonic-gate ht->ht_parent = higher; 14467c478bd9Sstevel@tonic-gate } 14477c478bd9Sstevel@tonic-gate ht->ht_next = hat->hat_ht_hash[h]; 14487c478bd9Sstevel@tonic-gate ASSERT(ht->ht_prev == NULL); 14497c478bd9Sstevel@tonic-gate if (hat->hat_ht_hash[h]) 14507c478bd9Sstevel@tonic-gate hat->hat_ht_hash[h]->ht_prev = ht; 14517c478bd9Sstevel@tonic-gate hat->hat_ht_hash[h] = ht; 14527c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 14537c478bd9Sstevel@tonic-gate 14547c478bd9Sstevel@tonic-gate /* 14557c478bd9Sstevel@tonic-gate * Note we don't do htable_release(higher). 14567c478bd9Sstevel@tonic-gate * That happens recursively when "new" is removed by 14577c478bd9Sstevel@tonic-gate * htable_release() or htable_steal(). 14587c478bd9Sstevel@tonic-gate */ 14597c478bd9Sstevel@tonic-gate higher = ht; 14607c478bd9Sstevel@tonic-gate 14617c478bd9Sstevel@tonic-gate /* 14627c478bd9Sstevel@tonic-gate * If we just created a new shared page table we 14637c478bd9Sstevel@tonic-gate * increment the shared htable's busy count, so that 14647c478bd9Sstevel@tonic-gate * it can't be the victim of a steal even if it's empty. 14657c478bd9Sstevel@tonic-gate */ 14667c478bd9Sstevel@tonic-gate if (l == level && shared) { 14677c478bd9Sstevel@tonic-gate (void) htable_lookup(shared->ht_hat, 14687c478bd9Sstevel@tonic-gate shared->ht_vaddr, shared->ht_level); 14697c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_htable_shared); 14707c478bd9Sstevel@tonic-gate } 14717c478bd9Sstevel@tonic-gate } 14727c478bd9Sstevel@tonic-gate } 14737c478bd9Sstevel@tonic-gate 14747c478bd9Sstevel@tonic-gate return (ht); 14757c478bd9Sstevel@tonic-gate } 14767c478bd9Sstevel@tonic-gate 14777c478bd9Sstevel@tonic-gate /* 1478843e1988Sjohnlev * Inherit initial pagetables from the boot program. On the 64-bit 1479843e1988Sjohnlev * hypervisor we also temporarily mark the p_index field of page table 1480843e1988Sjohnlev * pages, so we know not to try making them writable in seg_kpm. 
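 *
 * A sketch of the expected use (illustrative only; "top_pfn" is a
 * hypothetical name for the pfn of the boot program's top level pagetable,
 * and the real call site lives in the boot-time HAT setup code):
 *
 *	htable_attach(kas.a_hat, 0, mmu.max_level, NULL, top_pfn);
 *
 * The routine below then recursively attaches every lower level pagetable
 * reachable from that top level table.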
1481ae115bc7Smrj  */
1482ae115bc7Smrj void
1483ae115bc7Smrj htable_attach(
1484ae115bc7Smrj 	hat_t *hat,
1485ae115bc7Smrj 	uintptr_t base,
1486ae115bc7Smrj 	level_t level,
1487ae115bc7Smrj 	htable_t *parent,
1488ae115bc7Smrj 	pfn_t pfn)
1489ae115bc7Smrj {
1490ae115bc7Smrj 	htable_t *ht;
1491ae115bc7Smrj 	uint_t h;
1492ae115bc7Smrj 	uint_t i;
1493ae115bc7Smrj 	x86pte_t pte;
1494ae115bc7Smrj 	x86pte_t *ptep;
1495ae115bc7Smrj 	page_t *pp;
1496ae115bc7Smrj 	extern page_t *boot_claim_page(pfn_t);
1497ae115bc7Smrj 
1498ae115bc7Smrj 	ht = htable_get_reserve();
1499ae115bc7Smrj 	if (level == mmu.max_level)
1500ae115bc7Smrj 		kas.a_hat->hat_htable = ht;
1501ae115bc7Smrj 	ht->ht_hat = hat;
1502ae115bc7Smrj 	ht->ht_parent = parent;
1503ae115bc7Smrj 	ht->ht_vaddr = base;
1504ae115bc7Smrj 	ht->ht_level = level;
1505ae115bc7Smrj 	ht->ht_busy = 1;
1506ae115bc7Smrj 	ht->ht_next = NULL;
1507ae115bc7Smrj 	ht->ht_prev = NULL;
1508ae115bc7Smrj 	ht->ht_flags = 0;
1509ae115bc7Smrj 	ht->ht_pfn = pfn;
1510ae115bc7Smrj 	ht->ht_lock_cnt = 0;
1511ae115bc7Smrj 	ht->ht_valid_cnt = 0;
1512ae115bc7Smrj 	if (parent != NULL)
1513ae115bc7Smrj 		++parent->ht_busy;
1514ae115bc7Smrj 
1515ae115bc7Smrj 	h = HTABLE_HASH(hat, base, level);
1516ae115bc7Smrj 	HTABLE_ENTER(h);
1517ae115bc7Smrj 	ht->ht_next = hat->hat_ht_hash[h];
1518ae115bc7Smrj 	ASSERT(ht->ht_prev == NULL);
1519ae115bc7Smrj 	if (hat->hat_ht_hash[h])
1520ae115bc7Smrj 		hat->hat_ht_hash[h]->ht_prev = ht;
1521ae115bc7Smrj 	hat->hat_ht_hash[h] = ht;
1522ae115bc7Smrj 	HTABLE_EXIT(h);
1523ae115bc7Smrj 
1524ae115bc7Smrj 	/*
1525ae115bc7Smrj 	 * make sure the page table physical page is not FREE
1526ae115bc7Smrj 	 */
1527ae115bc7Smrj 	if (page_resv(1, KM_NOSLEEP) == 0)
1528ae115bc7Smrj 		panic("page_resv() failed in ptable alloc");
1529ae115bc7Smrj 
1530ae115bc7Smrj 	pp = boot_claim_page(pfn);
1531ae115bc7Smrj 	ASSERT(pp != NULL);
15322d44e974SJoe Bonasera 
15332d44e974SJoe Bonasera 	/*
15342d44e974SJoe Bonasera 	 * Page table pages that were allocated by dboot or
15352d44e974SJoe Bonasera 	 * in very early startup didn't go through boot_mapin()
15362d44e974SJoe Bonasera 	 * and so won't have vnode/offsets. Fix that here.
15372d44e974SJoe Bonasera 	 */
15382d44e974SJoe Bonasera 	if (pp->p_vnode == NULL) {
15392d44e974SJoe Bonasera 		/* match offset calculation in page_get_physical() */
15402d44e974SJoe Bonasera 		u_offset_t offset = (uintptr_t)ht;
15412d44e974SJoe Bonasera 		if (offset > kernelbase)
15422d44e974SJoe Bonasera 			offset -= kernelbase;
15432d44e974SJoe Bonasera 		offset <<= MMU_PAGESHIFT;
15442d44e974SJoe Bonasera #if defined(__amd64)
15452d44e974SJoe Bonasera 		offset += mmu.hole_start;	/* something in VA hole */
15462d44e974SJoe Bonasera #else
15472d44e974SJoe Bonasera 		offset += 1ULL << 40;	/* something > 4 Gig */
15482d44e974SJoe Bonasera #endif
15492d44e974SJoe Bonasera 		ASSERT(page_exists(&kvp, offset) == NULL);
15502d44e974SJoe Bonasera 		(void) page_hashin(pp, &kvp, offset, NULL);
15512d44e974SJoe Bonasera 	}
1552ae115bc7Smrj 	page_downgrade(pp);
1553843e1988Sjohnlev #if defined(__xpv) && defined(__amd64)
1554ae115bc7Smrj 	/*
1555ae115bc7Smrj 	 * Record in the page_t that this is a pagetable, for segkpm setup.
1556ae115bc7Smrj 	 */
1557ae115bc7Smrj 	if (kpm_vbase)
1558ae115bc7Smrj 		pp->p_index = 1;
1559843e1988Sjohnlev #endif
1560ae115bc7Smrj 
1561ae115bc7Smrj 	/*
1562ae115bc7Smrj 	 * Count valid mappings and recursively attach lower level pagetables.
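	 * Note that the boot remap window used by kbm_remap_window() is
	 * shared, so it has to be re-established after each recursive call
	 * in the loop below.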
1563ae115bc7Smrj */ 1564ae115bc7Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1565ae115bc7Smrj for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) { 1566ae115bc7Smrj if (mmu.pae_hat) 1567ae115bc7Smrj pte = ptep[i]; 1568ae115bc7Smrj else 1569ae115bc7Smrj pte = ((x86pte32_t *)ptep)[i]; 1570ae115bc7Smrj if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) { 1571ae115bc7Smrj ++ht->ht_valid_cnt; 1572ae115bc7Smrj if (!PTE_ISPAGE(pte, level)) { 1573ae115bc7Smrj htable_attach(hat, base, level - 1, 1574ae115bc7Smrj ht, PTE2PFN(pte, level)); 1575ae115bc7Smrj ptep = kbm_remap_window(pfn_to_pa(pfn), 0); 1576ae115bc7Smrj } 1577ae115bc7Smrj } 1578ae115bc7Smrj base += LEVEL_SIZE(level); 1579ae115bc7Smrj if (base == mmu.hole_start) 1580ae115bc7Smrj base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK; 1581ae115bc7Smrj } 1582ae115bc7Smrj 1583ae115bc7Smrj /* 1584ae115bc7Smrj * As long as all the mappings we had were below kernel base 1585ae115bc7Smrj * we can release the htable. 1586ae115bc7Smrj */ 1587ae115bc7Smrj if (base < kernelbase) 1588ae115bc7Smrj htable_release(ht); 1589ae115bc7Smrj } 1590ae115bc7Smrj 1591ae115bc7Smrj /* 15927c478bd9Sstevel@tonic-gate * Walk through a given htable looking for the first valid entry. This 15937c478bd9Sstevel@tonic-gate * routine takes both a starting and ending address. The starting address 15947c478bd9Sstevel@tonic-gate * is required to be within the htable provided by the caller, but there is 15957c478bd9Sstevel@tonic-gate * no such restriction on the ending address. 15967c478bd9Sstevel@tonic-gate * 15977c478bd9Sstevel@tonic-gate * If the routine finds a valid entry in the htable (at or beyond the 15987c478bd9Sstevel@tonic-gate * starting address), the PTE (and its address) will be returned. 15997c478bd9Sstevel@tonic-gate * This PTE may correspond to either a page or a pagetable - it is the 16007c478bd9Sstevel@tonic-gate * caller's responsibility to determine which. If no valid entry is 16017c478bd9Sstevel@tonic-gate * found, 0 (and invalid PTE) and the next unexamined address will be 16027c478bd9Sstevel@tonic-gate * returned. 16037c478bd9Sstevel@tonic-gate * 16047c478bd9Sstevel@tonic-gate * The loop has been carefully coded for optimization. 16057c478bd9Sstevel@tonic-gate */ 16067c478bd9Sstevel@tonic-gate static x86pte_t 16077c478bd9Sstevel@tonic-gate htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr) 16087c478bd9Sstevel@tonic-gate { 16097c478bd9Sstevel@tonic-gate uint_t e; 16107c478bd9Sstevel@tonic-gate x86pte_t found_pte = (x86pte_t)0; 1611ae115bc7Smrj caddr_t pte_ptr; 1612ae115bc7Smrj caddr_t end_pte_ptr; 16137c478bd9Sstevel@tonic-gate int l = ht->ht_level; 16147c478bd9Sstevel@tonic-gate uintptr_t va = *vap & LEVEL_MASK(l); 16157c478bd9Sstevel@tonic-gate size_t pgsize = LEVEL_SIZE(l); 16167c478bd9Sstevel@tonic-gate 16177c478bd9Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr); 16187c478bd9Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht)); 16197c478bd9Sstevel@tonic-gate 16207c478bd9Sstevel@tonic-gate /* 16217c478bd9Sstevel@tonic-gate * Compute the starting index and ending virtual address 16227c478bd9Sstevel@tonic-gate */ 16237c478bd9Sstevel@tonic-gate e = htable_va2entry(va, ht); 16247c478bd9Sstevel@tonic-gate 16257c478bd9Sstevel@tonic-gate /* 16267c478bd9Sstevel@tonic-gate * The following page table scan code knows that the valid 16277c478bd9Sstevel@tonic-gate * bit of a PTE is in the lowest byte AND that x86 is little endian!! 
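	 * That is what lets pte_ptr be stepped through as a caddr_t:
	 * PT_VALID is bit 0 of a PTE, so testing the first byte at each
	 * mmu.pte_size stride finds a valid entry without loading whole
	 * 8 byte PTEs.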
16287c478bd9Sstevel@tonic-gate */ 1629ae115bc7Smrj pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0); 1630ae115bc7Smrj end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht)); 1631ae115bc7Smrj pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e); 163230f7a194Skchow while (!PTE_ISVALID(*pte_ptr)) { 16337c478bd9Sstevel@tonic-gate va += pgsize; 16347c478bd9Sstevel@tonic-gate if (va >= eaddr) 16357c478bd9Sstevel@tonic-gate break; 16367c478bd9Sstevel@tonic-gate pte_ptr += mmu.pte_size; 16377c478bd9Sstevel@tonic-gate ASSERT(pte_ptr <= end_pte_ptr); 16387c478bd9Sstevel@tonic-gate if (pte_ptr == end_pte_ptr) 16397c478bd9Sstevel@tonic-gate break; 16407c478bd9Sstevel@tonic-gate } 16417c478bd9Sstevel@tonic-gate 16427c478bd9Sstevel@tonic-gate /* 16437c478bd9Sstevel@tonic-gate * if we found a valid PTE, load the entire PTE 16447c478bd9Sstevel@tonic-gate */ 1645ae115bc7Smrj if (va < eaddr && pte_ptr != end_pte_ptr) 1646ae115bc7Smrj found_pte = GET_PTE((x86pte_t *)pte_ptr); 16477c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 16487c478bd9Sstevel@tonic-gate 16497c478bd9Sstevel@tonic-gate #if defined(__amd64) 16507c478bd9Sstevel@tonic-gate /* 16517c478bd9Sstevel@tonic-gate * deal with VA hole on amd64 16527c478bd9Sstevel@tonic-gate */ 16537c478bd9Sstevel@tonic-gate if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end) 16547c478bd9Sstevel@tonic-gate va = mmu.hole_end + va - mmu.hole_start; 16557c478bd9Sstevel@tonic-gate #endif /* __amd64 */ 16567c478bd9Sstevel@tonic-gate 16577c478bd9Sstevel@tonic-gate *vap = va; 16587c478bd9Sstevel@tonic-gate return (found_pte); 16597c478bd9Sstevel@tonic-gate } 16607c478bd9Sstevel@tonic-gate 16617c478bd9Sstevel@tonic-gate /* 16627c478bd9Sstevel@tonic-gate * Find the address and htable for the first populated translation at or 16637c478bd9Sstevel@tonic-gate * above the given virtual address. The caller may also specify an upper 16647c478bd9Sstevel@tonic-gate * limit to the address range to search. Uses level information to quickly 16657c478bd9Sstevel@tonic-gate * skip unpopulated sections of virtual address spaces. 16667c478bd9Sstevel@tonic-gate * 16677c478bd9Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable and virt addr 16687c478bd9Sstevel@tonic-gate * and has a hold on the htable. 16697c478bd9Sstevel@tonic-gate */ 16707c478bd9Sstevel@tonic-gate x86pte_t 16717c478bd9Sstevel@tonic-gate htable_walk( 16727c478bd9Sstevel@tonic-gate struct hat *hat, 16737c478bd9Sstevel@tonic-gate htable_t **htp, 16747c478bd9Sstevel@tonic-gate uintptr_t *vaddr, 16757c478bd9Sstevel@tonic-gate uintptr_t eaddr) 16767c478bd9Sstevel@tonic-gate { 16777c478bd9Sstevel@tonic-gate uintptr_t va = *vaddr; 16787c478bd9Sstevel@tonic-gate htable_t *ht; 16797c478bd9Sstevel@tonic-gate htable_t *prev = *htp; 16807c478bd9Sstevel@tonic-gate level_t l; 16817c478bd9Sstevel@tonic-gate level_t max_mapped_level; 16827c478bd9Sstevel@tonic-gate x86pte_t pte; 16837c478bd9Sstevel@tonic-gate 16847c478bd9Sstevel@tonic-gate ASSERT(eaddr > va); 16857c478bd9Sstevel@tonic-gate 16867c478bd9Sstevel@tonic-gate /* 16877c478bd9Sstevel@tonic-gate * If this is a user address, then we know we need not look beyond 16887c478bd9Sstevel@tonic-gate * kernelbase. 
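	 * Callers who want to search to the end of an address space pass
	 * HTABLE_WALK_TO_END as eaddr; for user hats that is clamped to
	 * kernelbase just below.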
16897c478bd9Sstevel@tonic-gate */ 16907c478bd9Sstevel@tonic-gate ASSERT(hat == kas.a_hat || eaddr <= kernelbase || 16917c478bd9Sstevel@tonic-gate eaddr == HTABLE_WALK_TO_END); 16927c478bd9Sstevel@tonic-gate if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END) 16937c478bd9Sstevel@tonic-gate eaddr = kernelbase; 16947c478bd9Sstevel@tonic-gate 16957c478bd9Sstevel@tonic-gate /* 16967c478bd9Sstevel@tonic-gate * If we're coming in with a previous page table, search it first 16977c478bd9Sstevel@tonic-gate * without doing an htable_lookup(), this should be frequent. 16987c478bd9Sstevel@tonic-gate */ 16997c478bd9Sstevel@tonic-gate if (prev) { 17007c478bd9Sstevel@tonic-gate ASSERT(prev->ht_busy > 0); 17017c478bd9Sstevel@tonic-gate ASSERT(prev->ht_vaddr <= va); 17027c478bd9Sstevel@tonic-gate l = prev->ht_level; 17037c478bd9Sstevel@tonic-gate if (va <= HTABLE_LAST_PAGE(prev)) { 17047c478bd9Sstevel@tonic-gate pte = htable_scan(prev, &va, eaddr); 17057c478bd9Sstevel@tonic-gate 17067c478bd9Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 17077c478bd9Sstevel@tonic-gate *vaddr = va; 17087c478bd9Sstevel@tonic-gate *htp = prev; 17097c478bd9Sstevel@tonic-gate return (pte); 17107c478bd9Sstevel@tonic-gate } 17117c478bd9Sstevel@tonic-gate } 17127c478bd9Sstevel@tonic-gate 17137c478bd9Sstevel@tonic-gate /* 17147c478bd9Sstevel@tonic-gate * We found nothing in the htable provided by the caller, 17157c478bd9Sstevel@tonic-gate * so fall through and do the full search 17167c478bd9Sstevel@tonic-gate */ 17177c478bd9Sstevel@tonic-gate htable_release(prev); 17187c478bd9Sstevel@tonic-gate } 17197c478bd9Sstevel@tonic-gate 17207c478bd9Sstevel@tonic-gate /* 17217c478bd9Sstevel@tonic-gate * Find the level of the largest pagesize used by this HAT. 17227c478bd9Sstevel@tonic-gate */ 17237173d045Sjosephb if (hat->hat_ism_pgcnt > 0) { 172402bc52beSkchow max_mapped_level = mmu.umax_page_level; 17257173d045Sjosephb } else { 17267c478bd9Sstevel@tonic-gate max_mapped_level = 0; 17277c478bd9Sstevel@tonic-gate for (l = 1; l <= mmu.max_page_level; ++l) 17287c478bd9Sstevel@tonic-gate if (hat->hat_pages_mapped[l] != 0) 17297c478bd9Sstevel@tonic-gate max_mapped_level = l; 17307173d045Sjosephb } 17317c478bd9Sstevel@tonic-gate 17327c478bd9Sstevel@tonic-gate while (va < eaddr && va >= *vaddr) { 17337c478bd9Sstevel@tonic-gate /* 17347c478bd9Sstevel@tonic-gate * Find lowest table with any entry for given address. 17357c478bd9Sstevel@tonic-gate */ 17367c478bd9Sstevel@tonic-gate for (l = 0; l <= TOP_LEVEL(hat); ++l) { 17377c478bd9Sstevel@tonic-gate ht = htable_lookup(hat, va, l); 17387c478bd9Sstevel@tonic-gate if (ht != NULL) { 17397c478bd9Sstevel@tonic-gate pte = htable_scan(ht, &va, eaddr); 17407c478bd9Sstevel@tonic-gate if (PTE_ISPAGE(pte, l)) { 17418c1d5be3SJoshua M. Clulow VERIFY(!IN_VA_HOLE(va)); 17427c478bd9Sstevel@tonic-gate *vaddr = va; 17437c478bd9Sstevel@tonic-gate *htp = ht; 17447c478bd9Sstevel@tonic-gate return (pte); 17457c478bd9Sstevel@tonic-gate } 17467c478bd9Sstevel@tonic-gate htable_release(ht); 17477c478bd9Sstevel@tonic-gate break; 17487c478bd9Sstevel@tonic-gate } 17497c478bd9Sstevel@tonic-gate 17507c478bd9Sstevel@tonic-gate /* 17517173d045Sjosephb * No htable at this level for the address. If there 17527173d045Sjosephb * is no larger page size that could cover it, we can 17537173d045Sjosephb * skip right to the start of the next page table. 
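		 * For example, if no large pages are in use
		 * (max_mapped_level == 0) and no level 0 htable covers va,
		 * the walk advances va to NEXT_ENTRY_VA(va, 1), skipping
		 * the whole 2MB (non-PAE: 4MB) range a level 1 entry maps
		 * instead of probing it page by page.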
17548b5842f9Sdm120769 */ 17558b5842f9Sdm120769 ASSERT(l < TOP_LEVEL(hat)); 17568b5842f9Sdm120769 if (l >= max_mapped_level) { 17577c478bd9Sstevel@tonic-gate va = NEXT_ENTRY_VA(va, l + 1); 17587173d045Sjosephb if (va >= eaddr) 17598b5842f9Sdm120769 break; 17608b5842f9Sdm120769 } 17617c478bd9Sstevel@tonic-gate } 17627c478bd9Sstevel@tonic-gate } 17637c478bd9Sstevel@tonic-gate 17647c478bd9Sstevel@tonic-gate *vaddr = 0; 17657c478bd9Sstevel@tonic-gate *htp = NULL; 17667c478bd9Sstevel@tonic-gate return (0); 17677c478bd9Sstevel@tonic-gate } 17687c478bd9Sstevel@tonic-gate 17697c478bd9Sstevel@tonic-gate /* 17707c478bd9Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address 17717c478bd9Sstevel@tonic-gate * with pagesize at or below given level. 17727c478bd9Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 17737c478bd9Sstevel@tonic-gate * entry, and has a hold on the htable. 17747c478bd9Sstevel@tonic-gate */ 17757c478bd9Sstevel@tonic-gate htable_t * 17767c478bd9Sstevel@tonic-gate htable_getpte( 17777c478bd9Sstevel@tonic-gate struct hat *hat, 17787c478bd9Sstevel@tonic-gate uintptr_t vaddr, 17797c478bd9Sstevel@tonic-gate uint_t *entry, 17807c478bd9Sstevel@tonic-gate x86pte_t *pte, 17817c478bd9Sstevel@tonic-gate level_t level) 17827c478bd9Sstevel@tonic-gate { 17837c478bd9Sstevel@tonic-gate htable_t *ht; 17847c478bd9Sstevel@tonic-gate level_t l; 17857c478bd9Sstevel@tonic-gate uint_t e; 17867c478bd9Sstevel@tonic-gate 17877c478bd9Sstevel@tonic-gate ASSERT(level <= mmu.max_page_level); 17887c478bd9Sstevel@tonic-gate 17897c478bd9Sstevel@tonic-gate for (l = 0; l <= level; ++l) { 17907c478bd9Sstevel@tonic-gate ht = htable_lookup(hat, vaddr, l); 17917c478bd9Sstevel@tonic-gate if (ht == NULL) 17927c478bd9Sstevel@tonic-gate continue; 17937c478bd9Sstevel@tonic-gate e = htable_va2entry(vaddr, ht); 17947c478bd9Sstevel@tonic-gate if (entry != NULL) 17957c478bd9Sstevel@tonic-gate *entry = e; 17967c478bd9Sstevel@tonic-gate if (pte != NULL) 17977c478bd9Sstevel@tonic-gate *pte = x86pte_get(ht, e); 17987c478bd9Sstevel@tonic-gate return (ht); 17997c478bd9Sstevel@tonic-gate } 18007c478bd9Sstevel@tonic-gate return (NULL); 18017c478bd9Sstevel@tonic-gate } 18027c478bd9Sstevel@tonic-gate 18037c478bd9Sstevel@tonic-gate /* 18047c478bd9Sstevel@tonic-gate * Find the htable and page table entry index of the given virtual address. 18057c478bd9Sstevel@tonic-gate * There must be a valid page mapped at the given address. 18067c478bd9Sstevel@tonic-gate * If not found returns NULL. When found, returns the htable, sets 18077c478bd9Sstevel@tonic-gate * entry, and has a hold on the htable. 
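 *
 * A typical caller looks roughly like this (illustrative only):
 *
 *	uint_t entry;
 *	htable_t *ht = htable_getpage(hat, va, &entry);
 *
 *	if (ht != NULL) {
 *		x86pte_t pte = x86pte_get(ht, entry);
 *		... examine pte ...
 *		htable_release(ht);
 *	}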
18087c478bd9Sstevel@tonic-gate */ 18097c478bd9Sstevel@tonic-gate htable_t * 18107c478bd9Sstevel@tonic-gate htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry) 18117c478bd9Sstevel@tonic-gate { 18127c478bd9Sstevel@tonic-gate htable_t *ht; 18137c478bd9Sstevel@tonic-gate uint_t e; 18147c478bd9Sstevel@tonic-gate x86pte_t pte; 18157c478bd9Sstevel@tonic-gate 18167c478bd9Sstevel@tonic-gate ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level); 18177c478bd9Sstevel@tonic-gate if (ht == NULL) 18187c478bd9Sstevel@tonic-gate return (NULL); 18197c478bd9Sstevel@tonic-gate 18207c478bd9Sstevel@tonic-gate if (entry) 18217c478bd9Sstevel@tonic-gate *entry = e; 18227c478bd9Sstevel@tonic-gate 18237c478bd9Sstevel@tonic-gate if (PTE_ISPAGE(pte, ht->ht_level)) 18247c478bd9Sstevel@tonic-gate return (ht); 18257c478bd9Sstevel@tonic-gate htable_release(ht); 18267c478bd9Sstevel@tonic-gate return (NULL); 18277c478bd9Sstevel@tonic-gate } 18287c478bd9Sstevel@tonic-gate 18297c478bd9Sstevel@tonic-gate 18307c478bd9Sstevel@tonic-gate void 18317c478bd9Sstevel@tonic-gate htable_init() 18327c478bd9Sstevel@tonic-gate { 18337c478bd9Sstevel@tonic-gate /* 18347c478bd9Sstevel@tonic-gate * To save on kernel VA usage, we avoid debug information in 32 bit 18357c478bd9Sstevel@tonic-gate * kernels. 18367c478bd9Sstevel@tonic-gate */ 18377c478bd9Sstevel@tonic-gate #if defined(__amd64) 18387c478bd9Sstevel@tonic-gate int kmem_flags = KMC_NOHASH; 18397c478bd9Sstevel@tonic-gate #elif defined(__i386) 18407c478bd9Sstevel@tonic-gate int kmem_flags = KMC_NOHASH | KMC_NODEBUG; 18417c478bd9Sstevel@tonic-gate #endif 18427c478bd9Sstevel@tonic-gate 18437c478bd9Sstevel@tonic-gate /* 18447c478bd9Sstevel@tonic-gate * initialize kmem caches 18457c478bd9Sstevel@tonic-gate */ 18467c478bd9Sstevel@tonic-gate htable_cache = kmem_cache_create("htable_t", 18477c478bd9Sstevel@tonic-gate sizeof (htable_t), 0, NULL, NULL, 18487c478bd9Sstevel@tonic-gate htable_reap, NULL, hat_memload_arena, kmem_flags); 18497c478bd9Sstevel@tonic-gate } 18507c478bd9Sstevel@tonic-gate 18517c478bd9Sstevel@tonic-gate /* 18527c478bd9Sstevel@tonic-gate * get the pte index for the virtual address in the given htable's pagetable 18537c478bd9Sstevel@tonic-gate */ 18547c478bd9Sstevel@tonic-gate uint_t 18557c478bd9Sstevel@tonic-gate htable_va2entry(uintptr_t va, htable_t *ht) 18567c478bd9Sstevel@tonic-gate { 18577c478bd9Sstevel@tonic-gate level_t l = ht->ht_level; 18587c478bd9Sstevel@tonic-gate 18597c478bd9Sstevel@tonic-gate ASSERT(va >= ht->ht_vaddr); 18607c478bd9Sstevel@tonic-gate ASSERT(va <= HTABLE_LAST_PAGE(ht)); 1861ae115bc7Smrj return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1)); 18627c478bd9Sstevel@tonic-gate } 18637c478bd9Sstevel@tonic-gate 18647c478bd9Sstevel@tonic-gate /* 18657c478bd9Sstevel@tonic-gate * Given an htable and the index of a pte in it, return the virtual address 18667c478bd9Sstevel@tonic-gate * of the page. 
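 * For example, at level 0 (LEVEL_SHIFT(0) == MMU_PAGESHIFT == 12),
 * entry 5 of an htable with ht_vaddr 0x10000000 yields
 * 0x10000000 + (5 << 12) == 0x10005000. htable_va2entry() above is the
 * inverse of this computation.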
18677c478bd9Sstevel@tonic-gate  */
18687c478bd9Sstevel@tonic-gate uintptr_t
18697c478bd9Sstevel@tonic-gate htable_e2va(htable_t *ht, uint_t entry)
18707c478bd9Sstevel@tonic-gate {
18717c478bd9Sstevel@tonic-gate 	level_t l = ht->ht_level;
18727c478bd9Sstevel@tonic-gate 	uintptr_t va;
18737c478bd9Sstevel@tonic-gate 
1874ae115bc7Smrj 	ASSERT(entry < HTABLE_NUM_PTES(ht));
18757c478bd9Sstevel@tonic-gate 	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));
18767c478bd9Sstevel@tonic-gate 
18777c478bd9Sstevel@tonic-gate 	/*
18787c478bd9Sstevel@tonic-gate 	 * Need to skip over any VA hole in top level table
18797c478bd9Sstevel@tonic-gate 	 */
18807c478bd9Sstevel@tonic-gate #if defined(__amd64)
18817c478bd9Sstevel@tonic-gate 	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
18827c478bd9Sstevel@tonic-gate 		va += ((mmu.hole_end - mmu.hole_start) + 1);
18837c478bd9Sstevel@tonic-gate #endif
18847c478bd9Sstevel@tonic-gate 
18857c478bd9Sstevel@tonic-gate 	return (va);
18867c478bd9Sstevel@tonic-gate }
18877c478bd9Sstevel@tonic-gate 
18887c478bd9Sstevel@tonic-gate /*
18897c478bd9Sstevel@tonic-gate  * The code uses compare and swap instructions to read/write PTE's to
18907c478bd9Sstevel@tonic-gate  * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems;
18917c478bd9Sstevel@tonic-gate  * on 64 bit kernels an ordinary aligned PTE access will naturally be atomic.
18927c478bd9Sstevel@tonic-gate  *
18937c478bd9Sstevel@tonic-gate  * The combination of using kpreempt_disable()/_enable() and the hci_mutex
18947c478bd9Sstevel@tonic-gate  * are used to ensure that an interrupt won't overwrite a temporary mapping
18957c478bd9Sstevel@tonic-gate  * while it's in use. If an interrupt thread tries to access a PTE, it will
18967c478bd9Sstevel@tonic-gate  * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
18977c478bd9Sstevel@tonic-gate  */
18987c478bd9Sstevel@tonic-gate void
1899ae115bc7Smrj x86pte_cpu_init(cpu_t *cpu)
19007c478bd9Sstevel@tonic-gate {
19017c478bd9Sstevel@tonic-gate 	struct hat_cpu_info *hci;
19027c478bd9Sstevel@tonic-gate 
1903ae115bc7Smrj 	hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
19047c478bd9Sstevel@tonic-gate 	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
19057c478bd9Sstevel@tonic-gate 	cpu->cpu_hat_info = hci;
19067c478bd9Sstevel@tonic-gate }
19077c478bd9Sstevel@tonic-gate 
1908ae115bc7Smrj void
1909ae115bc7Smrj x86pte_cpu_fini(cpu_t *cpu)
1910ae115bc7Smrj {
1911ae115bc7Smrj 	struct hat_cpu_info *hci = cpu->cpu_hat_info;
1912ae115bc7Smrj 
1913ae115bc7Smrj 	kmem_free(hci, sizeof (*hci));
1914ae115bc7Smrj 	cpu->cpu_hat_info = NULL;
19157c478bd9Sstevel@tonic-gate }
19167c478bd9Sstevel@tonic-gate 
1917ae115bc7Smrj #ifdef __i386
1918ae115bc7Smrj /*
1919ae115bc7Smrj  * On 32 bit kernels, loading a 64 bit PTE is a little tricky
1920ae115bc7Smrj  */
1921ae115bc7Smrj x86pte_t
1922ae115bc7Smrj get_pte64(x86pte_t *ptr)
1923ae115bc7Smrj {
1924ae115bc7Smrj 	volatile uint32_t *p = (uint32_t *)ptr;
1925ae115bc7Smrj 	x86pte_t t;
1926ae115bc7Smrj 
1927ae115bc7Smrj 	ASSERT(mmu.pae_hat != 0);
1928ae115bc7Smrj 	for (;;) {
1929ae115bc7Smrj 		t = p[0];
1930ae115bc7Smrj 		t |= (uint64_t)p[1] << 32;
1931ae115bc7Smrj 		if ((t & 0xffffffff) == p[0])
1932ae115bc7Smrj 			return (t);
1933ae115bc7Smrj 	}
1934ae115bc7Smrj }
1935ae115bc7Smrj #endif /* __i386 */
1936ae115bc7Smrj 
19377c478bd9Sstevel@tonic-gate /*
19387c478bd9Sstevel@tonic-gate  * Disable preemption and establish a mapping to the pagetable with the
19397c478bd9Sstevel@tonic-gate  * given pfn. This is optimized for the case where it's the same
19407c478bd9Sstevel@tonic-gate  * pfn as we last referenced from this CPU.
19417c478bd9Sstevel@tonic-gate  */
19427c478bd9Sstevel@tonic-gate static x86pte_t *
1943ae115bc7Smrj x86pte_access_pagetable(htable_t *ht, uint_t index)
19447c478bd9Sstevel@tonic-gate {
19457c478bd9Sstevel@tonic-gate 	/*
19467c478bd9Sstevel@tonic-gate 	 * VLP pagetables are contained in the hat_t
19477c478bd9Sstevel@tonic-gate 	 */
19487c478bd9Sstevel@tonic-gate 	if (ht->ht_flags & HTABLE_VLP)
1949ae115bc7Smrj 		return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
1950ae115bc7Smrj 	return (x86pte_mapin(ht->ht_pfn, index, ht));
1951ae115bc7Smrj }
19527c478bd9Sstevel@tonic-gate 
19537c478bd9Sstevel@tonic-gate /*
1954ae115bc7Smrj  * map the given pfn into the page table window.
19557c478bd9Sstevel@tonic-gate  */
1956ae115bc7Smrj /*ARGSUSED*/
1957ae115bc7Smrj x86pte_t *
1958ae115bc7Smrj x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
1959ae115bc7Smrj {
1960ae115bc7Smrj 	x86pte_t *pteptr;
19618ea72728Sjosephb 	x86pte_t pte = 0;
1962ae115bc7Smrj 	x86pte_t newpte;
1963ae115bc7Smrj 	int x;
1964ae115bc7Smrj 
19657c478bd9Sstevel@tonic-gate 	ASSERT(pfn != PFN_INVALID);
19667c478bd9Sstevel@tonic-gate 
19677c478bd9Sstevel@tonic-gate 	if (!khat_running) {
1968ae115bc7Smrj 		caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1);
1969ae115bc7Smrj 		return (PT_INDEX_PTR(va, index));
19707c478bd9Sstevel@tonic-gate 	}
19717c478bd9Sstevel@tonic-gate 
19727c478bd9Sstevel@tonic-gate 	/*
1973ae115bc7Smrj 	 * If kpm is available, use it.
1974ae115bc7Smrj 	 */
1975ae115bc7Smrj 	if (kpm_vbase)
1976ae115bc7Smrj 		return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index));
1977ae115bc7Smrj 
1978ae115bc7Smrj 	/*
1979ae115bc7Smrj 	 * Disable preemption and grab the CPU's hci_mutex
19807c478bd9Sstevel@tonic-gate 	 */
19817c478bd9Sstevel@tonic-gate 	kpreempt_disable();
1982ae115bc7Smrj 	ASSERT(CPU->cpu_hat_info != NULL);
1983ae115bc7Smrj 	mutex_enter(&CPU->cpu_hat_info->hci_mutex);
1984ae115bc7Smrj 	x = PWIN_TABLE(CPU->cpu_id);
1985ae115bc7Smrj 	pteptr = (x86pte_t *)PWIN_PTE_VA(x);
19868ea72728Sjosephb #ifndef __xpv
1987ae115bc7Smrj 	if (mmu.pae_hat)
1988ae115bc7Smrj 		pte = *pteptr;
1989ae115bc7Smrj 	else
1990ae115bc7Smrj 		pte = *(x86pte32_t *)pteptr;
19918ea72728Sjosephb #endif
1992ae115bc7Smrj 
1993ae115bc7Smrj 	newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx;
1994843e1988Sjohnlev 
1995843e1988Sjohnlev 	/*
1996843e1988Sjohnlev 	 * For hardware we can use a writable mapping.
1997843e1988Sjohnlev 	 */
1998843e1988Sjohnlev #ifdef __xpv
1999843e1988Sjohnlev 	if (IN_XPV_PANIC())
2000843e1988Sjohnlev #endif
2001ae115bc7Smrj 		newpte |= PT_WRITABLE;
2002ae115bc7Smrj 
2003ae115bc7Smrj 	if (!PTE_EQUIV(newpte, pte)) {
2004843e1988Sjohnlev 
2005843e1988Sjohnlev #ifdef __xpv
2006843e1988Sjohnlev 		if (!IN_XPV_PANIC()) {
2007843e1988Sjohnlev 			xen_map(newpte, PWIN_VA(x));
2008843e1988Sjohnlev 		} else
2009843e1988Sjohnlev #endif
2010843e1988Sjohnlev 		{
2011843e1988Sjohnlev 			XPV_ALLOW_PAGETABLE_UPDATES();
2012ae115bc7Smrj 			if (mmu.pae_hat)
2013ae115bc7Smrj 				*pteptr = newpte;
2014ae115bc7Smrj 			else
2015ae115bc7Smrj 				*(x86pte32_t *)pteptr = newpte;
2016843e1988Sjohnlev 			XPV_DISALLOW_PAGETABLE_UPDATES();
2017ae115bc7Smrj 			mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
20187c478bd9Sstevel@tonic-gate 		}
2019843e1988Sjohnlev 	}
2020ae115bc7Smrj 	return (PT_INDEX_PTR(PWIN_VA(x), index));
20217c478bd9Sstevel@tonic-gate }
20227c478bd9Sstevel@tonic-gate 
20237c478bd9Sstevel@tonic-gate /*
20247c478bd9Sstevel@tonic-gate  * Release access to a page table.
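 * Must be paired with a prior x86pte_access_pagetable(); for tables
 * mapped through the per-CPU window this drops the hci_mutex and
 * restores preemption via x86pte_mapout().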
20257c478bd9Sstevel@tonic-gate */ 20267c478bd9Sstevel@tonic-gate static void 20277c478bd9Sstevel@tonic-gate x86pte_release_pagetable(htable_t *ht) 20287c478bd9Sstevel@tonic-gate { 20297c478bd9Sstevel@tonic-gate /* 20307c478bd9Sstevel@tonic-gate * nothing to do for VLP htables 20317c478bd9Sstevel@tonic-gate */ 20327c478bd9Sstevel@tonic-gate if (ht->ht_flags & HTABLE_VLP) 20337c478bd9Sstevel@tonic-gate return; 20347c478bd9Sstevel@tonic-gate 2035ae115bc7Smrj x86pte_mapout(); 20367c478bd9Sstevel@tonic-gate } 20377c478bd9Sstevel@tonic-gate 2038ae115bc7Smrj void 2039ae115bc7Smrj x86pte_mapout(void) 2040ae115bc7Smrj { 2041843e1988Sjohnlev if (kpm_vbase != NULL || !khat_running) 2042ae115bc7Smrj return; 2043ae115bc7Smrj 20447c478bd9Sstevel@tonic-gate /* 2045ae115bc7Smrj * Drop the CPU's hci_mutex and restore preemption. 20467c478bd9Sstevel@tonic-gate */ 20478ea72728Sjosephb #ifdef __xpv 20488ea72728Sjosephb if (!IN_XPV_PANIC()) { 20498ea72728Sjosephb uintptr_t va; 20508ea72728Sjosephb 20518ea72728Sjosephb /* 20528ea72728Sjosephb * We need to always clear the mapping in case a page 20538ea72728Sjosephb * that was once a page table page is ballooned out. 20548ea72728Sjosephb */ 20558ea72728Sjosephb va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id)); 20568ea72728Sjosephb (void) HYPERVISOR_update_va_mapping(va, 0, 20578ea72728Sjosephb UVMF_INVLPG | UVMF_LOCAL); 20588ea72728Sjosephb } 20598ea72728Sjosephb #endif 2060ae115bc7Smrj mutex_exit(&CPU->cpu_hat_info->hci_mutex); 20617c478bd9Sstevel@tonic-gate kpreempt_enable(); 20627c478bd9Sstevel@tonic-gate } 20637c478bd9Sstevel@tonic-gate 20647c478bd9Sstevel@tonic-gate /* 20657c478bd9Sstevel@tonic-gate * Atomic retrieval of a pagetable entry 20667c478bd9Sstevel@tonic-gate */ 20677c478bd9Sstevel@tonic-gate x86pte_t 20687c478bd9Sstevel@tonic-gate x86pte_get(htable_t *ht, uint_t entry) 20697c478bd9Sstevel@tonic-gate { 20707c478bd9Sstevel@tonic-gate x86pte_t pte; 2071aa2ed9e5Sjosephb x86pte_t *ptep; 20727c478bd9Sstevel@tonic-gate 20737c478bd9Sstevel@tonic-gate /* 2074aa2ed9e5Sjosephb * Be careful that loading PAE entries in 32 bit kernel is atomic. 20757c478bd9Sstevel@tonic-gate */ 2076ae115bc7Smrj ASSERT(entry < mmu.ptes_per_table); 2077ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2078ae115bc7Smrj pte = GET_PTE(ptep); 20797c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 20807c478bd9Sstevel@tonic-gate return (pte); 20817c478bd9Sstevel@tonic-gate } 20827c478bd9Sstevel@tonic-gate 20837c478bd9Sstevel@tonic-gate /* 20847c478bd9Sstevel@tonic-gate * Atomic unconditional set of a page table entry, it returns the previous 2085ae115bc7Smrj * value. For pre-existing mappings if the PFN changes, then we don't care 2086ae115bc7Smrj * about the old pte's REF / MOD bits. If the PFN remains the same, we leave 2087ae115bc7Smrj * the MOD/REF bits unchanged. 2088ae115bc7Smrj * 2089ae115bc7Smrj * If asked to overwrite a link to a lower page table with a large page 2090ae115bc7Smrj * mapping, this routine returns the special value of LPAGE_ERROR. This 2091ae115bc7Smrj * allows the upper HAT layers to retry with a smaller mapping size. 20927c478bd9Sstevel@tonic-gate */ 20937c478bd9Sstevel@tonic-gate x86pte_t 20947c478bd9Sstevel@tonic-gate x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr) 20957c478bd9Sstevel@tonic-gate { 20967c478bd9Sstevel@tonic-gate x86pte_t old; 2097ae115bc7Smrj x86pte_t prev; 20987c478bd9Sstevel@tonic-gate x86pte_t *ptep; 2099ae115bc7Smrj level_t l = ht->ht_level; 2100ae115bc7Smrj x86pte_t pfn_mask = (l != 0) ? 
PT_PADDR_LGPG : PT_PADDR; 2101ae115bc7Smrj x86pte_t n; 2102ae115bc7Smrj uintptr_t addr = htable_e2va(ht, entry); 2103ae115bc7Smrj hat_t *hat = ht->ht_hat; 21047c478bd9Sstevel@tonic-gate 2105ae115bc7Smrj ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */ 21067c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 2107ae115bc7Smrj if (ptr == NULL) 2108ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2109ae115bc7Smrj else 21107c478bd9Sstevel@tonic-gate ptep = ptr; 21117c478bd9Sstevel@tonic-gate 2112b193e412Skchow /* 2113ae115bc7Smrj * Install the new PTE. If remapping the same PFN, then 2114ae115bc7Smrj * copy existing REF/MOD bits to new mapping. 2115b193e412Skchow */ 2116ae115bc7Smrj do { 2117ae115bc7Smrj prev = GET_PTE(ptep); 2118ae115bc7Smrj n = new; 2119ae115bc7Smrj if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask)) 2120b193e412Skchow n |= prev & (PT_REF | PT_MOD); 2121ae115bc7Smrj 2122ae115bc7Smrj /* 2123ae115bc7Smrj * Another thread may have installed this mapping already, 2124ae115bc7Smrj * flush the local TLB and be done. 2125ae115bc7Smrj */ 2126b193e412Skchow if (prev == n) { 21277c478bd9Sstevel@tonic-gate old = new; 2128843e1988Sjohnlev #ifdef __xpv 2129843e1988Sjohnlev if (!IN_XPV_PANIC()) 2130843e1988Sjohnlev xen_flush_va((caddr_t)addr); 2131843e1988Sjohnlev else 2132843e1988Sjohnlev #endif 2133ae115bc7Smrj mmu_tlbflush_entry((caddr_t)addr); 2134ae115bc7Smrj goto done; 21357c478bd9Sstevel@tonic-gate } 2136ae115bc7Smrj 2137ae115bc7Smrj /* 2138ae115bc7Smrj * Detect if we have a collision of installing a large 2139ae115bc7Smrj * page mapping where there already is a lower page table. 2140ae115bc7Smrj */ 214197704650Sjosephb if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) { 214297704650Sjosephb old = LPAGE_ERROR; 214397704650Sjosephb goto done; 214497704650Sjosephb } 2145ae115bc7Smrj 2146843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2147ae115bc7Smrj old = CAS_PTE(ptep, prev, n); 2148843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2149ae115bc7Smrj } while (old != prev); 2150ae115bc7Smrj 2151ae115bc7Smrj /* 2152ae115bc7Smrj * Do a TLB demap if needed, ie. the old pte was valid. 2153ae115bc7Smrj * 2154ae115bc7Smrj * Note that a stale TLB writeback to the PTE here either can't happen 2155ae115bc7Smrj * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST 2156ae115bc7Smrj * mappings, but they were created with REF and MOD already set, so 2157ae115bc7Smrj * no stale writeback will happen. 2158ae115bc7Smrj * 2159ae115bc7Smrj * Segmap is the only place where remaps happen on the same pfn and for 2160ae115bc7Smrj * that we want to preserve the stale REF/MOD bits. 2161ae115bc7Smrj */ 2162ae115bc7Smrj if (old & PT_REF) 2163ae115bc7Smrj hat_tlb_inval(hat, addr); 2164ae115bc7Smrj 2165ae115bc7Smrj done: 21667c478bd9Sstevel@tonic-gate if (ptr == NULL) 21677c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 21687c478bd9Sstevel@tonic-gate return (old); 21697c478bd9Sstevel@tonic-gate } 21707c478bd9Sstevel@tonic-gate 21717c478bd9Sstevel@tonic-gate /* 2172ae115bc7Smrj * Atomic compare and swap of a page table entry. No TLB invalidates are done. 2173ae115bc7Smrj * This is used for links between pagetables of different levels. 2174ae115bc7Smrj * Note we always create these links with dirty/access set, so they should 2175ae115bc7Smrj * never change. 
21767c478bd9Sstevel@tonic-gate */ 2177ae115bc7Smrj x86pte_t 21787c478bd9Sstevel@tonic-gate x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new) 21797c478bd9Sstevel@tonic-gate { 21807c478bd9Sstevel@tonic-gate x86pte_t pte; 21817c478bd9Sstevel@tonic-gate x86pte_t *ptep; 2182843e1988Sjohnlev #ifdef __xpv 2183843e1988Sjohnlev /* 2184843e1988Sjohnlev * We can't use writable pagetables for upper level tables, so fake it. 2185843e1988Sjohnlev */ 2186843e1988Sjohnlev mmu_update_t t[2]; 2187843e1988Sjohnlev int cnt = 1; 2188843e1988Sjohnlev int count; 2189843e1988Sjohnlev maddr_t ma; 21907c478bd9Sstevel@tonic-gate 2191843e1988Sjohnlev if (!IN_XPV_PANIC()) { 2192843e1988Sjohnlev ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */ 2193843e1988Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry)); 2194843e1988Sjohnlev t[0].ptr = ma | MMU_NORMAL_PT_UPDATE; 2195843e1988Sjohnlev t[0].val = new; 2196843e1988Sjohnlev 2197843e1988Sjohnlev #if defined(__amd64) 2198843e1988Sjohnlev /* 2199843e1988Sjohnlev * On the 64-bit hypervisor we need to maintain the user mode 2200843e1988Sjohnlev * top page table too. 2201843e1988Sjohnlev */ 2202843e1988Sjohnlev if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) { 2203843e1988Sjohnlev ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa( 2204843e1988Sjohnlev ht->ht_hat->hat_user_ptable), entry)); 2205843e1988Sjohnlev t[1].ptr = ma | MMU_NORMAL_PT_UPDATE; 2206843e1988Sjohnlev t[1].val = new; 2207843e1988Sjohnlev ++cnt; 2208843e1988Sjohnlev } 2209843e1988Sjohnlev #endif /* __amd64 */ 2210843e1988Sjohnlev 2211843e1988Sjohnlev if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF)) 2212843e1988Sjohnlev panic("HYPERVISOR_mmu_update() failed"); 2213843e1988Sjohnlev ASSERT(count == cnt); 2214843e1988Sjohnlev return (old); 2215843e1988Sjohnlev } 2216843e1988Sjohnlev #endif 2217ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2218843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2219ae115bc7Smrj pte = CAS_PTE(ptep, old, new); 2220843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 22217c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 22227c478bd9Sstevel@tonic-gate return (pte); 22237c478bd9Sstevel@tonic-gate } 22247c478bd9Sstevel@tonic-gate 22257c478bd9Sstevel@tonic-gate /* 2226ae115bc7Smrj * Invalidate a page table entry as long as it currently maps something that 2227ae115bc7Smrj * matches the value determined by expect. 22287c478bd9Sstevel@tonic-gate * 2229a6a74e0eSMatthew Ahrens * If tlb is set, also invalidates any TLB entries. 2230a6a74e0eSMatthew Ahrens * 2231a6a74e0eSMatthew Ahrens * Returns the previous value of the PTE. 
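 *
 * An expect value of 0 means invalidate unconditionally; otherwise the
 * entry is only cleared while its PFN bits still match those of expect.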
22327c478bd9Sstevel@tonic-gate  */
22337c478bd9Sstevel@tonic-gate x86pte_t
2234ae115bc7Smrj x86pte_inval(
2235ae115bc7Smrj 	htable_t *ht,
2236ae115bc7Smrj 	uint_t entry,
2237ae115bc7Smrj 	x86pte_t expect,
2238a6a74e0eSMatthew Ahrens 	x86pte_t *pte_ptr,
2239a6a74e0eSMatthew Ahrens 	boolean_t tlb)
22407c478bd9Sstevel@tonic-gate {
22417c478bd9Sstevel@tonic-gate 	x86pte_t *ptep;
224295c0a3c8Sjosephb 	x86pte_t oldpte;
224395c0a3c8Sjosephb 	x86pte_t found;
22447c478bd9Sstevel@tonic-gate 
22457c478bd9Sstevel@tonic-gate 	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
224602bc52beSkchow 	ASSERT(ht->ht_level <= mmu.max_page_level);
224797704650Sjosephb 
2248ae115bc7Smrj 	if (pte_ptr != NULL)
22497c478bd9Sstevel@tonic-gate 		ptep = pte_ptr;
2250ae115bc7Smrj 	else
2251ae115bc7Smrj 		ptep = x86pte_access_pagetable(ht, entry);
22527c478bd9Sstevel@tonic-gate 
2253843e1988Sjohnlev #if defined(__xpv)
2254843e1988Sjohnlev 	/*
2255843e1988Sjohnlev 	 * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing
2256843e1988Sjohnlev 	 * with anything else.
2257843e1988Sjohnlev 	 */
2258843e1988Sjohnlev 	if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) {
2259843e1988Sjohnlev 		int count;
2260843e1988Sjohnlev 		mmu_update_t t[1];
2261843e1988Sjohnlev 		maddr_t ma;
2262843e1988Sjohnlev 
2263843e1988Sjohnlev 		oldpte = GET_PTE(ptep);
2264843e1988Sjohnlev 		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
2265843e1988Sjohnlev 			goto done;
2266843e1988Sjohnlev 		ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
2267843e1988Sjohnlev 		t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
2268843e1988Sjohnlev 		t[0].val = 0;
2269843e1988Sjohnlev 		if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF))
2270843e1988Sjohnlev 			panic("HYPERVISOR_mmu_update() failed");
2271843e1988Sjohnlev 		ASSERT(count == 1);
2272843e1988Sjohnlev 		goto done;
2273843e1988Sjohnlev 	}
2274843e1988Sjohnlev #endif /* __xpv */
2275843e1988Sjohnlev 
22767c478bd9Sstevel@tonic-gate 	/*
227797704650Sjosephb 	 * Note that the loop is needed to handle changes due to h/w updating
227897704650Sjosephb 	 * of PT_MOD/PT_REF.
22797c478bd9Sstevel@tonic-gate 	 */
2280ae115bc7Smrj 	do {
228195c0a3c8Sjosephb 		oldpte = GET_PTE(ptep);
228295c0a3c8Sjosephb 		if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
228395c0a3c8Sjosephb 			goto done;
2284843e1988Sjohnlev 		XPV_ALLOW_PAGETABLE_UPDATES();
228595c0a3c8Sjosephb 		found = CAS_PTE(ptep, oldpte, 0);
2286843e1988Sjohnlev 		XPV_DISALLOW_PAGETABLE_UPDATES();
228795c0a3c8Sjosephb 	} while (found != oldpte);
2288a6a74e0eSMatthew Ahrens 	if (tlb && (oldpte & (PT_REF | PT_MOD)))
228995c0a3c8Sjosephb 		hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));
22907c478bd9Sstevel@tonic-gate 
229195c0a3c8Sjosephb done:
22927c478bd9Sstevel@tonic-gate 	if (pte_ptr == NULL)
22937c478bd9Sstevel@tonic-gate 		x86pte_release_pagetable(ht);
229495c0a3c8Sjosephb 	return (oldpte);
22957c478bd9Sstevel@tonic-gate }
22967c478bd9Sstevel@tonic-gate 
22977c478bd9Sstevel@tonic-gate /*
2298ae115bc7Smrj  * Change a page table entry if it currently matches the value in expect.
22997c478bd9Sstevel@tonic-gate */ 23007c478bd9Sstevel@tonic-gate x86pte_t 2301ae115bc7Smrj x86pte_update( 2302ae115bc7Smrj htable_t *ht, 2303ae115bc7Smrj uint_t entry, 2304ae115bc7Smrj x86pte_t expect, 2305ae115bc7Smrj x86pte_t new) 23067c478bd9Sstevel@tonic-gate { 23077c478bd9Sstevel@tonic-gate x86pte_t *ptep; 2308ae115bc7Smrj x86pte_t found; 23097c478bd9Sstevel@tonic-gate 2310ae115bc7Smrj ASSERT(new != 0); 23117c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 231202bc52beSkchow ASSERT(ht->ht_level <= mmu.max_page_level); 2313ae115bc7Smrj 2314ae115bc7Smrj ptep = x86pte_access_pagetable(ht, entry); 2315843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2316ae115bc7Smrj found = CAS_PTE(ptep, expect, new); 2317843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2318ae115bc7Smrj if (found == expect) { 2319ae115bc7Smrj hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry)); 23207c478bd9Sstevel@tonic-gate 23217c478bd9Sstevel@tonic-gate /* 2322ae115bc7Smrj * When removing write permission *and* clearing the 2323ae115bc7Smrj * MOD bit, check if a write happened via a stale 2324ae115bc7Smrj * TLB entry before the TLB shootdown finished. 2325ae115bc7Smrj * 2326ae115bc7Smrj * If it did happen, simply re-enable write permission and 2327ae115bc7Smrj * act like the original CAS failed. 23287c478bd9Sstevel@tonic-gate */ 2329ae115bc7Smrj if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE && 2330ae115bc7Smrj (new & (PT_WRITABLE | PT_MOD)) == 0 && 2331ae115bc7Smrj (GET_PTE(ptep) & PT_MOD) != 0) { 2332ae115bc7Smrj do { 2333ae115bc7Smrj found = GET_PTE(ptep); 2334843e1988Sjohnlev XPV_ALLOW_PAGETABLE_UPDATES(); 2335ae115bc7Smrj found = 2336ae115bc7Smrj CAS_PTE(ptep, found, found | PT_WRITABLE); 2337843e1988Sjohnlev XPV_DISALLOW_PAGETABLE_UPDATES(); 2338ae115bc7Smrj } while ((found & PT_WRITABLE) == 0); 2339ae115bc7Smrj } 2340ae115bc7Smrj } 23417c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 2342ae115bc7Smrj return (found); 23437c478bd9Sstevel@tonic-gate } 23447c478bd9Sstevel@tonic-gate 2345843e1988Sjohnlev #ifndef __xpv 23467c478bd9Sstevel@tonic-gate /* 23477c478bd9Sstevel@tonic-gate * Copy page tables - this is just a little more complicated than the 23487c478bd9Sstevel@tonic-gate * previous routines. Note that it's also not atomic! It also is never 23497c478bd9Sstevel@tonic-gate * used for VLP pagetables. 23507c478bd9Sstevel@tonic-gate */ 23517c478bd9Sstevel@tonic-gate void 23527c478bd9Sstevel@tonic-gate x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 23537c478bd9Sstevel@tonic-gate { 23547c478bd9Sstevel@tonic-gate caddr_t src_va; 23557c478bd9Sstevel@tonic-gate caddr_t dst_va; 23567c478bd9Sstevel@tonic-gate size_t size; 2357ae115bc7Smrj x86pte_t *pteptr; 2358ae115bc7Smrj x86pte_t pte; 23597c478bd9Sstevel@tonic-gate 23607c478bd9Sstevel@tonic-gate ASSERT(khat_running); 23617c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 23627c478bd9Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_VLP)); 23637c478bd9Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN)); 23647c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 23657c478bd9Sstevel@tonic-gate 23667c478bd9Sstevel@tonic-gate /* 2367ae115bc7Smrj * Acquire access to the CPU pagetable windows for the dest and source. 
23687c478bd9Sstevel@tonic-gate */ 2369ae115bc7Smrj dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 2370ae115bc7Smrj if (kpm_vbase) { 2371ae115bc7Smrj src_va = (caddr_t) 2372ae115bc7Smrj PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry); 23737c478bd9Sstevel@tonic-gate } else { 2374ae115bc7Smrj uint_t x = PWIN_SRC(CPU->cpu_id); 23757c478bd9Sstevel@tonic-gate 23767c478bd9Sstevel@tonic-gate /* 23777c478bd9Sstevel@tonic-gate * Finish defining the src pagetable mapping 23787c478bd9Sstevel@tonic-gate */ 2379ae115bc7Smrj src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry); 2380ae115bc7Smrj pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx; 2381ae115bc7Smrj pteptr = (x86pte_t *)PWIN_PTE_VA(x); 2382ae115bc7Smrj if (mmu.pae_hat) 2383ae115bc7Smrj *pteptr = pte; 2384ae115bc7Smrj else 2385ae115bc7Smrj *(x86pte32_t *)pteptr = pte; 2386ae115bc7Smrj mmu_tlbflush_entry((caddr_t)(PWIN_VA(x))); 23877c478bd9Sstevel@tonic-gate } 23887c478bd9Sstevel@tonic-gate 23897c478bd9Sstevel@tonic-gate /* 23907c478bd9Sstevel@tonic-gate * now do the copy 23917c478bd9Sstevel@tonic-gate */ 23927c478bd9Sstevel@tonic-gate size = count << mmu.pte_size_shift; 23937c478bd9Sstevel@tonic-gate bcopy(src_va, dst_va, size); 23947c478bd9Sstevel@tonic-gate 23957c478bd9Sstevel@tonic-gate x86pte_release_pagetable(dest); 23967c478bd9Sstevel@tonic-gate } 23977c478bd9Sstevel@tonic-gate 2398843e1988Sjohnlev #else /* __xpv */ 2399843e1988Sjohnlev 2400843e1988Sjohnlev /* 2401843e1988Sjohnlev * The hypervisor only supports writable pagetables at level 0, so we have 2402843e1988Sjohnlev * to install these 1 by 1 the slow way. 2403843e1988Sjohnlev */ 2404843e1988Sjohnlev void 2405843e1988Sjohnlev x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 2406843e1988Sjohnlev { 2407843e1988Sjohnlev caddr_t src_va; 2408843e1988Sjohnlev x86pte_t pte; 2409843e1988Sjohnlev 2410843e1988Sjohnlev ASSERT(!IN_XPV_PANIC()); 2411843e1988Sjohnlev src_va = (caddr_t)x86pte_access_pagetable(src, entry); 2412843e1988Sjohnlev while (count) { 2413843e1988Sjohnlev if (mmu.pae_hat) 2414843e1988Sjohnlev pte = *(x86pte_t *)src_va; 2415843e1988Sjohnlev else 2416843e1988Sjohnlev pte = *(x86pte32_t *)src_va; 2417843e1988Sjohnlev if (pte != 0) { 2418843e1988Sjohnlev set_pteval(pfn_to_pa(dest->ht_pfn), entry, 2419843e1988Sjohnlev dest->ht_level, pte); 2420843e1988Sjohnlev #ifdef __amd64 2421843e1988Sjohnlev if (dest->ht_level == mmu.max_level && 2422843e1988Sjohnlev htable_e2va(dest, entry) < HYPERVISOR_VIRT_END) 2423843e1988Sjohnlev set_pteval( 2424843e1988Sjohnlev pfn_to_pa(dest->ht_hat->hat_user_ptable), 2425843e1988Sjohnlev entry, dest->ht_level, pte); 2426843e1988Sjohnlev #endif 2427843e1988Sjohnlev } 2428843e1988Sjohnlev --count; 2429843e1988Sjohnlev ++entry; 2430843e1988Sjohnlev src_va += mmu.pte_size; 2431843e1988Sjohnlev } 2432843e1988Sjohnlev x86pte_release_pagetable(src); 2433843e1988Sjohnlev } 2434843e1988Sjohnlev #endif /* __xpv */ 2435843e1988Sjohnlev 24367c478bd9Sstevel@tonic-gate /* 24377c478bd9Sstevel@tonic-gate * Zero page table entries - Note this doesn't use atomic stores! 
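 * The assumption here (not spelled out elsewhere in this file) is that
 * this is only safe because callers zero pagetables that are empty or
 * not currently linked into a live pagetable hierarchy.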
24387c478bd9Sstevel@tonic-gate */ 2439ae115bc7Smrj static void 24407c478bd9Sstevel@tonic-gate x86pte_zero(htable_t *dest, uint_t entry, uint_t count) 24417c478bd9Sstevel@tonic-gate { 24427c478bd9Sstevel@tonic-gate caddr_t dst_va; 24437c478bd9Sstevel@tonic-gate size_t size; 2444843e1988Sjohnlev #ifdef __xpv 2445843e1988Sjohnlev int x; 2446843e1988Sjohnlev x86pte_t newpte; 2447843e1988Sjohnlev #endif 24487c478bd9Sstevel@tonic-gate 24497c478bd9Sstevel@tonic-gate /* 24507c478bd9Sstevel@tonic-gate * Map in the page table to be zeroed. 24517c478bd9Sstevel@tonic-gate */ 24527c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 24537c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 2454ae115bc7Smrj 2455843e1988Sjohnlev /* 2456843e1988Sjohnlev * On the hypervisor we don't use x86pte_access_pagetable() since 2457843e1988Sjohnlev * in this case the page is not pinned yet. 2458843e1988Sjohnlev */ 2459843e1988Sjohnlev #ifdef __xpv 2460843e1988Sjohnlev if (kpm_vbase == NULL) { 2461843e1988Sjohnlev kpreempt_disable(); 2462843e1988Sjohnlev ASSERT(CPU->cpu_hat_info != NULL); 2463843e1988Sjohnlev mutex_enter(&CPU->cpu_hat_info->hci_mutex); 2464843e1988Sjohnlev x = PWIN_TABLE(CPU->cpu_id); 2465843e1988Sjohnlev newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE; 2466843e1988Sjohnlev xen_map(newpte, PWIN_VA(x)); 2467843e1988Sjohnlev dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry); 2468843e1988Sjohnlev } else 2469843e1988Sjohnlev #endif 2470ae115bc7Smrj dst_va = (caddr_t)x86pte_access_pagetable(dest, entry); 2471ae115bc7Smrj 24727c478bd9Sstevel@tonic-gate size = count << mmu.pte_size_shift; 2473ae115bc7Smrj ASSERT(size > BLOCKZEROALIGN); 2474ae115bc7Smrj #ifdef __i386 24757417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_SSE2)) 24767c478bd9Sstevel@tonic-gate bzero(dst_va, size); 2477ae115bc7Smrj else 2478ae115bc7Smrj #endif 2479ae115bc7Smrj block_zero_no_xmm(dst_va, size); 2480ae115bc7Smrj 2481843e1988Sjohnlev #ifdef __xpv 2482843e1988Sjohnlev if (kpm_vbase == NULL) { 2483843e1988Sjohnlev xen_map(0, PWIN_VA(x)); 2484843e1988Sjohnlev mutex_exit(&CPU->cpu_hat_info->hci_mutex); 2485843e1988Sjohnlev kpreempt_enable(); 2486843e1988Sjohnlev } else 2487843e1988Sjohnlev #endif 24887c478bd9Sstevel@tonic-gate x86pte_release_pagetable(dest); 24897c478bd9Sstevel@tonic-gate } 24907c478bd9Sstevel@tonic-gate 24917c478bd9Sstevel@tonic-gate /* 24927c478bd9Sstevel@tonic-gate * Called to ensure that all pagetables are in the system dump 24937c478bd9Sstevel@tonic-gate */ 24947c478bd9Sstevel@tonic-gate void 24957c478bd9Sstevel@tonic-gate hat_dump(void) 24967c478bd9Sstevel@tonic-gate { 24977c478bd9Sstevel@tonic-gate hat_t *hat; 24987c478bd9Sstevel@tonic-gate uint_t h; 24997c478bd9Sstevel@tonic-gate htable_t *ht; 25007c478bd9Sstevel@tonic-gate 25017c478bd9Sstevel@tonic-gate /* 2502a85a6733Sjosephb * Dump all page tables 25037c478bd9Sstevel@tonic-gate */ 2504a85a6733Sjosephb for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) { 25057c478bd9Sstevel@tonic-gate for (h = 0; h < hat->hat_num_hash; ++h) { 25067c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 2507a85a6733Sjosephb if ((ht->ht_flags & HTABLE_VLP) == 0) 25087c478bd9Sstevel@tonic-gate dump_page(ht->ht_pfn); 25097c478bd9Sstevel@tonic-gate } 25107c478bd9Sstevel@tonic-gate } 25117c478bd9Sstevel@tonic-gate } 25127c478bd9Sstevel@tonic-gate } 2513