17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*a85a6733Sjosephb * Common Development and Distribution License (the "License"). 6*a85a6733Sjosephb * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22b4b46911Skchow * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 237c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 277c478bd9Sstevel@tonic-gate 287c478bd9Sstevel@tonic-gate #include <sys/types.h> 297c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 307c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 317c478bd9Sstevel@tonic-gate #include <sys/atomic.h> 327c478bd9Sstevel@tonic-gate #include <sys/bitmap.h> 337c478bd9Sstevel@tonic-gate #include <sys/machparam.h> 347c478bd9Sstevel@tonic-gate #include <sys/machsystm.h> 357c478bd9Sstevel@tonic-gate #include <sys/mman.h> 367c478bd9Sstevel@tonic-gate #include <sys/systm.h> 377c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h> 387c478bd9Sstevel@tonic-gate #include <sys/thread.h> 397c478bd9Sstevel@tonic-gate #include <sys/proc.h> 407c478bd9Sstevel@tonic-gate #include <sys/cpu.h> 417c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 427c478bd9Sstevel@tonic-gate #include <sys/disp.h> 437c478bd9Sstevel@tonic-gate #include <sys/vmem.h> 447c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h> 457c478bd9Sstevel@tonic-gate #include <sys/promif.h> 467c478bd9Sstevel@tonic-gate #include <sys/var.h> 477c478bd9Sstevel@tonic-gate #include <sys/x86_archext.h> 487c478bd9Sstevel@tonic-gate #include <sys/bootconf.h> 497c478bd9Sstevel@tonic-gate #include <sys/dumphdr.h> 507c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h> 517c478bd9Sstevel@tonic-gate #include <vm/seg_kpm.h> 527c478bd9Sstevel@tonic-gate #include <vm/hat.h> 537c478bd9Sstevel@tonic-gate #include <vm/hat_i86.h> 547c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 557c478bd9Sstevel@tonic-gate 567c478bd9Sstevel@tonic-gate kmem_cache_t *htable_cache; 577c478bd9Sstevel@tonic-gate extern cpuset_t khat_cpuset; 587c478bd9Sstevel@tonic-gate 597c478bd9Sstevel@tonic-gate /* 607c478bd9Sstevel@tonic-gate * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT, 617c478bd9Sstevel@tonic-gate * is used in order to facilitate testing of the htable_steal() code. 
627c478bd9Sstevel@tonic-gate * By resetting htable_reserve_amount to a lower value, we can force 637c478bd9Sstevel@tonic-gate * stealing to occur. The reserve amount is a guess to get us through boot. 647c478bd9Sstevel@tonic-gate */ 657c478bd9Sstevel@tonic-gate #define HTABLE_RESERVE_AMOUNT (200) 667c478bd9Sstevel@tonic-gate uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT; 677c478bd9Sstevel@tonic-gate kmutex_t htable_reserve_mutex; 687c478bd9Sstevel@tonic-gate uint_t htable_reserve_cnt; 697c478bd9Sstevel@tonic-gate htable_t *htable_reserve_pool; 707c478bd9Sstevel@tonic-gate 717c478bd9Sstevel@tonic-gate /* 72*a85a6733Sjosephb * Used to hand test htable_steal(). 737c478bd9Sstevel@tonic-gate */ 74*a85a6733Sjosephb #ifdef DEBUG 75*a85a6733Sjosephb ulong_t force_steal = 0; 76*a85a6733Sjosephb ulong_t ptable_cnt = 0; 77*a85a6733Sjosephb #endif 78*a85a6733Sjosephb 79*a85a6733Sjosephb /* 80*a85a6733Sjosephb * This variable is so that we can tune this via /etc/system 81*a85a6733Sjosephb * Any value works, but a power of two <= mmu.ptes_per_table is best. 
82*a85a6733Sjosephb */ 83*a85a6733Sjosephb uint_t htable_steal_passes = 8; 847c478bd9Sstevel@tonic-gate 857c478bd9Sstevel@tonic-gate /* 867c478bd9Sstevel@tonic-gate * mutex stuff for access to htable hash 877c478bd9Sstevel@tonic-gate */ 887c478bd9Sstevel@tonic-gate #define NUM_HTABLE_MUTEX 128 897c478bd9Sstevel@tonic-gate kmutex_t htable_mutex[NUM_HTABLE_MUTEX]; 907c478bd9Sstevel@tonic-gate #define HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1)) 917c478bd9Sstevel@tonic-gate 927c478bd9Sstevel@tonic-gate #define HTABLE_ENTER(h) mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 937c478bd9Sstevel@tonic-gate #define HTABLE_EXIT(h) mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]); 947c478bd9Sstevel@tonic-gate 957c478bd9Sstevel@tonic-gate /* 967c478bd9Sstevel@tonic-gate * forward declarations 977c478bd9Sstevel@tonic-gate */ 987c478bd9Sstevel@tonic-gate static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr); 997c478bd9Sstevel@tonic-gate static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr); 1007c478bd9Sstevel@tonic-gate static void htable_free(htable_t *ht); 1017c478bd9Sstevel@tonic-gate static x86pte_t *x86pte_access_pagetable(htable_t *ht); 1027c478bd9Sstevel@tonic-gate static void x86pte_release_pagetable(htable_t *ht); 1037c478bd9Sstevel@tonic-gate static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, 1047c478bd9Sstevel@tonic-gate x86pte_t new); 1057c478bd9Sstevel@tonic-gate 1067c478bd9Sstevel@tonic-gate /* 1077c478bd9Sstevel@tonic-gate * Address used for kernel page tables. See ptable_alloc() below. 1087c478bd9Sstevel@tonic-gate */ 1097c478bd9Sstevel@tonic-gate uintptr_t ptable_va = 0; 1107c478bd9Sstevel@tonic-gate size_t ptable_sz = 2 * MMU_PAGESIZE; 1117c478bd9Sstevel@tonic-gate 1127c478bd9Sstevel@tonic-gate /* 1137c478bd9Sstevel@tonic-gate * A counter to track if we are stealing or reaping htables. 
When non-zero 1147c478bd9Sstevel@tonic-gate * htable_free() will directly free htables (either to the reserve or kmem) 1157c478bd9Sstevel@tonic-gate * instead of putting them in a hat's htable cache. 1167c478bd9Sstevel@tonic-gate */ 1177c478bd9Sstevel@tonic-gate uint32_t htable_dont_cache = 0; 1187c478bd9Sstevel@tonic-gate 1197c478bd9Sstevel@tonic-gate /* 1207c478bd9Sstevel@tonic-gate * Track the number of active pagetables, so we can know how many to reap 1217c478bd9Sstevel@tonic-gate */ 1227c478bd9Sstevel@tonic-gate static uint32_t active_ptables = 0; 1237c478bd9Sstevel@tonic-gate 1247c478bd9Sstevel@tonic-gate /* 1257c478bd9Sstevel@tonic-gate * Allocate a memory page for a hardware page table. 1267c478bd9Sstevel@tonic-gate * 1277c478bd9Sstevel@tonic-gate * The pages allocated for page tables are currently gotten in a hacked up 1287c478bd9Sstevel@tonic-gate * way. It works for now, but really needs to be fixed up a bit. 1297c478bd9Sstevel@tonic-gate * 1307c478bd9Sstevel@tonic-gate * During boot: The boot loader controls physical memory allocation via 1317c478bd9Sstevel@tonic-gate * boot_alloc(). To avoid conflict with vmem, we just do boot_alloc()s with 1327c478bd9Sstevel@tonic-gate * addresses less than kernelbase. These addresses are ignored when we take 1337c478bd9Sstevel@tonic-gate * over mappings from the boot loader. 1347c478bd9Sstevel@tonic-gate * 1357c478bd9Sstevel@tonic-gate * Post-boot: we currently use page_create_va() on the kvp with fake offsets, 1367c478bd9Sstevel@tonic-gate * segments and virt address. This is pretty bogus, but was copied from the 1377c478bd9Sstevel@tonic-gate * old hat_i86.c code. A better approach would be to have a custom 1387c478bd9Sstevel@tonic-gate * page_get_physical() interface that can specify either mnode random or 1397c478bd9Sstevel@tonic-gate * mnode local and takes a page from whatever color has the MOST available - 1407c478bd9Sstevel@tonic-gate * this would have a minimal impact on page coloring. 
1417c478bd9Sstevel@tonic-gate * 1427c478bd9Sstevel@tonic-gate * For now the htable pointer in ht is only used to compute a unique vnode 1437c478bd9Sstevel@tonic-gate * offset for the page. 1447c478bd9Sstevel@tonic-gate */ 1457c478bd9Sstevel@tonic-gate static void 1467c478bd9Sstevel@tonic-gate ptable_alloc(htable_t *ht) 1477c478bd9Sstevel@tonic-gate { 1487c478bd9Sstevel@tonic-gate pfn_t pfn; 1497c478bd9Sstevel@tonic-gate page_t *pp; 1507c478bd9Sstevel@tonic-gate u_offset_t offset; 1517c478bd9Sstevel@tonic-gate static struct seg tmpseg; 1527c478bd9Sstevel@tonic-gate static int first_time = 1; 1537c478bd9Sstevel@tonic-gate 1547c478bd9Sstevel@tonic-gate /* 1557c478bd9Sstevel@tonic-gate * Allocating the associated hardware page table is very different 1567c478bd9Sstevel@tonic-gate * before boot has finished. We get a physical page to from boot 1577c478bd9Sstevel@tonic-gate * w/o eating up any kernel address space. 1587c478bd9Sstevel@tonic-gate */ 1597c478bd9Sstevel@tonic-gate ht->ht_pfn = PFN_INVALID; 1607c478bd9Sstevel@tonic-gate atomic_add_32(&active_ptables, 1); 1617c478bd9Sstevel@tonic-gate 1627c478bd9Sstevel@tonic-gate if (use_boot_reserve) { 1637c478bd9Sstevel@tonic-gate ASSERT(ptable_va != 0); 1647c478bd9Sstevel@tonic-gate 1657c478bd9Sstevel@tonic-gate /* 1667c478bd9Sstevel@tonic-gate * Allocate, then demap the ptable_va, so that we're 1677c478bd9Sstevel@tonic-gate * sure there exist page table entries for the addresses 1687c478bd9Sstevel@tonic-gate */ 1697c478bd9Sstevel@tonic-gate if (first_time) { 1707c478bd9Sstevel@tonic-gate first_time = 0; 1717c478bd9Sstevel@tonic-gate if ((uintptr_t)BOP_ALLOC(bootops, (caddr_t)ptable_va, 1727c478bd9Sstevel@tonic-gate ptable_sz, BO_NO_ALIGN) != ptable_va) 1737c478bd9Sstevel@tonic-gate panic("BOP_ALLOC failed"); 1747c478bd9Sstevel@tonic-gate 1757c478bd9Sstevel@tonic-gate hat_boot_demap(ptable_va); 1767c478bd9Sstevel@tonic-gate hat_boot_demap(ptable_va + MMU_PAGESIZE); 1777c478bd9Sstevel@tonic-gate } 
1787c478bd9Sstevel@tonic-gate 1797c478bd9Sstevel@tonic-gate pfn = ((uintptr_t)BOP_EALLOC(bootops, 0, MMU_PAGESIZE, 1807c478bd9Sstevel@tonic-gate BO_NO_ALIGN, BOPF_X86_ALLOC_PHYS)) >> MMU_PAGESHIFT; 1817c478bd9Sstevel@tonic-gate if (page_resv(1, KM_NOSLEEP) == 0) 1827c478bd9Sstevel@tonic-gate panic("page_resv() failed in ptable alloc"); 1837c478bd9Sstevel@tonic-gate 1847c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(pfn); 1857c478bd9Sstevel@tonic-gate ASSERT(pp != NULL); 1867c478bd9Sstevel@tonic-gate if (pp->p_szc != 0) 1877c478bd9Sstevel@tonic-gate page_boot_demote(pp); 1887c478bd9Sstevel@tonic-gate pp = page_numtopp(pfn, SE_EXCL); 1897c478bd9Sstevel@tonic-gate ASSERT(pp != NULL); 1907c478bd9Sstevel@tonic-gate 1917c478bd9Sstevel@tonic-gate } else { 1927c478bd9Sstevel@tonic-gate /* 1937c478bd9Sstevel@tonic-gate * Post boot get a page for the table. 1947c478bd9Sstevel@tonic-gate * 1957c478bd9Sstevel@tonic-gate * The first check is to see if there is memory in 1967c478bd9Sstevel@tonic-gate * the system. If we drop to throttlefree, then fail 1977c478bd9Sstevel@tonic-gate * the ptable_alloc() and let the stealing code kick in. 1987c478bd9Sstevel@tonic-gate * Note that we have to do this test here, since the test in 1997c478bd9Sstevel@tonic-gate * page_create_throttle() would let the NOSLEEP allocation 2007c478bd9Sstevel@tonic-gate * go through and deplete the page reserves. 201*a85a6733Sjosephb * 202*a85a6733Sjosephb * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check. 2037c478bd9Sstevel@tonic-gate */ 204*a85a6733Sjosephb if (!NOMEMWAIT() && freemem <= throttlefree + 1) 2057c478bd9Sstevel@tonic-gate return; 2067c478bd9Sstevel@tonic-gate 207*a85a6733Sjosephb #ifdef DEBUG 208*a85a6733Sjosephb /* 209*a85a6733Sjosephb * This code makes htable_ steal() easier to test. By setting 210*a85a6733Sjosephb * force_steal we force pagetable allocations to fall 211*a85a6733Sjosephb * into the stealing code. 
Roughly 1 in ever "force_steal" 212*a85a6733Sjosephb * page table allocations will fail. 213*a85a6733Sjosephb */ 214*a85a6733Sjosephb if (ht->ht_hat != kas.a_hat && force_steal > 1 && 215*a85a6733Sjosephb ++ptable_cnt > force_steal) { 216*a85a6733Sjosephb ptable_cnt = 0; 217*a85a6733Sjosephb return; 218*a85a6733Sjosephb } 219*a85a6733Sjosephb #endif /* DEBUG */ 220*a85a6733Sjosephb 2217c478bd9Sstevel@tonic-gate /* 2227c478bd9Sstevel@tonic-gate * This code is temporary, so don't review too critically. 2237c478bd9Sstevel@tonic-gate * I'm awaiting a new phys page allocator from Kit -- Joe 2247c478bd9Sstevel@tonic-gate * 2257c478bd9Sstevel@tonic-gate * We need assign an offset for the page to call 2267c478bd9Sstevel@tonic-gate * page_create_va. To avoid conflicts with other pages, 2277c478bd9Sstevel@tonic-gate * we get creative with the offset. 2287c478bd9Sstevel@tonic-gate * for 32 bits, we pic an offset > 4Gig 2297c478bd9Sstevel@tonic-gate * for 64 bits, pic an offset somewhere in the VA hole. 
2307c478bd9Sstevel@tonic-gate */ 2317c478bd9Sstevel@tonic-gate offset = (uintptr_t)ht - kernelbase; 2327c478bd9Sstevel@tonic-gate offset <<= MMU_PAGESHIFT; 2337c478bd9Sstevel@tonic-gate #if defined(__amd64) 2347c478bd9Sstevel@tonic-gate offset += mmu.hole_start; /* something in VA hole */ 2357c478bd9Sstevel@tonic-gate #else 2367c478bd9Sstevel@tonic-gate offset += 1ULL << 40; /* something > 4 Gig */ 2377c478bd9Sstevel@tonic-gate #endif 2387c478bd9Sstevel@tonic-gate 2397c478bd9Sstevel@tonic-gate if (page_resv(1, KM_NOSLEEP) == 0) 2407c478bd9Sstevel@tonic-gate return; 2417c478bd9Sstevel@tonic-gate 2427c478bd9Sstevel@tonic-gate #ifdef DEBUG 2437c478bd9Sstevel@tonic-gate pp = page_exists(&kvp, offset); 2447c478bd9Sstevel@tonic-gate if (pp != NULL) 2457c478bd9Sstevel@tonic-gate panic("ptable already exists %p", pp); 2467c478bd9Sstevel@tonic-gate #endif 2477c478bd9Sstevel@tonic-gate pp = page_create_va(&kvp, offset, MMU_PAGESIZE, 2487c478bd9Sstevel@tonic-gate PG_EXCL | PG_NORELOC, &tmpseg, 2497c478bd9Sstevel@tonic-gate (void *)((uintptr_t)ht << MMU_PAGESHIFT)); 2507c478bd9Sstevel@tonic-gate if (pp == NULL) 2517c478bd9Sstevel@tonic-gate return; 2527c478bd9Sstevel@tonic-gate page_io_unlock(pp); 2537c478bd9Sstevel@tonic-gate page_hashout(pp, NULL); 2547c478bd9Sstevel@tonic-gate pfn = pp->p_pagenum; 2557c478bd9Sstevel@tonic-gate } 2567c478bd9Sstevel@tonic-gate page_downgrade(pp); 2577c478bd9Sstevel@tonic-gate ASSERT(PAGE_SHARED(pp)); 2587c478bd9Sstevel@tonic-gate 2597c478bd9Sstevel@tonic-gate if (pfn == PFN_INVALID) 2607c478bd9Sstevel@tonic-gate panic("ptable_alloc(): Invalid PFN!!"); 2617c478bd9Sstevel@tonic-gate ht->ht_pfn = pfn; 262*a85a6733Sjosephb HATSTAT_INC(hs_ptable_allocs); 2637c478bd9Sstevel@tonic-gate } 2647c478bd9Sstevel@tonic-gate 2657c478bd9Sstevel@tonic-gate /* 2667c478bd9Sstevel@tonic-gate * Free an htable's associated page table page. See the comments 2677c478bd9Sstevel@tonic-gate * for ptable_alloc(). 
2687c478bd9Sstevel@tonic-gate */ 2697c478bd9Sstevel@tonic-gate static void 2707c478bd9Sstevel@tonic-gate ptable_free(htable_t *ht) 2717c478bd9Sstevel@tonic-gate { 2727c478bd9Sstevel@tonic-gate pfn_t pfn = ht->ht_pfn; 2737c478bd9Sstevel@tonic-gate page_t *pp; 2747c478bd9Sstevel@tonic-gate 2757c478bd9Sstevel@tonic-gate /* 2767c478bd9Sstevel@tonic-gate * need to destroy the page used for the pagetable 2777c478bd9Sstevel@tonic-gate */ 2787c478bd9Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID); 2797c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_ptable_frees); 2807c478bd9Sstevel@tonic-gate atomic_add_32(&active_ptables, -1); 2817c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(pfn); 2827c478bd9Sstevel@tonic-gate if (pp == NULL) 2837c478bd9Sstevel@tonic-gate panic("ptable_free(): no page for pfn!"); 2847c478bd9Sstevel@tonic-gate ASSERT(PAGE_SHARED(pp)); 2857c478bd9Sstevel@tonic-gate ASSERT(pfn == pp->p_pagenum); 2867c478bd9Sstevel@tonic-gate 2877c478bd9Sstevel@tonic-gate /* 2887c478bd9Sstevel@tonic-gate * Get an exclusive lock, might have to wait for a kmem reader. 2897c478bd9Sstevel@tonic-gate */ 2907c478bd9Sstevel@tonic-gate if (!page_tryupgrade(pp)) { 2917c478bd9Sstevel@tonic-gate page_unlock(pp); 2927c478bd9Sstevel@tonic-gate /* 2937c478bd9Sstevel@tonic-gate * RFE: we could change this to not loop forever 2947c478bd9Sstevel@tonic-gate * George Cameron had some idea on how to do that. 2957c478bd9Sstevel@tonic-gate * For now looping works - it's just like sfmmu. 2967c478bd9Sstevel@tonic-gate */ 2977c478bd9Sstevel@tonic-gate while (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_RECLAIM)) 2987c478bd9Sstevel@tonic-gate continue; 2997c478bd9Sstevel@tonic-gate } 3007c478bd9Sstevel@tonic-gate page_free(pp, 1); 3017c478bd9Sstevel@tonic-gate page_unresv(1); 3027c478bd9Sstevel@tonic-gate ht->ht_pfn = PFN_INVALID; 3037c478bd9Sstevel@tonic-gate } 3047c478bd9Sstevel@tonic-gate 3057c478bd9Sstevel@tonic-gate /* 3067c478bd9Sstevel@tonic-gate * Put one htable on the reserve list. 
3077c478bd9Sstevel@tonic-gate */ 3087c478bd9Sstevel@tonic-gate static void 3097c478bd9Sstevel@tonic-gate htable_put_reserve(htable_t *ht) 3107c478bd9Sstevel@tonic-gate { 3117c478bd9Sstevel@tonic-gate ht->ht_hat = NULL; /* no longer tied to a hat */ 3127c478bd9Sstevel@tonic-gate ASSERT(ht->ht_pfn == PFN_INVALID); 3137c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_htable_rputs); 3147c478bd9Sstevel@tonic-gate mutex_enter(&htable_reserve_mutex); 3157c478bd9Sstevel@tonic-gate ht->ht_next = htable_reserve_pool; 3167c478bd9Sstevel@tonic-gate htable_reserve_pool = ht; 3177c478bd9Sstevel@tonic-gate ++htable_reserve_cnt; 3187c478bd9Sstevel@tonic-gate mutex_exit(&htable_reserve_mutex); 3197c478bd9Sstevel@tonic-gate } 3207c478bd9Sstevel@tonic-gate 3217c478bd9Sstevel@tonic-gate /* 3227c478bd9Sstevel@tonic-gate * Take one htable from the reserve. 3237c478bd9Sstevel@tonic-gate */ 3247c478bd9Sstevel@tonic-gate static htable_t * 3257c478bd9Sstevel@tonic-gate htable_get_reserve(void) 3267c478bd9Sstevel@tonic-gate { 3277c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 3287c478bd9Sstevel@tonic-gate 3297c478bd9Sstevel@tonic-gate mutex_enter(&htable_reserve_mutex); 3307c478bd9Sstevel@tonic-gate if (htable_reserve_cnt != 0) { 3317c478bd9Sstevel@tonic-gate ht = htable_reserve_pool; 3327c478bd9Sstevel@tonic-gate ASSERT(ht != NULL); 3337c478bd9Sstevel@tonic-gate ASSERT(ht->ht_pfn == PFN_INVALID); 3347c478bd9Sstevel@tonic-gate htable_reserve_pool = ht->ht_next; 3357c478bd9Sstevel@tonic-gate --htable_reserve_cnt; 3367c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_htable_rgets); 3377c478bd9Sstevel@tonic-gate } 3387c478bd9Sstevel@tonic-gate mutex_exit(&htable_reserve_mutex); 3397c478bd9Sstevel@tonic-gate return (ht); 3407c478bd9Sstevel@tonic-gate } 3417c478bd9Sstevel@tonic-gate 3427c478bd9Sstevel@tonic-gate /* 3437c478bd9Sstevel@tonic-gate * Allocate initial htables with page tables and put them on the kernel hat's 3447c478bd9Sstevel@tonic-gate * cache list. 
3457c478bd9Sstevel@tonic-gate */ 3467c478bd9Sstevel@tonic-gate void 3477c478bd9Sstevel@tonic-gate htable_initial_reserve(uint_t count) 3487c478bd9Sstevel@tonic-gate { 3497c478bd9Sstevel@tonic-gate htable_t *ht; 3507c478bd9Sstevel@tonic-gate hat_t *hat = kas.a_hat; 3517c478bd9Sstevel@tonic-gate 3527c478bd9Sstevel@tonic-gate count += HTABLE_RESERVE_AMOUNT; 3537c478bd9Sstevel@tonic-gate while (count > 0) { 3547c478bd9Sstevel@tonic-gate ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP); 3557c478bd9Sstevel@tonic-gate ASSERT(ht != NULL); 3567c478bd9Sstevel@tonic-gate 3577c478bd9Sstevel@tonic-gate ASSERT(use_boot_reserve); 3587c478bd9Sstevel@tonic-gate ht->ht_hat = kas.a_hat; /* so htable_free() works */ 3597c478bd9Sstevel@tonic-gate ht->ht_flags = 0; /* so x86pte_zero works */ 3607c478bd9Sstevel@tonic-gate ptable_alloc(ht); 3617c478bd9Sstevel@tonic-gate if (ht->ht_pfn == PFN_INVALID) 3627c478bd9Sstevel@tonic-gate panic("ptable_alloc() failed"); 3637c478bd9Sstevel@tonic-gate 3647c478bd9Sstevel@tonic-gate x86pte_zero(ht, 0, mmu.ptes_per_table); 3657c478bd9Sstevel@tonic-gate 3667c478bd9Sstevel@tonic-gate ht->ht_next = hat->hat_ht_cached; 3677c478bd9Sstevel@tonic-gate hat->hat_ht_cached = ht; 3687c478bd9Sstevel@tonic-gate --count; 3697c478bd9Sstevel@tonic-gate } 3707c478bd9Sstevel@tonic-gate } 3717c478bd9Sstevel@tonic-gate 3727c478bd9Sstevel@tonic-gate /* 3737c478bd9Sstevel@tonic-gate * Readjust the reserves after a thread finishes using them. 3747c478bd9Sstevel@tonic-gate * 3757c478bd9Sstevel@tonic-gate * The first time this is called post boot, we'll also clear out the 3767c478bd9Sstevel@tonic-gate * extra boot htables that were put in the kernel hat's cache list. 
3777c478bd9Sstevel@tonic-gate */ 3787c478bd9Sstevel@tonic-gate void 3797c478bd9Sstevel@tonic-gate htable_adjust_reserve() 3807c478bd9Sstevel@tonic-gate { 3817c478bd9Sstevel@tonic-gate static int first_time = 1; 3827c478bd9Sstevel@tonic-gate htable_t *ht; 3837c478bd9Sstevel@tonic-gate 3847c478bd9Sstevel@tonic-gate ASSERT(curthread != hat_reserves_thread); 3857c478bd9Sstevel@tonic-gate 3867c478bd9Sstevel@tonic-gate /* 3877c478bd9Sstevel@tonic-gate * The first time this is called after we can steal, we free up the 3887c478bd9Sstevel@tonic-gate * the kernel's cache htable list. It has lots of extra htable/page 3897c478bd9Sstevel@tonic-gate * tables that were allocated for boot up. 3907c478bd9Sstevel@tonic-gate */ 3917c478bd9Sstevel@tonic-gate if (first_time) { 3927c478bd9Sstevel@tonic-gate first_time = 0; 3937c478bd9Sstevel@tonic-gate while ((ht = kas.a_hat->hat_ht_cached) != NULL) { 3947c478bd9Sstevel@tonic-gate kas.a_hat->hat_ht_cached = ht->ht_next; 3957c478bd9Sstevel@tonic-gate ASSERT(ht->ht_hat == kas.a_hat); 3967c478bd9Sstevel@tonic-gate ptable_free(ht); 3977c478bd9Sstevel@tonic-gate htable_put_reserve(ht); 3987c478bd9Sstevel@tonic-gate } 3997c478bd9Sstevel@tonic-gate return; 4007c478bd9Sstevel@tonic-gate } 4017c478bd9Sstevel@tonic-gate 4027c478bd9Sstevel@tonic-gate /* 4037c478bd9Sstevel@tonic-gate * Free any excess htables in the reserve list 4047c478bd9Sstevel@tonic-gate */ 4057c478bd9Sstevel@tonic-gate while (htable_reserve_cnt > htable_reserve_amount) { 4067c478bd9Sstevel@tonic-gate ht = htable_get_reserve(); 4077c478bd9Sstevel@tonic-gate if (ht == NULL) 4087c478bd9Sstevel@tonic-gate return; 4097c478bd9Sstevel@tonic-gate ASSERT(ht->ht_pfn == PFN_INVALID); 4107c478bd9Sstevel@tonic-gate kmem_cache_free(htable_cache, ht); 4117c478bd9Sstevel@tonic-gate } 4127c478bd9Sstevel@tonic-gate } 4137c478bd9Sstevel@tonic-gate 4147c478bd9Sstevel@tonic-gate 4157c478bd9Sstevel@tonic-gate /* 4167c478bd9Sstevel@tonic-gate * This routine steals htables from user processes for 
htable_alloc() or 4177c478bd9Sstevel@tonic-gate * for htable_reap(). 4187c478bd9Sstevel@tonic-gate */ 4197c478bd9Sstevel@tonic-gate static htable_t * 4207c478bd9Sstevel@tonic-gate htable_steal(uint_t cnt) 4217c478bd9Sstevel@tonic-gate { 4227c478bd9Sstevel@tonic-gate hat_t *hat = kas.a_hat; /* list starts with khat */ 4237c478bd9Sstevel@tonic-gate htable_t *list = NULL; 4247c478bd9Sstevel@tonic-gate htable_t *ht; 4257c478bd9Sstevel@tonic-gate htable_t *higher; 4267c478bd9Sstevel@tonic-gate uint_t h; 427*a85a6733Sjosephb uint_t h_start; 428*a85a6733Sjosephb static uint_t h_seed = 0; 4297c478bd9Sstevel@tonic-gate uint_t e; 4307c478bd9Sstevel@tonic-gate uintptr_t va; 4317c478bd9Sstevel@tonic-gate x86pte_t pte; 4327c478bd9Sstevel@tonic-gate uint_t stolen = 0; 4337c478bd9Sstevel@tonic-gate uint_t pass; 434*a85a6733Sjosephb uint_t threshold; 4357c478bd9Sstevel@tonic-gate 4367c478bd9Sstevel@tonic-gate /* 4377c478bd9Sstevel@tonic-gate * Limit htable_steal_passes to something reasonable 4387c478bd9Sstevel@tonic-gate */ 4397c478bd9Sstevel@tonic-gate if (htable_steal_passes == 0) 4407c478bd9Sstevel@tonic-gate htable_steal_passes = 1; 4417c478bd9Sstevel@tonic-gate if (htable_steal_passes > mmu.ptes_per_table) 4427c478bd9Sstevel@tonic-gate htable_steal_passes = mmu.ptes_per_table; 4437c478bd9Sstevel@tonic-gate 4447c478bd9Sstevel@tonic-gate /* 445*a85a6733Sjosephb * Loop through all user hats. The 1st pass takes cached htables that 4467c478bd9Sstevel@tonic-gate * aren't in use. The later passes steal by removing mappings, too. 
4477c478bd9Sstevel@tonic-gate */ 4487c478bd9Sstevel@tonic-gate atomic_add_32(&htable_dont_cache, 1); 449*a85a6733Sjosephb for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) { 450*a85a6733Sjosephb threshold = pass * mmu.ptes_per_table / htable_steal_passes; 451*a85a6733Sjosephb hat = kas.a_hat; 4527c478bd9Sstevel@tonic-gate for (;;) { 4537c478bd9Sstevel@tonic-gate 4547c478bd9Sstevel@tonic-gate /* 455*a85a6733Sjosephb * Clear the victim flag and move to next hat 4567c478bd9Sstevel@tonic-gate */ 4577c478bd9Sstevel@tonic-gate mutex_enter(&hat_list_lock); 458*a85a6733Sjosephb if (hat != kas.a_hat) { 4597c478bd9Sstevel@tonic-gate hat->hat_flags &= ~HAT_VICTIM; 4607c478bd9Sstevel@tonic-gate cv_broadcast(&hat_list_cv); 461*a85a6733Sjosephb } 462*a85a6733Sjosephb hat = hat->hat_next; 463*a85a6733Sjosephb 464*a85a6733Sjosephb /* 465*a85a6733Sjosephb * Skip any hat that is already being stolen from. 466*a85a6733Sjosephb * 467*a85a6733Sjosephb * We skip SHARED hats, as these are dummy 468*a85a6733Sjosephb * hats that host ISM shared page tables. 469*a85a6733Sjosephb * 470*a85a6733Sjosephb * We also skip if HAT_FREEING because hat_pte_unmap() 471*a85a6733Sjosephb * won't zero out the PTE's. That would lead to hitting 472*a85a6733Sjosephb * stale PTEs either here or under hat_unload() when we 473*a85a6733Sjosephb * steal and unload the same page table in competing 474*a85a6733Sjosephb * threads. 475*a85a6733Sjosephb */ 476*a85a6733Sjosephb while (hat != NULL && 477*a85a6733Sjosephb (hat->hat_flags & 478*a85a6733Sjosephb (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0) 479*a85a6733Sjosephb hat = hat->hat_next; 480*a85a6733Sjosephb 481*a85a6733Sjosephb if (hat == NULL) { 4827c478bd9Sstevel@tonic-gate mutex_exit(&hat_list_lock); 4837c478bd9Sstevel@tonic-gate break; 4847c478bd9Sstevel@tonic-gate } 485*a85a6733Sjosephb 486*a85a6733Sjosephb /* 487*a85a6733Sjosephb * Are we finished? 
488*a85a6733Sjosephb */ 489*a85a6733Sjosephb if (stolen == cnt) { 490*a85a6733Sjosephb /* 491*a85a6733Sjosephb * Try to spread the pain of stealing, 492*a85a6733Sjosephb * move victim HAT to the end of the HAT list. 493*a85a6733Sjosephb */ 494*a85a6733Sjosephb if (pass >= 1 && cnt == 1 && 495*a85a6733Sjosephb kas.a_hat->hat_prev != hat) { 496*a85a6733Sjosephb 497*a85a6733Sjosephb /* unlink victim hat */ 498*a85a6733Sjosephb if (hat->hat_prev) 499*a85a6733Sjosephb hat->hat_prev->hat_next = 500*a85a6733Sjosephb hat->hat_next; 501*a85a6733Sjosephb else 502*a85a6733Sjosephb kas.a_hat->hat_next = 503*a85a6733Sjosephb hat->hat_next; 504*a85a6733Sjosephb if (hat->hat_next) 505*a85a6733Sjosephb hat->hat_next->hat_prev = 506*a85a6733Sjosephb hat->hat_prev; 507*a85a6733Sjosephb else 508*a85a6733Sjosephb kas.a_hat->hat_prev = 509*a85a6733Sjosephb hat->hat_prev; 510*a85a6733Sjosephb 511*a85a6733Sjosephb 512*a85a6733Sjosephb /* relink at end of hat list */ 513*a85a6733Sjosephb hat->hat_next = NULL; 514*a85a6733Sjosephb hat->hat_prev = kas.a_hat->hat_prev; 515*a85a6733Sjosephb if (hat->hat_prev) 516*a85a6733Sjosephb hat->hat_prev->hat_next = hat; 517*a85a6733Sjosephb else 518*a85a6733Sjosephb kas.a_hat->hat_next = hat; 519*a85a6733Sjosephb kas.a_hat->hat_prev = hat; 520*a85a6733Sjosephb 521*a85a6733Sjosephb } 522*a85a6733Sjosephb 523*a85a6733Sjosephb mutex_exit(&hat_list_lock); 524*a85a6733Sjosephb break; 525*a85a6733Sjosephb } 526*a85a6733Sjosephb 527*a85a6733Sjosephb /* 528*a85a6733Sjosephb * Mark the HAT as a stealing victim. 529*a85a6733Sjosephb */ 5307c478bd9Sstevel@tonic-gate hat->hat_flags |= HAT_VICTIM; 5317c478bd9Sstevel@tonic-gate mutex_exit(&hat_list_lock); 5327c478bd9Sstevel@tonic-gate 5337c478bd9Sstevel@tonic-gate /* 5347c478bd9Sstevel@tonic-gate * Take any htables from the hat's cached "free" list. 
5357c478bd9Sstevel@tonic-gate */ 5367c478bd9Sstevel@tonic-gate hat_enter(hat); 5377c478bd9Sstevel@tonic-gate while ((ht = hat->hat_ht_cached) != NULL && 5387c478bd9Sstevel@tonic-gate stolen < cnt) { 5397c478bd9Sstevel@tonic-gate hat->hat_ht_cached = ht->ht_next; 5407c478bd9Sstevel@tonic-gate ht->ht_next = list; 5417c478bd9Sstevel@tonic-gate list = ht; 5427c478bd9Sstevel@tonic-gate ++stolen; 5437c478bd9Sstevel@tonic-gate } 5447c478bd9Sstevel@tonic-gate hat_exit(hat); 5457c478bd9Sstevel@tonic-gate 5467c478bd9Sstevel@tonic-gate /* 5477c478bd9Sstevel@tonic-gate * Don't steal on first pass. 5487c478bd9Sstevel@tonic-gate */ 549*a85a6733Sjosephb if (pass == 0 || stolen == cnt) 5507c478bd9Sstevel@tonic-gate continue; 5517c478bd9Sstevel@tonic-gate 5527c478bd9Sstevel@tonic-gate /* 553*a85a6733Sjosephb * Search the active htables for one to steal. 554*a85a6733Sjosephb * Start at a different hash bucket every time to 555*a85a6733Sjosephb * help spread the pain of stealing. 5567c478bd9Sstevel@tonic-gate */ 557*a85a6733Sjosephb h = h_start = h_seed++ % hat->hat_num_hash; 558*a85a6733Sjosephb do { 5597c478bd9Sstevel@tonic-gate higher = NULL; 5607c478bd9Sstevel@tonic-gate HTABLE_ENTER(h); 5617c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; 5627c478bd9Sstevel@tonic-gate ht = ht->ht_next) { 5637c478bd9Sstevel@tonic-gate 5647c478bd9Sstevel@tonic-gate /* 5657c478bd9Sstevel@tonic-gate * Can we rule out reaping? 5667c478bd9Sstevel@tonic-gate */ 5677c478bd9Sstevel@tonic-gate if (ht->ht_busy != 0 || 5687c478bd9Sstevel@tonic-gate (ht->ht_flags & HTABLE_SHARED_PFN)|| 569*a85a6733Sjosephb ht->ht_level > 0 || 570*a85a6733Sjosephb ht->ht_valid_cnt > threshold || 5717c478bd9Sstevel@tonic-gate ht->ht_lock_cnt != 0) 5727c478bd9Sstevel@tonic-gate continue; 5737c478bd9Sstevel@tonic-gate 5747c478bd9Sstevel@tonic-gate /* 5757c478bd9Sstevel@tonic-gate * Increment busy so the htable can't 5767c478bd9Sstevel@tonic-gate * disappear. 
We drop the htable mutex 5777c478bd9Sstevel@tonic-gate * to avoid deadlocks with 5787c478bd9Sstevel@tonic-gate * hat_pageunload() and the hment mutex 5797c478bd9Sstevel@tonic-gate * while we call hat_pte_unmap() 5807c478bd9Sstevel@tonic-gate */ 5817c478bd9Sstevel@tonic-gate ++ht->ht_busy; 5827c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 5837c478bd9Sstevel@tonic-gate 5847c478bd9Sstevel@tonic-gate /* 5857c478bd9Sstevel@tonic-gate * Try stealing. 5867c478bd9Sstevel@tonic-gate * - unload and invalidate all PTEs 5877c478bd9Sstevel@tonic-gate */ 5887c478bd9Sstevel@tonic-gate for (e = 0, va = ht->ht_vaddr; 5897c478bd9Sstevel@tonic-gate e < ht->ht_num_ptes && 5907c478bd9Sstevel@tonic-gate ht->ht_valid_cnt > 0 && 5917c478bd9Sstevel@tonic-gate ht->ht_busy == 1 && 5927c478bd9Sstevel@tonic-gate ht->ht_lock_cnt == 0; 5937c478bd9Sstevel@tonic-gate ++e, va += MMU_PAGESIZE) { 5947c478bd9Sstevel@tonic-gate pte = x86pte_get(ht, e); 5957c478bd9Sstevel@tonic-gate if (!PTE_ISVALID(pte)) 5967c478bd9Sstevel@tonic-gate continue; 5977c478bd9Sstevel@tonic-gate hat_pte_unmap(ht, e, 5987c478bd9Sstevel@tonic-gate HAT_UNLOAD, pte, NULL); 5997c478bd9Sstevel@tonic-gate } 6007c478bd9Sstevel@tonic-gate 6017c478bd9Sstevel@tonic-gate /* 6027c478bd9Sstevel@tonic-gate * Reacquire htable lock. If we didn't 6037c478bd9Sstevel@tonic-gate * remove all mappings in the table, 6047c478bd9Sstevel@tonic-gate * or another thread added a new mapping 6057c478bd9Sstevel@tonic-gate * behind us, give up on this table. 6067c478bd9Sstevel@tonic-gate */ 6077c478bd9Sstevel@tonic-gate HTABLE_ENTER(h); 6087c478bd9Sstevel@tonic-gate if (ht->ht_busy != 1 || 6097c478bd9Sstevel@tonic-gate ht->ht_valid_cnt != 0 || 6107c478bd9Sstevel@tonic-gate ht->ht_lock_cnt != 0) { 6117c478bd9Sstevel@tonic-gate --ht->ht_busy; 6127c478bd9Sstevel@tonic-gate continue; 6137c478bd9Sstevel@tonic-gate } 6147c478bd9Sstevel@tonic-gate 6157c478bd9Sstevel@tonic-gate /* 6167c478bd9Sstevel@tonic-gate * Steal it and unlink the page table. 
6177c478bd9Sstevel@tonic-gate */ 6187c478bd9Sstevel@tonic-gate higher = ht->ht_parent; 6197c478bd9Sstevel@tonic-gate unlink_ptp(higher, ht, ht->ht_vaddr); 6207c478bd9Sstevel@tonic-gate 6217c478bd9Sstevel@tonic-gate /* 6227c478bd9Sstevel@tonic-gate * remove from the hash list 6237c478bd9Sstevel@tonic-gate */ 6247c478bd9Sstevel@tonic-gate if (ht->ht_next) 6257c478bd9Sstevel@tonic-gate ht->ht_next->ht_prev = 6267c478bd9Sstevel@tonic-gate ht->ht_prev; 6277c478bd9Sstevel@tonic-gate 6287c478bd9Sstevel@tonic-gate if (ht->ht_prev) { 6297c478bd9Sstevel@tonic-gate ht->ht_prev->ht_next = 6307c478bd9Sstevel@tonic-gate ht->ht_next; 6317c478bd9Sstevel@tonic-gate } else { 6327c478bd9Sstevel@tonic-gate ASSERT(hat->hat_ht_hash[h] == 6337c478bd9Sstevel@tonic-gate ht); 6347c478bd9Sstevel@tonic-gate hat->hat_ht_hash[h] = 6357c478bd9Sstevel@tonic-gate ht->ht_next; 6367c478bd9Sstevel@tonic-gate } 6377c478bd9Sstevel@tonic-gate 6387c478bd9Sstevel@tonic-gate /* 6397c478bd9Sstevel@tonic-gate * Break to outer loop to release the 6407c478bd9Sstevel@tonic-gate * higher (ht_parent) pagtable. This 6417c478bd9Sstevel@tonic-gate * spreads out the pain caused by 6427c478bd9Sstevel@tonic-gate * pagefaults. 
6437c478bd9Sstevel@tonic-gate */ 6447c478bd9Sstevel@tonic-gate ht->ht_next = list; 6457c478bd9Sstevel@tonic-gate list = ht; 6467c478bd9Sstevel@tonic-gate ++stolen; 6477c478bd9Sstevel@tonic-gate break; 6487c478bd9Sstevel@tonic-gate } 6497c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 6507c478bd9Sstevel@tonic-gate if (higher != NULL) 6517c478bd9Sstevel@tonic-gate htable_release(higher); 652*a85a6733Sjosephb if (++h == hat->hat_num_hash) 653*a85a6733Sjosephb h = 0; 654*a85a6733Sjosephb } while (stolen < cnt && h != h_start); 6557c478bd9Sstevel@tonic-gate } 6567c478bd9Sstevel@tonic-gate } 6577c478bd9Sstevel@tonic-gate atomic_add_32(&htable_dont_cache, -1); 6587c478bd9Sstevel@tonic-gate return (list); 6597c478bd9Sstevel@tonic-gate } 6607c478bd9Sstevel@tonic-gate 6617c478bd9Sstevel@tonic-gate 6627c478bd9Sstevel@tonic-gate /* 6637c478bd9Sstevel@tonic-gate * This is invoked from kmem when the system is low on memory. We try 6647c478bd9Sstevel@tonic-gate * to free hments, htables, and ptables to improve the memory situation. 6657c478bd9Sstevel@tonic-gate */ 6667c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 6677c478bd9Sstevel@tonic-gate static void 6687c478bd9Sstevel@tonic-gate htable_reap(void *handle) 6697c478bd9Sstevel@tonic-gate { 6707c478bd9Sstevel@tonic-gate uint_t reap_cnt; 6717c478bd9Sstevel@tonic-gate htable_t *list; 6727c478bd9Sstevel@tonic-gate htable_t *ht; 6737c478bd9Sstevel@tonic-gate 6747c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_reap_attempts); 6757c478bd9Sstevel@tonic-gate if (!can_steal_post_boot) 6767c478bd9Sstevel@tonic-gate return; 6777c478bd9Sstevel@tonic-gate 6787c478bd9Sstevel@tonic-gate /* 6797c478bd9Sstevel@tonic-gate * Try to reap 5% of the page tables bounded by a maximum of 6807c478bd9Sstevel@tonic-gate * 5% of physmem and a minimum of 10. 
6817c478bd9Sstevel@tonic-gate */ 6827c478bd9Sstevel@tonic-gate reap_cnt = MIN(MAX(physmem / 20, active_ptables / 20), 10); 6837c478bd9Sstevel@tonic-gate 6847c478bd9Sstevel@tonic-gate /* 6857c478bd9Sstevel@tonic-gate * Let htable_steal() do the work, we just call htable_free() 6867c478bd9Sstevel@tonic-gate */ 6877c478bd9Sstevel@tonic-gate list = htable_steal(reap_cnt); 6887c478bd9Sstevel@tonic-gate while ((ht = list) != NULL) { 6897c478bd9Sstevel@tonic-gate list = ht->ht_next; 6907c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_reaped); 6917c478bd9Sstevel@tonic-gate htable_free(ht); 6927c478bd9Sstevel@tonic-gate } 6937c478bd9Sstevel@tonic-gate 6947c478bd9Sstevel@tonic-gate /* 6957c478bd9Sstevel@tonic-gate * Free up excess reserves 6967c478bd9Sstevel@tonic-gate */ 6977c478bd9Sstevel@tonic-gate htable_adjust_reserve(); 6987c478bd9Sstevel@tonic-gate hment_adjust_reserve(); 6997c478bd9Sstevel@tonic-gate } 7007c478bd9Sstevel@tonic-gate 7017c478bd9Sstevel@tonic-gate /* 7027c478bd9Sstevel@tonic-gate * allocate an htable, stealing one or using the reserve if necessary 7037c478bd9Sstevel@tonic-gate */ 7047c478bd9Sstevel@tonic-gate static htable_t * 7057c478bd9Sstevel@tonic-gate htable_alloc( 7067c478bd9Sstevel@tonic-gate hat_t *hat, 7077c478bd9Sstevel@tonic-gate uintptr_t vaddr, 7087c478bd9Sstevel@tonic-gate level_t level, 7097c478bd9Sstevel@tonic-gate htable_t *shared) 7107c478bd9Sstevel@tonic-gate { 7117c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 7127c478bd9Sstevel@tonic-gate uint_t is_vlp; 7137c478bd9Sstevel@tonic-gate uint_t is_bare = 0; 7147c478bd9Sstevel@tonic-gate uint_t need_to_zero = 1; 7157c478bd9Sstevel@tonic-gate int kmflags = (can_steal_post_boot ? 
	    KM_NOSLEEP : KM_SLEEP);

	if (level < 0 || level > TOP_LEVEL(hat))
		panic("htable_alloc(): level %d out of range\n", level);

	/*
	 * "bare" htables carry no private pagetable page: VLP htables
	 * and htables that share another htable's PFN.
	 */
	is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
	if (is_vlp || shared != NULL)
		is_bare = 1;

	/*
	 * First reuse a cached htable from the hat_ht_cached field, this
	 * avoids unnecessary trips through kmem/page allocators. This is also
	 * what happens during use_boot_reserve.
	 */
	if (hat->hat_ht_cached != NULL && !is_bare) {
		hat_enter(hat);
		ht = hat->hat_ht_cached;
		if (ht != NULL) {
			hat->hat_ht_cached = ht->ht_next;
			need_to_zero = 0;
			/* XX64 ASSERT() they're all zero somehow */
			ASSERT(ht->ht_pfn != PFN_INVALID);
		}
		hat_exit(hat);
	}

	if (ht == NULL) {
		ASSERT(!use_boot_reserve);
		/*
		 * When allocating for hat_memload_arena, we use the reserve.
		 * Also use reserves if we are in a panic().
		 */
		if (curthread == hat_reserves_thread || panicstr != NULL) {
			ASSERT(panicstr != NULL || !is_bare);
			ASSERT(panicstr != NULL ||
			    curthread == hat_reserves_thread);
			ht = htable_get_reserve();
		} else {
			/*
			 * Donate successful htable allocations to the reserve.
			 */
			for (;;) {
				ASSERT(curthread != hat_reserves_thread);
				ht = kmem_cache_alloc(htable_cache, kmflags);
				if (ht == NULL)
					break;
				ht->ht_pfn = PFN_INVALID;
				if (curthread == hat_reserves_thread ||
				    panicstr != NULL ||
				    htable_reserve_cnt >= htable_reserve_amount)
					break;
				htable_put_reserve(ht);
			}
		}

		/*
		 * allocate a page for the hardware page table if needed
		 */
		if (ht != NULL && !is_bare) {
			ht->ht_hat = hat;
			ptable_alloc(ht);
			if (ht->ht_pfn == PFN_INVALID) {
				kmem_cache_free(htable_cache, ht);
				ht = NULL;
			}
		}
	}

	/*
	 * If allocations failed, kick off a kmem_reap() and resort to
	 * htable steal(). We may spin here if the system is very low on
	 * memory. If the kernel itself has consumed all memory and kmem_reap()
	 * can't free up anything, then we'll really get stuck here.
	 * That should only happen in a system where the administrator has
	 * misconfigured VM parameters via /etc/system.
	 */
	while (ht == NULL && can_steal_post_boot) {
		kmem_reap();
		ht = htable_steal(1);
		HATSTAT_INC(hs_steals);

		/*
		 * If we stole for a bare htable, release the pagetable page.
		 */
		if (ht != NULL && is_bare)
			ptable_free(ht);
	}

	/*
	 * All attempts to allocate or steal failed. This should only happen
	 * if we run out of memory during boot, due perhaps to a huge
	 * boot_archive. At this point there's no way to continue.
	 */
	if (ht == NULL)
		panic("htable_alloc(): couldn't steal\n");

	/*
	 * Shared page tables have all entries locked and entries may not
	 * be added or deleted.
	 */
	ht->ht_flags = 0;
	if (shared != NULL) {
		ASSERT(level == 0);
		ASSERT(shared->ht_valid_cnt > 0);
		ht->ht_flags |= HTABLE_SHARED_PFN;
		ht->ht_pfn = shared->ht_pfn;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;		/* updated in hat_share() */
		ht->ht_shares = shared;
		need_to_zero = 0;
	} else {
		ht->ht_shares = NULL;
		ht->ht_lock_cnt = 0;
		ht->ht_valid_cnt = 0;
	}

	/*
	 * setup flags, etc. for VLP htables
	 */
	if (is_vlp) {
		ht->ht_flags |= HTABLE_VLP;
		ht->ht_num_ptes = VLP_NUM_PTES;
		ASSERT(ht->ht_pfn == PFN_INVALID);
		need_to_zero = 0;
	} else if (level == mmu.max_level) {
		ht->ht_num_ptes = mmu.top_level_count;
	} else {
		ht->ht_num_ptes = mmu.ptes_per_table;
	}

	/*
	 * fill in the htable
	 */
	ht->ht_hat = hat;
	ht->ht_parent = NULL;
	ht->ht_vaddr = vaddr;
	ht->ht_level = level;
	ht->ht_busy = 1;
	ht->ht_next = NULL;
	ht->ht_prev = NULL;

	/*
	 * Zero out any freshly allocated page table
	 */
	if (need_to_zero)
		x86pte_zero(ht, 0, mmu.ptes_per_table);
	return (ht);
}

/*
 * Free up an htable, either to a hat's cached list, the reserves or
 * back to kmem.
 */
static void
htable_free(htable_t *ht)
{
	hat_t	*hat = ht->ht_hat;

	/*
	 * If the process isn't exiting, cache the free htable in the hat
	 * structure. We always do this for the boot reserve. We don't
	 * do this if the hat is exiting or we are stealing/reaping htables.
	 */
	if (hat != NULL &&
	    !(ht->ht_flags & HTABLE_SHARED_PFN) &&
	    (use_boot_reserve ||
	    (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
		ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
		ASSERT(ht->ht_pfn != PFN_INVALID);
		hat_enter(hat);
		ht->ht_next = hat->hat_ht_cached;
		hat->hat_ht_cached = ht;
		hat_exit(hat);
		return;
	}

	/*
	 * If we have a hardware page table, free it.
	 * We don't free page tables that are accessed by sharing someone else.
	 */
	if (ht->ht_flags & HTABLE_SHARED_PFN) {
		ASSERT(ht->ht_pfn != PFN_INVALID);
		ht->ht_pfn = PFN_INVALID;
	} else if (!(ht->ht_flags & HTABLE_VLP)) {
		ptable_free(ht);
	}

	/*
	 * If we are the thread using the reserves, put free htables
	 * into reserves.
	 */
	if (curthread == hat_reserves_thread ||
	    htable_reserve_cnt < htable_reserve_amount)
		htable_put_reserve(ht);
	else
		kmem_cache_free(htable_cache, ht);
}


/*
 * This is called when a hat is being destroyed or swapped out. We reap all
 * the remaining htables in the hat cache. If destroying, all left over
 * htables are also destroyed.
 *
 * We also don't need to invalidate any of the PTPs nor do any demapping.
 */
void
htable_purge_hat(hat_t *hat)
{
	htable_t	*ht;
	int		h;

	/*
	 * Purge the htable cache if just reaping.
	 */
	if (!(hat->hat_flags & HAT_FREEING)) {
		/* block htable_free() from re-caching while we drain */
		atomic_add_32(&htable_dont_cache, 1);
		for (;;) {
			hat_enter(hat);
			ht = hat->hat_ht_cached;
			if (ht == NULL) {
				hat_exit(hat);
				break;
			}
			hat->hat_ht_cached = ht->ht_next;
			hat_exit(hat);
			htable_free(ht);
		}
		atomic_add_32(&htable_dont_cache, -1);
		return;
	}

	/*
	 * if freeing, no locking is needed
	 */
	while ((ht = hat->hat_ht_cached) != NULL) {
		hat->hat_ht_cached = ht->ht_next;
		htable_free(ht);
	}

	/*
	 * walk thru the htable hash table and free all the htables in it.
	 */
	for (h = 0; h < hat->hat_num_hash; ++h) {
		while ((ht = hat->hat_ht_hash[h]) != NULL) {
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[h] == ht);
				hat->hat_ht_hash[h] = ht->ht_next;
			}
			htable_free(ht);
		}
	}
}

/*
 * Unlink an entry for a table at vaddr and level out of the existing table
 * one level higher. We are always holding the HASH_ENTER() when doing this.
9777c478bd9Sstevel@tonic-gate */ 9787c478bd9Sstevel@tonic-gate static void 9797c478bd9Sstevel@tonic-gate unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr) 9807c478bd9Sstevel@tonic-gate { 9817c478bd9Sstevel@tonic-gate uint_t entry = htable_va2entry(vaddr, higher); 9827c478bd9Sstevel@tonic-gate x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level); 9837c478bd9Sstevel@tonic-gate x86pte_t found; 9847c478bd9Sstevel@tonic-gate 9857c478bd9Sstevel@tonic-gate ASSERT(higher->ht_busy > 0); 9867c478bd9Sstevel@tonic-gate ASSERT(higher->ht_valid_cnt > 0); 9877c478bd9Sstevel@tonic-gate ASSERT(old->ht_valid_cnt == 0); 9887c478bd9Sstevel@tonic-gate found = x86pte_cas(higher, entry, expect, 0); 9897c478bd9Sstevel@tonic-gate if (found != expect) 9907c478bd9Sstevel@tonic-gate panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE, 9917c478bd9Sstevel@tonic-gate found, expect); 9927c478bd9Sstevel@tonic-gate HTABLE_DEC(higher->ht_valid_cnt); 9937c478bd9Sstevel@tonic-gate } 9947c478bd9Sstevel@tonic-gate 9957c478bd9Sstevel@tonic-gate /* 9967c478bd9Sstevel@tonic-gate * Link an entry for a new table at vaddr and level into the existing table 9977c478bd9Sstevel@tonic-gate * one level higher. We are always holding the HASH_ENTER() when doing this. 
9987c478bd9Sstevel@tonic-gate */ 9997c478bd9Sstevel@tonic-gate static void 10007c478bd9Sstevel@tonic-gate link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr) 10017c478bd9Sstevel@tonic-gate { 10027c478bd9Sstevel@tonic-gate uint_t entry = htable_va2entry(vaddr, higher); 10037c478bd9Sstevel@tonic-gate x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level); 10047c478bd9Sstevel@tonic-gate x86pte_t found; 10057c478bd9Sstevel@tonic-gate 10067c478bd9Sstevel@tonic-gate ASSERT(higher->ht_busy > 0); 10077c478bd9Sstevel@tonic-gate 10087c478bd9Sstevel@tonic-gate ASSERT(new->ht_level != mmu.max_level); 10097c478bd9Sstevel@tonic-gate 10107c478bd9Sstevel@tonic-gate HTABLE_INC(higher->ht_valid_cnt); 10117c478bd9Sstevel@tonic-gate 10127c478bd9Sstevel@tonic-gate found = x86pte_cas(higher, entry, 0, newptp); 1013b4b46911Skchow if ((found & ~PT_REF) != 0) 10147c478bd9Sstevel@tonic-gate panic("HAT: ptp not 0, found=" FMT_PTE, found); 10157c478bd9Sstevel@tonic-gate } 10167c478bd9Sstevel@tonic-gate 10177c478bd9Sstevel@tonic-gate /* 10187c478bd9Sstevel@tonic-gate * Release of an htable. 10197c478bd9Sstevel@tonic-gate * 10207c478bd9Sstevel@tonic-gate * During process exit, some empty page tables are not unlinked - hat_free_end() 10217c478bd9Sstevel@tonic-gate * cleans them up. Upper level pagetable (mmu.max_page_level and higher) are 10227c478bd9Sstevel@tonic-gate * only released during hat_free_end() or by htable_steal(). We always 10237c478bd9Sstevel@tonic-gate * release SHARED page tables. 
 */
void
htable_release(htable_t *ht)
{
	uint_t		hashval;
	htable_t	*shared;
	htable_t	*higher;
	hat_t		*hat;
	uintptr_t	va;
	level_t		level;

	while (ht != NULL) {
		shared = NULL;
		for (;;) {
			hat = ht->ht_hat;
			va = ht->ht_vaddr;
			level = ht->ht_level;
			hashval = HTABLE_HASH(hat, va, level);

			/*
			 * The common case is that this isn't the last use of
			 * an htable so we don't want to free the htable.
			 */
			HTABLE_ENTER(hashval);
			ASSERT(ht->ht_lock_cnt == 0 || ht->ht_valid_cnt > 0);
			ASSERT(ht->ht_valid_cnt >= 0);
			ASSERT(ht->ht_busy > 0);
			if (ht->ht_valid_cnt > 0)
				break;
			if (ht->ht_busy > 1)
				break;

			/*
			 * we always release empty shared htables
			 */
			if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {

				/*
				 * don't release if in address space tear down
				 */
				if (hat->hat_flags & HAT_FREEING)
					break;

				/*
				 * At and above max_page_level, free if it's for
				 * a boot-time kernel mapping below kernelbase.
				 */
				if (level >= mmu.max_page_level &&
				    (hat != kas.a_hat || va >= kernelbase))
					break;
			}

			/*
			 * remember if we destroy an htable that shares its PFN
			 * from elsewhere
			 */
			if (ht->ht_flags & HTABLE_SHARED_PFN) {
				ASSERT(ht->ht_level == 0);
				ASSERT(shared == NULL);
				shared = ht->ht_shares;
				HATSTAT_INC(hs_htable_unshared);
			}

			/*
			 * Handle release of a table and freeing the htable_t.
			 * Unlink it from the table higher (ie. ht_parent).
			 */
			ASSERT(ht->ht_lock_cnt == 0);
			higher = ht->ht_parent;
			ASSERT(higher != NULL);

			/*
			 * Unlink the pagetable.
			 */
			unlink_ptp(higher, ht, va);

			/*
			 * When any top level VLP page table entry changes, we
			 * must issue a reload of cr3 on all processors.
			 */
			if ((hat->hat_flags & HAT_VLP) &&
			    level == VLP_LEVEL - 1)
				hat_demap(hat, DEMAP_ALL_ADDR);

			/*
			 * remove this htable from its hash list
			 */
			if (ht->ht_next)
				ht->ht_next->ht_prev = ht->ht_prev;

			if (ht->ht_prev) {
				ht->ht_prev->ht_next = ht->ht_next;
			} else {
				ASSERT(hat->hat_ht_hash[hashval] == ht);
				hat->hat_ht_hash[hashval] = ht->ht_next;
			}
			HTABLE_EXIT(hashval);
			htable_free(ht);
			/* loop upward: releasing ht drops a use of its parent */
			ht = higher;
		}

		ASSERT(ht->ht_busy >= 1);
		--ht->ht_busy;
		HTABLE_EXIT(hashval);

		/*
		 * If we released a shared htable, do a release on the htable
		 * from which it shared
		 */
		ht = shared;
	}
}

/*
 * Find the htable for the pagetable at the given level for the given address.
11397c478bd9Sstevel@tonic-gate * If found acquires a hold that eventually needs to be htable_release()d 11407c478bd9Sstevel@tonic-gate */ 11417c478bd9Sstevel@tonic-gate htable_t * 11427c478bd9Sstevel@tonic-gate htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level) 11437c478bd9Sstevel@tonic-gate { 11447c478bd9Sstevel@tonic-gate uintptr_t base; 11457c478bd9Sstevel@tonic-gate uint_t hashval; 11467c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 11477c478bd9Sstevel@tonic-gate 11487c478bd9Sstevel@tonic-gate ASSERT(level >= 0); 11497c478bd9Sstevel@tonic-gate ASSERT(level <= TOP_LEVEL(hat)); 11507c478bd9Sstevel@tonic-gate 11517c478bd9Sstevel@tonic-gate if (level == TOP_LEVEL(hat)) 11527c478bd9Sstevel@tonic-gate base = 0; 11537c478bd9Sstevel@tonic-gate else 11547c478bd9Sstevel@tonic-gate base = vaddr & LEVEL_MASK(level + 1); 11557c478bd9Sstevel@tonic-gate 11567c478bd9Sstevel@tonic-gate hashval = HTABLE_HASH(hat, base, level); 11577c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 11587c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) { 11597c478bd9Sstevel@tonic-gate if (ht->ht_hat == hat && 11607c478bd9Sstevel@tonic-gate ht->ht_vaddr == base && 11617c478bd9Sstevel@tonic-gate ht->ht_level == level) 11627c478bd9Sstevel@tonic-gate break; 11637c478bd9Sstevel@tonic-gate } 11647c478bd9Sstevel@tonic-gate if (ht) 11657c478bd9Sstevel@tonic-gate ++ht->ht_busy; 11667c478bd9Sstevel@tonic-gate 11677c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 11687c478bd9Sstevel@tonic-gate return (ht); 11697c478bd9Sstevel@tonic-gate } 11707c478bd9Sstevel@tonic-gate 11717c478bd9Sstevel@tonic-gate /* 11727c478bd9Sstevel@tonic-gate * Acquires a hold on a known htable (from a locked hment entry). 
11737c478bd9Sstevel@tonic-gate */ 11747c478bd9Sstevel@tonic-gate void 11757c478bd9Sstevel@tonic-gate htable_acquire(htable_t *ht) 11767c478bd9Sstevel@tonic-gate { 11777c478bd9Sstevel@tonic-gate hat_t *hat = ht->ht_hat; 11787c478bd9Sstevel@tonic-gate level_t level = ht->ht_level; 11797c478bd9Sstevel@tonic-gate uintptr_t base = ht->ht_vaddr; 11807c478bd9Sstevel@tonic-gate uint_t hashval = HTABLE_HASH(hat, base, level); 11817c478bd9Sstevel@tonic-gate 11827c478bd9Sstevel@tonic-gate HTABLE_ENTER(hashval); 11837c478bd9Sstevel@tonic-gate #ifdef DEBUG 11847c478bd9Sstevel@tonic-gate /* 11857c478bd9Sstevel@tonic-gate * make sure the htable is there 11867c478bd9Sstevel@tonic-gate */ 11877c478bd9Sstevel@tonic-gate { 11887c478bd9Sstevel@tonic-gate htable_t *h; 11897c478bd9Sstevel@tonic-gate 11907c478bd9Sstevel@tonic-gate for (h = hat->hat_ht_hash[hashval]; 11917c478bd9Sstevel@tonic-gate h && h != ht; 11927c478bd9Sstevel@tonic-gate h = h->ht_next) 11937c478bd9Sstevel@tonic-gate ; 11947c478bd9Sstevel@tonic-gate ASSERT(h == ht); 11957c478bd9Sstevel@tonic-gate } 11967c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 11977c478bd9Sstevel@tonic-gate ++ht->ht_busy; 11987c478bd9Sstevel@tonic-gate HTABLE_EXIT(hashval); 11997c478bd9Sstevel@tonic-gate } 12007c478bd9Sstevel@tonic-gate 12017c478bd9Sstevel@tonic-gate /* 12027c478bd9Sstevel@tonic-gate * Find the htable for the pagetable at the given level for the given address. 12037c478bd9Sstevel@tonic-gate * If found acquires a hold that eventually needs to be htable_release()d 12047c478bd9Sstevel@tonic-gate * If not found the table is created. 12057c478bd9Sstevel@tonic-gate * 12067c478bd9Sstevel@tonic-gate * Since we can't hold a hash table mutex during allocation, we have to 12077c478bd9Sstevel@tonic-gate * drop it and redo the search on a create. Then we may have to free the newly 12087c478bd9Sstevel@tonic-gate * allocated htable if another thread raced in and created it ahead of us. 
12097c478bd9Sstevel@tonic-gate */ 12107c478bd9Sstevel@tonic-gate htable_t * 12117c478bd9Sstevel@tonic-gate htable_create( 12127c478bd9Sstevel@tonic-gate hat_t *hat, 12137c478bd9Sstevel@tonic-gate uintptr_t vaddr, 12147c478bd9Sstevel@tonic-gate level_t level, 12157c478bd9Sstevel@tonic-gate htable_t *shared) 12167c478bd9Sstevel@tonic-gate { 12177c478bd9Sstevel@tonic-gate uint_t h; 12187c478bd9Sstevel@tonic-gate level_t l; 12197c478bd9Sstevel@tonic-gate uintptr_t base; 12207c478bd9Sstevel@tonic-gate htable_t *ht; 12217c478bd9Sstevel@tonic-gate htable_t *higher = NULL; 12227c478bd9Sstevel@tonic-gate htable_t *new = NULL; 12237c478bd9Sstevel@tonic-gate 12247c478bd9Sstevel@tonic-gate if (level < 0 || level > TOP_LEVEL(hat)) 12257c478bd9Sstevel@tonic-gate panic("htable_create(): level %d out of range\n", level); 12267c478bd9Sstevel@tonic-gate 12277c478bd9Sstevel@tonic-gate /* 12287c478bd9Sstevel@tonic-gate * Create the page tables in top down order. 12297c478bd9Sstevel@tonic-gate */ 12307c478bd9Sstevel@tonic-gate for (l = TOP_LEVEL(hat); l >= level; --l) { 12317c478bd9Sstevel@tonic-gate new = NULL; 12327c478bd9Sstevel@tonic-gate if (l == TOP_LEVEL(hat)) 12337c478bd9Sstevel@tonic-gate base = 0; 12347c478bd9Sstevel@tonic-gate else 12357c478bd9Sstevel@tonic-gate base = vaddr & LEVEL_MASK(l + 1); 12367c478bd9Sstevel@tonic-gate 12377c478bd9Sstevel@tonic-gate h = HTABLE_HASH(hat, base, l); 12387c478bd9Sstevel@tonic-gate try_again: 12397c478bd9Sstevel@tonic-gate /* 12407c478bd9Sstevel@tonic-gate * look up the htable at this level 12417c478bd9Sstevel@tonic-gate */ 12427c478bd9Sstevel@tonic-gate HTABLE_ENTER(h); 12437c478bd9Sstevel@tonic-gate if (l == TOP_LEVEL(hat)) { 12447c478bd9Sstevel@tonic-gate ht = hat->hat_htable; 12457c478bd9Sstevel@tonic-gate } else { 12467c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 12477c478bd9Sstevel@tonic-gate ASSERT(ht->ht_hat == hat); 12487c478bd9Sstevel@tonic-gate if (ht->ht_vaddr == base && 
12497c478bd9Sstevel@tonic-gate ht->ht_level == l) 12507c478bd9Sstevel@tonic-gate break; 12517c478bd9Sstevel@tonic-gate } 12527c478bd9Sstevel@tonic-gate } 12537c478bd9Sstevel@tonic-gate 12547c478bd9Sstevel@tonic-gate /* 12557c478bd9Sstevel@tonic-gate * if we found the htable, increment its busy cnt 12567c478bd9Sstevel@tonic-gate * and if we had allocated a new htable, free it. 12577c478bd9Sstevel@tonic-gate */ 12587c478bd9Sstevel@tonic-gate if (ht != NULL) { 12597c478bd9Sstevel@tonic-gate /* 12607c478bd9Sstevel@tonic-gate * If we find a pre-existing shared table, it must 12617c478bd9Sstevel@tonic-gate * share from the same place. 12627c478bd9Sstevel@tonic-gate */ 12637c478bd9Sstevel@tonic-gate if (l == level && shared && ht->ht_shares && 12647c478bd9Sstevel@tonic-gate ht->ht_shares != shared) { 12657c478bd9Sstevel@tonic-gate panic("htable shared from wrong place " 12667c478bd9Sstevel@tonic-gate "found htable=%p shared=%p", ht, shared); 12677c478bd9Sstevel@tonic-gate } 12687c478bd9Sstevel@tonic-gate ++ht->ht_busy; 12697c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 12707c478bd9Sstevel@tonic-gate if (new) 12717c478bd9Sstevel@tonic-gate htable_free(new); 12727c478bd9Sstevel@tonic-gate if (higher != NULL) 12737c478bd9Sstevel@tonic-gate htable_release(higher); 12747c478bd9Sstevel@tonic-gate higher = ht; 12757c478bd9Sstevel@tonic-gate 12767c478bd9Sstevel@tonic-gate /* 12777c478bd9Sstevel@tonic-gate * if we didn't find it on the first search 12787c478bd9Sstevel@tonic-gate * allocate a new one and search again 12797c478bd9Sstevel@tonic-gate */ 12807c478bd9Sstevel@tonic-gate } else if (new == NULL) { 12817c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 12827c478bd9Sstevel@tonic-gate new = htable_alloc(hat, base, l, 12837c478bd9Sstevel@tonic-gate l == level ? 
shared : NULL); 12847c478bd9Sstevel@tonic-gate goto try_again; 12857c478bd9Sstevel@tonic-gate 12867c478bd9Sstevel@tonic-gate /* 12877c478bd9Sstevel@tonic-gate * 2nd search and still not there, use "new" table 12887c478bd9Sstevel@tonic-gate * Link new table into higher, when not at top level. 12897c478bd9Sstevel@tonic-gate */ 12907c478bd9Sstevel@tonic-gate } else { 12917c478bd9Sstevel@tonic-gate ht = new; 12927c478bd9Sstevel@tonic-gate if (higher != NULL) { 12937c478bd9Sstevel@tonic-gate link_ptp(higher, ht, base); 12947c478bd9Sstevel@tonic-gate ht->ht_parent = higher; 12957c478bd9Sstevel@tonic-gate 12967c478bd9Sstevel@tonic-gate /* 12977c478bd9Sstevel@tonic-gate * When any top level VLP page table changes, 12987c478bd9Sstevel@tonic-gate * we must reload cr3 on all processors. 12997c478bd9Sstevel@tonic-gate */ 13007c478bd9Sstevel@tonic-gate #ifdef __i386 13017c478bd9Sstevel@tonic-gate if (mmu.pae_hat && 13027c478bd9Sstevel@tonic-gate #else /* !__i386 */ 13037c478bd9Sstevel@tonic-gate if ((hat->hat_flags & HAT_VLP) && 13047c478bd9Sstevel@tonic-gate #endif /* __i386 */ 13057c478bd9Sstevel@tonic-gate l == VLP_LEVEL - 1) 13067c478bd9Sstevel@tonic-gate hat_demap(hat, DEMAP_ALL_ADDR); 13077c478bd9Sstevel@tonic-gate } 13087c478bd9Sstevel@tonic-gate ht->ht_next = hat->hat_ht_hash[h]; 13097c478bd9Sstevel@tonic-gate ASSERT(ht->ht_prev == NULL); 13107c478bd9Sstevel@tonic-gate if (hat->hat_ht_hash[h]) 13117c478bd9Sstevel@tonic-gate hat->hat_ht_hash[h]->ht_prev = ht; 13127c478bd9Sstevel@tonic-gate hat->hat_ht_hash[h] = ht; 13137c478bd9Sstevel@tonic-gate HTABLE_EXIT(h); 13147c478bd9Sstevel@tonic-gate 13157c478bd9Sstevel@tonic-gate /* 13167c478bd9Sstevel@tonic-gate * Note we don't do htable_release(higher). 13177c478bd9Sstevel@tonic-gate * That happens recursively when "new" is removed by 13187c478bd9Sstevel@tonic-gate * htable_release() or htable_steal(). 
13197c478bd9Sstevel@tonic-gate */ 13207c478bd9Sstevel@tonic-gate higher = ht; 13217c478bd9Sstevel@tonic-gate 13227c478bd9Sstevel@tonic-gate /* 13237c478bd9Sstevel@tonic-gate * If we just created a new shared page table we 13247c478bd9Sstevel@tonic-gate * increment the shared htable's busy count, so that 13257c478bd9Sstevel@tonic-gate * it can't be the victim of a steal even if it's empty. 13267c478bd9Sstevel@tonic-gate */ 13277c478bd9Sstevel@tonic-gate if (l == level && shared) { 13287c478bd9Sstevel@tonic-gate (void) htable_lookup(shared->ht_hat, 13297c478bd9Sstevel@tonic-gate shared->ht_vaddr, shared->ht_level); 13307c478bd9Sstevel@tonic-gate HATSTAT_INC(hs_htable_shared); 13317c478bd9Sstevel@tonic-gate } 13327c478bd9Sstevel@tonic-gate } 13337c478bd9Sstevel@tonic-gate } 13347c478bd9Sstevel@tonic-gate 13357c478bd9Sstevel@tonic-gate return (ht); 13367c478bd9Sstevel@tonic-gate } 13377c478bd9Sstevel@tonic-gate 13387c478bd9Sstevel@tonic-gate /* 13397c478bd9Sstevel@tonic-gate * Walk through a given htable looking for the first valid entry. This 13407c478bd9Sstevel@tonic-gate * routine takes both a starting and ending address. The starting address 13417c478bd9Sstevel@tonic-gate * is required to be within the htable provided by the caller, but there is 13427c478bd9Sstevel@tonic-gate * no such restriction on the ending address. 13437c478bd9Sstevel@tonic-gate * 13447c478bd9Sstevel@tonic-gate * If the routine finds a valid entry in the htable (at or beyond the 13457c478bd9Sstevel@tonic-gate * starting address), the PTE (and its address) will be returned. 13467c478bd9Sstevel@tonic-gate * This PTE may correspond to either a page or a pagetable - it is the 13477c478bd9Sstevel@tonic-gate * caller's responsibility to determine which. If no valid entry is 13487c478bd9Sstevel@tonic-gate * found, 0 (and invalid PTE) and the next unexamined address will be 13497c478bd9Sstevel@tonic-gate * returned. 
 *
 * The loop has been carefully coded for optimization.
 */
static x86pte_t
htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
{
	uint_t e;
	x86pte_t found_pte = (x86pte_t)0;
	char *pte_ptr;
	char *end_pte_ptr;
	int l = ht->ht_level;
	uintptr_t va = *vap & LEVEL_MASK(l);
	size_t pgsize = LEVEL_SIZE(l);

	/* The caller must hand us a VA inside this htable's range. */
	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));

	/*
	 * Compute the starting index and ending virtual address
	 */
	e = htable_va2entry(va, ht);

	/*
	 * The following page table scan code knows that the valid
	 * bit of a PTE is in the lowest byte AND that x86 is little endian!!
	 * It therefore scans one byte per PTE-sized slot rather than
	 * loading each full (possibly 8-byte) entry.
	 */
	pte_ptr = (char *)x86pte_access_pagetable(ht);
	end_pte_ptr = pte_ptr + (ht->ht_num_ptes << mmu.pte_size_shift);
	pte_ptr += e << mmu.pte_size_shift;
	while (*pte_ptr == 0) {
		va += pgsize;
		if (va >= eaddr)
			break;
		pte_ptr += mmu.pte_size;
		ASSERT(pte_ptr <= end_pte_ptr);
		if (pte_ptr == end_pte_ptr)
			break;
	}

	/*
	 * if we found a valid PTE, load the entire PTE.  On PAE the 8-byte
	 * entry must be loaded atomically (ATOMIC_LOAD64).
	 */
	if (va < eaddr && pte_ptr != end_pte_ptr) {
		if (mmu.pae_hat) {
			ATOMIC_LOAD64((x86pte_t *)pte_ptr, found_pte);
		} else {
			found_pte = *(x86pte32_t *)pte_ptr;
		}
	}
	x86pte_release_pagetable(ht);

#if defined(__amd64)
	/*
	 * deal with VA hole on amd64
	 */
	if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end)
		va = mmu.hole_end + va - mmu.hole_start;
#endif /* __amd64 */

	/*
	 * On return *vap is the VA of the entry found, or the next
	 * unexamined VA when nothing valid was found.
	 */
	*vap = va;
	return (found_pte);
}

/*
 * Find the address and htable for the first populated translation at or
 * above the given virtual address.  The caller may also specify an upper
 * limit to the address range to search.  Uses level information to quickly
 * skip unpopulated sections of virtual address spaces.
 *
 * If not found returns NULL.  When found, returns the htable and virt addr
 * and has a hold on the htable.
 */
x86pte_t
htable_walk(
	struct hat *hat,
	htable_t **htp,
	uintptr_t *vaddr,
	uintptr_t eaddr)
{
	uintptr_t va = *vaddr;
	htable_t *ht;
	htable_t *prev = *htp;		/* htable from a previous call, if any */
	level_t l;
	level_t max_mapped_level;
	x86pte_t pte;

	ASSERT(eaddr > va);

	/*
	 * If this is a user address, then we know we need not look beyond
	 * kernelbase.
	 */
	ASSERT(hat == kas.a_hat || eaddr <= kernelbase ||
	    eaddr == HTABLE_WALK_TO_END);
	if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END)
		eaddr = kernelbase;

	/*
	 * If we're coming in with a previous page table, search it first
	 * without doing an htable_lookup(), this should be frequent.
	 */
	if (prev) {
		ASSERT(prev->ht_busy > 0);
		ASSERT(prev->ht_vaddr <= va);
		l = prev->ht_level;
		if (va <= HTABLE_LAST_PAGE(prev)) {
			pte = htable_scan(prev, &va, eaddr);

			if (PTE_ISPAGE(pte, l)) {
				*vaddr = va;
				*htp = prev;
				return (pte);
			}
		}

		/*
		 * We found nothing in the htable provided by the caller,
		 * so fall through and do the full search
		 */
		htable_release(prev);
	}

	/*
	 * Find the level of the largest pagesize used by this HAT.
	 */
	max_mapped_level = 0;
	for (l = 1; l <= mmu.max_page_level; ++l)
		if (hat->hat_pages_mapped[l] != 0)
			max_mapped_level = l;

	/* The va >= *vaddr test also stops the walk on address wrap-around. */
	while (va < eaddr && va >= *vaddr) {
		ASSERT(!IN_VA_HOLE(va));

		/*
		 *  Find lowest table with any entry for given address.
		 */
		for (l = 0; l <= TOP_LEVEL(hat); ++l) {
			ht = htable_lookup(hat, va, l);
			if (ht != NULL) {
				pte = htable_scan(ht, &va, eaddr);
				if (PTE_ISPAGE(pte, l)) {
					*vaddr = va;
					*htp = ht;
					return (pte);
				}
				htable_release(ht);
				break;
			}

			/*
			 * The ht is never NULL at the top level since
			 * the top level htable is created in hat_alloc().
			 */
			ASSERT(l < TOP_LEVEL(hat));

			/*
			 * No htable covers the address. If there is no
			 * larger page size that could cover it, we
			 * skip to the start of the next page table.
			 */
			if (l >= max_mapped_level) {
				va = NEXT_ENTRY_VA(va, l + 1);
				break;
			}
		}
	}

	/* nothing found: report an invalid PTE and clear the outputs */
	*vaddr = 0;
	*htp = NULL;
	return (0);
}

/*
 * Find the htable and page table entry index of the given virtual address
 * with pagesize at or below given level.
 * If not found returns NULL.  When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpte(
	struct hat *hat,
	uintptr_t vaddr,
	uint_t *entry,
	x86pte_t *pte,
	level_t level)
{
	htable_t	*ht;
	level_t		l;
	uint_t		e;

	ASSERT(level <= mmu.max_page_level);

	/* search from the smallest pagesize up; first hit wins */
	for (l = 0; l <= level; ++l) {
		ht = htable_lookup(hat, vaddr, l);
		if (ht == NULL)
			continue;
		e = htable_va2entry(vaddr, ht);
		if (entry != NULL)
			*entry = e;
		if (pte != NULL)
			*pte = x86pte_get(ht, e);
		return (ht);
	}
	return (NULL);
}

/*
 * Find the htable and page table entry index of the given virtual address.
 * There must be a valid page mapped at the given address.
 * If not found returns NULL.  When found, returns the htable, sets
 * entry, and has a hold on the htable.
 */
htable_t *
htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry)
{
	htable_t	*ht;
	uint_t		e;
	x86pte_t	pte;

	ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level);
	if (ht == NULL)
		return (NULL);

	if (entry)
		*entry = e;

	/* only a PTE that maps an actual page qualifies; drop the hold else */
	if (PTE_ISPAGE(pte, ht->ht_level))
		return (ht);
	htable_release(ht);
	return (NULL);
}


/*
 * One-time initialization of the htable kmem cache.
 */
void
htable_init()
{
	/*
	 * To save on kernel VA usage, we avoid debug information in 32 bit
	 * kernels.
	 */
#if defined(__amd64)
	int	kmem_flags = KMC_NOHASH;
#elif defined(__i386)
	int	kmem_flags = KMC_NOHASH | KMC_NODEBUG;
#endif

	/*
	 * initialize kmem caches (no constructor/destructor; htable_reap
	 * is the reclaim callback, backed by hat_memload_arena)
	 */
	htable_cache = kmem_cache_create("htable_t",
	    sizeof (htable_t), 0, NULL, NULL,
	    htable_reap, NULL, hat_memload_arena, kmem_flags);
}

/*
 * get the pte index for the virtual address in the given htable's pagetable
 */
uint_t
htable_va2entry(uintptr_t va, htable_t *ht)
{
	level_t	l = ht->ht_level;

	ASSERT(va >= ht->ht_vaddr);
	ASSERT(va <= HTABLE_LAST_PAGE(ht));
	/* shift out the offset bits for this level, mask to table size */
	return ((va >> LEVEL_SHIFT(l)) & (ht->ht_num_ptes - 1));
}

/*
 * Given an htable and the index of a pte in it, return the virtual address
 * of the page.
 */
uintptr_t
htable_e2va(htable_t *ht, uint_t entry)
{
	level_t	l = ht->ht_level;
	uintptr_t va;

	ASSERT(entry < ht->ht_num_ptes);
	va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));

	/*
	 * Need to skip over any VA hole in top level table
	 */
#if defined(__amd64)
	if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
		va += ((mmu.hole_end - mmu.hole_start) + 1);
#endif

	return (va);
}

/*
 * The code uses compare and swap instructions to read/write PTE's to
 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems.
 * Again this can be optimized on 64 bit systems, since aligned load/store
 * will naturally be atomic.
 *
 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
 * are used to ensure that an interrupt won't overwrite a temporary mapping
 * while it's in use. If an interrupt thread tries to access a PTE, it will
 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
 */

static struct hat_cpu_info init_hci;	/* used for cpu 0 */

/*
 * Initialize a CPU private window for mapping page tables.
 * There will be 3 total pages of addressing needed:
 *
 *	1 for r/w access to pagetables
 *	1 for r access when copying pagetables (hat_alloc)
 *	1 that will map the PTEs for the 1st 2, so we can access them quickly
 *
 * We use vmem_xalloc() to get a correct alignment so that only one
 * hat_mempte_setup() is needed.
 */
void
x86pte_cpu_init(cpu_t *cpu, void *pages)
{
	struct hat_cpu_info *hci;
	caddr_t va;

	/*
	 * We can't use kmem_alloc/vmem_alloc for the 1st CPU, as this is
	 * called before we've activated our own HAT
	 * (cpu 0 uses the static init_hci and the caller-provided pages).
	 */
	if (pages != NULL) {
		hci = &init_hci;
		va = pages;
	} else {
		hci = kmem_alloc(sizeof (struct hat_cpu_info), KM_SLEEP);
		va = vmem_xalloc(heap_arena, 3 * MMU_PAGESIZE, MMU_PAGESIZE, 0,
		    LEVEL_SIZE(1), NULL, NULL, VM_SLEEP);
	}
	mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * If we are using segkpm, then there is no need for any of the
	 * mempte support.  We can access the desired memory through a kpm
	 * mapping rather than setting up a temporary mempte mapping.
	 */
	if (kpm_enable == 0) {
		hci->hci_mapped_pfn = PFN_INVALID;

		hci->hci_kernel_pte =
		    hat_mempte_kern_setup(va, va + (2 * MMU_PAGESIZE));
		hci->hci_pagetable_va = (void *)va;
	}

	cpu->cpu_hat_info = hci;
}

/*
 * Macro to establish temporary mappings for x86pte_XXX routines.
 * Writes the new PTE and then flushes the stale TLB entry for addr.
 */
#define	X86PTE_REMAP(addr, pte, index, perm, pfn)	{		\
	x86pte_t t;							\
									\
	t = MAKEPTE((pfn), 0) | (perm) | mmu.pt_global | mmu.pt_nx;	\
	if (mmu.pae_hat)						\
		pte[index] = t;						\
	else								\
		((x86pte32_t *)(pte))[index] = t;			\
	mmu_tlbflush_entry((caddr_t)(addr));				\
}

/*
 * Disable preemption and establish a mapping to the pagetable with the
 * given pfn. This is optimized for the case where it's the same
 * pfn as we last used referenced from this CPU.
 */
static x86pte_t *
x86pte_access_pagetable(htable_t *ht)
{
	pfn_t pfn;
	struct hat_cpu_info *hci;

	/*
	 * VLP pagetables are contained in the hat_t
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return (ht->ht_hat->hat_vlp_ptes);

	/*
	 * During early boot, use hat_boot_remap() of a page table address.
	 */
	pfn = ht->ht_pfn;
	ASSERT(pfn != PFN_INVALID);
	/* with segkpm the page table is directly addressable; no window */
	if (kpm_enable)
		return ((x86pte_t *)hat_kpm_pfn2va(pfn));

	if (!khat_running) {
		(void) hat_boot_remap(ptable_va, pfn);
		return ((x86pte_t *)ptable_va);
	}

	/*
	 * Normally, disable preemption and grab the CPU's hci_mutex
	 * (paired with x86pte_release_pagetable()).
	 */
	kpreempt_disable();
	hci = CPU->cpu_hat_info;
	ASSERT(hci != NULL);
	mutex_enter(&hci->hci_mutex);
	if (hci->hci_mapped_pfn != pfn) {
		/*
		 * The current mapping doesn't already point to this page.
		 * Update the CPU specific pagetable mapping to map the pfn.
		 */
		X86PTE_REMAP(hci->hci_pagetable_va, hci->hci_kernel_pte, 0,
		    PT_WRITABLE, pfn);
		hci->hci_mapped_pfn = pfn;
	}
	return (hci->hci_pagetable_va);
}

/*
 * Release access to a page table.
 */
static void
x86pte_release_pagetable(htable_t *ht)
{
	struct hat_cpu_info *hci;

	if (kpm_enable)
		return;

	/*
	 * nothing to do for VLP htables
	 */
	if (ht->ht_flags & HTABLE_VLP)
		return;

	/*
	 * During boot-up hat_kern_setup(), erase the boot loader remapping.
	 */
	if (!khat_running) {
		hat_boot_demap(ptable_va);
		return;
	}

	/*
	 * Normal Operation: drop the CPU's hci_mutex and restore preemption
	 */
	hci = CPU->cpu_hat_info;
	ASSERT(hci != NULL);
	mutex_exit(&hci->hci_mutex);
	kpreempt_enable();
}

/*
 * Atomic retrieval of a pagetable entry
 */
x86pte_t
x86pte_get(htable_t *ht, uint_t entry)
{
	x86pte_t	pte;
	x86pte32_t	*pte32p;
	x86pte_t	*ptep;

	/*
	 * Be careful that loading PAE entries in 32 bit kernel is atomic.
	 */
	ptep = x86pte_access_pagetable(ht);
	if (mmu.pae_hat) {
		ATOMIC_LOAD64(ptep + entry, pte);
	} else {
		pte32p = (x86pte32_t *)ptep;
		pte = pte32p[entry];
	}
	x86pte_release_pagetable(ht);
	return (pte);
}

/*
 * Atomic unconditional set of a page table entry, it returns the previous
 * value.  If ptr is non-NULL it is used as the already-mapped PTE address
 * and no pagetable access/release is done here.
 */
x86pte_t
x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
{
	x86pte_t	old;
	x86pte_t	prev, n;
	x86pte_t	*ptep;
	x86pte32_t	*pte32p;
	x86pte32_t	n32, p32;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	if (ptr == NULL) {
		ptep = x86pte_access_pagetable(ht);
		ptep = (void *)((caddr_t)ptep + (entry << mmu.pte_size_shift));
	} else {
		ptep = ptr;
	}

	if (mmu.pae_hat) {
		/* cas64 retry loop; old holds the previous PTE on success */
		for (;;) {
			prev = *ptep;
			n = new;
			/*
			 * prevent potential data loss by preserving the MOD
			 * bit if set in the current PTE and the pfns are the
			 * same. For example, segmap can reissue a read-only
			 * hat_memload on top of a dirty page.
			 */
			if (PTE_ISVALID(prev) && PTE2PFN(prev, ht->ht_level) ==
			    PTE2PFN(n, ht->ht_level)) {
				n |= prev & (PT_REF | PT_MOD);
			}
			if (prev == n) {
				old = new;
				break;
			}
			old = cas64(ptep, prev, n);
			if (old == prev)
				break;
		}
	} else {
		/* same algorithm with 32 bit PTEs and cas32 */
		pte32p = (x86pte32_t *)ptep;
		for (;;) {
			p32 = *pte32p;
			n32 = new;
			if (PTE_ISVALID(p32) && PTE2PFN(p32, ht->ht_level) ==
			    PTE2PFN(n32, ht->ht_level)) {
				n32 |= p32 & (PT_REF | PT_MOD);
			}
			if (p32 == n32) {
				old = new;
				break;
			}
			old = cas32(pte32p, p32, n32);
			if (old == p32)
				break;
		}
	}
	if (ptr == NULL)
		x86pte_release_pagetable(ht);
	return (old);
}

/*
 * Atomic compare and swap of a page table entry.  Returns the value of
 * the entry before the swap attempt.
 */
static x86pte_t
x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new)
{
	x86pte_t	pte;
	x86pte_t	*ptep;
	x86pte32_t	pte32, o32, n32;
	x86pte32_t	*pte32p;

	ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
	ptep = x86pte_access_pagetable(ht);
	if (mmu.pae_hat) {
		pte = cas64(&ptep[entry], old, new);
	} else {
		o32 = old;
		n32 = new;
		pte32p = (x86pte32_t *)ptep;
		pte32 = cas32(&pte32p[entry], o32, n32);
		pte = pte32;
	}
	x86pte_release_pagetable(ht);

	return (pte);
}

/*
 * data structure for cross call information
 */
typedef struct xcall_info {
	x86pte_t	xi_pte;		/* new PTE value for the operation */
	x86pte_t	xi_old;		/* previous PTE value (result) */
	x86pte_t	*xi_pteptr;	/* mapped address of the PTE */
	pfn_t		xi_pfn;		/* pfn the PTE is expected to map */
	processorid_t	xi_cpuid;	/* initiating cpu */
	level_t		xi_level;	/* pagetable level of the PTE */
	xc_func_t	xi_func;	/* cross call service function */
} xcall_info_t;

/*
 * Cross call service function to atomically invalidate a PTE and
flush TLBs 19317c478bd9Sstevel@tonic-gate */ 19327c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 19337c478bd9Sstevel@tonic-gate static int 19347c478bd9Sstevel@tonic-gate x86pte_inval_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 19357c478bd9Sstevel@tonic-gate { 19367c478bd9Sstevel@tonic-gate xcall_info_t *xi = (xcall_info_t *)a1; 19377c478bd9Sstevel@tonic-gate caddr_t addr = (caddr_t)a2; 19387c478bd9Sstevel@tonic-gate 19397c478bd9Sstevel@tonic-gate /* 19407c478bd9Sstevel@tonic-gate * Only the initiating cpu invalidates the page table entry. 19417c478bd9Sstevel@tonic-gate * It returns the previous PTE value to the caller. 19427c478bd9Sstevel@tonic-gate */ 19437c478bd9Sstevel@tonic-gate if (CPU->cpu_id == xi->xi_cpuid) { 19447c478bd9Sstevel@tonic-gate x86pte_t *ptep = xi->xi_pteptr; 19457c478bd9Sstevel@tonic-gate pfn_t pfn = xi->xi_pfn; 19467c478bd9Sstevel@tonic-gate level_t level = xi->xi_level; 19477c478bd9Sstevel@tonic-gate x86pte_t old; 19487c478bd9Sstevel@tonic-gate x86pte_t prev; 19497c478bd9Sstevel@tonic-gate x86pte32_t *pte32p; 19507c478bd9Sstevel@tonic-gate x86pte32_t p32; 19517c478bd9Sstevel@tonic-gate 19527c478bd9Sstevel@tonic-gate if (mmu.pae_hat) { 19537c478bd9Sstevel@tonic-gate for (;;) { 19547c478bd9Sstevel@tonic-gate prev = *ptep; 19557c478bd9Sstevel@tonic-gate if (PTE2PFN(prev, level) != pfn) 19567c478bd9Sstevel@tonic-gate break; 19577c478bd9Sstevel@tonic-gate old = cas64(ptep, prev, 0); 19587c478bd9Sstevel@tonic-gate if (old == prev) 19597c478bd9Sstevel@tonic-gate break; 19607c478bd9Sstevel@tonic-gate } 19617c478bd9Sstevel@tonic-gate } else { 19627c478bd9Sstevel@tonic-gate pte32p = (x86pte32_t *)ptep; 19637c478bd9Sstevel@tonic-gate for (;;) { 19647c478bd9Sstevel@tonic-gate p32 = *pte32p; 19657c478bd9Sstevel@tonic-gate if (PTE2PFN(p32, level) != pfn) 19667c478bd9Sstevel@tonic-gate break; 19677c478bd9Sstevel@tonic-gate old = cas32(pte32p, p32, 0); 19687c478bd9Sstevel@tonic-gate if (old == p32) 19697c478bd9Sstevel@tonic-gate break; 19707c478bd9Sstevel@tonic-gate 
} 19717c478bd9Sstevel@tonic-gate prev = p32; 19727c478bd9Sstevel@tonic-gate } 19737c478bd9Sstevel@tonic-gate xi->xi_pte = prev; 19747c478bd9Sstevel@tonic-gate } 19757c478bd9Sstevel@tonic-gate 19767c478bd9Sstevel@tonic-gate /* 19777c478bd9Sstevel@tonic-gate * For a normal address, we just flush one page mapping 19787c478bd9Sstevel@tonic-gate * Otherwise reload cr3 to effect a complete TLB flush. 19797c478bd9Sstevel@tonic-gate * 19807c478bd9Sstevel@tonic-gate * Note we don't reload VLP pte's -- this assume we never have a 19817c478bd9Sstevel@tonic-gate * large page size at VLP_LEVEL for VLP processes. 19827c478bd9Sstevel@tonic-gate */ 19837c478bd9Sstevel@tonic-gate if ((uintptr_t)addr != DEMAP_ALL_ADDR) { 19847c478bd9Sstevel@tonic-gate mmu_tlbflush_entry(addr); 19857c478bd9Sstevel@tonic-gate } else { 19867c478bd9Sstevel@tonic-gate reload_cr3(); 19877c478bd9Sstevel@tonic-gate } 19887c478bd9Sstevel@tonic-gate return (0); 19897c478bd9Sstevel@tonic-gate } 19907c478bd9Sstevel@tonic-gate 19917c478bd9Sstevel@tonic-gate /* 19927c478bd9Sstevel@tonic-gate * Cross call service function to atomically change a PTE and flush TLBs 19937c478bd9Sstevel@tonic-gate */ 19947c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 19957c478bd9Sstevel@tonic-gate static int 19967c478bd9Sstevel@tonic-gate x86pte_update_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 19977c478bd9Sstevel@tonic-gate { 19987c478bd9Sstevel@tonic-gate xcall_info_t *xi = (xcall_info_t *)a1; 19997c478bd9Sstevel@tonic-gate caddr_t addr = (caddr_t)a2; 20007c478bd9Sstevel@tonic-gate 20017c478bd9Sstevel@tonic-gate /* 20027c478bd9Sstevel@tonic-gate * Only the initiating cpu changes the page table entry. 20037c478bd9Sstevel@tonic-gate * It returns the previous PTE value to the caller. 
20047c478bd9Sstevel@tonic-gate */ 20057c478bd9Sstevel@tonic-gate if (CPU->cpu_id == xi->xi_cpuid) { 20067c478bd9Sstevel@tonic-gate x86pte_t *ptep = xi->xi_pteptr; 20077c478bd9Sstevel@tonic-gate x86pte_t new = xi->xi_pte; 20087c478bd9Sstevel@tonic-gate x86pte_t old = xi->xi_old; 20097c478bd9Sstevel@tonic-gate x86pte_t prev; 20107c478bd9Sstevel@tonic-gate 20117c478bd9Sstevel@tonic-gate if (mmu.pae_hat) { 20127c478bd9Sstevel@tonic-gate prev = cas64(ptep, old, new); 20137c478bd9Sstevel@tonic-gate } else { 20147c478bd9Sstevel@tonic-gate x86pte32_t o32 = old; 20157c478bd9Sstevel@tonic-gate x86pte32_t n32 = new; 20167c478bd9Sstevel@tonic-gate x86pte32_t *pte32p = (x86pte32_t *)ptep; 20177c478bd9Sstevel@tonic-gate prev = cas32(pte32p, o32, n32); 20187c478bd9Sstevel@tonic-gate } 20197c478bd9Sstevel@tonic-gate 20207c478bd9Sstevel@tonic-gate xi->xi_pte = prev; 20217c478bd9Sstevel@tonic-gate } 20227c478bd9Sstevel@tonic-gate 20237c478bd9Sstevel@tonic-gate /* 20247c478bd9Sstevel@tonic-gate * Flush the TLB entry 20257c478bd9Sstevel@tonic-gate */ 20267c478bd9Sstevel@tonic-gate if ((uintptr_t)addr != DEMAP_ALL_ADDR) 20277c478bd9Sstevel@tonic-gate mmu_tlbflush_entry(addr); 20287c478bd9Sstevel@tonic-gate else 20297c478bd9Sstevel@tonic-gate reload_cr3(); 20307c478bd9Sstevel@tonic-gate return (0); 20317c478bd9Sstevel@tonic-gate } 20327c478bd9Sstevel@tonic-gate 20337c478bd9Sstevel@tonic-gate /* 20347c478bd9Sstevel@tonic-gate * Use cross calls to change a page table entry and invalidate TLBs. 
20357c478bd9Sstevel@tonic-gate */ 20367c478bd9Sstevel@tonic-gate void 20377c478bd9Sstevel@tonic-gate x86pte_xcall(hat_t *hat, xcall_info_t *xi, uintptr_t addr) 20387c478bd9Sstevel@tonic-gate { 20397c478bd9Sstevel@tonic-gate cpuset_t cpus; 20407c478bd9Sstevel@tonic-gate 20417c478bd9Sstevel@tonic-gate /* 20427c478bd9Sstevel@tonic-gate * Given the current implementation of hat_share(), doing a 20437c478bd9Sstevel@tonic-gate * hat_pageunload() on a shared page table requries invalidating 20447c478bd9Sstevel@tonic-gate * all user TLB entries on all CPUs. 20457c478bd9Sstevel@tonic-gate */ 20467c478bd9Sstevel@tonic-gate if (hat->hat_flags & HAT_SHARED) { 20477c478bd9Sstevel@tonic-gate hat = kas.a_hat; 20487c478bd9Sstevel@tonic-gate addr = DEMAP_ALL_ADDR; 20497c478bd9Sstevel@tonic-gate } 20507c478bd9Sstevel@tonic-gate 20517c478bd9Sstevel@tonic-gate /* 20527c478bd9Sstevel@tonic-gate * Use a cross call to do the invalidations. 20537c478bd9Sstevel@tonic-gate * Note the current CPU always has to be in the cross call CPU set. 20547c478bd9Sstevel@tonic-gate */ 20557c478bd9Sstevel@tonic-gate kpreempt_disable(); 20567c478bd9Sstevel@tonic-gate xi->xi_cpuid = CPU->cpu_id; 20577c478bd9Sstevel@tonic-gate CPUSET_ZERO(cpus); 20587c478bd9Sstevel@tonic-gate if (hat == kas.a_hat) { 20597c478bd9Sstevel@tonic-gate CPUSET_OR(cpus, khat_cpuset); 20607c478bd9Sstevel@tonic-gate } else { 20617c478bd9Sstevel@tonic-gate mutex_enter(&hat->hat_switch_mutex); 20627c478bd9Sstevel@tonic-gate CPUSET_OR(cpus, hat->hat_cpus); 20637c478bd9Sstevel@tonic-gate CPUSET_ADD(cpus, CPU->cpu_id); 20647c478bd9Sstevel@tonic-gate } 20657c478bd9Sstevel@tonic-gate 20667c478bd9Sstevel@tonic-gate /* 20677c478bd9Sstevel@tonic-gate * Use a cross call to modify the page table entry and invalidate TLBs. 20687c478bd9Sstevel@tonic-gate * If we're panic'ing, don't bother with the cross call. 
20697c478bd9Sstevel@tonic-gate * Note the panicstr check isn't bullet proof and the panic system 20707c478bd9Sstevel@tonic-gate * ought to be made tighter. 20717c478bd9Sstevel@tonic-gate */ 20727c478bd9Sstevel@tonic-gate if (panicstr == NULL) 20737c478bd9Sstevel@tonic-gate xc_wait_sync((xc_arg_t)xi, addr, NULL, X_CALL_HIPRI, 20747c478bd9Sstevel@tonic-gate cpus, xi->xi_func); 20757c478bd9Sstevel@tonic-gate else 20767c478bd9Sstevel@tonic-gate (void) xi->xi_func((xc_arg_t)xi, (xc_arg_t)addr, NULL); 20777c478bd9Sstevel@tonic-gate if (hat != kas.a_hat) 20787c478bd9Sstevel@tonic-gate mutex_exit(&hat->hat_switch_mutex); 20797c478bd9Sstevel@tonic-gate kpreempt_enable(); 20807c478bd9Sstevel@tonic-gate } 20817c478bd9Sstevel@tonic-gate 20827c478bd9Sstevel@tonic-gate /* 20837c478bd9Sstevel@tonic-gate * Invalidate a page table entry if it currently maps the given pfn. 20847c478bd9Sstevel@tonic-gate * This returns the previous value of the PTE. 20857c478bd9Sstevel@tonic-gate */ 20867c478bd9Sstevel@tonic-gate x86pte_t 20877c478bd9Sstevel@tonic-gate x86pte_invalidate_pfn(htable_t *ht, uint_t entry, pfn_t pfn, void *pte_ptr) 20887c478bd9Sstevel@tonic-gate { 20897c478bd9Sstevel@tonic-gate xcall_info_t xi; 20907c478bd9Sstevel@tonic-gate x86pte_t *ptep; 20917c478bd9Sstevel@tonic-gate hat_t *hat; 20927c478bd9Sstevel@tonic-gate uintptr_t addr; 20937c478bd9Sstevel@tonic-gate 20947c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 20957c478bd9Sstevel@tonic-gate if (pte_ptr != NULL) { 20967c478bd9Sstevel@tonic-gate ptep = pte_ptr; 20977c478bd9Sstevel@tonic-gate } else { 20987c478bd9Sstevel@tonic-gate ptep = x86pte_access_pagetable(ht); 20997c478bd9Sstevel@tonic-gate ptep = (void *)((caddr_t)ptep + (entry << mmu.pte_size_shift)); 21007c478bd9Sstevel@tonic-gate } 21017c478bd9Sstevel@tonic-gate 21027c478bd9Sstevel@tonic-gate /* 21037c478bd9Sstevel@tonic-gate * Fill in the structure used by the cross call function to do the 21047c478bd9Sstevel@tonic-gate * invalidation. 
21057c478bd9Sstevel@tonic-gate */ 21067c478bd9Sstevel@tonic-gate xi.xi_pte = 0; 21077c478bd9Sstevel@tonic-gate xi.xi_pteptr = ptep; 21087c478bd9Sstevel@tonic-gate xi.xi_pfn = pfn; 21097c478bd9Sstevel@tonic-gate xi.xi_level = ht->ht_level; 21107c478bd9Sstevel@tonic-gate xi.xi_func = x86pte_inval_func; 21117c478bd9Sstevel@tonic-gate ASSERT(xi.xi_level != VLP_LEVEL); 21127c478bd9Sstevel@tonic-gate 21137c478bd9Sstevel@tonic-gate hat = ht->ht_hat; 21147c478bd9Sstevel@tonic-gate addr = htable_e2va(ht, entry); 21157c478bd9Sstevel@tonic-gate 21167c478bd9Sstevel@tonic-gate x86pte_xcall(hat, &xi, addr); 21177c478bd9Sstevel@tonic-gate 21187c478bd9Sstevel@tonic-gate if (pte_ptr == NULL) 21197c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 21207c478bd9Sstevel@tonic-gate return (xi.xi_pte); 21217c478bd9Sstevel@tonic-gate } 21227c478bd9Sstevel@tonic-gate 21237c478bd9Sstevel@tonic-gate /* 21247c478bd9Sstevel@tonic-gate * update a PTE and invalidate any stale TLB entries. 21257c478bd9Sstevel@tonic-gate */ 21267c478bd9Sstevel@tonic-gate x86pte_t 21277c478bd9Sstevel@tonic-gate x86pte_update(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new) 21287c478bd9Sstevel@tonic-gate { 21297c478bd9Sstevel@tonic-gate xcall_info_t xi; 21307c478bd9Sstevel@tonic-gate x86pte_t *ptep; 21317c478bd9Sstevel@tonic-gate hat_t *hat; 21327c478bd9Sstevel@tonic-gate uintptr_t addr; 21337c478bd9Sstevel@tonic-gate 21347c478bd9Sstevel@tonic-gate ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN)); 21357c478bd9Sstevel@tonic-gate ptep = x86pte_access_pagetable(ht); 21367c478bd9Sstevel@tonic-gate ptep = (void *)((caddr_t)ptep + (entry << mmu.pte_size_shift)); 21377c478bd9Sstevel@tonic-gate 21387c478bd9Sstevel@tonic-gate /* 21397c478bd9Sstevel@tonic-gate * Fill in the structure used by the cross call function to do the 21407c478bd9Sstevel@tonic-gate * invalidation. 
21417c478bd9Sstevel@tonic-gate */ 21427c478bd9Sstevel@tonic-gate xi.xi_pte = new; 21437c478bd9Sstevel@tonic-gate xi.xi_old = expected; 21447c478bd9Sstevel@tonic-gate xi.xi_pteptr = ptep; 21457c478bd9Sstevel@tonic-gate xi.xi_func = x86pte_update_func; 21467c478bd9Sstevel@tonic-gate 21477c478bd9Sstevel@tonic-gate hat = ht->ht_hat; 21487c478bd9Sstevel@tonic-gate addr = htable_e2va(ht, entry); 21497c478bd9Sstevel@tonic-gate 21507c478bd9Sstevel@tonic-gate x86pte_xcall(hat, &xi, addr); 21517c478bd9Sstevel@tonic-gate 21527c478bd9Sstevel@tonic-gate x86pte_release_pagetable(ht); 21537c478bd9Sstevel@tonic-gate return (xi.xi_pte); 21547c478bd9Sstevel@tonic-gate } 21557c478bd9Sstevel@tonic-gate 21567c478bd9Sstevel@tonic-gate /* 21577c478bd9Sstevel@tonic-gate * Copy page tables - this is just a little more complicated than the 21587c478bd9Sstevel@tonic-gate * previous routines. Note that it's also not atomic! It also is never 21597c478bd9Sstevel@tonic-gate * used for VLP pagetables. 21607c478bd9Sstevel@tonic-gate */ 21617c478bd9Sstevel@tonic-gate void 21627c478bd9Sstevel@tonic-gate x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count) 21637c478bd9Sstevel@tonic-gate { 21647c478bd9Sstevel@tonic-gate struct hat_cpu_info *hci; 21657c478bd9Sstevel@tonic-gate caddr_t src_va; 21667c478bd9Sstevel@tonic-gate caddr_t dst_va; 21677c478bd9Sstevel@tonic-gate size_t size; 21687c478bd9Sstevel@tonic-gate 21697c478bd9Sstevel@tonic-gate ASSERT(khat_running); 21707c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 21717c478bd9Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_VLP)); 21727c478bd9Sstevel@tonic-gate ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN)); 21737c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 21747c478bd9Sstevel@tonic-gate 21757c478bd9Sstevel@tonic-gate /* 21767c478bd9Sstevel@tonic-gate * Acquire access to the CPU pagetable window for the destination. 
21777c478bd9Sstevel@tonic-gate */ 21787c478bd9Sstevel@tonic-gate dst_va = (caddr_t)x86pte_access_pagetable(dest); 21797c478bd9Sstevel@tonic-gate if (kpm_enable) { 21807c478bd9Sstevel@tonic-gate src_va = (caddr_t)x86pte_access_pagetable(src); 21817c478bd9Sstevel@tonic-gate } else { 21827c478bd9Sstevel@tonic-gate hci = CPU->cpu_hat_info; 21837c478bd9Sstevel@tonic-gate 21847c478bd9Sstevel@tonic-gate /* 21857c478bd9Sstevel@tonic-gate * Finish defining the src pagetable mapping 21867c478bd9Sstevel@tonic-gate */ 21877c478bd9Sstevel@tonic-gate src_va = dst_va + MMU_PAGESIZE; 21887c478bd9Sstevel@tonic-gate X86PTE_REMAP(src_va, hci->hci_kernel_pte, 1, 0, src->ht_pfn); 21897c478bd9Sstevel@tonic-gate } 21907c478bd9Sstevel@tonic-gate 21917c478bd9Sstevel@tonic-gate /* 21927c478bd9Sstevel@tonic-gate * now do the copy 21937c478bd9Sstevel@tonic-gate */ 21947c478bd9Sstevel@tonic-gate 21957c478bd9Sstevel@tonic-gate dst_va += entry << mmu.pte_size_shift; 21967c478bd9Sstevel@tonic-gate src_va += entry << mmu.pte_size_shift; 21977c478bd9Sstevel@tonic-gate size = count << mmu.pte_size_shift; 21987c478bd9Sstevel@tonic-gate bcopy(src_va, dst_va, size); 21997c478bd9Sstevel@tonic-gate 22007c478bd9Sstevel@tonic-gate x86pte_release_pagetable(dest); 22017c478bd9Sstevel@tonic-gate } 22027c478bd9Sstevel@tonic-gate 22037c478bd9Sstevel@tonic-gate /* 22047c478bd9Sstevel@tonic-gate * Zero page table entries - Note this doesn't use atomic stores! 
22057c478bd9Sstevel@tonic-gate */ 22067c478bd9Sstevel@tonic-gate void 22077c478bd9Sstevel@tonic-gate x86pte_zero(htable_t *dest, uint_t entry, uint_t count) 22087c478bd9Sstevel@tonic-gate { 22097c478bd9Sstevel@tonic-gate caddr_t dst_va; 22107c478bd9Sstevel@tonic-gate x86pte_t *p; 22117c478bd9Sstevel@tonic-gate x86pte32_t *p32; 22127c478bd9Sstevel@tonic-gate size_t size; 22137c478bd9Sstevel@tonic-gate extern void hat_pte_zero(void *, size_t); 22147c478bd9Sstevel@tonic-gate 22157c478bd9Sstevel@tonic-gate /* 22167c478bd9Sstevel@tonic-gate * Map in the page table to be zeroed. 22177c478bd9Sstevel@tonic-gate */ 22187c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN)); 22197c478bd9Sstevel@tonic-gate ASSERT(!(dest->ht_flags & HTABLE_VLP)); 22207c478bd9Sstevel@tonic-gate dst_va = (caddr_t)x86pte_access_pagetable(dest); 22217c478bd9Sstevel@tonic-gate dst_va += entry << mmu.pte_size_shift; 22227c478bd9Sstevel@tonic-gate size = count << mmu.pte_size_shift; 22237c478bd9Sstevel@tonic-gate if (x86_feature & X86_SSE2) { 22247c478bd9Sstevel@tonic-gate hat_pte_zero(dst_va, size); 22257c478bd9Sstevel@tonic-gate } else if (khat_running) { 22267c478bd9Sstevel@tonic-gate bzero(dst_va, size); 22277c478bd9Sstevel@tonic-gate } else { 22287c478bd9Sstevel@tonic-gate /* 22297c478bd9Sstevel@tonic-gate * Can't just use bzero during boot because it checks the 22307c478bd9Sstevel@tonic-gate * address against kernelbase. Instead just use a zero loop. 
22317c478bd9Sstevel@tonic-gate */ 22327c478bd9Sstevel@tonic-gate if (mmu.pae_hat) { 22337c478bd9Sstevel@tonic-gate p = (x86pte_t *)dst_va; 22347c478bd9Sstevel@tonic-gate while (count-- > 0) 22357c478bd9Sstevel@tonic-gate *p++ = 0; 22367c478bd9Sstevel@tonic-gate } else { 22377c478bd9Sstevel@tonic-gate p32 = (x86pte32_t *)dst_va; 22387c478bd9Sstevel@tonic-gate while (count-- > 0) 22397c478bd9Sstevel@tonic-gate *p32++ = 0; 22407c478bd9Sstevel@tonic-gate } 22417c478bd9Sstevel@tonic-gate } 22427c478bd9Sstevel@tonic-gate x86pte_release_pagetable(dest); 22437c478bd9Sstevel@tonic-gate } 22447c478bd9Sstevel@tonic-gate 22457c478bd9Sstevel@tonic-gate /* 22467c478bd9Sstevel@tonic-gate * Called to ensure that all pagetables are in the system dump 22477c478bd9Sstevel@tonic-gate */ 22487c478bd9Sstevel@tonic-gate void 22497c478bd9Sstevel@tonic-gate hat_dump(void) 22507c478bd9Sstevel@tonic-gate { 22517c478bd9Sstevel@tonic-gate hat_t *hat; 22527c478bd9Sstevel@tonic-gate uint_t h; 22537c478bd9Sstevel@tonic-gate htable_t *ht; 22547c478bd9Sstevel@tonic-gate 22557c478bd9Sstevel@tonic-gate /* 2256*a85a6733Sjosephb * Dump all page tables 22577c478bd9Sstevel@tonic-gate */ 2258*a85a6733Sjosephb for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) { 22597c478bd9Sstevel@tonic-gate for (h = 0; h < hat->hat_num_hash; ++h) { 22607c478bd9Sstevel@tonic-gate for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) { 2261*a85a6733Sjosephb if ((ht->ht_flags & HTABLE_VLP) == 0) 22627c478bd9Sstevel@tonic-gate dump_page(ht->ht_pfn); 22637c478bd9Sstevel@tonic-gate } 22647c478bd9Sstevel@tonic-gate } 22657c478bd9Sstevel@tonic-gate } 22667c478bd9Sstevel@tonic-gate } 2267