/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
 */

/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
 * on this 4K page for its top level page table. The remaining groups of
 * 4 entries are used for per processor copies of user VLP pagetables for
 * running threads.  See hat_switch() and reload_pae32() for details.
 *
 *	vlp_page[0..3] - level==2 PTEs for kernel HAT
 *	vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
 *	vlp_page[8..11] - level==2 PTE for user thread on cpu 1
 *	etc...
 */
static x86pte_t *vlp_page;

/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
	x86pte_t new);

/*
 * The kernel address space exists in all HATs. To implement this the
 * kernel reserves a fixed number of entries in the topmost level(s) of page
 * tables. The values are set up during startup and then copied to every user
 * hat created by hat_alloc(). This means that kernelbase must be:
 *
 *	  4Meg aligned for 32 bit kernels
 *	512Gig aligned for x86_64 64 bit kernel
 *
 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
 * to each user hat.
 */
typedef struct hat_kernel_range {
	level_t		hkr_level;
	uintptr_t	hkr_start_va;
	uintptr_t	hkr_end_va;	/* zero means to end of memory */
} hat_kernel_range_t;
#define	NUM_KERNEL_RANGE 2
static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
static int num_kernel_ranges;

uint_t use_boot_reserve = 1;	/* cleared after early boot process */
uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */

/*
 * enable_1gpg: controls 1g page support for user applications.
 * By default, 1g pages are exported to user applications. enable_1gpg can
 * be set to 0 to not export.
 */
int	enable_1gpg = 1;

/*
 * AMD shanghai processors provide better management of 1gb ptes in their TLBs.
 * By default, 1g page support will be disabled for pre-shanghai AMD
 * processors that don't have optimal tlb support for the 1g page size.
 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 * processors.
 */
int	chk_optimal_1gtlb = 1;


#ifdef DEBUG
uint_t	map1gcnt;
#endif


/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t	hat_list_lock;
kcondvar_t	hat_list_cv;
kmem_cache_t	*hat_cache;
kmem_cache_t	*hat_hash_cache;
kmem_cache_t	*vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

/*
 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 * correctly. For such hypervisors we must set PT_USER for kernel
 * entries ourselves (normally the emulation would set PT_USER for
 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
 * thus set appropriately. Note that dboot/kbm is OK, as only the full
 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 * incorrect.
 */
int pt_kern;

/*
 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 */
extern void atomic_orb(uchar_t *addr, uchar_t val);
extern void atomic_andb(uchar_t *addr, uchar_t val);

#ifndef __xpv
extern pfn_t memseg_get_start(struct memseg *);
#endif

#define	PP_GETRM(pp, rmmask)	(pp->p_nrm & rmmask)
#define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
#define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
#define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)

#define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
#define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
#define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
#define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)

#define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
#define	PP_CLRMOD(pp)		PP_CLRRM(pp, P_MOD)
#define	PP_CLRREF(pp)		PP_CLRRM(pp, P_REF)
#define	PP_CLRRO(pp)		PP_CLRRM(pp, P_RO)
#define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)

/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
	hat_t	*hat = buf;

	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
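	/*
	 * hat_pages_mapped[] holds a per-pagesize count of mapped pages;
	 * zero one counter for each supported page level
	 * (0 .. mmu.max_page_level).
	 */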
	bzero(hat->hat_pages_mapped,
	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
	hat->hat_ism_pgcnt = 0;
	hat->hat_stats = 0;
	hat->hat_flags = 0;
	CPUSET_ZERO(hat->hat_cpus);
	hat->hat_htable = NULL;
	hat->hat_ht_hash = NULL;
	return (0);
}

/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */
hat_t *
hat_alloc(struct as *as)
{
	hat_t			*hat;
	htable_t		*ht;	/* top level htable */
	uint_t			use_vlp;
	uint_t			r;
	hat_kernel_range_t	*rp;
	uintptr_t		va;
	uintptr_t		eva;
	uint_t			start;
	uint_t			cnt;
	htable_t		*src;

	/*
	 * Once we start creating user process HATs we can enable
	 * the htable_steal() code.
	 */
	if (can_steal_post_boot == 0)
		can_steal_post_boot = 1;

	ASSERT(AS_WRITE_HELD(as));
	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
	hat->hat_as = as;
	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	ASSERT(hat->hat_flags == 0);

#if defined(__xpv)
	/*
	 * No VLP stuff on the hypervisor due to the 64-bit split top level
	 * page tables.  On 32-bit it's not needed as the hypervisor takes
	 * care of copying the top level PTEs to a below 4Gig page.
	 */
	use_vlp = 0;
#else	/* __xpv */
	/* 32 bit processes use a VLP style hat when running with PAE */
#if defined(__amd64)
	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
#elif defined(__i386)
	use_vlp = mmu.pae_hat;
#endif
#endif	/* __xpv */
	if (use_vlp) {
		hat->hat_flags = HAT_VLP;
		bzero(hat->hat_vlp_ptes, VLP_SIZE);
	}

	/*
	 * Allocate the htable hash
	 */
	if ((hat->hat_flags & HAT_VLP)) {
		hat->hat_num_hash = mmu.vlp_hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
	} else {
		hat->hat_num_hash = mmu.hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
	}
	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));

	/*
	 * Initialize Kernel HAT entries at the top of the top level page
	 * tables for the new hat.
	 */
	hat->hat_htable = NULL;
	hat->hat_ht_cached = NULL;
	XPV_DISALLOW_MIGRATE();
	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
	hat->hat_htable = ht;

#if defined(__amd64)
	if (hat->hat_flags & HAT_VLP)
		goto init_done;
#endif

	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {

			if (rp->hkr_level == TOP_LEVEL(hat))
				ht = hat->hat_htable;
			else
				ht = htable_create(hat, va, rp->hkr_level,
				    NULL);

			start = htable_va2entry(va, ht);
			cnt = HTABLE_NUM_PTES(ht) - start;
			eva = va +
			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
			if (rp->hkr_end_va != 0 &&
			    (eva > rp->hkr_end_va || eva == 0))
				cnt = htable_va2entry(rp->hkr_end_va, ht) -
				    start;

#if defined(__i386) && !defined(__xpv)
			if (ht->ht_flags & HTABLE_VLP) {
				bcopy(&vlp_page[start],
				    &hat->hat_vlp_ptes[start],
				    cnt * sizeof (x86pte_t));
				continue;
			}
#endif
			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
			ASSERT(src != NULL);
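			/*
			 * Copy the kernel's PTEs for this range into the
			 * new hat's pagetable, so the kernel mappings are
			 * shared with the user hat.
			 */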
			x86pte_copy(src, ht, start, cnt);
			htable_release(src);
		}
	}

init_done:

#if defined(__xpv)
	/*
	 * Pin top level page tables after initializing them
	 */
	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
#if defined(__amd64)
	xen_pin(hat->hat_user_ptable, mmu.max_level);
#endif
#endif
	XPV_ALLOW_MIGRATE();

	/*
	 * Put it at the start of the global list of all hats (used by stealing)
	 *
	 * kas.a_hat is not in the list but is instead used to find the
	 * first and last items in the list.
	 *
	 * - kas.a_hat->hat_next points to the start of the user hats.
	 *   The list ends where hat->hat_next == NULL
	 *
	 * - kas.a_hat->hat_prev points to the last of the user hats.
	 *   The list begins where hat->hat_prev == NULL
	 */
	mutex_enter(&hat_list_lock);
	hat->hat_prev = NULL;
	hat->hat_next = kas.a_hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat;
	else
		kas.a_hat->hat_prev = hat;
	kas.a_hat->hat_next = hat;
	mutex_exit(&hat_list_lock);

	return (hat);
}

/*
 * process has finished executing but as has not been cleaned up yet.
 */
/*ARGSUSED*/
void
hat_free_start(hat_t *hat)
{
	ASSERT(AS_WRITE_HELD(hat->hat_as));

	/*
	 * If the hat is currently a stealing victim, wait for the stealing
	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
	 * won't look at its pagetables anymore.
	 */
	mutex_enter(&hat_list_lock);
	while (hat->hat_flags & HAT_VICTIM)
		cv_wait(&hat_list_cv, &hat_list_lock);
	hat->hat_flags |= HAT_FREEING;
	mutex_exit(&hat_list_lock);
}

/*
 * An address space is being destroyed, so we destroy the associated hat.
 */
void
hat_free_end(hat_t *hat)
{
	kmem_cache_t *cache;

	ASSERT(hat->hat_flags & HAT_FREEING);

	/*
	 * must not be running on the given hat
	 */
	ASSERT(CPU->cpu_current_hat != hat);

	/*
	 * Remove it from the list of HATs
	 */
	mutex_enter(&hat_list_lock);
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	mutex_exit(&hat_list_lock);
	hat->hat_next = hat->hat_prev = NULL;

#if defined(__xpv)
	/*
	 * On the hypervisor, unpin top level page table(s)
	 */
	xen_unpin(hat->hat_htable->ht_pfn);
#if defined(__amd64)
	xen_unpin(hat->hat_user_ptable);
#endif
#endif

	/*
	 * Make a pass through the htables freeing them all up.
	 */
	htable_purge_hat(hat);

	/*
	 * Decide which kmem cache the hash table came from, then free it.
	 */
	if (hat->hat_flags & HAT_VLP)
		cache = vlp_hash_cache;
	else
		cache = hat_hash_cache;
	kmem_cache_free(cache, hat->hat_ht_hash);
	hat->hat_ht_hash = NULL;

	hat->hat_flags = 0;
	kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
	va &= LEVEL_MASK(1);
#endif
	if (IN_VA_HOLE(va))
		panic("_userlimit %p will fall in VA hole\n", (void *)va);
	return (va);
}

/*
 * Determine the largest page size (level) supported by the hardware and
 * platform, and record it in mmu.max_page_level / mmu.umax_page_level.
 */
static void
set_max_page_level()
{
	level_t lvl;

	if (!kbm_largepage_support) {
		lvl = 0;
	} else {
		if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
			lvl = 2;
			if (chk_optimal_1gtlb &&
			    cpuid_opteron_erratum(CPU, 6671130)) {
				lvl = 1;
			}
			if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
			    LEVEL_SHIFT(0))) {
				lvl = 1;
			}
		} else {
			lvl = 1;
		}
	}
	mmu.max_page_level = lvl;

	if ((lvl == 2) && (enable_1gpg == 0))
		mmu.umax_page_level = 1;
	else
		mmu.umax_page_level = lvl;
}

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
	uint_t max_htables;
	uint_t pa_bits;
	uint_t va_bits;
	int i;

	/*
	 * If CPU enabled the page table global bit, use it for the kernel
	 * This is bit 7 in CR4 (PGE - Page Global Enable).
	 */
	if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
	    (getcr4() & CR4_PGE) != 0)
		mmu.pt_global = PT_GLOBAL;

	/*
	 * Detect NX and PAE usage.
	 */
	mmu.pae_hat = kbm_pae_support;
	if (kbm_nx_support)
		mmu.pt_nx = PT_NX;
	else
		mmu.pt_nx = 0;

	/*
	 * Use CPU info to set various MMU parameters
	 */
	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

	if (va_bits < sizeof (void *) * NBBY) {
		mmu.hole_start = (1ul << (va_bits - 1));
		mmu.hole_end = 0ul - mmu.hole_start - 1;
	} else {
		mmu.hole_end = 0;
		mmu.hole_start = mmu.hole_end - 1;
	}
#if defined(OPTERON_ERRATUM_121)
	/*
	 * If erratum 121 has already been detected at this time, hole_start
	 * contains the value to be subtracted from mmu.hole_start.
	 */
	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
	hole_start = mmu.hole_start - hole_start;
#else
	hole_start = mmu.hole_start;
#endif
	hole_end = mmu.hole_end;

	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
	if (mmu.pae_hat == 0 && pa_bits > 32)
		mmu.highest_pfn = PFN_4G - 1;

	if (mmu.pae_hat) {
		mmu.pte_size = 8;	/* 8 byte PTEs */
		mmu.pte_size_shift = 3;
	} else {
		mmu.pte_size = 4;	/* 4 byte PTEs */
		mmu.pte_size_shift = 2;
	}

	if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
		panic("Processor does not support PAE");

	if (!is_x86_feature(x86_featureset, X86FSET_CX8))
		panic("Processor does not support cmpxchg8b instruction");

#if defined(__amd64)

	mmu.num_level = 4;
	mmu.max_level = 3;
	mmu.ptes_per_table = 512;
	mmu.top_level_count = 512;

	mmu.level_shift[0] = 12;
	mmu.level_shift[1] = 21;
	mmu.level_shift[2] = 30;
	mmu.level_shift[3] = 39;

#elif defined(__i386)

	if (mmu.pae_hat) {
		mmu.num_level = 3;
		mmu.max_level = 2;
		mmu.ptes_per_table = 512;
		mmu.top_level_count = 4;
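		/* shifts of 12/21/30 give 4K, 2M and 1G page sizes below */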

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 21;
		mmu.level_shift[2] = 30;

	} else {
		mmu.num_level = 2;
		mmu.max_level = 1;
		mmu.ptes_per_table = 1024;
		mmu.top_level_count = 1024;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 22;
	}

#endif	/* __i386 */

	for (i = 0; i < mmu.num_level; ++i) {
		mmu.level_size[i] = 1UL << mmu.level_shift[i];
		mmu.level_offset[i] = mmu.level_size[i] - 1;
		mmu.level_mask[i] = ~mmu.level_offset[i];
	}

	set_max_page_level();

	mmu_page_sizes = mmu.max_page_level + 1;
	mmu_exported_page_sizes = mmu.umax_page_level + 1;

	/* restrict legacy applications from using pagesizes 1g and above */
	mmu_legacy_page_sizes =
	    (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;


	for (i = 0; i <= mmu.max_page_level; ++i) {
		mmu.pte_bits[i] = PT_VALID | pt_kern;
		if (i > 0)
			mmu.pte_bits[i] |= PT_PAGESIZE;
	}

	/*
	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
	 */
	for (i = 1; i < mmu.num_level; ++i)
		mmu.ptp_bits[i] = PT_PTPBITS;

#if defined(__i386)
	mmu.ptp_bits[2] = PT_VALID;
#endif

	/*
	 * Compute how many hash table entries to have per process for htables.
	 * We start with 1 page's worth of entries.
	 *
	 * If physical memory is small, reduce the amount needed to cover it.
	 */
	max_htables = physmax / mmu.ptes_per_table;
	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
		mmu.hash_cnt >>= 1;
	mmu.vlp_hash_cnt = mmu.hash_cnt;

#if defined(__amd64)
	/*
	 * If running in 64 bits and physical memory is large,
	 * increase the size of the cache to cover all of memory for
	 * a 64 bit process.
	 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}


/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
		    NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating on the fly would mean us
	 * running the risk of suffering recursive mutex enters or
	 * deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
	 */
	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
	hci->hci_vlp_pfn =
	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
	ASSERT(pfn != PFN_INVALID);
	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
#endif /* __amd64 && !__xpv */
}

/*ARGSUSED*/
static void
hat_vlp_teardown(cpu_t *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci;

	if ((hci = cpu->cpu_hat_info) == NULL)
		return;
	if (hci->hci_vlp_l2ptes)
		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
	if (hci->hci_vlp_l3ptes)
		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
#endif
}

#define	NEXT_HKR(r, l, s, e) {			\
	kernel_ranges[r].hkr_level = l;		\
	kernel_ranges[r].hkr_start_va = s;	\
	kernel_ranges[r].hkr_end_va = e;	\
	++r;					\
}

/*
 * Finish filling in the kernel hat.
 * Pre fill in all top level kernel page table entries for the kernel's
 * part of the address range. From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level
 *
 * create the kmap mappings.
 */
void
hat_init_finish(void)
{
	size_t		size;
	uint_t		r = 0;
	uintptr_t	va;
	hat_kernel_range_t *rp;


	/*
	 * We are now effectively running on the kernel hat.
	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
	 * reserve for all HAT allocations.  From here on, the reserves are
	 * only used when avoiding recursion in kmem_alloc().
	 */
	use_boot_reserve = 0;
	htable_adjust_reserve();

	/*
	 * User HATs are initialized with copies of all kernel mappings in
	 * higher level page tables. Ensure that those entries exist.
	 */
#if defined(__amd64)

	NEXT_HKR(r, 3, kernelbase, 0);
#if defined(__xpv)
	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
#endif

#elif defined(__i386)

#if !defined(__xpv)
	if (mmu.pae_hat) {
		va = kernelbase;
		if ((va & LEVEL_MASK(2)) != va) {
			va = P2ROUNDUP(va, LEVEL_SIZE(2));
			NEXT_HKR(r, 1, kernelbase, va);
		}
		if (va != 0)
			NEXT_HKR(r, 2, va, 0);
	} else
#endif /* __xpv */
		NEXT_HKR(r, 1, kernelbase, 0);

#endif /* __i386 */

	num_kernel_ranges = r;

	/*
	 * Create all the kernel pagetables that will have entries
	 * shared to user HATs.
	 */
	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += LEVEL_SIZE(rp->hkr_level)) {
			htable_t *ht;

			if (IN_HYPERVISOR_VA(va))
				continue;

			/* can/must skip if a page mapping already exists */
			if (rp->hkr_level <= mmu.max_page_level &&
			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
			    NULL) {
				htable_release(ht);
				continue;
			}

			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
			    NULL);
		}
	}

	/*
	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
	 * page holding the top level pagetable. We use the remainder for
	 * the "per CPU" page tables for VLP processes.
	 * Map the top level kernel pagetable into the kernel to make
	 * it easy to use bcopy to access these tables.
	 */
	if (mmu.pae_hat) {
		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
		    kas.a_hat->hat_htable->ht_pfn,
#if !defined(__xpv)
		    PROT_WRITE |
#endif
		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
		    HAT_LOAD | HAT_LOAD_NOCONSIST);
	}
	hat_vlp_setup(CPU);

	/*
	 * Create kmap (cached mappings of kernel PTEs)
	 * for 32 bit we map from segmap_start .. ekernelheap
	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
	 */
#if defined(__i386)
	size = (uintptr_t)ekernelheap - segmap_start;
#elif defined(__amd64)
	size = segmapsize;
#endif
	hat_kmap_init((uintptr_t)segmap_start, size);
}

/*
 * In 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 * are 32 bit, so for safety we must use atomic_cas_64() to install these.
 */
#ifdef __i386
static void
reload_pae32(hat_t *hat, cpu_t *cpu)
{
	x86pte_t *src;
	x86pte_t *dest;
	x86pte_t pte;
	int i;

	/*
	 * Load the 4 entries of the level 2 page table into this
	 * cpu's range of the vlp_page and point cr3 at them.
	 */
	ASSERT(mmu.pae_hat);
	src = hat->hat_vlp_ptes;
	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
	for (i = 0; i < VLP_NUM_PTES; ++i) {
		for (;;) {
			pte = dest[i];
			if (pte == src[i])
				break;
			if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
				break;
		}
	}
}
#endif

/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 *
 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
 * remains a 32-bit value.
 */
void
hat_switch(hat_t *hat)
{
	uint64_t	newcr3;
	cpu_t		*cpu = CPU;
	hat_t		*old = cpu->cpu_current_hat;

	/*
	 * set up this information first, so we don't miss any cross calls
	 */
	if (old != NULL) {
		if (old == hat)
			return;
		if (old != kas.a_hat)
			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
	}

	/*
	 * Add this CPU to the active set for this HAT.
	 */
	if (hat != kas.a_hat) {
		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
	}
	cpu->cpu_current_hat = hat;

	/*
	 * now go ahead and load cr3
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
#elif defined(__i386)
		reload_pae32(hat, cpu);
		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
		    (cpu->cpu_id + 1) * VLP_SIZE;
#endif
	} else {
		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
	}
#ifdef __xpv
	{
		struct mmuext_op t[2];
		uint_t retcnt;
		uint_t opcnt = 1;

		t[0].cmd = MMUEXT_NEW_BASEPTR;
		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
#if defined(__amd64)
		/*
		 * There's an interesting problem here, as to what to
		 * actually specify when switching to the kernel hat.
		 * For now we'll reuse the kernel hat again.
		 */
		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
		if (hat == kas.a_hat)
			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
		else
			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
		++opcnt;
#endif	/* __amd64 */
		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(retcnt == opcnt);

	}
#else
	setcr3(newcr3);
#endif
	ASSERT(cpu == CPU);
}

/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
	x86pte_t	pte;
	uint_t		cache_attr = attr & HAT_ORDER_MASK;

	pte = MAKEPTE(pfn, level);

	if (attr & PROT_WRITE)
		PTE_SET(pte, PT_WRITABLE);

	if (attr & PROT_USER)
		PTE_SET(pte, PT_USER);

	if (!(attr & PROT_EXEC))
		PTE_SET(pte, mmu.pt_nx);

	/*
	 * Set the software bits used to track ref/mod sync's and hments.
	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
	 */
	if (flags & HAT_LOAD_NOCONSIST)
		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
	else if (attr & HAT_NOSYNC)
		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

	/*
	 * Set the caching attributes in the PTE. The combination
	 * of attributes is poorly defined, so we pay attention
	 * to them in the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but is too late to change!
	 */
	if (cache_attr == HAT_STRICTORDER) {
		PTE_SET(pte, PT_NOCACHE);
	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
		/* nothing to set */;
	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
		PTE_SET(pte, PT_NOCACHE);
		if (is_x86_feature(x86_featureset, X86FSET_PAT))
			PTE_SET(pte,
			    (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
		else
			PTE_SET(pte, PT_WRITETHRU);
	} else {
		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
	}

	return (pte);
}

/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
	ASSERT((uintptr_t)addr < kernelbase);
	ASSERT(new != kas.a_hat);
	ASSERT(old != kas.a_hat);
	return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
	/* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
	uintptr_t	vaddr = (uintptr_t)0;
	uintptr_t	eaddr = _userlimit;
	htable_t	*ht = NULL;
	level_t		l;

	XPV_DISALLOW_MIGRATE();
	/*
	 * We can't just call hat_unload(hat, 0, _userlimit...) here, because
	 * seg_spt and shared pagetables can't be swapped out.
	 * Take a look at segspt_shmswapout() - it's a big no-op.
	 *
	 * Instead we'll walk through all the address space and unload
	 * any mappings which we are sure are not shared, not locked.
11577c478bd9Sstevel@tonic-gate */ 11587c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr)); 11597c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr)); 1160*dc32d872SJosef 'Jeff' Sipek ASSERT(AS_LOCK_HELD(hat->hat_as)); 11617c478bd9Sstevel@tonic-gate if ((uintptr_t)hat->hat_as->a_userlimit < eaddr) 11627c478bd9Sstevel@tonic-gate eaddr = (uintptr_t)hat->hat_as->a_userlimit; 11637c478bd9Sstevel@tonic-gate 11647c478bd9Sstevel@tonic-gate while (vaddr < eaddr) { 11657c478bd9Sstevel@tonic-gate (void) htable_walk(hat, &ht, &vaddr, eaddr); 11667c478bd9Sstevel@tonic-gate if (ht == NULL) 11677c478bd9Sstevel@tonic-gate break; 11687c478bd9Sstevel@tonic-gate 11697c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(vaddr)); 11707c478bd9Sstevel@tonic-gate 11717c478bd9Sstevel@tonic-gate /* 11727c478bd9Sstevel@tonic-gate * If the page table is shared skip its entire range. 11737c478bd9Sstevel@tonic-gate */ 11747c478bd9Sstevel@tonic-gate l = ht->ht_level; 11757c478bd9Sstevel@tonic-gate if (ht->ht_flags & HTABLE_SHARED_PFN) { 1176ae320ee6Speterte vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1); 11777c478bd9Sstevel@tonic-gate htable_release(ht); 11787c478bd9Sstevel@tonic-gate ht = NULL; 11797c478bd9Sstevel@tonic-gate continue; 11807c478bd9Sstevel@tonic-gate } 11817c478bd9Sstevel@tonic-gate 11827c478bd9Sstevel@tonic-gate /* 11837c478bd9Sstevel@tonic-gate * If the page table has no locked entries, unload this one. 11847c478bd9Sstevel@tonic-gate */ 11857c478bd9Sstevel@tonic-gate if (ht->ht_lock_cnt == 0) 11867c478bd9Sstevel@tonic-gate hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l), 11877c478bd9Sstevel@tonic-gate HAT_UNLOAD_UNMAP); 11887c478bd9Sstevel@tonic-gate 11897c478bd9Sstevel@tonic-gate /* 11907c478bd9Sstevel@tonic-gate * If we have a level 0 page table with locked entries, 11917c478bd9Sstevel@tonic-gate * skip the entire page table, otherwise skip just one entry. 11927c478bd9Sstevel@tonic-gate */ 11937c478bd9Sstevel@tonic-gate if (ht->ht_lock_cnt > 0 && l == 0) 11947c478bd9Sstevel@tonic-gate vaddr = ht->ht_vaddr + LEVEL_SIZE(1); 11957c478bd9Sstevel@tonic-gate else 11967c478bd9Sstevel@tonic-gate vaddr += LEVEL_SIZE(l); 11977c478bd9Sstevel@tonic-gate } 11987c478bd9Sstevel@tonic-gate if (ht) 11997c478bd9Sstevel@tonic-gate htable_release(ht); 12007c478bd9Sstevel@tonic-gate 12017c478bd9Sstevel@tonic-gate /* 12027c478bd9Sstevel@tonic-gate * We're in swapout because the system is low on memory, so 12037c478bd9Sstevel@tonic-gate * go back and flush all the htables off the cached list. 12047c478bd9Sstevel@tonic-gate */ 12057c478bd9Sstevel@tonic-gate htable_purge_hat(hat); 1206843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 12077c478bd9Sstevel@tonic-gate } 12087c478bd9Sstevel@tonic-gate 12097c478bd9Sstevel@tonic-gate /* 12107c478bd9Sstevel@tonic-gate * returns number of bytes that have valid mappings in hat. 
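 *
 * For example (assuming the usual 4K and 2M page sizes, so
 * LEVEL_SHIFT(0) == 12 and LEVEL_SHIFT(1) == 21), a hat with 100
 * small mappings and 2 large mappings contributes
 *
 *	(100 << 12) + (2 << 21) = 409600 + 4194304 bytes
 *
 * plus whatever ISM mappings account for via hat_ism_pgcnt below.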
12117c478bd9Sstevel@tonic-gate */ 12127c478bd9Sstevel@tonic-gate size_t 12137c478bd9Sstevel@tonic-gate hat_get_mapped_size(hat_t *hat) 12147c478bd9Sstevel@tonic-gate { 12157c478bd9Sstevel@tonic-gate size_t total = 0; 12167c478bd9Sstevel@tonic-gate int l; 12177c478bd9Sstevel@tonic-gate 12187c478bd9Sstevel@tonic-gate for (l = 0; l <= mmu.max_page_level; l++) 12197c478bd9Sstevel@tonic-gate total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l)); 1220250b7ff9Sjosephb total += hat->hat_ism_pgcnt; 12217c478bd9Sstevel@tonic-gate 12227c478bd9Sstevel@tonic-gate return (total); 12237c478bd9Sstevel@tonic-gate } 12247c478bd9Sstevel@tonic-gate 12257c478bd9Sstevel@tonic-gate /* 12267c478bd9Sstevel@tonic-gate * enable/disable collection of stats for hat. 12277c478bd9Sstevel@tonic-gate */ 12287c478bd9Sstevel@tonic-gate int 12297c478bd9Sstevel@tonic-gate hat_stats_enable(hat_t *hat) 12307c478bd9Sstevel@tonic-gate { 12311a5e258fSJosef 'Jeff' Sipek atomic_inc_32(&hat->hat_stats); 12327c478bd9Sstevel@tonic-gate return (1); 12337c478bd9Sstevel@tonic-gate } 12347c478bd9Sstevel@tonic-gate 12357c478bd9Sstevel@tonic-gate void 12367c478bd9Sstevel@tonic-gate hat_stats_disable(hat_t *hat) 12377c478bd9Sstevel@tonic-gate { 12381a5e258fSJosef 'Jeff' Sipek atomic_dec_32(&hat->hat_stats); 12397c478bd9Sstevel@tonic-gate } 12407c478bd9Sstevel@tonic-gate 12417c478bd9Sstevel@tonic-gate /* 12427c478bd9Sstevel@tonic-gate * Utility to sync the ref/mod bits from a page table entry to the page_t 12437c478bd9Sstevel@tonic-gate * We must be holding the mapping list lock when this is called. 12447c478bd9Sstevel@tonic-gate */ 12457c478bd9Sstevel@tonic-gate static void 12467c478bd9Sstevel@tonic-gate hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level) 12477c478bd9Sstevel@tonic-gate { 12487c478bd9Sstevel@tonic-gate uint_t rm = 0; 12497c478bd9Sstevel@tonic-gate pgcnt_t pgcnt; 12507c478bd9Sstevel@tonic-gate 1251ae115bc7Smrj if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 12527c478bd9Sstevel@tonic-gate return; 12537c478bd9Sstevel@tonic-gate 12547c478bd9Sstevel@tonic-gate if (PTE_GET(pte, PT_REF)) 12557c478bd9Sstevel@tonic-gate rm |= P_REF; 12567c478bd9Sstevel@tonic-gate 12577c478bd9Sstevel@tonic-gate if (PTE_GET(pte, PT_MOD)) 12587c478bd9Sstevel@tonic-gate rm |= P_MOD; 12597c478bd9Sstevel@tonic-gate 12607c478bd9Sstevel@tonic-gate if (rm == 0) 12617c478bd9Sstevel@tonic-gate return; 12627c478bd9Sstevel@tonic-gate 12637c478bd9Sstevel@tonic-gate /* 12647c478bd9Sstevel@tonic-gate * sync to all constituent pages of a large page 12657c478bd9Sstevel@tonic-gate */ 12667c478bd9Sstevel@tonic-gate ASSERT(x86_hm_held(pp)); 12677c478bd9Sstevel@tonic-gate pgcnt = page_get_pagecnt(level); 12687c478bd9Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 12697c478bd9Sstevel@tonic-gate for (; pgcnt > 0; --pgcnt) { 12707c478bd9Sstevel@tonic-gate /* 12717c478bd9Sstevel@tonic-gate * hat_page_demote() can't decrease 12727c478bd9Sstevel@tonic-gate * pszc below this mapping size 12737c478bd9Sstevel@tonic-gate * since this large mapping existed after we 12747c478bd9Sstevel@tonic-gate * took mlist lock. 
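 *
 * For example (assuming the usual 2M large page), syncing a level 1
 * mapping walks page_get_pagecnt(1) == 512 constituent 4K page_t's
 * here, and each of them must still satisfy p_szc >= 1.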
12757c478bd9Sstevel@tonic-gate */
12767c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc >= level);
12777c478bd9Sstevel@tonic-gate hat_page_setattr(pp, rm);
12787c478bd9Sstevel@tonic-gate ++pp;
12797c478bd9Sstevel@tonic-gate }
12807c478bd9Sstevel@tonic-gate }
12817c478bd9Sstevel@tonic-gate
12827c478bd9Sstevel@tonic-gate /*
12837c478bd9Sstevel@tonic-gate * This is the set of PTE bits for PFN, permissions and caching
1284843e1988Sjohnlev * that are allowed to change on a HAT_LOAD_REMAP.
12857c478bd9Sstevel@tonic-gate */
12867c478bd9Sstevel@tonic-gate #define PT_REMAP_BITS \
12877c478bd9Sstevel@tonic-gate (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \
1288843e1988Sjohnlev PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
12897c478bd9Sstevel@tonic-gate
1290b193e412Skchow #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
12917c478bd9Sstevel@tonic-gate /*
12927c478bd9Sstevel@tonic-gate * Do the low-level work to get a mapping entered into a HAT's pagetables
12937c478bd9Sstevel@tonic-gate * and in the mapping list of the associated page_t.
12947c478bd9Sstevel@tonic-gate */
1295ae115bc7Smrj static int
12967c478bd9Sstevel@tonic-gate hati_pte_map(
12977c478bd9Sstevel@tonic-gate htable_t *ht,
12987c478bd9Sstevel@tonic-gate uint_t entry,
12997c478bd9Sstevel@tonic-gate page_t *pp,
13007c478bd9Sstevel@tonic-gate x86pte_t pte,
13017c478bd9Sstevel@tonic-gate int flags,
13027c478bd9Sstevel@tonic-gate void *pte_ptr)
13037c478bd9Sstevel@tonic-gate {
13047c478bd9Sstevel@tonic-gate hat_t *hat = ht->ht_hat;
13057c478bd9Sstevel@tonic-gate x86pte_t old_pte;
13067c478bd9Sstevel@tonic-gate level_t l = ht->ht_level;
13077c478bd9Sstevel@tonic-gate hment_t *hm;
13087c478bd9Sstevel@tonic-gate uint_t is_consist;
130925540de2SJakub Jermar uint_t is_locked;
1310ae115bc7Smrj int rv = 0;
13117c478bd9Sstevel@tonic-gate
13127c478bd9Sstevel@tonic-gate /*
131321584dbcSPavel Tatashin * Is this a consistent (ie. need mapping list lock) mapping?
13147c478bd9Sstevel@tonic-gate */
13157c478bd9Sstevel@tonic-gate is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
13167c478bd9Sstevel@tonic-gate
13177c478bd9Sstevel@tonic-gate /*
13187c478bd9Sstevel@tonic-gate * Track locked mapping count in the htable. Do this first,
13197c478bd9Sstevel@tonic-gate * as we track locking even if there already is a mapping present.
13207c478bd9Sstevel@tonic-gate */
132125540de2SJakub Jermar is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
132225540de2SJakub Jermar if (is_locked)
13237c478bd9Sstevel@tonic-gate HTABLE_LOCK_INC(ht);
13247c478bd9Sstevel@tonic-gate
13257c478bd9Sstevel@tonic-gate /*
13267c478bd9Sstevel@tonic-gate * Acquire the page's mapping list lock and get an hment to use.
13277c478bd9Sstevel@tonic-gate * Note that hment_prepare() might return NULL.
13287c478bd9Sstevel@tonic-gate */
13297c478bd9Sstevel@tonic-gate if (is_consist) {
13307c478bd9Sstevel@tonic-gate x86_hm_enter(pp);
13317c478bd9Sstevel@tonic-gate hm = hment_prepare(ht, entry, pp);
13327c478bd9Sstevel@tonic-gate }
13337c478bd9Sstevel@tonic-gate
13347c478bd9Sstevel@tonic-gate /*
13357c478bd9Sstevel@tonic-gate * Set the new pte, retrieving the old one at the same time.
13367c478bd9Sstevel@tonic-gate */
13377c478bd9Sstevel@tonic-gate old_pte = x86pte_set(ht, entry, pte, pte_ptr);
13387c478bd9Sstevel@tonic-gate
13397c478bd9Sstevel@tonic-gate /*
134025540de2SJakub Jermar * Did we get a large page / page table collision?
1341ae115bc7Smrj */
1342ae115bc7Smrj if (old_pte == LPAGE_ERROR) {
134325540de2SJakub Jermar if (is_locked)
134425540de2SJakub Jermar HTABLE_LOCK_DEC(ht);
1345ae115bc7Smrj rv = -1;
1346ae115bc7Smrj goto done;
1347ae115bc7Smrj }
1348ae115bc7Smrj
1349ae115bc7Smrj /*
13507c478bd9Sstevel@tonic-gate * If the mapping didn't change there is nothing more to do.
13517c478bd9Sstevel@tonic-gate */
1352ae115bc7Smrj if (PTE_EQUIV(pte, old_pte))
1353ae115bc7Smrj goto done;
13547c478bd9Sstevel@tonic-gate
13557c478bd9Sstevel@tonic-gate /*
13567c478bd9Sstevel@tonic-gate * Install a new mapping in the page's mapping list
13577c478bd9Sstevel@tonic-gate */
13587c478bd9Sstevel@tonic-gate if (!PTE_ISVALID(old_pte)) {
13597c478bd9Sstevel@tonic-gate if (is_consist) {
13607c478bd9Sstevel@tonic-gate hment_assign(ht, entry, pp, hm);
13617c478bd9Sstevel@tonic-gate x86_hm_exit(pp);
13627c478bd9Sstevel@tonic-gate } else {
13637c478bd9Sstevel@tonic-gate ASSERT(flags & HAT_LOAD_NOCONSIST);
13647c478bd9Sstevel@tonic-gate }
136502bc52beSkchow #if defined(__amd64)
136602bc52beSkchow if (ht->ht_flags & HTABLE_VLP) {
136702bc52beSkchow cpu_t *cpu = CPU;
136802bc52beSkchow x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
136902bc52beSkchow VLP_COPY(hat->hat_vlp_ptes, vlpptep);
137002bc52beSkchow }
137102bc52beSkchow #endif
13727c478bd9Sstevel@tonic-gate HTABLE_INC(ht->ht_valid_cnt);
13737c478bd9Sstevel@tonic-gate PGCNT_INC(hat, l);
1374ae115bc7Smrj return (rv);
13757c478bd9Sstevel@tonic-gate }
13767c478bd9Sstevel@tonic-gate
13777c478bd9Sstevel@tonic-gate /*
13787c478bd9Sstevel@tonic-gate * Remaps are more complicated:
13797c478bd9Sstevel@tonic-gate * - HAT_LOAD_REMAP must be specified if changing the pfn.
13807c478bd9Sstevel@tonic-gate * We also require that NOCONSIST be specified.
13817c478bd9Sstevel@tonic-gate * - Otherwise only permission or caching bits may change.
13827c478bd9Sstevel@tonic-gate */
13837c478bd9Sstevel@tonic-gate if (!PTE_ISPAGE(old_pte, l))
13847c478bd9Sstevel@tonic-gate panic("non-null/page mapping pte=" FMT_PTE, old_pte);
13857c478bd9Sstevel@tonic-gate
13867c478bd9Sstevel@tonic-gate if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1387b193e412Skchow REMAPASSERT(flags & HAT_LOAD_REMAP);
1388b193e412Skchow REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1389ae115bc7Smrj REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1390b193e412Skchow REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
13917c478bd9Sstevel@tonic-gate pf_is_memory(PTE2PFN(pte, l)));
1392b193e412Skchow REMAPASSERT(!is_consist);
13937c478bd9Sstevel@tonic-gate }
13947c478bd9Sstevel@tonic-gate
13957c478bd9Sstevel@tonic-gate /*
1396843e1988Sjohnlev * We only let remaps change certain bits in the PTE.
13977c478bd9Sstevel@tonic-gate */
1398843e1988Sjohnlev if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1399843e1988Sjohnlev panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1400843e1988Sjohnlev old_pte, pte);
14017c478bd9Sstevel@tonic-gate
14027c478bd9Sstevel@tonic-gate /*
14037c478bd9Sstevel@tonic-gate * We don't create any mapping list entries on a remap, so release
14047c478bd9Sstevel@tonic-gate * any allocated hment after we drop the mapping list lock.
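 *
 * Putting the remap rules above together, a hypothetical caller that
 * only needs to point an existing no-consist mapping at a different
 * frame (new_pfn, addr and attr are stand-ins for the caller's own
 * values) would do something like:
 *
 *	hat_devload(hat, addr, MMU_PAGESIZE, new_pfn, attr,
 *	    HAT_LOAD_REMAP | HAT_LOAD_NOCONSIST);
 *
 * so that only bits covered by PT_REMAP_BITS differ from the old PTE.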
14057c478bd9Sstevel@tonic-gate */ 1406ae115bc7Smrj done: 14077c478bd9Sstevel@tonic-gate if (is_consist) { 14087c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 14097c478bd9Sstevel@tonic-gate if (hm != NULL) 14107c478bd9Sstevel@tonic-gate hment_free(hm); 14117c478bd9Sstevel@tonic-gate } 1412ae115bc7Smrj return (rv); 14137c478bd9Sstevel@tonic-gate } 14147c478bd9Sstevel@tonic-gate 14157c478bd9Sstevel@tonic-gate /* 1416ae115bc7Smrj * Internal routine to load a single page table entry. This only fails if 1417ae115bc7Smrj * we attempt to overwrite a page table link with a large page. 14187c478bd9Sstevel@tonic-gate */ 1419ae115bc7Smrj static int 14207c478bd9Sstevel@tonic-gate hati_load_common( 14217c478bd9Sstevel@tonic-gate hat_t *hat, 14227c478bd9Sstevel@tonic-gate uintptr_t va, 14237c478bd9Sstevel@tonic-gate page_t *pp, 14247c478bd9Sstevel@tonic-gate uint_t attr, 14257c478bd9Sstevel@tonic-gate uint_t flags, 14267c478bd9Sstevel@tonic-gate level_t level, 14277c478bd9Sstevel@tonic-gate pfn_t pfn) 14287c478bd9Sstevel@tonic-gate { 14297c478bd9Sstevel@tonic-gate htable_t *ht; 14307c478bd9Sstevel@tonic-gate uint_t entry; 14317c478bd9Sstevel@tonic-gate x86pte_t pte; 1432ae115bc7Smrj int rv = 0; 14337c478bd9Sstevel@tonic-gate 1434aac11643Sjosephb /* 1435aac11643Sjosephb * The number 16 is arbitrary and here to catch a recursion problem 1436aac11643Sjosephb * early before we blow out the kernel stack. 1437aac11643Sjosephb */ 1438aac11643Sjosephb ++curthread->t_hatdepth; 1439aac11643Sjosephb ASSERT(curthread->t_hatdepth < 16); 1440aac11643Sjosephb 1441*dc32d872SJosef 'Jeff' Sipek ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); 14427c478bd9Sstevel@tonic-gate 14437c478bd9Sstevel@tonic-gate if (flags & HAT_LOAD_SHARE) 14447c478bd9Sstevel@tonic-gate hat->hat_flags |= HAT_SHARED; 14457c478bd9Sstevel@tonic-gate 14467c478bd9Sstevel@tonic-gate /* 14477c478bd9Sstevel@tonic-gate * Find the page table that maps this page if it already exists. 14487c478bd9Sstevel@tonic-gate */ 14497c478bd9Sstevel@tonic-gate ht = htable_lookup(hat, va, level); 14507c478bd9Sstevel@tonic-gate 14517c478bd9Sstevel@tonic-gate /* 1452aac11643Sjosephb * We must have HAT_LOAD_NOCONSIST if page_t is NULL. 
14537c478bd9Sstevel@tonic-gate */ 1454aac11643Sjosephb if (pp == NULL) 14557c478bd9Sstevel@tonic-gate flags |= HAT_LOAD_NOCONSIST; 14567c478bd9Sstevel@tonic-gate 14577c478bd9Sstevel@tonic-gate if (ht == NULL) { 14587c478bd9Sstevel@tonic-gate ht = htable_create(hat, va, level, NULL); 14597c478bd9Sstevel@tonic-gate ASSERT(ht != NULL); 14607c478bd9Sstevel@tonic-gate } 14617c478bd9Sstevel@tonic-gate entry = htable_va2entry(va, ht); 14627c478bd9Sstevel@tonic-gate 14637c478bd9Sstevel@tonic-gate /* 14647c478bd9Sstevel@tonic-gate * a bunch of paranoid error checking 14657c478bd9Sstevel@tonic-gate */ 14667c478bd9Sstevel@tonic-gate ASSERT(ht->ht_busy > 0); 14677c478bd9Sstevel@tonic-gate if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht)) 1468903a11ebSrh87107 panic("hati_load_common: bad htable %p, va %p", 1469903a11ebSrh87107 (void *)ht, (void *)va); 14707c478bd9Sstevel@tonic-gate ASSERT(ht->ht_level == level); 14717c478bd9Sstevel@tonic-gate 14727c478bd9Sstevel@tonic-gate /* 14737c478bd9Sstevel@tonic-gate * construct the new PTE 14747c478bd9Sstevel@tonic-gate */ 14757c478bd9Sstevel@tonic-gate if (hat == kas.a_hat) 14767c478bd9Sstevel@tonic-gate attr &= ~PROT_USER; 14777c478bd9Sstevel@tonic-gate pte = hati_mkpte(pfn, attr, level, flags); 14787c478bd9Sstevel@tonic-gate if (hat == kas.a_hat && va >= kernelbase) 14797c478bd9Sstevel@tonic-gate PTE_SET(pte, mmu.pt_global); 14807c478bd9Sstevel@tonic-gate 14817c478bd9Sstevel@tonic-gate /* 14827c478bd9Sstevel@tonic-gate * establish the mapping 14837c478bd9Sstevel@tonic-gate */ 1484ae115bc7Smrj rv = hati_pte_map(ht, entry, pp, pte, flags, NULL); 14857c478bd9Sstevel@tonic-gate 14867c478bd9Sstevel@tonic-gate /* 14877c478bd9Sstevel@tonic-gate * release the htable and any reserves 14887c478bd9Sstevel@tonic-gate */ 14897c478bd9Sstevel@tonic-gate htable_release(ht); 1490aac11643Sjosephb --curthread->t_hatdepth; 1491ae115bc7Smrj return (rv); 14927c478bd9Sstevel@tonic-gate } 14937c478bd9Sstevel@tonic-gate 14947c478bd9Sstevel@tonic-gate /* 14957c478bd9Sstevel@tonic-gate * special case of hat_memload to deal with some kernel addrs for performance 14967c478bd9Sstevel@tonic-gate */ 14977c478bd9Sstevel@tonic-gate static void 14987c478bd9Sstevel@tonic-gate hat_kmap_load( 14997c478bd9Sstevel@tonic-gate caddr_t addr, 15007c478bd9Sstevel@tonic-gate page_t *pp, 15017c478bd9Sstevel@tonic-gate uint_t attr, 15027c478bd9Sstevel@tonic-gate uint_t flags) 15037c478bd9Sstevel@tonic-gate { 15047c478bd9Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr; 15057c478bd9Sstevel@tonic-gate x86pte_t pte; 15067c478bd9Sstevel@tonic-gate pfn_t pfn = page_pptonum(pp); 15077c478bd9Sstevel@tonic-gate pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr); 15087c478bd9Sstevel@tonic-gate htable_t *ht; 15097c478bd9Sstevel@tonic-gate uint_t entry; 15107c478bd9Sstevel@tonic-gate void *pte_ptr; 15117c478bd9Sstevel@tonic-gate 15127c478bd9Sstevel@tonic-gate /* 15137c478bd9Sstevel@tonic-gate * construct the requested PTE 15147c478bd9Sstevel@tonic-gate */ 15157c478bd9Sstevel@tonic-gate attr &= ~PROT_USER; 15167c478bd9Sstevel@tonic-gate attr |= HAT_STORECACHING_OK; 15177c478bd9Sstevel@tonic-gate pte = hati_mkpte(pfn, attr, 0, flags); 15187c478bd9Sstevel@tonic-gate PTE_SET(pte, mmu.pt_global); 15197c478bd9Sstevel@tonic-gate 15207c478bd9Sstevel@tonic-gate /* 15217c478bd9Sstevel@tonic-gate * Figure out the pte_ptr and htable and use common code to finish up 15227c478bd9Sstevel@tonic-gate */ 15237c478bd9Sstevel@tonic-gate if (mmu.pae_hat) 15247c478bd9Sstevel@tonic-gate pte_ptr = mmu.kmap_ptes + pg_off; 
15257c478bd9Sstevel@tonic-gate else
15267c478bd9Sstevel@tonic-gate pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
15277c478bd9Sstevel@tonic-gate ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
15287c478bd9Sstevel@tonic-gate LEVEL_SHIFT(1)];
15297c478bd9Sstevel@tonic-gate entry = htable_va2entry(va, ht);
1530aac11643Sjosephb ++curthread->t_hatdepth;
1531aac11643Sjosephb ASSERT(curthread->t_hatdepth < 16);
1532ae115bc7Smrj (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1533aac11643Sjosephb --curthread->t_hatdepth;
15347c478bd9Sstevel@tonic-gate }
15357c478bd9Sstevel@tonic-gate
15367c478bd9Sstevel@tonic-gate /*
15377c478bd9Sstevel@tonic-gate * hat_memload() - load a translation to the given page struct
15387c478bd9Sstevel@tonic-gate *
15397c478bd9Sstevel@tonic-gate * Flags for hat_memload/hat_devload/hat_*attr.
15407c478bd9Sstevel@tonic-gate *
15417c478bd9Sstevel@tonic-gate * HAT_LOAD Default flags to load a translation to the page.
15427c478bd9Sstevel@tonic-gate *
15437c478bd9Sstevel@tonic-gate * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(),
15447c478bd9Sstevel@tonic-gate * and hat_devload().
15457c478bd9Sstevel@tonic-gate *
15467c478bd9Sstevel@tonic-gate * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1547ae115bc7Smrj * Sets PT_NOCONSIST.
15487c478bd9Sstevel@tonic-gate *
15497c478bd9Sstevel@tonic-gate * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables
15507c478bd9Sstevel@tonic-gate * that map some user pages (not kas) are shared by more
15517c478bd9Sstevel@tonic-gate * than one process (eg. ISM).
15527c478bd9Sstevel@tonic-gate *
15537c478bd9Sstevel@tonic-gate * HAT_LOAD_REMAP Reload a valid pte with a different page frame.
15547c478bd9Sstevel@tonic-gate *
15557c478bd9Sstevel@tonic-gate * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this
15567c478bd9Sstevel@tonic-gate * point, it's setting up mappings to allocate internal
15577c478bd9Sstevel@tonic-gate * hat layer data structures. This flag forces the hat layer
15587c478bd9Sstevel@tonic-gate * to tap its reserves in order to prevent infinite
15597c478bd9Sstevel@tonic-gate * recursion.
15607c478bd9Sstevel@tonic-gate *
15617c478bd9Sstevel@tonic-gate * The following is a protection attribute (like PROT_READ, etc.)
15627c478bd9Sstevel@tonic-gate *
1563ae115bc7Smrj * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits
15647c478bd9Sstevel@tonic-gate * are never cleared.
15657c478bd9Sstevel@tonic-gate *
15667c478bd9Sstevel@tonic-gate * Installing new valid PTE's and creation of the mapping list
15677c478bd9Sstevel@tonic-gate * entry are controlled under the same lock. It's derived from the
15687c478bd9Sstevel@tonic-gate * page_t being mapped.
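 *
 * An illustrative call (not taken from this file; as, addr and pp are
 * stand-ins for the caller's own state) that maps one page read/write
 * into a user address space:
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE,
 *	    HAT_LOAD);
 *
 * with addr page aligned and pp the page_t backing that address.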
15697c478bd9Sstevel@tonic-gate */ 15707c478bd9Sstevel@tonic-gate static uint_t supported_memload_flags = 15717c478bd9Sstevel@tonic-gate HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST | 15727c478bd9Sstevel@tonic-gate HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT; 15737c478bd9Sstevel@tonic-gate 15747c478bd9Sstevel@tonic-gate void 15757c478bd9Sstevel@tonic-gate hat_memload( 15767c478bd9Sstevel@tonic-gate hat_t *hat, 15777c478bd9Sstevel@tonic-gate caddr_t addr, 15787c478bd9Sstevel@tonic-gate page_t *pp, 15797c478bd9Sstevel@tonic-gate uint_t attr, 15807c478bd9Sstevel@tonic-gate uint_t flags) 15817c478bd9Sstevel@tonic-gate { 15827c478bd9Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr; 15837c478bd9Sstevel@tonic-gate level_t level = 0; 15847c478bd9Sstevel@tonic-gate pfn_t pfn = page_pptonum(pp); 15857c478bd9Sstevel@tonic-gate 1586843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 15877c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va)); 1588ae115bc7Smrj ASSERT(hat == kas.a_hat || va < _userlimit); 1589*dc32d872SJosef 'Jeff' Sipek ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); 15907c478bd9Sstevel@tonic-gate ASSERT((flags & supported_memload_flags) == flags); 15917c478bd9Sstevel@tonic-gate 15927c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 15937c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp)); 15947c478bd9Sstevel@tonic-gate 15957c478bd9Sstevel@tonic-gate /* 15967c478bd9Sstevel@tonic-gate * kernel address special case for performance. 15977c478bd9Sstevel@tonic-gate */ 15987c478bd9Sstevel@tonic-gate if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 15997c478bd9Sstevel@tonic-gate ASSERT(hat == kas.a_hat); 16007c478bd9Sstevel@tonic-gate hat_kmap_load(addr, pp, attr, flags); 1601843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 16027c478bd9Sstevel@tonic-gate return; 16037c478bd9Sstevel@tonic-gate } 16047c478bd9Sstevel@tonic-gate 16057c478bd9Sstevel@tonic-gate /* 16067c478bd9Sstevel@tonic-gate * This is used for memory with normal caching enabled, so 16077c478bd9Sstevel@tonic-gate * always set HAT_STORECACHING_OK. 
16087c478bd9Sstevel@tonic-gate */ 16097c478bd9Sstevel@tonic-gate attr |= HAT_STORECACHING_OK; 1610ae115bc7Smrj if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0) 1611ae115bc7Smrj panic("unexpected hati_load_common() failure"); 1612843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 16137c478bd9Sstevel@tonic-gate } 16147c478bd9Sstevel@tonic-gate 161505d3dc4bSpaulsan /* ARGSUSED */ 161605d3dc4bSpaulsan void 161705d3dc4bSpaulsan hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 161805d3dc4bSpaulsan uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 161905d3dc4bSpaulsan { 162005d3dc4bSpaulsan hat_memload(hat, addr, pp, attr, flags); 162105d3dc4bSpaulsan } 162205d3dc4bSpaulsan 16237c478bd9Sstevel@tonic-gate /* 16247c478bd9Sstevel@tonic-gate * Load the given array of page structs using large pages when possible 16257c478bd9Sstevel@tonic-gate */ 16267c478bd9Sstevel@tonic-gate void 16277c478bd9Sstevel@tonic-gate hat_memload_array( 16287c478bd9Sstevel@tonic-gate hat_t *hat, 16297c478bd9Sstevel@tonic-gate caddr_t addr, 16307c478bd9Sstevel@tonic-gate size_t len, 16317c478bd9Sstevel@tonic-gate page_t **pages, 16327c478bd9Sstevel@tonic-gate uint_t attr, 16337c478bd9Sstevel@tonic-gate uint_t flags) 16347c478bd9Sstevel@tonic-gate { 16357c478bd9Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr; 16367c478bd9Sstevel@tonic-gate uintptr_t eaddr = va + len; 16377c478bd9Sstevel@tonic-gate level_t level; 16387c478bd9Sstevel@tonic-gate size_t pgsize; 16397c478bd9Sstevel@tonic-gate pgcnt_t pgindx = 0; 16407c478bd9Sstevel@tonic-gate pfn_t pfn; 16417c478bd9Sstevel@tonic-gate pgcnt_t i; 16427c478bd9Sstevel@tonic-gate 1643843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 16447c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va)); 1645ae115bc7Smrj ASSERT(hat == kas.a_hat || va + len <= _userlimit); 1646*dc32d872SJosef 'Jeff' Sipek ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); 16477c478bd9Sstevel@tonic-gate ASSERT((flags & supported_memload_flags) == flags); 16487c478bd9Sstevel@tonic-gate 16497c478bd9Sstevel@tonic-gate /* 16507c478bd9Sstevel@tonic-gate * memload is used for memory with full caching enabled, so 16517c478bd9Sstevel@tonic-gate * set HAT_STORECACHING_OK. 16527c478bd9Sstevel@tonic-gate */ 16537c478bd9Sstevel@tonic-gate attr |= HAT_STORECACHING_OK; 16547c478bd9Sstevel@tonic-gate 16557c478bd9Sstevel@tonic-gate /* 16567c478bd9Sstevel@tonic-gate * handle all pages using largest possible pagesize 16577c478bd9Sstevel@tonic-gate */ 16587c478bd9Sstevel@tonic-gate while (va < eaddr) { 16597c478bd9Sstevel@tonic-gate /* 16607c478bd9Sstevel@tonic-gate * decide what level mapping to use (ie. pagesize) 16617c478bd9Sstevel@tonic-gate */ 16627c478bd9Sstevel@tonic-gate pfn = page_pptonum(pages[pgindx]); 16637c478bd9Sstevel@tonic-gate for (level = mmu.max_page_level; ; --level) { 16647c478bd9Sstevel@tonic-gate pgsize = LEVEL_SIZE(level); 16657c478bd9Sstevel@tonic-gate if (level == 0) 16667c478bd9Sstevel@tonic-gate break; 1667ae115bc7Smrj 16687c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(va, pgsize) || 16697c478bd9Sstevel@tonic-gate (eaddr - va) < pgsize || 1670ae115bc7Smrj !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize)) 16717c478bd9Sstevel@tonic-gate continue; 16727c478bd9Sstevel@tonic-gate 16737c478bd9Sstevel@tonic-gate /* 16747c478bd9Sstevel@tonic-gate * To use a large mapping of this size, all the 16757c478bd9Sstevel@tonic-gate * pages we are passed must be sequential subpages 16767c478bd9Sstevel@tonic-gate * of the large page. 
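 *
 * For example (illustrative numbers, assuming 2M level 1 pages): a
 * level 1 mapping needs mmu_btop(2M) == 512 page_t's whose pfns run
 * pfn, pfn + 1, ..., pfn + 511, with the physical address 2M aligned.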
16777c478bd9Sstevel@tonic-gate * hat_page_demote() can't change p_szc because 16787c478bd9Sstevel@tonic-gate * all pages are locked. 16797c478bd9Sstevel@tonic-gate */ 16807c478bd9Sstevel@tonic-gate if (pages[pgindx]->p_szc >= level) { 16817c478bd9Sstevel@tonic-gate for (i = 0; i < mmu_btop(pgsize); ++i) { 16827c478bd9Sstevel@tonic-gate if (pfn + i != 16837c478bd9Sstevel@tonic-gate page_pptonum(pages[pgindx + i])) 16847c478bd9Sstevel@tonic-gate break; 16857c478bd9Sstevel@tonic-gate ASSERT(pages[pgindx + i]->p_szc >= 16867c478bd9Sstevel@tonic-gate level); 16877c478bd9Sstevel@tonic-gate ASSERT(pages[pgindx] + i == 16887c478bd9Sstevel@tonic-gate pages[pgindx + i]); 16897c478bd9Sstevel@tonic-gate } 169002bc52beSkchow if (i == mmu_btop(pgsize)) { 169102bc52beSkchow #ifdef DEBUG 169202bc52beSkchow if (level == 2) 169302bc52beSkchow map1gcnt++; 169402bc52beSkchow #endif 16957c478bd9Sstevel@tonic-gate break; 16967c478bd9Sstevel@tonic-gate } 16977c478bd9Sstevel@tonic-gate } 169802bc52beSkchow } 16997c478bd9Sstevel@tonic-gate 17007c478bd9Sstevel@tonic-gate /* 1701ae115bc7Smrj * Load this page mapping. If the load fails, try a smaller 1702ae115bc7Smrj * pagesize. 17037c478bd9Sstevel@tonic-gate */ 17047c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 1705ae115bc7Smrj while (hati_load_common(hat, va, pages[pgindx], attr, 1706ae115bc7Smrj flags, level, pfn) != 0) { 1707ae115bc7Smrj if (level == 0) 1708ae115bc7Smrj panic("unexpected hati_load_common() failure"); 1709ae115bc7Smrj --level; 1710ae115bc7Smrj pgsize = LEVEL_SIZE(level); 1711ae115bc7Smrj } 17127c478bd9Sstevel@tonic-gate 17137c478bd9Sstevel@tonic-gate /* 17147c478bd9Sstevel@tonic-gate * move to next page 17157c478bd9Sstevel@tonic-gate */ 17167c478bd9Sstevel@tonic-gate va += pgsize; 17177c478bd9Sstevel@tonic-gate pgindx += mmu_btop(pgsize); 17187c478bd9Sstevel@tonic-gate } 1719843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 17207c478bd9Sstevel@tonic-gate } 17217c478bd9Sstevel@tonic-gate 172205d3dc4bSpaulsan /* ARGSUSED */ 172305d3dc4bSpaulsan void 172405d3dc4bSpaulsan hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 172505d3dc4bSpaulsan struct page **pps, uint_t attr, uint_t flags, 172605d3dc4bSpaulsan hat_region_cookie_t rcookie) 172705d3dc4bSpaulsan { 172805d3dc4bSpaulsan hat_memload_array(hat, addr, len, pps, attr, flags); 172905d3dc4bSpaulsan } 173005d3dc4bSpaulsan 17317c478bd9Sstevel@tonic-gate /* 17327c478bd9Sstevel@tonic-gate * void hat_devload(hat, addr, len, pf, attr, flags) 17337c478bd9Sstevel@tonic-gate * load/lock the given page frame number 17347c478bd9Sstevel@tonic-gate * 17357c478bd9Sstevel@tonic-gate * Advisory ordering attributes. Apply only to device mappings. 17367c478bd9Sstevel@tonic-gate * 17377c478bd9Sstevel@tonic-gate * HAT_STRICTORDER: the CPU must issue the references in order, as the 17387c478bd9Sstevel@tonic-gate * programmer specified. This is the default. 17397c478bd9Sstevel@tonic-gate * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds 17407c478bd9Sstevel@tonic-gate * of reordering; store or load with store or load). 17417c478bd9Sstevel@tonic-gate * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores 17427c478bd9Sstevel@tonic-gate * to consecutive locations (for example, turn two consecutive byte 17437c478bd9Sstevel@tonic-gate * stores into one halfword store), and it may batch individual loads 17447c478bd9Sstevel@tonic-gate * (for example, turn two consecutive byte loads into one halfword load). 17457c478bd9Sstevel@tonic-gate * This also implies re-ordering. 
17467c478bd9Sstevel@tonic-gate * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it 17477c478bd9Sstevel@tonic-gate * until another store occurs. The default is to fetch new data 17487c478bd9Sstevel@tonic-gate * on every load. This also implies merging. 17497c478bd9Sstevel@tonic-gate * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to 17507c478bd9Sstevel@tonic-gate * the device (perhaps with other data) at a later time. The default is 17517c478bd9Sstevel@tonic-gate * to push the data right away. This also implies load caching. 17527c478bd9Sstevel@tonic-gate * 17537c478bd9Sstevel@tonic-gate * Equivalent of hat_memload(), but can be used for device memory where 17547c478bd9Sstevel@tonic-gate * there are no page_t's and we support additional flags (write merging, etc). 17557c478bd9Sstevel@tonic-gate * Note that we can have large page mappings with this interface. 17567c478bd9Sstevel@tonic-gate */ 17577c478bd9Sstevel@tonic-gate int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK | 17587c478bd9Sstevel@tonic-gate HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK | 17597c478bd9Sstevel@tonic-gate HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK; 17607c478bd9Sstevel@tonic-gate 17617c478bd9Sstevel@tonic-gate void 17627c478bd9Sstevel@tonic-gate hat_devload( 17637c478bd9Sstevel@tonic-gate hat_t *hat, 17647c478bd9Sstevel@tonic-gate caddr_t addr, 17657c478bd9Sstevel@tonic-gate size_t len, 17667c478bd9Sstevel@tonic-gate pfn_t pfn, 17677c478bd9Sstevel@tonic-gate uint_t attr, 17687c478bd9Sstevel@tonic-gate int flags) 17697c478bd9Sstevel@tonic-gate { 17707c478bd9Sstevel@tonic-gate uintptr_t va = ALIGN2PAGE(addr); 17717c478bd9Sstevel@tonic-gate uintptr_t eva = va + len; 17727c478bd9Sstevel@tonic-gate level_t level; 17737c478bd9Sstevel@tonic-gate size_t pgsize; 17747c478bd9Sstevel@tonic-gate page_t *pp; 17757c478bd9Sstevel@tonic-gate int f; /* per PTE copy of flags - maybe modified */ 17767c478bd9Sstevel@tonic-gate uint_t a; /* per PTE copy of attr */ 17777c478bd9Sstevel@tonic-gate 1778843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 17797c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va)); 1780ae115bc7Smrj ASSERT(hat == kas.a_hat || eva <= _userlimit); 1781*dc32d872SJosef 'Jeff' Sipek ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); 17827c478bd9Sstevel@tonic-gate ASSERT((flags & supported_devload_flags) == flags); 17837c478bd9Sstevel@tonic-gate 17847c478bd9Sstevel@tonic-gate /* 17857c478bd9Sstevel@tonic-gate * handle all pages 17867c478bd9Sstevel@tonic-gate */ 17877c478bd9Sstevel@tonic-gate while (va < eva) { 17887c478bd9Sstevel@tonic-gate 17897c478bd9Sstevel@tonic-gate /* 17907c478bd9Sstevel@tonic-gate * decide what level mapping to use (ie. 
pagesize) 17917c478bd9Sstevel@tonic-gate */ 17927c478bd9Sstevel@tonic-gate for (level = mmu.max_page_level; ; --level) { 17937c478bd9Sstevel@tonic-gate pgsize = LEVEL_SIZE(level); 17947c478bd9Sstevel@tonic-gate if (level == 0) 17957c478bd9Sstevel@tonic-gate break; 17967c478bd9Sstevel@tonic-gate if (IS_P2ALIGNED(va, pgsize) && 17977c478bd9Sstevel@tonic-gate (eva - va) >= pgsize && 179802bc52beSkchow IS_P2ALIGNED(pfn, mmu_btop(pgsize))) { 179902bc52beSkchow #ifdef DEBUG 180002bc52beSkchow if (level == 2) 180102bc52beSkchow map1gcnt++; 180202bc52beSkchow #endif 18037c478bd9Sstevel@tonic-gate break; 18047c478bd9Sstevel@tonic-gate } 180502bc52beSkchow } 18067c478bd9Sstevel@tonic-gate 18077c478bd9Sstevel@tonic-gate /* 1808ae115bc7Smrj * If this is just memory then allow caching (this happens 18097c478bd9Sstevel@tonic-gate * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used 1810ae115bc7Smrj * to override that. If we don't have a page_t then make sure 18117c478bd9Sstevel@tonic-gate * NOCONSIST is set. 18127c478bd9Sstevel@tonic-gate */ 18137c478bd9Sstevel@tonic-gate a = attr; 18147c478bd9Sstevel@tonic-gate f = flags; 1815843e1988Sjohnlev if (!pf_is_memory(pfn)) 1816843e1988Sjohnlev f |= HAT_LOAD_NOCONSIST; 1817843e1988Sjohnlev else if (!(a & HAT_PLAT_NOCACHE)) 18187c478bd9Sstevel@tonic-gate a |= HAT_STORECACHING_OK; 18197c478bd9Sstevel@tonic-gate 18207c478bd9Sstevel@tonic-gate if (f & HAT_LOAD_NOCONSIST) 18217c478bd9Sstevel@tonic-gate pp = NULL; 18227c478bd9Sstevel@tonic-gate else 18237c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(pfn); 18247c478bd9Sstevel@tonic-gate 18257c478bd9Sstevel@tonic-gate /* 1826e803c1e6SPrakash Sangappa * Check to make sure we are really trying to map a valid 1827e803c1e6SPrakash Sangappa * memory page. The caller wishing to intentionally map 1828e803c1e6SPrakash Sangappa * free memory pages will have passed the HAT_LOAD_NOCONSIST 1829e803c1e6SPrakash Sangappa * flag, then pp will be NULL. 
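 *
 * For instance, a driver mapping device registers (no page_t at all;
 * va and pfn are stand-ins for the caller's values) typically arrives
 * here via something like:
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *
 * in which case pp is NULL and both checks are skipped.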
1830e803c1e6SPrakash Sangappa */ 1831e803c1e6SPrakash Sangappa if (pp != NULL) { 1832e803c1e6SPrakash Sangappa if (PP_ISFREE(pp)) { 1833e803c1e6SPrakash Sangappa panic("hat_devload: loading " 1834e803c1e6SPrakash Sangappa "a mapping to free page %p", (void *)pp); 1835e803c1e6SPrakash Sangappa } 1836e803c1e6SPrakash Sangappa 1837e803c1e6SPrakash Sangappa if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1838e803c1e6SPrakash Sangappa panic("hat_devload: loading a mapping " 1839e803c1e6SPrakash Sangappa "to an unlocked page %p", 1840e803c1e6SPrakash Sangappa (void *)pp); 1841e803c1e6SPrakash Sangappa } 1842e803c1e6SPrakash Sangappa } 1843e803c1e6SPrakash Sangappa 1844e803c1e6SPrakash Sangappa /* 18457c478bd9Sstevel@tonic-gate * load this page mapping 18467c478bd9Sstevel@tonic-gate */ 18477c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 1848ae115bc7Smrj while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) { 1849ae115bc7Smrj if (level == 0) 1850ae115bc7Smrj panic("unexpected hati_load_common() failure"); 1851ae115bc7Smrj --level; 1852ae115bc7Smrj pgsize = LEVEL_SIZE(level); 1853ae115bc7Smrj } 18547c478bd9Sstevel@tonic-gate 18557c478bd9Sstevel@tonic-gate /* 18567c478bd9Sstevel@tonic-gate * move to next page 18577c478bd9Sstevel@tonic-gate */ 18587c478bd9Sstevel@tonic-gate va += pgsize; 18597c478bd9Sstevel@tonic-gate pfn += mmu_btop(pgsize); 18607c478bd9Sstevel@tonic-gate } 1861843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 18627c478bd9Sstevel@tonic-gate } 18637c478bd9Sstevel@tonic-gate 18647c478bd9Sstevel@tonic-gate /* 18657c478bd9Sstevel@tonic-gate * void hat_unlock(hat, addr, len) 18667c478bd9Sstevel@tonic-gate * unlock the mappings to a given range of addresses 18677c478bd9Sstevel@tonic-gate * 18687c478bd9Sstevel@tonic-gate * Locks are tracked by ht_lock_cnt in the htable. 
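 *
 * Unlocking is expected to mirror an earlier locked load of the same
 * range, e.g. (illustrative pairing, one page):
 *
 *	hat_memload(hat, addr, pp, attr, HAT_LOAD_LOCK);
 *	...
 *	hat_unlock(hat, addr, MMU_PAGESIZE);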
18697c478bd9Sstevel@tonic-gate */ 18707c478bd9Sstevel@tonic-gate void 18717c478bd9Sstevel@tonic-gate hat_unlock(hat_t *hat, caddr_t addr, size_t len) 18727c478bd9Sstevel@tonic-gate { 18737c478bd9Sstevel@tonic-gate uintptr_t vaddr = (uintptr_t)addr; 18747c478bd9Sstevel@tonic-gate uintptr_t eaddr = vaddr + len; 18757c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 18767c478bd9Sstevel@tonic-gate 18777c478bd9Sstevel@tonic-gate /* 18787c478bd9Sstevel@tonic-gate * kernel entries are always locked, we don't track lock counts 18797c478bd9Sstevel@tonic-gate */ 1880ae115bc7Smrj ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 18817c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr)); 18827c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr)); 18837c478bd9Sstevel@tonic-gate if (hat == kas.a_hat) 18847c478bd9Sstevel@tonic-gate return; 18857c478bd9Sstevel@tonic-gate if (eaddr > _userlimit) 18867c478bd9Sstevel@tonic-gate panic("hat_unlock() address out of range - above _userlimit"); 18877c478bd9Sstevel@tonic-gate 1888843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 1889*dc32d872SJosef 'Jeff' Sipek ASSERT(AS_LOCK_HELD(hat->hat_as)); 18907c478bd9Sstevel@tonic-gate while (vaddr < eaddr) { 18917c478bd9Sstevel@tonic-gate (void) htable_walk(hat, &ht, &vaddr, eaddr); 18927c478bd9Sstevel@tonic-gate if (ht == NULL) 18937c478bd9Sstevel@tonic-gate break; 18947c478bd9Sstevel@tonic-gate 18957c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(vaddr)); 18967c478bd9Sstevel@tonic-gate 18977c478bd9Sstevel@tonic-gate if (ht->ht_lock_cnt < 1) 18987c478bd9Sstevel@tonic-gate panic("hat_unlock(): lock_cnt < 1, " 1899903a11ebSrh87107 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr); 19007c478bd9Sstevel@tonic-gate HTABLE_LOCK_DEC(ht); 19017c478bd9Sstevel@tonic-gate 19027c478bd9Sstevel@tonic-gate vaddr += LEVEL_SIZE(ht->ht_level); 19037c478bd9Sstevel@tonic-gate } 19047c478bd9Sstevel@tonic-gate if (ht) 19057c478bd9Sstevel@tonic-gate htable_release(ht); 1906843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 19077c478bd9Sstevel@tonic-gate } 19087c478bd9Sstevel@tonic-gate 190905d3dc4bSpaulsan /* ARGSUSED */ 191005d3dc4bSpaulsan void 19117dacfc44Spaulsan hat_unlock_region(struct hat *hat, caddr_t addr, size_t len, 191205d3dc4bSpaulsan hat_region_cookie_t rcookie) 191305d3dc4bSpaulsan { 191405d3dc4bSpaulsan panic("No shared region support on x86"); 191505d3dc4bSpaulsan } 191605d3dc4bSpaulsan 1917843e1988Sjohnlev #if !defined(__xpv) 19187c478bd9Sstevel@tonic-gate /* 19197c478bd9Sstevel@tonic-gate * Cross call service routine to demap a virtual page on 19207c478bd9Sstevel@tonic-gate * the current CPU or flush all mappings in TLB. 19217c478bd9Sstevel@tonic-gate */ 19227c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 19237c478bd9Sstevel@tonic-gate static int 19247c478bd9Sstevel@tonic-gate hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 19257c478bd9Sstevel@tonic-gate { 19267c478bd9Sstevel@tonic-gate hat_t *hat = (hat_t *)a1; 19277c478bd9Sstevel@tonic-gate caddr_t addr = (caddr_t)a2; 1928a6a74e0eSMatthew Ahrens size_t len = (size_t)a3; 19297c478bd9Sstevel@tonic-gate 19307c478bd9Sstevel@tonic-gate /* 19317c478bd9Sstevel@tonic-gate * If the target hat isn't the kernel and this CPU isn't operating 19327c478bd9Sstevel@tonic-gate * in the target hat, we can ignore the cross call. 
19337c478bd9Sstevel@tonic-gate */ 19347c478bd9Sstevel@tonic-gate if (hat != kas.a_hat && hat != CPU->cpu_current_hat) 19357c478bd9Sstevel@tonic-gate return (0); 19367c478bd9Sstevel@tonic-gate 19377c478bd9Sstevel@tonic-gate /* 1938a6a74e0eSMatthew Ahrens * For a normal address, we flush a range of contiguous mappings 19397c478bd9Sstevel@tonic-gate */ 19407c478bd9Sstevel@tonic-gate if ((uintptr_t)addr != DEMAP_ALL_ADDR) { 1941a6a74e0eSMatthew Ahrens for (size_t i = 0; i < len; i += MMU_PAGESIZE) 1942a6a74e0eSMatthew Ahrens mmu_tlbflush_entry(addr + i); 19437c478bd9Sstevel@tonic-gate return (0); 19447c478bd9Sstevel@tonic-gate } 19457c478bd9Sstevel@tonic-gate 19467c478bd9Sstevel@tonic-gate /* 19477c478bd9Sstevel@tonic-gate * Otherwise we reload cr3 to effect a complete TLB flush. 19487c478bd9Sstevel@tonic-gate * 19497c478bd9Sstevel@tonic-gate * A reload of cr3 on a VLP process also means we must also recopy in 19507c478bd9Sstevel@tonic-gate * the pte values from the struct hat 19517c478bd9Sstevel@tonic-gate */ 19527c478bd9Sstevel@tonic-gate if (hat->hat_flags & HAT_VLP) { 19537c478bd9Sstevel@tonic-gate #if defined(__amd64) 19547c478bd9Sstevel@tonic-gate x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes; 19557c478bd9Sstevel@tonic-gate 19567c478bd9Sstevel@tonic-gate VLP_COPY(hat->hat_vlp_ptes, vlpptep); 19577c478bd9Sstevel@tonic-gate #elif defined(__i386) 19587c478bd9Sstevel@tonic-gate reload_pae32(hat, CPU); 19597c478bd9Sstevel@tonic-gate #endif 19607c478bd9Sstevel@tonic-gate } 19617c478bd9Sstevel@tonic-gate reload_cr3(); 19627c478bd9Sstevel@tonic-gate return (0); 19637c478bd9Sstevel@tonic-gate } 19647c478bd9Sstevel@tonic-gate 19657c478bd9Sstevel@tonic-gate /* 196695c0a3c8Sjosephb * Flush all TLB entries, including global (ie. kernel) ones. 196795c0a3c8Sjosephb */ 196895c0a3c8Sjosephb static void 196995c0a3c8Sjosephb flush_all_tlb_entries(void) 197095c0a3c8Sjosephb { 197195c0a3c8Sjosephb ulong_t cr4 = getcr4(); 197295c0a3c8Sjosephb 197395c0a3c8Sjosephb if (cr4 & CR4_PGE) { 197495c0a3c8Sjosephb setcr4(cr4 & ~(ulong_t)CR4_PGE); 197595c0a3c8Sjosephb setcr4(cr4); 197695c0a3c8Sjosephb 197795c0a3c8Sjosephb /* 197895c0a3c8Sjosephb * 32 bit PAE also needs to always reload_cr3() 197995c0a3c8Sjosephb */ 198095c0a3c8Sjosephb if (mmu.max_level == 2) 198195c0a3c8Sjosephb reload_cr3(); 198295c0a3c8Sjosephb } else { 198395c0a3c8Sjosephb reload_cr3(); 198495c0a3c8Sjosephb } 198595c0a3c8Sjosephb } 198695c0a3c8Sjosephb 198795c0a3c8Sjosephb #define TLB_CPU_HALTED (01ul) 198895c0a3c8Sjosephb #define TLB_INVAL_ALL (02ul) 198995c0a3c8Sjosephb #define CAS_TLB_INFO(cpu, old, new) \ 199075d94465SJosef 'Jeff' Sipek atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new)) 199195c0a3c8Sjosephb 199295c0a3c8Sjosephb /* 199395c0a3c8Sjosephb * Record that a CPU is going idle 199495c0a3c8Sjosephb */ 199595c0a3c8Sjosephb void 199695c0a3c8Sjosephb tlb_going_idle(void) 199795c0a3c8Sjosephb { 199875d94465SJosef 'Jeff' Sipek atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED); 199995c0a3c8Sjosephb } 200095c0a3c8Sjosephb 200195c0a3c8Sjosephb /* 200295c0a3c8Sjosephb * Service a delayed TLB flush if coming out of being idle. 200321584dbcSPavel Tatashin * It will be called from cpu idle notification with interrupt disabled. 
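 *
 * The handshake with hat_tlb_inval_range() goes roughly like this
 * (a sketch of the existing protocol, not additional code):
 *
 *	idle CPU:	tlb_going_idle() sets TLB_CPU_HALTED
 *	remote CPU:	CAS_TLB_INFO(cpu, TLB_CPU_HALTED,
 *			    TLB_CPU_HALTED | TLB_INVAL_ALL)
 *			instead of sending a cross call
 *	idle CPU:	tlb_service() clears the word on wakeup and, if
 *			TLB_INVAL_ALL was set, calls flush_all_tlb_entries()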
200495c0a3c8Sjosephb */ 200595c0a3c8Sjosephb void 200695c0a3c8Sjosephb tlb_service(void) 200795c0a3c8Sjosephb { 200895c0a3c8Sjosephb ulong_t tlb_info; 200995c0a3c8Sjosephb ulong_t found; 201095c0a3c8Sjosephb 201195c0a3c8Sjosephb /* 201295c0a3c8Sjosephb * We only have to do something if coming out of being idle. 201395c0a3c8Sjosephb */ 201495c0a3c8Sjosephb tlb_info = CPU->cpu_m.mcpu_tlb_info; 201595c0a3c8Sjosephb if (tlb_info & TLB_CPU_HALTED) { 201695c0a3c8Sjosephb ASSERT(CPU->cpu_current_hat == kas.a_hat); 201795c0a3c8Sjosephb 201895c0a3c8Sjosephb /* 201995c0a3c8Sjosephb * Atomic clear and fetch of old state. 202095c0a3c8Sjosephb */ 202195c0a3c8Sjosephb while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) { 202295c0a3c8Sjosephb ASSERT(found & TLB_CPU_HALTED); 202395c0a3c8Sjosephb tlb_info = found; 202495c0a3c8Sjosephb SMT_PAUSE(); 202595c0a3c8Sjosephb } 202695c0a3c8Sjosephb if (tlb_info & TLB_INVAL_ALL) 202795c0a3c8Sjosephb flush_all_tlb_entries(); 202895c0a3c8Sjosephb } 202995c0a3c8Sjosephb } 2030843e1988Sjohnlev #endif /* !__xpv */ 203195c0a3c8Sjosephb 203295c0a3c8Sjosephb /* 20337c478bd9Sstevel@tonic-gate * Internal routine to do cross calls to invalidate a range of pages on 20347c478bd9Sstevel@tonic-gate * all CPUs using a given hat. 20357c478bd9Sstevel@tonic-gate */ 20367c478bd9Sstevel@tonic-gate void 2037a6a74e0eSMatthew Ahrens hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len) 20387c478bd9Sstevel@tonic-gate { 20397c478bd9Sstevel@tonic-gate extern int flushes_require_xcalls; /* from mp_startup.c */ 20407c478bd9Sstevel@tonic-gate cpuset_t justme; 2041ae115bc7Smrj cpuset_t cpus_to_shootdown; 2042843e1988Sjohnlev #ifndef __xpv 2043843e1988Sjohnlev cpuset_t check_cpus; 204495c0a3c8Sjosephb cpu_t *cpup; 204595c0a3c8Sjosephb int c; 2046843e1988Sjohnlev #endif 20477c478bd9Sstevel@tonic-gate 20487c478bd9Sstevel@tonic-gate /* 20497c478bd9Sstevel@tonic-gate * If the hat is being destroyed, there are no more users, so 20507c478bd9Sstevel@tonic-gate * demap need not do anything. 20517c478bd9Sstevel@tonic-gate */ 20527c478bd9Sstevel@tonic-gate if (hat->hat_flags & HAT_FREEING) 20537c478bd9Sstevel@tonic-gate return; 20547c478bd9Sstevel@tonic-gate 20557c478bd9Sstevel@tonic-gate /* 20567c478bd9Sstevel@tonic-gate * If demapping from a shared pagetable, we best demap the 20577c478bd9Sstevel@tonic-gate * entire set of user TLBs, since we don't know what addresses 20587c478bd9Sstevel@tonic-gate * these were shared at. 
20597c478bd9Sstevel@tonic-gate */ 20607c478bd9Sstevel@tonic-gate if (hat->hat_flags & HAT_SHARED) { 20617c478bd9Sstevel@tonic-gate hat = kas.a_hat; 20627c478bd9Sstevel@tonic-gate va = DEMAP_ALL_ADDR; 20637c478bd9Sstevel@tonic-gate } 20647c478bd9Sstevel@tonic-gate 20657c478bd9Sstevel@tonic-gate /* 20667c478bd9Sstevel@tonic-gate * if not running with multiple CPUs, don't use cross calls 20677c478bd9Sstevel@tonic-gate */ 20687c478bd9Sstevel@tonic-gate if (panicstr || !flushes_require_xcalls) { 2069843e1988Sjohnlev #ifdef __xpv 2070a6a74e0eSMatthew Ahrens if (va == DEMAP_ALL_ADDR) { 2071843e1988Sjohnlev xen_flush_tlb(); 2072a6a74e0eSMatthew Ahrens } else { 2073a6a74e0eSMatthew Ahrens for (size_t i = 0; i < len; i += MMU_PAGESIZE) 2074a6a74e0eSMatthew Ahrens xen_flush_va((caddr_t)(va + i)); 2075a6a74e0eSMatthew Ahrens } 2076843e1988Sjohnlev #else 2077a6a74e0eSMatthew Ahrens (void) hati_demap_func((xc_arg_t)hat, 2078a6a74e0eSMatthew Ahrens (xc_arg_t)va, (xc_arg_t)len); 2079843e1988Sjohnlev #endif 20807c478bd9Sstevel@tonic-gate return; 20817c478bd9Sstevel@tonic-gate } 20827c478bd9Sstevel@tonic-gate 20837c478bd9Sstevel@tonic-gate 20847c478bd9Sstevel@tonic-gate /* 2085ae115bc7Smrj * Determine CPUs to shootdown. Kernel changes always do all CPUs. 2086ae115bc7Smrj * Otherwise it's just CPUs currently executing in this hat. 20877c478bd9Sstevel@tonic-gate */ 20887c478bd9Sstevel@tonic-gate kpreempt_disable(); 20897c478bd9Sstevel@tonic-gate CPUSET_ONLY(justme, CPU->cpu_id); 2090ae115bc7Smrj if (hat == kas.a_hat) 2091ae115bc7Smrj cpus_to_shootdown = khat_cpuset; 20927c478bd9Sstevel@tonic-gate else 2093ae115bc7Smrj cpus_to_shootdown = hat->hat_cpus; 2094ae115bc7Smrj 2095843e1988Sjohnlev #ifndef __xpv 209695c0a3c8Sjosephb /* 209795c0a3c8Sjosephb * If any CPUs in the set are idle, just request a delayed flush 209895c0a3c8Sjosephb * and avoid waking them up. 
209995c0a3c8Sjosephb */ 210095c0a3c8Sjosephb check_cpus = cpus_to_shootdown; 210195c0a3c8Sjosephb for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) { 210295c0a3c8Sjosephb ulong_t tlb_info; 210395c0a3c8Sjosephb 210495c0a3c8Sjosephb if (!CPU_IN_SET(check_cpus, c)) 210595c0a3c8Sjosephb continue; 210695c0a3c8Sjosephb CPUSET_DEL(check_cpus, c); 210795c0a3c8Sjosephb cpup = cpu[c]; 210895c0a3c8Sjosephb if (cpup == NULL) 210995c0a3c8Sjosephb continue; 211095c0a3c8Sjosephb 211195c0a3c8Sjosephb tlb_info = cpup->cpu_m.mcpu_tlb_info; 211295c0a3c8Sjosephb while (tlb_info == TLB_CPU_HALTED) { 211395c0a3c8Sjosephb (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED, 211495c0a3c8Sjosephb TLB_CPU_HALTED | TLB_INVAL_ALL); 211595c0a3c8Sjosephb SMT_PAUSE(); 211695c0a3c8Sjosephb tlb_info = cpup->cpu_m.mcpu_tlb_info; 211795c0a3c8Sjosephb } 211895c0a3c8Sjosephb if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) { 211995c0a3c8Sjosephb HATSTAT_INC(hs_tlb_inval_delayed); 212095c0a3c8Sjosephb CPUSET_DEL(cpus_to_shootdown, c); 212195c0a3c8Sjosephb } 212295c0a3c8Sjosephb } 2123843e1988Sjohnlev #endif 212495c0a3c8Sjosephb 2125ae115bc7Smrj if (CPUSET_ISNULL(cpus_to_shootdown) || 2126ae115bc7Smrj CPUSET_ISEQUAL(cpus_to_shootdown, justme)) { 2127ae115bc7Smrj 2128843e1988Sjohnlev #ifdef __xpv 2129a6a74e0eSMatthew Ahrens if (va == DEMAP_ALL_ADDR) { 2130843e1988Sjohnlev xen_flush_tlb(); 2131a6a74e0eSMatthew Ahrens } else { 2132a6a74e0eSMatthew Ahrens for (size_t i = 0; i < len; i += MMU_PAGESIZE) 2133a6a74e0eSMatthew Ahrens xen_flush_va((caddr_t)(va + i)); 2134a6a74e0eSMatthew Ahrens } 2135843e1988Sjohnlev #else 2136a6a74e0eSMatthew Ahrens (void) hati_demap_func((xc_arg_t)hat, 2137a6a74e0eSMatthew Ahrens (xc_arg_t)va, (xc_arg_t)len); 2138843e1988Sjohnlev #endif 2139ae115bc7Smrj 2140ae115bc7Smrj } else { 2141ae115bc7Smrj 2142ae115bc7Smrj CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id); 2143843e1988Sjohnlev #ifdef __xpv 2144a6a74e0eSMatthew Ahrens if (va == DEMAP_ALL_ADDR) { 2145843e1988Sjohnlev xen_gflush_tlb(cpus_to_shootdown); 2146a6a74e0eSMatthew Ahrens } else { 2147a6a74e0eSMatthew Ahrens for (size_t i = 0; i < len; i += MMU_PAGESIZE) { 2148a6a74e0eSMatthew Ahrens xen_gflush_va((caddr_t)(va + i), 2149a6a74e0eSMatthew Ahrens cpus_to_shootdown); 2150a6a74e0eSMatthew Ahrens } 2151a6a74e0eSMatthew Ahrens } 2152843e1988Sjohnlev #else 2153a6a74e0eSMatthew Ahrens xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len, 2154f34a7178SJoe Bonasera CPUSET2BV(cpus_to_shootdown), hati_demap_func); 2155843e1988Sjohnlev #endif 2156ae115bc7Smrj 2157ae115bc7Smrj } 21587c478bd9Sstevel@tonic-gate kpreempt_enable(); 21597c478bd9Sstevel@tonic-gate } 21607c478bd9Sstevel@tonic-gate 2161a6a74e0eSMatthew Ahrens void 2162a6a74e0eSMatthew Ahrens hat_tlb_inval(hat_t *hat, uintptr_t va) 2163a6a74e0eSMatthew Ahrens { 2164a6a74e0eSMatthew Ahrens hat_tlb_inval_range(hat, va, MMU_PAGESIZE); 2165a6a74e0eSMatthew Ahrens } 2166a6a74e0eSMatthew Ahrens 21677c478bd9Sstevel@tonic-gate /* 21687c478bd9Sstevel@tonic-gate * Interior routine for HAT_UNLOADs from hat_unload_callback(), 21697c478bd9Sstevel@tonic-gate * hat_kmap_unload() OR from hat_steal() code. This routine doesn't 21707c478bd9Sstevel@tonic-gate * handle releasing of the htables. 
21717c478bd9Sstevel@tonic-gate */ 21727c478bd9Sstevel@tonic-gate void 21737c478bd9Sstevel@tonic-gate hat_pte_unmap( 21747c478bd9Sstevel@tonic-gate htable_t *ht, 21757c478bd9Sstevel@tonic-gate uint_t entry, 21767c478bd9Sstevel@tonic-gate uint_t flags, 21777c478bd9Sstevel@tonic-gate x86pte_t old_pte, 2178a6a74e0eSMatthew Ahrens void *pte_ptr, 2179a6a74e0eSMatthew Ahrens boolean_t tlb) 21807c478bd9Sstevel@tonic-gate { 21817c478bd9Sstevel@tonic-gate hat_t *hat = ht->ht_hat; 21827c478bd9Sstevel@tonic-gate hment_t *hm = NULL; 21837c478bd9Sstevel@tonic-gate page_t *pp = NULL; 21847c478bd9Sstevel@tonic-gate level_t l = ht->ht_level; 21857c478bd9Sstevel@tonic-gate pfn_t pfn; 21867c478bd9Sstevel@tonic-gate 21877c478bd9Sstevel@tonic-gate /* 21887c478bd9Sstevel@tonic-gate * We always track the locking counts, even if nothing is unmapped 21897c478bd9Sstevel@tonic-gate */ 21907c478bd9Sstevel@tonic-gate if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) { 21917c478bd9Sstevel@tonic-gate ASSERT(ht->ht_lock_cnt > 0); 21927c478bd9Sstevel@tonic-gate HTABLE_LOCK_DEC(ht); 21937c478bd9Sstevel@tonic-gate } 21947c478bd9Sstevel@tonic-gate 21957c478bd9Sstevel@tonic-gate /* 21967c478bd9Sstevel@tonic-gate * Figure out which page's mapping list lock to acquire using the PFN 21977c478bd9Sstevel@tonic-gate * passed in "old" PTE. We then attempt to invalidate the PTE. 21987c478bd9Sstevel@tonic-gate * If another thread, probably a hat_pageunload, has asynchronously 21997c478bd9Sstevel@tonic-gate * unmapped/remapped this address we'll loop here. 22007c478bd9Sstevel@tonic-gate */ 22017c478bd9Sstevel@tonic-gate ASSERT(ht->ht_busy > 0); 22027c478bd9Sstevel@tonic-gate while (PTE_ISVALID(old_pte)) { 22037c478bd9Sstevel@tonic-gate pfn = PTE2PFN(old_pte, l); 2204ae115bc7Smrj if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) { 22057c478bd9Sstevel@tonic-gate pp = NULL; 22067c478bd9Sstevel@tonic-gate } else { 2207843e1988Sjohnlev #ifdef __xpv 2208843e1988Sjohnlev if (pfn == PFN_INVALID) 2209843e1988Sjohnlev panic("Invalid PFN, but not PT_NOCONSIST"); 2210843e1988Sjohnlev #endif 22117c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(pfn); 2212aa2ed9e5Sjosephb if (pp == NULL) { 2213aa2ed9e5Sjosephb panic("no page_t, not NOCONSIST: old_pte=" 2214aa2ed9e5Sjosephb FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx", 2215aa2ed9e5Sjosephb old_pte, (uintptr_t)ht, entry, 2216aa2ed9e5Sjosephb (uintptr_t)pte_ptr); 2217aa2ed9e5Sjosephb } 22187c478bd9Sstevel@tonic-gate x86_hm_enter(pp); 22197c478bd9Sstevel@tonic-gate } 2220aa2ed9e5Sjosephb 2221a6a74e0eSMatthew Ahrens old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb); 22227c478bd9Sstevel@tonic-gate 22237c478bd9Sstevel@tonic-gate /* 22247c478bd9Sstevel@tonic-gate * If the page hadn't changed we've unmapped it and can proceed 22257c478bd9Sstevel@tonic-gate */ 22267c478bd9Sstevel@tonic-gate if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn) 22277c478bd9Sstevel@tonic-gate break; 22287c478bd9Sstevel@tonic-gate 22297c478bd9Sstevel@tonic-gate /* 22307c478bd9Sstevel@tonic-gate * Otherwise, we'll have to retry with the current old_pte. 22317c478bd9Sstevel@tonic-gate * Drop the hment lock, since the pfn may have changed. 
22327c478bd9Sstevel@tonic-gate */ 22337c478bd9Sstevel@tonic-gate if (pp != NULL) { 22347c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 22357c478bd9Sstevel@tonic-gate pp = NULL; 22367c478bd9Sstevel@tonic-gate } else { 2237ae115bc7Smrj ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST); 22387c478bd9Sstevel@tonic-gate } 22397c478bd9Sstevel@tonic-gate } 22407c478bd9Sstevel@tonic-gate 22417c478bd9Sstevel@tonic-gate /* 22427c478bd9Sstevel@tonic-gate * If the old mapping wasn't valid, there's nothing more to do 22437c478bd9Sstevel@tonic-gate */ 22447c478bd9Sstevel@tonic-gate if (!PTE_ISVALID(old_pte)) { 22457c478bd9Sstevel@tonic-gate if (pp != NULL) 22467c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 22477c478bd9Sstevel@tonic-gate return; 22487c478bd9Sstevel@tonic-gate } 22497c478bd9Sstevel@tonic-gate 22507c478bd9Sstevel@tonic-gate /* 22517c478bd9Sstevel@tonic-gate * Take care of syncing any MOD/REF bits and removing the hment. 22527c478bd9Sstevel@tonic-gate */ 22537c478bd9Sstevel@tonic-gate if (pp != NULL) { 22547c478bd9Sstevel@tonic-gate if (!(flags & HAT_UNLOAD_NOSYNC)) 22557c478bd9Sstevel@tonic-gate hati_sync_pte_to_page(pp, old_pte, l); 22567c478bd9Sstevel@tonic-gate hm = hment_remove(pp, ht, entry); 22577c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 22587c478bd9Sstevel@tonic-gate if (hm != NULL) 22597c478bd9Sstevel@tonic-gate hment_free(hm); 22607c478bd9Sstevel@tonic-gate } 22617c478bd9Sstevel@tonic-gate 22627c478bd9Sstevel@tonic-gate /* 22637c478bd9Sstevel@tonic-gate * Handle book keeping in the htable and hat 22647c478bd9Sstevel@tonic-gate */ 22657c478bd9Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0); 22667c478bd9Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt); 22677c478bd9Sstevel@tonic-gate PGCNT_DEC(hat, l); 22687c478bd9Sstevel@tonic-gate } 22697c478bd9Sstevel@tonic-gate 22707c478bd9Sstevel@tonic-gate /* 22717c478bd9Sstevel@tonic-gate * very cheap unload implementation to special case some kernel addresses 22727c478bd9Sstevel@tonic-gate */ 22737c478bd9Sstevel@tonic-gate static void 22747c478bd9Sstevel@tonic-gate hat_kmap_unload(caddr_t addr, size_t len, uint_t flags) 22757c478bd9Sstevel@tonic-gate { 22767c478bd9Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr; 22777c478bd9Sstevel@tonic-gate uintptr_t eva = va + len; 2278ae115bc7Smrj pgcnt_t pg_index; 22797c478bd9Sstevel@tonic-gate htable_t *ht; 22807c478bd9Sstevel@tonic-gate uint_t entry; 2281ae115bc7Smrj x86pte_t *pte_ptr; 22827c478bd9Sstevel@tonic-gate x86pte_t old_pte; 22837c478bd9Sstevel@tonic-gate 22847c478bd9Sstevel@tonic-gate for (; va < eva; va += MMU_PAGESIZE) { 22857c478bd9Sstevel@tonic-gate /* 22867c478bd9Sstevel@tonic-gate * Get the PTE 22877c478bd9Sstevel@tonic-gate */ 2288ae115bc7Smrj pg_index = mmu_btop(va - mmu.kmap_addr); 2289ae115bc7Smrj pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index); 2290ae115bc7Smrj old_pte = GET_PTE(pte_ptr); 22917c478bd9Sstevel@tonic-gate 22927c478bd9Sstevel@tonic-gate /* 22937c478bd9Sstevel@tonic-gate * get the htable / entry 22947c478bd9Sstevel@tonic-gate */ 22957c478bd9Sstevel@tonic-gate ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) 22967c478bd9Sstevel@tonic-gate >> LEVEL_SHIFT(1)]; 22977c478bd9Sstevel@tonic-gate entry = htable_va2entry(va, ht); 22987c478bd9Sstevel@tonic-gate 22997c478bd9Sstevel@tonic-gate /* 23007c478bd9Sstevel@tonic-gate * use mostly common code to unmap it. 
23017c478bd9Sstevel@tonic-gate */ 2302a6a74e0eSMatthew Ahrens hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE); 23037c478bd9Sstevel@tonic-gate } 23047c478bd9Sstevel@tonic-gate } 23057c478bd9Sstevel@tonic-gate 23067c478bd9Sstevel@tonic-gate 23077c478bd9Sstevel@tonic-gate /* 23087c478bd9Sstevel@tonic-gate * unload a range of virtual address space (no callback) 23097c478bd9Sstevel@tonic-gate */ 23107c478bd9Sstevel@tonic-gate void 23117c478bd9Sstevel@tonic-gate hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 23127c478bd9Sstevel@tonic-gate { 23137c478bd9Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr; 2314ae115bc7Smrj 2315843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 2316ae115bc7Smrj ASSERT(hat == kas.a_hat || va + len <= _userlimit); 23177c478bd9Sstevel@tonic-gate 23187c478bd9Sstevel@tonic-gate /* 23197c478bd9Sstevel@tonic-gate * special case for performance. 23207c478bd9Sstevel@tonic-gate */ 23217c478bd9Sstevel@tonic-gate if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 23227c478bd9Sstevel@tonic-gate ASSERT(hat == kas.a_hat); 23237c478bd9Sstevel@tonic-gate hat_kmap_unload(addr, len, flags); 2324ae115bc7Smrj } else { 23257c478bd9Sstevel@tonic-gate hat_unload_callback(hat, addr, len, flags, NULL); 23267c478bd9Sstevel@tonic-gate } 2327843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 2328ae115bc7Smrj } 23297c478bd9Sstevel@tonic-gate 23307c478bd9Sstevel@tonic-gate /* 23317c478bd9Sstevel@tonic-gate * Do the callbacks for ranges being unloaded. 23327c478bd9Sstevel@tonic-gate */ 23337c478bd9Sstevel@tonic-gate typedef struct range_info { 23347c478bd9Sstevel@tonic-gate uintptr_t rng_va; 23357c478bd9Sstevel@tonic-gate ulong_t rng_cnt; 23367c478bd9Sstevel@tonic-gate level_t rng_level; 23377c478bd9Sstevel@tonic-gate } range_info_t; 23387c478bd9Sstevel@tonic-gate 23397c478bd9Sstevel@tonic-gate /* 2340a6a74e0eSMatthew Ahrens * Invalidate the TLB, and perform the callback to the upper level VM system, 2341a6a74e0eSMatthew Ahrens * for the specified ranges of contiguous pages. 
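 *
 * For each range, the length handed to hat_tlb_inval_range() and to the
 * callback is computed from the page count and level recorded in the
 * range_info_t:
 *
 *	len = rng_cnt << LEVEL_SHIFT(rng_level)
 *
 * so, as a purely hypothetical example, 512 contiguous level 0 (4K) pages
 * starting at rng_va would be invalidated and reported as a single 2MB
 * range.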
23427c478bd9Sstevel@tonic-gate */ 2343a6a74e0eSMatthew Ahrens static void 2344a6a74e0eSMatthew Ahrens handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range) 2345a6a74e0eSMatthew Ahrens { 2346a6a74e0eSMatthew Ahrens while (cnt > 0) { 2347a6a74e0eSMatthew Ahrens size_t len; 2348a6a74e0eSMatthew Ahrens 23497c478bd9Sstevel@tonic-gate --cnt; 2350a6a74e0eSMatthew Ahrens len = range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level); 2351a6a74e0eSMatthew Ahrens hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len); 2352a6a74e0eSMatthew Ahrens 2353a6a74e0eSMatthew Ahrens if (cb != NULL) { 23547c478bd9Sstevel@tonic-gate cb->hcb_start_addr = (caddr_t)range[cnt].rng_va; 23557c478bd9Sstevel@tonic-gate cb->hcb_end_addr = cb->hcb_start_addr; 2356a6a74e0eSMatthew Ahrens cb->hcb_end_addr += len; 23577c478bd9Sstevel@tonic-gate cb->hcb_function(cb); 23587c478bd9Sstevel@tonic-gate } 23597c478bd9Sstevel@tonic-gate } 2360a6a74e0eSMatthew Ahrens } 23617c478bd9Sstevel@tonic-gate 23627c478bd9Sstevel@tonic-gate /* 23637c478bd9Sstevel@tonic-gate * Unload a given range of addresses (has optional callback) 23647c478bd9Sstevel@tonic-gate * 23657c478bd9Sstevel@tonic-gate * Flags: 23667c478bd9Sstevel@tonic-gate * define HAT_UNLOAD 0x00 23677c478bd9Sstevel@tonic-gate * define HAT_UNLOAD_NOSYNC 0x02 23687c478bd9Sstevel@tonic-gate * define HAT_UNLOAD_UNLOCK 0x04 23697c478bd9Sstevel@tonic-gate * define HAT_UNLOAD_OTHER 0x08 - not used 23707c478bd9Sstevel@tonic-gate * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD 23717c478bd9Sstevel@tonic-gate */ 23727c478bd9Sstevel@tonic-gate #define MAX_UNLOAD_CNT (8) 23737c478bd9Sstevel@tonic-gate void 23747c478bd9Sstevel@tonic-gate hat_unload_callback( 23757c478bd9Sstevel@tonic-gate hat_t *hat, 23767c478bd9Sstevel@tonic-gate caddr_t addr, 23777c478bd9Sstevel@tonic-gate size_t len, 23787c478bd9Sstevel@tonic-gate uint_t flags, 23797c478bd9Sstevel@tonic-gate hat_callback_t *cb) 23807c478bd9Sstevel@tonic-gate { 23817c478bd9Sstevel@tonic-gate uintptr_t vaddr = (uintptr_t)addr; 23827c478bd9Sstevel@tonic-gate uintptr_t eaddr = vaddr + len; 23837c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 23847c478bd9Sstevel@tonic-gate uint_t entry; 2385aa2ed9e5Sjosephb uintptr_t contig_va = (uintptr_t)-1L; 23867c478bd9Sstevel@tonic-gate range_info_t r[MAX_UNLOAD_CNT]; 23877c478bd9Sstevel@tonic-gate uint_t r_cnt = 0; 23887c478bd9Sstevel@tonic-gate x86pte_t old_pte; 23897c478bd9Sstevel@tonic-gate 2390843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 2391ae115bc7Smrj ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 23927c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr)); 23937c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr)); 23947c478bd9Sstevel@tonic-gate 2395ae115bc7Smrj /* 2396ae115bc7Smrj * Special case a single page being unloaded for speed. This happens 2397ae115bc7Smrj * quite frequently, COW faults after a fork() for example. 
2398ae115bc7Smrj */ 2399ae115bc7Smrj if (cb == NULL && len == MMU_PAGESIZE) { 2400ae115bc7Smrj ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0); 2401ae115bc7Smrj if (ht != NULL) { 2402a6a74e0eSMatthew Ahrens if (PTE_ISVALID(old_pte)) { 2403a6a74e0eSMatthew Ahrens hat_pte_unmap(ht, entry, flags, old_pte, 2404a6a74e0eSMatthew Ahrens NULL, B_TRUE); 2405a6a74e0eSMatthew Ahrens } 2406ae115bc7Smrj htable_release(ht); 2407ae115bc7Smrj } 2408843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 2409ae115bc7Smrj return; 2410ae115bc7Smrj } 2411ae115bc7Smrj 24127c478bd9Sstevel@tonic-gate while (vaddr < eaddr) { 24137c478bd9Sstevel@tonic-gate old_pte = htable_walk(hat, &ht, &vaddr, eaddr); 24147c478bd9Sstevel@tonic-gate if (ht == NULL) 24157c478bd9Sstevel@tonic-gate break; 24167c478bd9Sstevel@tonic-gate 24177c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(vaddr)); 24187c478bd9Sstevel@tonic-gate 24197c478bd9Sstevel@tonic-gate if (vaddr < (uintptr_t)addr) 24207c478bd9Sstevel@tonic-gate panic("hat_unload_callback(): unmap inside large page"); 24217c478bd9Sstevel@tonic-gate 24227c478bd9Sstevel@tonic-gate /* 24237c478bd9Sstevel@tonic-gate * We'll do the call backs for contiguous ranges 24247c478bd9Sstevel@tonic-gate */ 2425aa2ed9e5Sjosephb if (vaddr != contig_va || 24267c478bd9Sstevel@tonic-gate (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) { 24277c478bd9Sstevel@tonic-gate if (r_cnt == MAX_UNLOAD_CNT) { 2428a6a74e0eSMatthew Ahrens handle_ranges(hat, cb, r_cnt, r); 24297c478bd9Sstevel@tonic-gate r_cnt = 0; 24307c478bd9Sstevel@tonic-gate } 24317c478bd9Sstevel@tonic-gate r[r_cnt].rng_va = vaddr; 24327c478bd9Sstevel@tonic-gate r[r_cnt].rng_cnt = 0; 24337c478bd9Sstevel@tonic-gate r[r_cnt].rng_level = ht->ht_level; 24347c478bd9Sstevel@tonic-gate ++r_cnt; 24357c478bd9Sstevel@tonic-gate } 24367c478bd9Sstevel@tonic-gate 24377c478bd9Sstevel@tonic-gate /* 2438a6a74e0eSMatthew Ahrens * Unload one mapping (for a single page) from the page tables. 2439a6a74e0eSMatthew Ahrens * Note that we do not remove the mapping from the TLB yet, 2440a6a74e0eSMatthew Ahrens * as indicated by the tlb=FALSE argument to hat_pte_unmap(). 2441a6a74e0eSMatthew Ahrens * handle_ranges() will clear the TLB entries with one call to 2442a6a74e0eSMatthew Ahrens * hat_tlb_inval_range() per contiguous range. This is 2443a6a74e0eSMatthew Ahrens * safe because the page can not be reused until the 2444a6a74e0eSMatthew Ahrens * callback is made (or we return). 24457c478bd9Sstevel@tonic-gate */ 24467c478bd9Sstevel@tonic-gate entry = htable_va2entry(vaddr, ht); 2447a6a74e0eSMatthew Ahrens hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE); 24487c478bd9Sstevel@tonic-gate ASSERT(ht->ht_level <= mmu.max_page_level); 24497c478bd9Sstevel@tonic-gate vaddr += LEVEL_SIZE(ht->ht_level); 2450aa2ed9e5Sjosephb contig_va = vaddr; 24517c478bd9Sstevel@tonic-gate ++r[r_cnt - 1].rng_cnt; 24527c478bd9Sstevel@tonic-gate } 24537c478bd9Sstevel@tonic-gate if (ht) 24547c478bd9Sstevel@tonic-gate htable_release(ht); 24557c478bd9Sstevel@tonic-gate 24567c478bd9Sstevel@tonic-gate /* 24577c478bd9Sstevel@tonic-gate * handle last range for callbacks 24587c478bd9Sstevel@tonic-gate */ 24597c478bd9Sstevel@tonic-gate if (r_cnt > 0) 2460a6a74e0eSMatthew Ahrens handle_ranges(hat, cb, r_cnt, r); 2461843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 24627c478bd9Sstevel@tonic-gate } 24637c478bd9Sstevel@tonic-gate 24647c478bd9Sstevel@tonic-gate /* 24659e0b7c70SDave Plauger * Invalidate a virtual address translation on a slave CPU during 24669e0b7c70SDave Plauger * panic() dumps. 
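 * If any address in the range turns out to have no mapping at all
 * (hat_getpagesize() returns -1 below), we give up on per-entry
 * invalidation, flush the entire TLB and stop.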
2467ca3e8d88SDave Plauger */ 2468ca3e8d88SDave Plauger void 2469ca3e8d88SDave Plauger hat_flush_range(hat_t *hat, caddr_t va, size_t size) 2470ca3e8d88SDave Plauger { 2471ca3e8d88SDave Plauger ssize_t sz; 2472ca3e8d88SDave Plauger caddr_t endva = va + size; 2473ca3e8d88SDave Plauger 2474ca3e8d88SDave Plauger while (va < endva) { 2475ca3e8d88SDave Plauger sz = hat_getpagesize(hat, va); 24769e0b7c70SDave Plauger if (sz < 0) { 2477ca3e8d88SDave Plauger #ifdef __xpv 2478ca3e8d88SDave Plauger xen_flush_tlb(); 24799e0b7c70SDave Plauger #else 24809e0b7c70SDave Plauger flush_all_tlb_entries(); 24819e0b7c70SDave Plauger #endif 24829e0b7c70SDave Plauger break; 24839e0b7c70SDave Plauger } 24849e0b7c70SDave Plauger #ifdef __xpv 2485ca3e8d88SDave Plauger xen_flush_va(va); 2486ca3e8d88SDave Plauger #else 24879e0b7c70SDave Plauger mmu_tlbflush_entry(va); 2488ca3e8d88SDave Plauger #endif 2489ca3e8d88SDave Plauger va += sz; 2490ca3e8d88SDave Plauger } 2491ca3e8d88SDave Plauger } 2492ca3e8d88SDave Plauger 2493ca3e8d88SDave Plauger /* 24947c478bd9Sstevel@tonic-gate * synchronize mapping with software data structures 24957c478bd9Sstevel@tonic-gate * 24967c478bd9Sstevel@tonic-gate * This interface is currently only used by the working set monitor 24977c478bd9Sstevel@tonic-gate * driver. 24987c478bd9Sstevel@tonic-gate */ 24997c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 25007c478bd9Sstevel@tonic-gate void 25017c478bd9Sstevel@tonic-gate hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 25027c478bd9Sstevel@tonic-gate { 25037c478bd9Sstevel@tonic-gate uintptr_t vaddr = (uintptr_t)addr; 25047c478bd9Sstevel@tonic-gate uintptr_t eaddr = vaddr + len; 25057c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 25067c478bd9Sstevel@tonic-gate uint_t entry; 25077c478bd9Sstevel@tonic-gate x86pte_t pte; 25087c478bd9Sstevel@tonic-gate x86pte_t save_pte; 25097c478bd9Sstevel@tonic-gate x86pte_t new; 25107c478bd9Sstevel@tonic-gate page_t *pp; 25117c478bd9Sstevel@tonic-gate 25127c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(vaddr)); 25137c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr)); 25147c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr)); 2515ae115bc7Smrj ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 25167c478bd9Sstevel@tonic-gate 2517843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 25187c478bd9Sstevel@tonic-gate for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 25197c478bd9Sstevel@tonic-gate try_again: 25207c478bd9Sstevel@tonic-gate pte = htable_walk(hat, &ht, &vaddr, eaddr); 25217c478bd9Sstevel@tonic-gate if (ht == NULL) 25227c478bd9Sstevel@tonic-gate break; 25237c478bd9Sstevel@tonic-gate entry = htable_va2entry(vaddr, ht); 25247c478bd9Sstevel@tonic-gate 2525ae115bc7Smrj if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 25267c478bd9Sstevel@tonic-gate PTE_GET(pte, PT_REF | PT_MOD) == 0) 25277c478bd9Sstevel@tonic-gate continue; 25287c478bd9Sstevel@tonic-gate 25297c478bd9Sstevel@tonic-gate /* 25307c478bd9Sstevel@tonic-gate * We need to acquire the mapping list lock to protect 25317c478bd9Sstevel@tonic-gate * against hat_pageunload(), hat_unload(), etc. 
25327c478bd9Sstevel@tonic-gate */ 25337c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level)); 25347c478bd9Sstevel@tonic-gate if (pp == NULL) 25357c478bd9Sstevel@tonic-gate break; 25367c478bd9Sstevel@tonic-gate x86_hm_enter(pp); 25377c478bd9Sstevel@tonic-gate save_pte = pte; 25387c478bd9Sstevel@tonic-gate pte = x86pte_get(ht, entry); 25397c478bd9Sstevel@tonic-gate if (pte != save_pte) { 25407c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 25417c478bd9Sstevel@tonic-gate goto try_again; 25427c478bd9Sstevel@tonic-gate } 2543ae115bc7Smrj if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 25447c478bd9Sstevel@tonic-gate PTE_GET(pte, PT_REF | PT_MOD) == 0) { 25457c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 25467c478bd9Sstevel@tonic-gate continue; 25477c478bd9Sstevel@tonic-gate } 25487c478bd9Sstevel@tonic-gate 25497c478bd9Sstevel@tonic-gate /* 25507c478bd9Sstevel@tonic-gate * Need to clear ref or mod bits. We may compete with 25517c478bd9Sstevel@tonic-gate * hardware updating the R/M bits and have to try again. 25527c478bd9Sstevel@tonic-gate */ 25537c478bd9Sstevel@tonic-gate if (flags == HAT_SYNC_ZERORM) { 25547c478bd9Sstevel@tonic-gate new = pte; 25557c478bd9Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD); 25567c478bd9Sstevel@tonic-gate pte = hati_update_pte(ht, entry, pte, new); 25577c478bd9Sstevel@tonic-gate if (pte != 0) { 25587c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 25597c478bd9Sstevel@tonic-gate goto try_again; 25607c478bd9Sstevel@tonic-gate } 25617c478bd9Sstevel@tonic-gate } else { 25627c478bd9Sstevel@tonic-gate /* 25637c478bd9Sstevel@tonic-gate * sync the PTE to the page_t 25647c478bd9Sstevel@tonic-gate */ 25657c478bd9Sstevel@tonic-gate hati_sync_pte_to_page(pp, save_pte, ht->ht_level); 25667c478bd9Sstevel@tonic-gate } 25677c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 25687c478bd9Sstevel@tonic-gate } 25697c478bd9Sstevel@tonic-gate if (ht) 25707c478bd9Sstevel@tonic-gate htable_release(ht); 2571843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 25727c478bd9Sstevel@tonic-gate } 25737c478bd9Sstevel@tonic-gate 25747c478bd9Sstevel@tonic-gate /* 25757c478bd9Sstevel@tonic-gate * void hat_map(hat, addr, len, flags) 25767c478bd9Sstevel@tonic-gate */ 25777c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 25787c478bd9Sstevel@tonic-gate void 25797c478bd9Sstevel@tonic-gate hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 25807c478bd9Sstevel@tonic-gate { 25817c478bd9Sstevel@tonic-gate /* does nothing */ 25827c478bd9Sstevel@tonic-gate } 25837c478bd9Sstevel@tonic-gate 25847c478bd9Sstevel@tonic-gate /* 25857c478bd9Sstevel@tonic-gate * uint_t hat_getattr(hat, addr, *attr) 25867c478bd9Sstevel@tonic-gate * returns attr for <hat,addr> in *attr. returns 0 if there was a 25877c478bd9Sstevel@tonic-gate * mapping and *attr is valid, nonzero if there was no mapping and 25887c478bd9Sstevel@tonic-gate * *attr is not valid. 
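 *
 *	A minimal, purely illustrative usage sketch:
 *
 *		uint_t attr;
 *
 *		if (hat_getattr(hat, addr, &attr) == 0 &&
 *		    (attr & PROT_WRITE))
 *			... addr is currently mapped writable ...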
25897c478bd9Sstevel@tonic-gate */ 25907c478bd9Sstevel@tonic-gate uint_t 25917c478bd9Sstevel@tonic-gate hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr) 25927c478bd9Sstevel@tonic-gate { 25937c478bd9Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr); 25947c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 25957c478bd9Sstevel@tonic-gate x86pte_t pte; 25967c478bd9Sstevel@tonic-gate 2597ae115bc7Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 25987c478bd9Sstevel@tonic-gate 25997c478bd9Sstevel@tonic-gate if (IN_VA_HOLE(vaddr)) 26007c478bd9Sstevel@tonic-gate return ((uint_t)-1); 26017c478bd9Sstevel@tonic-gate 2602ae115bc7Smrj ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level); 26037c478bd9Sstevel@tonic-gate if (ht == NULL) 26047c478bd9Sstevel@tonic-gate return ((uint_t)-1); 26057c478bd9Sstevel@tonic-gate 26067c478bd9Sstevel@tonic-gate if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) { 26077c478bd9Sstevel@tonic-gate htable_release(ht); 26087c478bd9Sstevel@tonic-gate return ((uint_t)-1); 26097c478bd9Sstevel@tonic-gate } 26107c478bd9Sstevel@tonic-gate 26117c478bd9Sstevel@tonic-gate *attr = PROT_READ; 26127c478bd9Sstevel@tonic-gate if (PTE_GET(pte, PT_WRITABLE)) 26137c478bd9Sstevel@tonic-gate *attr |= PROT_WRITE; 26147c478bd9Sstevel@tonic-gate if (PTE_GET(pte, PT_USER)) 26157c478bd9Sstevel@tonic-gate *attr |= PROT_USER; 26167c478bd9Sstevel@tonic-gate if (!PTE_GET(pte, mmu.pt_nx)) 26177c478bd9Sstevel@tonic-gate *attr |= PROT_EXEC; 2618ae115bc7Smrj if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 26197c478bd9Sstevel@tonic-gate *attr |= HAT_NOSYNC; 26207c478bd9Sstevel@tonic-gate htable_release(ht); 26217c478bd9Sstevel@tonic-gate return (0); 26227c478bd9Sstevel@tonic-gate } 26237c478bd9Sstevel@tonic-gate 26247c478bd9Sstevel@tonic-gate /* 26257c478bd9Sstevel@tonic-gate * hat_updateattr() applies the given attribute change to an existing mapping 26267c478bd9Sstevel@tonic-gate */ 26277c478bd9Sstevel@tonic-gate #define HAT_LOAD_ATTR 1 26287c478bd9Sstevel@tonic-gate #define HAT_SET_ATTR 2 26297c478bd9Sstevel@tonic-gate #define HAT_CLR_ATTR 3 26307c478bd9Sstevel@tonic-gate 26317c478bd9Sstevel@tonic-gate static void 26327c478bd9Sstevel@tonic-gate hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what) 26337c478bd9Sstevel@tonic-gate { 26347c478bd9Sstevel@tonic-gate uintptr_t vaddr = (uintptr_t)addr; 26357c478bd9Sstevel@tonic-gate uintptr_t eaddr = (uintptr_t)addr + len; 26367c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 26377c478bd9Sstevel@tonic-gate uint_t entry; 26387c478bd9Sstevel@tonic-gate x86pte_t oldpte, newpte; 26397c478bd9Sstevel@tonic-gate page_t *pp; 26407c478bd9Sstevel@tonic-gate 2641843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 26427c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr)); 26437c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr)); 2644*dc32d872SJosef 'Jeff' Sipek ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); 26457c478bd9Sstevel@tonic-gate for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 26467c478bd9Sstevel@tonic-gate try_again: 26477c478bd9Sstevel@tonic-gate oldpte = htable_walk(hat, &ht, &vaddr, eaddr); 26487c478bd9Sstevel@tonic-gate if (ht == NULL) 26497c478bd9Sstevel@tonic-gate break; 2650ae115bc7Smrj if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST) 26517c478bd9Sstevel@tonic-gate continue; 26527c478bd9Sstevel@tonic-gate 26537c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level)); 26547c478bd9Sstevel@tonic-gate if (pp == NULL) 26557c478bd9Sstevel@tonic-gate continue; 
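		/*
		 * The mapping is backed by a page_t; take its mapping list
		 * lock before computing and installing the new PTE below.
		 */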
26567c478bd9Sstevel@tonic-gate x86_hm_enter(pp); 26577c478bd9Sstevel@tonic-gate 26587c478bd9Sstevel@tonic-gate newpte = oldpte; 26597c478bd9Sstevel@tonic-gate /* 26607c478bd9Sstevel@tonic-gate * We found a page table entry in the desired range, 26617c478bd9Sstevel@tonic-gate * figure out the new attributes. 26627c478bd9Sstevel@tonic-gate */ 26637c478bd9Sstevel@tonic-gate if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) { 26647c478bd9Sstevel@tonic-gate if ((attr & PROT_WRITE) && 26657c478bd9Sstevel@tonic-gate !PTE_GET(oldpte, PT_WRITABLE)) 26667c478bd9Sstevel@tonic-gate newpte |= PT_WRITABLE; 26677c478bd9Sstevel@tonic-gate 2668ae115bc7Smrj if ((attr & HAT_NOSYNC) && 2669ae115bc7Smrj PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC) 26707c478bd9Sstevel@tonic-gate newpte |= PT_NOSYNC; 26717c478bd9Sstevel@tonic-gate 26727c478bd9Sstevel@tonic-gate if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx)) 26737c478bd9Sstevel@tonic-gate newpte &= ~mmu.pt_nx; 26747c478bd9Sstevel@tonic-gate } 26757c478bd9Sstevel@tonic-gate 26767c478bd9Sstevel@tonic-gate if (what == HAT_LOAD_ATTR) { 26777c478bd9Sstevel@tonic-gate if (!(attr & PROT_WRITE) && 26787c478bd9Sstevel@tonic-gate PTE_GET(oldpte, PT_WRITABLE)) 26797c478bd9Sstevel@tonic-gate newpte &= ~PT_WRITABLE; 26807c478bd9Sstevel@tonic-gate 2681ae115bc7Smrj if (!(attr & HAT_NOSYNC) && 2682ae115bc7Smrj PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2683ae115bc7Smrj newpte &= ~PT_SOFTWARE; 26847c478bd9Sstevel@tonic-gate 26857c478bd9Sstevel@tonic-gate if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 26867c478bd9Sstevel@tonic-gate newpte |= mmu.pt_nx; 26877c478bd9Sstevel@tonic-gate } 26887c478bd9Sstevel@tonic-gate 26897c478bd9Sstevel@tonic-gate if (what == HAT_CLR_ATTR) { 26907c478bd9Sstevel@tonic-gate if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE)) 26917c478bd9Sstevel@tonic-gate newpte &= ~PT_WRITABLE; 26927c478bd9Sstevel@tonic-gate 2693ae115bc7Smrj if ((attr & HAT_NOSYNC) && 2694ae115bc7Smrj PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2695ae115bc7Smrj newpte &= ~PT_SOFTWARE; 26967c478bd9Sstevel@tonic-gate 26977c478bd9Sstevel@tonic-gate if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 26987c478bd9Sstevel@tonic-gate newpte |= mmu.pt_nx; 26997c478bd9Sstevel@tonic-gate } 27007c478bd9Sstevel@tonic-gate 27017c478bd9Sstevel@tonic-gate /* 2702ae115bc7Smrj * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set. 2703ae115bc7Smrj * x86pte_set() depends on this. 2704ae115bc7Smrj */ 2705ae115bc7Smrj if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC) 2706ae115bc7Smrj newpte |= PT_REF | PT_MOD; 2707ae115bc7Smrj 2708ae115bc7Smrj /* 27097c478bd9Sstevel@tonic-gate * what about PROT_READ or others? this code only handles: 27107c478bd9Sstevel@tonic-gate * EXEC, WRITE, NOSYNC 27117c478bd9Sstevel@tonic-gate */ 27127c478bd9Sstevel@tonic-gate 27137c478bd9Sstevel@tonic-gate /* 27147c478bd9Sstevel@tonic-gate * If new PTE really changed, update the table. 
27157c478bd9Sstevel@tonic-gate 		 */
27167c478bd9Sstevel@tonic-gate 		if (newpte != oldpte) {
27177c478bd9Sstevel@tonic-gate 			entry = htable_va2entry(vaddr, ht);
27187c478bd9Sstevel@tonic-gate 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
27197c478bd9Sstevel@tonic-gate 			if (oldpte != 0) {
27207c478bd9Sstevel@tonic-gate 				x86_hm_exit(pp);
27217c478bd9Sstevel@tonic-gate 				goto try_again;
27227c478bd9Sstevel@tonic-gate 			}
27237c478bd9Sstevel@tonic-gate 		}
27247c478bd9Sstevel@tonic-gate 		x86_hm_exit(pp);
27257c478bd9Sstevel@tonic-gate 	}
27267c478bd9Sstevel@tonic-gate 	if (ht)
27277c478bd9Sstevel@tonic-gate 		htable_release(ht);
2728843e1988Sjohnlev 	XPV_ALLOW_MIGRATE();
27297c478bd9Sstevel@tonic-gate }
27307c478bd9Sstevel@tonic-gate 
27317c478bd9Sstevel@tonic-gate /*
27327c478bd9Sstevel@tonic-gate  * Various wrappers for hat_updateattr()
27337c478bd9Sstevel@tonic-gate  */
27347c478bd9Sstevel@tonic-gate void
27357c478bd9Sstevel@tonic-gate hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27367c478bd9Sstevel@tonic-gate {
2737ae115bc7Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27387c478bd9Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
27397c478bd9Sstevel@tonic-gate }
27407c478bd9Sstevel@tonic-gate 
27417c478bd9Sstevel@tonic-gate void
27427c478bd9Sstevel@tonic-gate hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27437c478bd9Sstevel@tonic-gate {
2744ae115bc7Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27457c478bd9Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
27467c478bd9Sstevel@tonic-gate }
27477c478bd9Sstevel@tonic-gate 
27487c478bd9Sstevel@tonic-gate void
27497c478bd9Sstevel@tonic-gate hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
27507c478bd9Sstevel@tonic-gate {
2751ae115bc7Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27527c478bd9Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
27537c478bd9Sstevel@tonic-gate }
27547c478bd9Sstevel@tonic-gate 
27557c478bd9Sstevel@tonic-gate void
27567c478bd9Sstevel@tonic-gate hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
27577c478bd9Sstevel@tonic-gate {
2758ae115bc7Smrj 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
27597c478bd9Sstevel@tonic-gate 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
27607c478bd9Sstevel@tonic-gate }
27617c478bd9Sstevel@tonic-gate 
27627c478bd9Sstevel@tonic-gate /*
27637c478bd9Sstevel@tonic-gate  * size_t hat_getpagesize(hat, addr)
27647c478bd9Sstevel@tonic-gate  *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
27657c478bd9Sstevel@tonic-gate  *	no mapping. This is an advisory call.
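 *
 *	A hypothetical caller might use it as:
 *
 *		ssize_t sz = hat_getpagesize(hat, addr);
 *
 *		if (sz == -1)
 *			... no mapping at addr ...
 *		else
 *			... addr lies within an sz byte (4K, 2M, ...) page ...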
27667c478bd9Sstevel@tonic-gate */ 27677c478bd9Sstevel@tonic-gate ssize_t 27687c478bd9Sstevel@tonic-gate hat_getpagesize(hat_t *hat, caddr_t addr) 27697c478bd9Sstevel@tonic-gate { 27707c478bd9Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr); 27717c478bd9Sstevel@tonic-gate htable_t *ht; 27727c478bd9Sstevel@tonic-gate size_t pagesize; 27737c478bd9Sstevel@tonic-gate 2774ae115bc7Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 27757c478bd9Sstevel@tonic-gate if (IN_VA_HOLE(vaddr)) 27767c478bd9Sstevel@tonic-gate return (-1); 27777c478bd9Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, NULL); 27787c478bd9Sstevel@tonic-gate if (ht == NULL) 27797c478bd9Sstevel@tonic-gate return (-1); 27807c478bd9Sstevel@tonic-gate pagesize = LEVEL_SIZE(ht->ht_level); 27817c478bd9Sstevel@tonic-gate htable_release(ht); 27827c478bd9Sstevel@tonic-gate return (pagesize); 27837c478bd9Sstevel@tonic-gate } 27847c478bd9Sstevel@tonic-gate 27857c478bd9Sstevel@tonic-gate 27867c478bd9Sstevel@tonic-gate 27877c478bd9Sstevel@tonic-gate /* 27887c478bd9Sstevel@tonic-gate * pfn_t hat_getpfnum(hat, addr) 27897c478bd9Sstevel@tonic-gate * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid. 27907c478bd9Sstevel@tonic-gate */ 27917c478bd9Sstevel@tonic-gate pfn_t 27927c478bd9Sstevel@tonic-gate hat_getpfnum(hat_t *hat, caddr_t addr) 27937c478bd9Sstevel@tonic-gate { 27947c478bd9Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr); 27957c478bd9Sstevel@tonic-gate htable_t *ht; 27967c478bd9Sstevel@tonic-gate uint_t entry; 27977c478bd9Sstevel@tonic-gate pfn_t pfn = PFN_INVALID; 27987c478bd9Sstevel@tonic-gate 2799ae115bc7Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 28007c478bd9Sstevel@tonic-gate if (khat_running == 0) 2801ae115bc7Smrj return (PFN_INVALID); 28027c478bd9Sstevel@tonic-gate 28037c478bd9Sstevel@tonic-gate if (IN_VA_HOLE(vaddr)) 28047c478bd9Sstevel@tonic-gate return (PFN_INVALID); 28057c478bd9Sstevel@tonic-gate 2806843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 28077c478bd9Sstevel@tonic-gate /* 28087c478bd9Sstevel@tonic-gate * A very common use of hat_getpfnum() is from the DDI for kernel pages. 28097c478bd9Sstevel@tonic-gate * Use the kmap_ptes (which also covers the 32 bit heap) to speed 28107c478bd9Sstevel@tonic-gate * this up. 
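	 * (The kmap_ptes array only ever holds level 0, i.e. 4K, mappings,
	 * which is why the PTE2PFN() call below can hard-code level 0.)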
28117c478bd9Sstevel@tonic-gate */ 28127c478bd9Sstevel@tonic-gate if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 28137c478bd9Sstevel@tonic-gate x86pte_t pte; 2814ae115bc7Smrj pgcnt_t pg_index; 28157c478bd9Sstevel@tonic-gate 2816ae115bc7Smrj pg_index = mmu_btop(vaddr - mmu.kmap_addr); 2817ae115bc7Smrj pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index)); 2818843e1988Sjohnlev if (PTE_ISVALID(pte)) 2819843e1988Sjohnlev /*LINTED [use of constant 0 causes a lint warning] */ 2820843e1988Sjohnlev pfn = PTE2PFN(pte, 0); 2821843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 2822843e1988Sjohnlev return (pfn); 28237c478bd9Sstevel@tonic-gate } 28247c478bd9Sstevel@tonic-gate 28257c478bd9Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, &entry); 2826843e1988Sjohnlev if (ht == NULL) { 2827843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 28287c478bd9Sstevel@tonic-gate return (PFN_INVALID); 2829843e1988Sjohnlev } 28307c478bd9Sstevel@tonic-gate ASSERT(vaddr >= ht->ht_vaddr); 28317c478bd9Sstevel@tonic-gate ASSERT(vaddr <= HTABLE_LAST_PAGE(ht)); 28327c478bd9Sstevel@tonic-gate pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level); 28337c478bd9Sstevel@tonic-gate if (ht->ht_level > 0) 28347c478bd9Sstevel@tonic-gate pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level)); 28357c478bd9Sstevel@tonic-gate htable_release(ht); 2836843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 28377c478bd9Sstevel@tonic-gate return (pfn); 28387c478bd9Sstevel@tonic-gate } 28397c478bd9Sstevel@tonic-gate 28407c478bd9Sstevel@tonic-gate /* 28417c478bd9Sstevel@tonic-gate * int hat_probe(hat, addr) 28427c478bd9Sstevel@tonic-gate * return 0 if no valid mapping is present. Faster version 28437c478bd9Sstevel@tonic-gate * of hat_getattr in certain architectures. 28447c478bd9Sstevel@tonic-gate */ 28457c478bd9Sstevel@tonic-gate int 28467c478bd9Sstevel@tonic-gate hat_probe(hat_t *hat, caddr_t addr) 28477c478bd9Sstevel@tonic-gate { 28487c478bd9Sstevel@tonic-gate uintptr_t vaddr = ALIGN2PAGE(addr); 28497c478bd9Sstevel@tonic-gate uint_t entry; 28507c478bd9Sstevel@tonic-gate htable_t *ht; 28517c478bd9Sstevel@tonic-gate pgcnt_t pg_off; 28527c478bd9Sstevel@tonic-gate 2853ae115bc7Smrj ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2854*dc32d872SJosef 'Jeff' Sipek ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); 28557c478bd9Sstevel@tonic-gate if (IN_VA_HOLE(vaddr)) 28567c478bd9Sstevel@tonic-gate return (0); 28577c478bd9Sstevel@tonic-gate 28587c478bd9Sstevel@tonic-gate /* 28597c478bd9Sstevel@tonic-gate * Most common use of hat_probe is from segmap. We special case it 28607c478bd9Sstevel@tonic-gate * for performance. 28617c478bd9Sstevel@tonic-gate */ 28627c478bd9Sstevel@tonic-gate if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 28637c478bd9Sstevel@tonic-gate pg_off = mmu_btop(vaddr - mmu.kmap_addr); 28647c478bd9Sstevel@tonic-gate if (mmu.pae_hat) 28657c478bd9Sstevel@tonic-gate return (PTE_ISVALID(mmu.kmap_ptes[pg_off])); 28667c478bd9Sstevel@tonic-gate else 28677c478bd9Sstevel@tonic-gate return (PTE_ISVALID( 28687c478bd9Sstevel@tonic-gate ((x86pte32_t *)mmu.kmap_ptes)[pg_off])); 28697c478bd9Sstevel@tonic-gate } 28707c478bd9Sstevel@tonic-gate 28717c478bd9Sstevel@tonic-gate ht = htable_getpage(hat, vaddr, &entry); 28727c478bd9Sstevel@tonic-gate htable_release(ht); 2873843e1988Sjohnlev return (ht != NULL); 28747c478bd9Sstevel@tonic-gate } 28757c478bd9Sstevel@tonic-gate 28767c478bd9Sstevel@tonic-gate /* 2877250b7ff9Sjosephb * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM. 
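 * Returns 1 for DISM (the backing SPT segment has SHM_PAGEABLE set) and
 * 0 for locked ISM.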
2878250b7ff9Sjosephb */ 2879250b7ff9Sjosephb static int 2880250b7ff9Sjosephb is_it_dism(hat_t *hat, caddr_t va) 2881250b7ff9Sjosephb { 2882250b7ff9Sjosephb struct seg *seg; 2883250b7ff9Sjosephb struct shm_data *shmd; 2884250b7ff9Sjosephb struct spt_data *sptd; 2885250b7ff9Sjosephb 2886250b7ff9Sjosephb seg = as_findseg(hat->hat_as, va, 0); 2887250b7ff9Sjosephb ASSERT(seg != NULL); 2888250b7ff9Sjosephb ASSERT(seg->s_base <= va); 2889250b7ff9Sjosephb shmd = (struct shm_data *)seg->s_data; 2890250b7ff9Sjosephb ASSERT(shmd != NULL); 2891250b7ff9Sjosephb sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2892250b7ff9Sjosephb ASSERT(sptd != NULL); 2893250b7ff9Sjosephb if (sptd->spt_flags & SHM_PAGEABLE) 2894250b7ff9Sjosephb return (1); 2895250b7ff9Sjosephb return (0); 2896250b7ff9Sjosephb } 2897250b7ff9Sjosephb 2898250b7ff9Sjosephb /* 2899250b7ff9Sjosephb * Simple implementation of ISM. hat_share() is similar to hat_memload_array(), 29007c478bd9Sstevel@tonic-gate * except that we use the ism_hat's existing mappings to determine the pages 2901250b7ff9Sjosephb * and protections to use for this hat. If we find a full properly aligned 2902250b7ff9Sjosephb * and sized pagetable, we will attempt to share the pagetable itself. 29037c478bd9Sstevel@tonic-gate */ 29047c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 29057c478bd9Sstevel@tonic-gate int 29067c478bd9Sstevel@tonic-gate hat_share( 29077c478bd9Sstevel@tonic-gate hat_t *hat, 29087c478bd9Sstevel@tonic-gate caddr_t addr, 29097c478bd9Sstevel@tonic-gate hat_t *ism_hat, 29107c478bd9Sstevel@tonic-gate caddr_t src_addr, 29117c478bd9Sstevel@tonic-gate size_t len, /* almost useless value, see below.. */ 29127c478bd9Sstevel@tonic-gate uint_t ismszc) 29137c478bd9Sstevel@tonic-gate { 29147c478bd9Sstevel@tonic-gate uintptr_t vaddr_start = (uintptr_t)addr; 29157c478bd9Sstevel@tonic-gate uintptr_t vaddr; 29167c478bd9Sstevel@tonic-gate uintptr_t eaddr = vaddr_start + len; 29177c478bd9Sstevel@tonic-gate uintptr_t ism_addr_start = (uintptr_t)src_addr; 29187c478bd9Sstevel@tonic-gate uintptr_t ism_addr = ism_addr_start; 29197c478bd9Sstevel@tonic-gate uintptr_t e_ism_addr = ism_addr + len; 29207c478bd9Sstevel@tonic-gate htable_t *ism_ht = NULL; 29217c478bd9Sstevel@tonic-gate htable_t *ht; 29227c478bd9Sstevel@tonic-gate x86pte_t pte; 29237c478bd9Sstevel@tonic-gate page_t *pp; 29247c478bd9Sstevel@tonic-gate pfn_t pfn; 29257c478bd9Sstevel@tonic-gate level_t l; 29267c478bd9Sstevel@tonic-gate pgcnt_t pgcnt; 29277c478bd9Sstevel@tonic-gate uint_t prot; 2928250b7ff9Sjosephb int is_dism; 2929250b7ff9Sjosephb int flags; 29307c478bd9Sstevel@tonic-gate 29317c478bd9Sstevel@tonic-gate /* 29327c478bd9Sstevel@tonic-gate * We might be asked to share an empty DISM hat by as_dup() 29337c478bd9Sstevel@tonic-gate */ 29347c478bd9Sstevel@tonic-gate ASSERT(hat != kas.a_hat); 2935ae115bc7Smrj ASSERT(eaddr <= _userlimit); 29367c478bd9Sstevel@tonic-gate if (!(ism_hat->hat_flags & HAT_SHARED)) { 29377c478bd9Sstevel@tonic-gate ASSERT(hat_get_mapped_size(ism_hat) == 0); 29387c478bd9Sstevel@tonic-gate return (0); 29397c478bd9Sstevel@tonic-gate } 2940843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 29417c478bd9Sstevel@tonic-gate 29427c478bd9Sstevel@tonic-gate /* 29437c478bd9Sstevel@tonic-gate * The SPT segment driver often passes us a size larger than there are 29447c478bd9Sstevel@tonic-gate * valid mappings. That's because it rounds the segment size up to a 29457c478bd9Sstevel@tonic-gate * large pagesize, even if the actual memory mapped by ism_hat is less. 
29467c478bd9Sstevel@tonic-gate */ 29477c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr_start)); 29487c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(ism_addr_start)); 29497c478bd9Sstevel@tonic-gate ASSERT(ism_hat->hat_flags & HAT_SHARED); 2950250b7ff9Sjosephb is_dism = is_it_dism(hat, addr); 29517c478bd9Sstevel@tonic-gate while (ism_addr < e_ism_addr) { 29527c478bd9Sstevel@tonic-gate /* 29537c478bd9Sstevel@tonic-gate * use htable_walk to get the next valid ISM mapping 29547c478bd9Sstevel@tonic-gate */ 29557c478bd9Sstevel@tonic-gate pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr); 29567c478bd9Sstevel@tonic-gate if (ism_ht == NULL) 29577c478bd9Sstevel@tonic-gate break; 29587c478bd9Sstevel@tonic-gate 29597c478bd9Sstevel@tonic-gate /* 2960250b7ff9Sjosephb * First check to see if we already share the page table. 29617c478bd9Sstevel@tonic-gate */ 2962250b7ff9Sjosephb l = ism_ht->ht_level; 29637c478bd9Sstevel@tonic-gate vaddr = vaddr_start + (ism_addr - ism_addr_start); 2964250b7ff9Sjosephb ht = htable_lookup(hat, vaddr, l); 2965250b7ff9Sjosephb if (ht != NULL) { 2966250b7ff9Sjosephb if (ht->ht_flags & HTABLE_SHARED_PFN) 2967250b7ff9Sjosephb goto shared; 2968250b7ff9Sjosephb htable_release(ht); 2969250b7ff9Sjosephb goto not_shared; 2970250b7ff9Sjosephb } 2971250b7ff9Sjosephb 2972250b7ff9Sjosephb /* 2973250b7ff9Sjosephb * Can't ever share top table. 2974250b7ff9Sjosephb */ 2975250b7ff9Sjosephb if (l == mmu.max_level) 2976250b7ff9Sjosephb goto not_shared; 2977250b7ff9Sjosephb 2978250b7ff9Sjosephb /* 2979250b7ff9Sjosephb * Avoid level mismatches later due to DISM faults. 2980250b7ff9Sjosephb */ 2981250b7ff9Sjosephb if (is_dism && l > 0) 2982250b7ff9Sjosephb goto not_shared; 2983250b7ff9Sjosephb 2984250b7ff9Sjosephb /* 2985250b7ff9Sjosephb * addresses and lengths must align 2986250b7ff9Sjosephb * table must be fully populated 2987250b7ff9Sjosephb * no lower level page tables 2988250b7ff9Sjosephb */ 2989250b7ff9Sjosephb if (ism_addr != ism_ht->ht_vaddr || 2990250b7ff9Sjosephb (vaddr & LEVEL_OFFSET(l + 1)) != 0) 2991250b7ff9Sjosephb goto not_shared; 2992250b7ff9Sjosephb 2993250b7ff9Sjosephb /* 2994250b7ff9Sjosephb * The range of address space must cover a full table. 2995250b7ff9Sjosephb */ 29961d03c31eSjohnlev if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1)) 2997250b7ff9Sjosephb goto not_shared; 2998250b7ff9Sjosephb 2999250b7ff9Sjosephb /* 3000250b7ff9Sjosephb * All entries in the ISM page table must be leaf PTEs. 3001250b7ff9Sjosephb */ 3002250b7ff9Sjosephb if (l > 0) { 3003250b7ff9Sjosephb int e; 3004250b7ff9Sjosephb 3005250b7ff9Sjosephb /* 3006250b7ff9Sjosephb * We know the 0th is from htable_walk() above. 
3007250b7ff9Sjosephb */ 3008250b7ff9Sjosephb for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) { 3009250b7ff9Sjosephb x86pte_t pte; 3010250b7ff9Sjosephb pte = x86pte_get(ism_ht, e); 3011250b7ff9Sjosephb if (!PTE_ISPAGE(pte, l)) 3012250b7ff9Sjosephb goto not_shared; 3013250b7ff9Sjosephb } 3014250b7ff9Sjosephb } 3015250b7ff9Sjosephb 3016250b7ff9Sjosephb /* 3017250b7ff9Sjosephb * share the page table 3018250b7ff9Sjosephb */ 3019250b7ff9Sjosephb ht = htable_create(hat, vaddr, l, ism_ht); 3020250b7ff9Sjosephb shared: 3021250b7ff9Sjosephb ASSERT(ht->ht_flags & HTABLE_SHARED_PFN); 3022250b7ff9Sjosephb ASSERT(ht->ht_shares == ism_ht); 3023250b7ff9Sjosephb hat->hat_ism_pgcnt += 3024250b7ff9Sjosephb (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) << 3025250b7ff9Sjosephb (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 3026250b7ff9Sjosephb ht->ht_valid_cnt = ism_ht->ht_valid_cnt; 3027250b7ff9Sjosephb htable_release(ht); 3028250b7ff9Sjosephb ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1); 3029250b7ff9Sjosephb htable_release(ism_ht); 3030250b7ff9Sjosephb ism_ht = NULL; 3031250b7ff9Sjosephb continue; 3032250b7ff9Sjosephb 3033250b7ff9Sjosephb not_shared: 3034250b7ff9Sjosephb /* 3035250b7ff9Sjosephb * Unable to share the page table. Instead we will 3036250b7ff9Sjosephb * create new mappings from the values in the ISM mappings. 3037250b7ff9Sjosephb * Figure out what level size mappings to use; 3038250b7ff9Sjosephb */ 30397c478bd9Sstevel@tonic-gate for (l = ism_ht->ht_level; l > 0; --l) { 30407c478bd9Sstevel@tonic-gate if (LEVEL_SIZE(l) <= eaddr - vaddr && 30417c478bd9Sstevel@tonic-gate (vaddr & LEVEL_OFFSET(l)) == 0) 30427c478bd9Sstevel@tonic-gate break; 30437c478bd9Sstevel@tonic-gate } 30447c478bd9Sstevel@tonic-gate 30457c478bd9Sstevel@tonic-gate /* 30467c478bd9Sstevel@tonic-gate * The ISM mapping might be larger than the share area, 3047250b7ff9Sjosephb * be careful to truncate it if needed. 30487c478bd9Sstevel@tonic-gate */ 30497c478bd9Sstevel@tonic-gate if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) { 30507c478bd9Sstevel@tonic-gate pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level)); 30517c478bd9Sstevel@tonic-gate } else { 30527c478bd9Sstevel@tonic-gate pgcnt = mmu_btop(eaddr - vaddr); 30537c478bd9Sstevel@tonic-gate l = 0; 30547c478bd9Sstevel@tonic-gate } 30557c478bd9Sstevel@tonic-gate 30567c478bd9Sstevel@tonic-gate pfn = PTE2PFN(pte, ism_ht->ht_level); 30577c478bd9Sstevel@tonic-gate ASSERT(pfn != PFN_INVALID); 30587c478bd9Sstevel@tonic-gate while (pgcnt > 0) { 30597c478bd9Sstevel@tonic-gate /* 30607c478bd9Sstevel@tonic-gate * Make a new pte for the PFN for this level. 30617c478bd9Sstevel@tonic-gate * Copy protections for the pte from the ISM pte. 
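			 * (PT_WRITABLE in the ISM pte becomes PROT_WRITE, a
			 * clear PT_NX bit becomes PROT_EXEC, and every
			 * mapping gets PROT_USER | PROT_READ.)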
30627c478bd9Sstevel@tonic-gate */ 30637c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(pfn); 30647c478bd9Sstevel@tonic-gate ASSERT(pp != NULL); 30657c478bd9Sstevel@tonic-gate 30667c478bd9Sstevel@tonic-gate prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK; 30677c478bd9Sstevel@tonic-gate if (PTE_GET(pte, PT_WRITABLE)) 30687c478bd9Sstevel@tonic-gate prot |= PROT_WRITE; 30697c478bd9Sstevel@tonic-gate if (!PTE_GET(pte, PT_NX)) 30707c478bd9Sstevel@tonic-gate prot |= PROT_EXEC; 30717c478bd9Sstevel@tonic-gate 3072250b7ff9Sjosephb flags = HAT_LOAD; 3073250b7ff9Sjosephb if (!is_dism) 3074250b7ff9Sjosephb flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST; 3075250b7ff9Sjosephb while (hati_load_common(hat, vaddr, pp, prot, flags, 3076ae115bc7Smrj l, pfn) != 0) { 3077ae115bc7Smrj if (l == 0) 3078ae115bc7Smrj panic("hati_load_common() failure"); 3079ae115bc7Smrj --l; 3080ae115bc7Smrj } 30817c478bd9Sstevel@tonic-gate 30827c478bd9Sstevel@tonic-gate vaddr += LEVEL_SIZE(l); 30837c478bd9Sstevel@tonic-gate ism_addr += LEVEL_SIZE(l); 30847c478bd9Sstevel@tonic-gate pfn += mmu_btop(LEVEL_SIZE(l)); 30857c478bd9Sstevel@tonic-gate pgcnt -= mmu_btop(LEVEL_SIZE(l)); 30867c478bd9Sstevel@tonic-gate } 30877c478bd9Sstevel@tonic-gate } 30887c478bd9Sstevel@tonic-gate if (ism_ht != NULL) 30897c478bd9Sstevel@tonic-gate htable_release(ism_ht); 3090843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 30917c478bd9Sstevel@tonic-gate return (0); 30927c478bd9Sstevel@tonic-gate } 30937c478bd9Sstevel@tonic-gate 30947c478bd9Sstevel@tonic-gate 30957c478bd9Sstevel@tonic-gate /* 30967c478bd9Sstevel@tonic-gate * hat_unshare() is similar to hat_unload_callback(), but 30977c478bd9Sstevel@tonic-gate * we have to look for empty shared pagetables. Note that 30987c478bd9Sstevel@tonic-gate * hat_unshare() is always invoked against an entire segment. 30997c478bd9Sstevel@tonic-gate */ 31007c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 31017c478bd9Sstevel@tonic-gate void 31027c478bd9Sstevel@tonic-gate hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc) 31037c478bd9Sstevel@tonic-gate { 31047173d045Sjosephb uint64_t vaddr = (uintptr_t)addr; 31057c478bd9Sstevel@tonic-gate uintptr_t eaddr = vaddr + len; 31067c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 31077c478bd9Sstevel@tonic-gate uint_t need_demaps = 0; 3108250b7ff9Sjosephb int flags = HAT_UNLOAD_UNMAP; 3109250b7ff9Sjosephb level_t l; 31107c478bd9Sstevel@tonic-gate 31117c478bd9Sstevel@tonic-gate ASSERT(hat != kas.a_hat); 3112ae115bc7Smrj ASSERT(eaddr <= _userlimit); 31137c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(vaddr)); 31147c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(eaddr)); 3115843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 31167c478bd9Sstevel@tonic-gate 31177c478bd9Sstevel@tonic-gate /* 31187c478bd9Sstevel@tonic-gate * First go through and remove any shared pagetables. 31197c478bd9Sstevel@tonic-gate * 3120ae115bc7Smrj * Note that it's ok to delay the TLB shootdown till the entire range is 31217c478bd9Sstevel@tonic-gate * finished, because if hat_pageunload() were to unload a shared 3122ae115bc7Smrj * pagetable page, its hat_tlb_inval() will do a global TLB invalidate. 
31237c478bd9Sstevel@tonic-gate */ 3124250b7ff9Sjosephb l = mmu.max_page_level; 3125250b7ff9Sjosephb if (l == mmu.max_level) 3126250b7ff9Sjosephb --l; 3127250b7ff9Sjosephb for (; l >= 0; --l) { 3128250b7ff9Sjosephb for (vaddr = (uintptr_t)addr; vaddr < eaddr; 3129250b7ff9Sjosephb vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) { 31307c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(vaddr)); 31317c478bd9Sstevel@tonic-gate /* 3132250b7ff9Sjosephb * find a pagetable that maps the current address 31337c478bd9Sstevel@tonic-gate */ 3134250b7ff9Sjosephb ht = htable_lookup(hat, vaddr, l); 3135250b7ff9Sjosephb if (ht == NULL) 3136250b7ff9Sjosephb continue; 31377c478bd9Sstevel@tonic-gate if (ht->ht_flags & HTABLE_SHARED_PFN) { 31387c478bd9Sstevel@tonic-gate /* 3139250b7ff9Sjosephb * clear page count, set valid_cnt to 0, 3140250b7ff9Sjosephb * let htable_release() finish the job 31417c478bd9Sstevel@tonic-gate */ 3142250b7ff9Sjosephb hat->hat_ism_pgcnt -= ht->ht_valid_cnt << 3143250b7ff9Sjosephb (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 31447c478bd9Sstevel@tonic-gate ht->ht_valid_cnt = 0; 31457c478bd9Sstevel@tonic-gate need_demaps = 1; 31467c478bd9Sstevel@tonic-gate } 31477c478bd9Sstevel@tonic-gate htable_release(ht); 31487c478bd9Sstevel@tonic-gate } 31497c478bd9Sstevel@tonic-gate } 31507c478bd9Sstevel@tonic-gate 31517c478bd9Sstevel@tonic-gate /* 31527c478bd9Sstevel@tonic-gate * flush the TLBs - since we're probably dealing with MANY mappings 31537c478bd9Sstevel@tonic-gate * we do just one CR3 reload. 31547c478bd9Sstevel@tonic-gate */ 31557c478bd9Sstevel@tonic-gate if (!(hat->hat_flags & HAT_FREEING) && need_demaps) 3156ae115bc7Smrj hat_tlb_inval(hat, DEMAP_ALL_ADDR); 31577c478bd9Sstevel@tonic-gate 31587c478bd9Sstevel@tonic-gate /* 31597c478bd9Sstevel@tonic-gate * Now go back and clean up any unaligned mappings that 31607c478bd9Sstevel@tonic-gate * couldn't share pagetables. 31617c478bd9Sstevel@tonic-gate */ 3162250b7ff9Sjosephb if (!is_it_dism(hat, addr)) 3163250b7ff9Sjosephb flags |= HAT_UNLOAD_UNLOCK; 3164250b7ff9Sjosephb hat_unload(hat, addr, len, flags); 3165843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 31667c478bd9Sstevel@tonic-gate } 31677c478bd9Sstevel@tonic-gate 31687c478bd9Sstevel@tonic-gate 31697c478bd9Sstevel@tonic-gate /* 31707c478bd9Sstevel@tonic-gate * hat_reserve() does nothing 31717c478bd9Sstevel@tonic-gate */ 31727c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 31737c478bd9Sstevel@tonic-gate void 31747c478bd9Sstevel@tonic-gate hat_reserve(struct as *as, caddr_t addr, size_t len) 31757c478bd9Sstevel@tonic-gate { 31767c478bd9Sstevel@tonic-gate } 31777c478bd9Sstevel@tonic-gate 31787c478bd9Sstevel@tonic-gate 31797c478bd9Sstevel@tonic-gate /* 31807c478bd9Sstevel@tonic-gate * Called when all mappings to a page should have write permission removed. 
318121584dbcSPavel Tatashin * Mostly stolen from hat_pagesync() 31827c478bd9Sstevel@tonic-gate */ 31837c478bd9Sstevel@tonic-gate static void 31847c478bd9Sstevel@tonic-gate hati_page_clrwrt(struct page *pp) 31857c478bd9Sstevel@tonic-gate { 31867c478bd9Sstevel@tonic-gate hment_t *hm = NULL; 31877c478bd9Sstevel@tonic-gate htable_t *ht; 31887c478bd9Sstevel@tonic-gate uint_t entry; 31897c478bd9Sstevel@tonic-gate x86pte_t old; 31907c478bd9Sstevel@tonic-gate x86pte_t new; 31917c478bd9Sstevel@tonic-gate uint_t pszc = 0; 31927c478bd9Sstevel@tonic-gate 3193843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 31947c478bd9Sstevel@tonic-gate next_size: 31957c478bd9Sstevel@tonic-gate /* 31967c478bd9Sstevel@tonic-gate * walk thru the mapping list clearing write permission 31977c478bd9Sstevel@tonic-gate */ 31987c478bd9Sstevel@tonic-gate x86_hm_enter(pp); 31997c478bd9Sstevel@tonic-gate while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 32007c478bd9Sstevel@tonic-gate if (ht->ht_level < pszc) 32017c478bd9Sstevel@tonic-gate continue; 32027c478bd9Sstevel@tonic-gate old = x86pte_get(ht, entry); 32037c478bd9Sstevel@tonic-gate 32047c478bd9Sstevel@tonic-gate for (;;) { 32057c478bd9Sstevel@tonic-gate /* 32067c478bd9Sstevel@tonic-gate * Is this mapping of interest? 32077c478bd9Sstevel@tonic-gate */ 32087c478bd9Sstevel@tonic-gate if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum || 32097c478bd9Sstevel@tonic-gate PTE_GET(old, PT_WRITABLE) == 0) 32107c478bd9Sstevel@tonic-gate break; 32117c478bd9Sstevel@tonic-gate 32127c478bd9Sstevel@tonic-gate /* 32137c478bd9Sstevel@tonic-gate * Clear ref/mod writable bits. This requires cross 32147c478bd9Sstevel@tonic-gate * calls to ensure any executing TLBs see cleared bits. 32157c478bd9Sstevel@tonic-gate */ 32167c478bd9Sstevel@tonic-gate new = old; 32177c478bd9Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE); 32187c478bd9Sstevel@tonic-gate old = hati_update_pte(ht, entry, old, new); 32197c478bd9Sstevel@tonic-gate if (old != 0) 32207c478bd9Sstevel@tonic-gate continue; 32217c478bd9Sstevel@tonic-gate 32227c478bd9Sstevel@tonic-gate break; 32237c478bd9Sstevel@tonic-gate } 32247c478bd9Sstevel@tonic-gate } 32257c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 32267c478bd9Sstevel@tonic-gate while (pszc < pp->p_szc) { 32277c478bd9Sstevel@tonic-gate page_t *tpp; 32287c478bd9Sstevel@tonic-gate pszc++; 32297c478bd9Sstevel@tonic-gate tpp = PP_GROUPLEADER(pp, pszc); 32307c478bd9Sstevel@tonic-gate if (pp != tpp) { 32317c478bd9Sstevel@tonic-gate pp = tpp; 32327c478bd9Sstevel@tonic-gate goto next_size; 32337c478bd9Sstevel@tonic-gate } 32347c478bd9Sstevel@tonic-gate } 3235843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 32367c478bd9Sstevel@tonic-gate } 32377c478bd9Sstevel@tonic-gate 32387c478bd9Sstevel@tonic-gate /* 32397c478bd9Sstevel@tonic-gate * void hat_page_setattr(pp, flag) 32407c478bd9Sstevel@tonic-gate * void hat_page_clrattr(pp, flag) 32417c478bd9Sstevel@tonic-gate * used to set/clr ref/mod bits. 
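 *
 *	Hypothetical usage sketch:
 *
 *		hat_page_setattr(pp, P_REF | P_MOD);	(mark ref'd and modified)
 *		hat_page_clrattr(pp, P_MOD);		(clear just the mod bit)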
32427c478bd9Sstevel@tonic-gate */ 32437c478bd9Sstevel@tonic-gate void 32447c478bd9Sstevel@tonic-gate hat_page_setattr(struct page *pp, uint_t flag) 32457c478bd9Sstevel@tonic-gate { 32467c478bd9Sstevel@tonic-gate vnode_t *vp = pp->p_vnode; 32477c478bd9Sstevel@tonic-gate kmutex_t *vphm = NULL; 32487c478bd9Sstevel@tonic-gate page_t **listp; 3249d9615970Sqiao int noshuffle; 3250d9615970Sqiao 3251d9615970Sqiao noshuffle = flag & P_NSH; 3252d9615970Sqiao flag &= ~P_NSH; 32537c478bd9Sstevel@tonic-gate 32547c478bd9Sstevel@tonic-gate if (PP_GETRM(pp, flag) == flag) 32557c478bd9Sstevel@tonic-gate return; 32567c478bd9Sstevel@tonic-gate 3257d9615970Sqiao if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 3258d9615970Sqiao !noshuffle) { 32597c478bd9Sstevel@tonic-gate vphm = page_vnode_mutex(vp); 32607c478bd9Sstevel@tonic-gate mutex_enter(vphm); 32617c478bd9Sstevel@tonic-gate } 32627c478bd9Sstevel@tonic-gate 32637c478bd9Sstevel@tonic-gate PP_SETRM(pp, flag); 32647c478bd9Sstevel@tonic-gate 32657c478bd9Sstevel@tonic-gate if (vphm != NULL) { 32667c478bd9Sstevel@tonic-gate 32677c478bd9Sstevel@tonic-gate /* 32687c478bd9Sstevel@tonic-gate * Some File Systems examine v_pages for NULL w/o 32697c478bd9Sstevel@tonic-gate * grabbing the vphm mutex. Must not let it become NULL when 32707c478bd9Sstevel@tonic-gate * pp is the only page on the list. 32717c478bd9Sstevel@tonic-gate */ 32727c478bd9Sstevel@tonic-gate if (pp->p_vpnext != pp) { 32737c478bd9Sstevel@tonic-gate page_vpsub(&vp->v_pages, pp); 32747c478bd9Sstevel@tonic-gate if (vp->v_pages != NULL) 32757c478bd9Sstevel@tonic-gate listp = &vp->v_pages->p_vpprev->p_vpnext; 32767c478bd9Sstevel@tonic-gate else 32777c478bd9Sstevel@tonic-gate listp = &vp->v_pages; 32787c478bd9Sstevel@tonic-gate page_vpadd(listp, pp); 32797c478bd9Sstevel@tonic-gate } 32807c478bd9Sstevel@tonic-gate mutex_exit(vphm); 32817c478bd9Sstevel@tonic-gate } 32827c478bd9Sstevel@tonic-gate } 32837c478bd9Sstevel@tonic-gate 32847c478bd9Sstevel@tonic-gate void 32857c478bd9Sstevel@tonic-gate hat_page_clrattr(struct page *pp, uint_t flag) 32867c478bd9Sstevel@tonic-gate { 32877c478bd9Sstevel@tonic-gate vnode_t *vp = pp->p_vnode; 32887c478bd9Sstevel@tonic-gate ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 32897c478bd9Sstevel@tonic-gate 32907c478bd9Sstevel@tonic-gate /* 3291a71e32b6Sstans * Caller is expected to hold page's io lock for VMODSORT to work 3292a71e32b6Sstans * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 3293a71e32b6Sstans * bit is cleared. 3294a71e32b6Sstans * We don't have assert to avoid tripping some existing third party 3295a71e32b6Sstans * code. The dirty page is moved back to top of the v_page list 3296a71e32b6Sstans * after IO is done in pvn_write_done(). 32977c478bd9Sstevel@tonic-gate */ 32987c478bd9Sstevel@tonic-gate PP_CLRRM(pp, flag); 32997c478bd9Sstevel@tonic-gate 3300a71e32b6Sstans if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 33017c478bd9Sstevel@tonic-gate 33027c478bd9Sstevel@tonic-gate /* 33037c478bd9Sstevel@tonic-gate * VMODSORT works by removing write permissions and getting 33047c478bd9Sstevel@tonic-gate * a fault when a page is made dirty. At this point 33057c478bd9Sstevel@tonic-gate * we need to remove write permission from all mappings 33067c478bd9Sstevel@tonic-gate * to this page. 
33077c478bd9Sstevel@tonic-gate 		 */
33087c478bd9Sstevel@tonic-gate 		hati_page_clrwrt(pp);
33097c478bd9Sstevel@tonic-gate 	}
33107c478bd9Sstevel@tonic-gate }
33117c478bd9Sstevel@tonic-gate 
33127c478bd9Sstevel@tonic-gate /*
33137c478bd9Sstevel@tonic-gate  *	If flag is specified, returns 0 if attribute is disabled
331421584dbcSPavel Tatashin  *	and nonzero if enabled. If flag specifies multiple attributes
331521584dbcSPavel Tatashin  *	then returns 0 if ALL attributes are disabled. This is an advisory
33167c478bd9Sstevel@tonic-gate  *	call.
33177c478bd9Sstevel@tonic-gate  */
33187c478bd9Sstevel@tonic-gate uint_t
33197c478bd9Sstevel@tonic-gate hat_page_getattr(struct page *pp, uint_t flag)
33207c478bd9Sstevel@tonic-gate {
33217c478bd9Sstevel@tonic-gate 	return (PP_GETRM(pp, flag));
33227c478bd9Sstevel@tonic-gate }
33237c478bd9Sstevel@tonic-gate 
33247c478bd9Sstevel@tonic-gate 
33257c478bd9Sstevel@tonic-gate /*
33267c478bd9Sstevel@tonic-gate  * common code used by hat_pageunload() and hment_steal()
33277c478bd9Sstevel@tonic-gate  */
33287c478bd9Sstevel@tonic-gate hment_t *
33297c478bd9Sstevel@tonic-gate hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
33307c478bd9Sstevel@tonic-gate {
33317c478bd9Sstevel@tonic-gate 	x86pte_t old_pte;
33327c478bd9Sstevel@tonic-gate 	pfn_t pfn = pp->p_pagenum;
33337c478bd9Sstevel@tonic-gate 	hment_t *hm;
33347c478bd9Sstevel@tonic-gate 
33357c478bd9Sstevel@tonic-gate 	/*
33367c478bd9Sstevel@tonic-gate 	 * We need to acquire a hold on the htable in order to
33377c478bd9Sstevel@tonic-gate 	 * do the invalidate. We know the htable must exist, since
33387c478bd9Sstevel@tonic-gate 	 * unmaps don't release the htable until after removing any
33397c478bd9Sstevel@tonic-gate 	 * hment. Having x86_hm_enter() keeps that from proceeding.
33407c478bd9Sstevel@tonic-gate 	 */
33417c478bd9Sstevel@tonic-gate 	htable_acquire(ht);
33427c478bd9Sstevel@tonic-gate 
33437c478bd9Sstevel@tonic-gate 	/*
33447c478bd9Sstevel@tonic-gate 	 * Invalidate the PTE and remove the hment.
33457c478bd9Sstevel@tonic-gate 	 */
3346a6a74e0eSMatthew Ahrens 	old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3347aa2ed9e5Sjosephb 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3348ae115bc7Smrj 		panic("x86pte_inval() failure found PTE = " FMT_PTE
3349aa2ed9e5Sjosephb 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3350aa2ed9e5Sjosephb 		    old_pte, pfn, (uintptr_t)ht, entry);
3351aa2ed9e5Sjosephb 	}
33527c478bd9Sstevel@tonic-gate 
33537c478bd9Sstevel@tonic-gate 	/*
33547c478bd9Sstevel@tonic-gate 	 * Clean up all the htable information for this mapping
33557c478bd9Sstevel@tonic-gate 	 */
33567c478bd9Sstevel@tonic-gate 	ASSERT(ht->ht_valid_cnt > 0);
33577c478bd9Sstevel@tonic-gate 	HTABLE_DEC(ht->ht_valid_cnt);
33587c478bd9Sstevel@tonic-gate 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
33597c478bd9Sstevel@tonic-gate 
33607c478bd9Sstevel@tonic-gate 	/*
33617c478bd9Sstevel@tonic-gate 	 * sync ref/mod bits to the page_t
33627c478bd9Sstevel@tonic-gate 	 */
3363ae115bc7Smrj 	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
33647c478bd9Sstevel@tonic-gate 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
33657c478bd9Sstevel@tonic-gate 
33667c478bd9Sstevel@tonic-gate 	/*
33677c478bd9Sstevel@tonic-gate 	 * Remove the mapping list entry for this page.
33687c478bd9Sstevel@tonic-gate 	 */
33697c478bd9Sstevel@tonic-gate 	hm = hment_remove(pp, ht, entry);
33707c478bd9Sstevel@tonic-gate 
33717c478bd9Sstevel@tonic-gate 	/*
33727c478bd9Sstevel@tonic-gate 	 * drop the mapping list lock so that we might free the
33737c478bd9Sstevel@tonic-gate 	 * hment and htable.
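	 * The hment itself is handed back to the caller, which is expected
	 * to hment_free() it once outside the mapping list lock (see
	 * hati_pageunload() below).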
33747c478bd9Sstevel@tonic-gate */ 33757c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 33767c478bd9Sstevel@tonic-gate htable_release(ht); 33777c478bd9Sstevel@tonic-gate return (hm); 33787c478bd9Sstevel@tonic-gate } 33797c478bd9Sstevel@tonic-gate 3380a5652762Spraks extern int vpm_enable; 33817c478bd9Sstevel@tonic-gate /* 33827c478bd9Sstevel@tonic-gate * Unload all translations to a page. If the page is a subpage of a large 33837c478bd9Sstevel@tonic-gate * page, the large page mappings are also removed. 33847c478bd9Sstevel@tonic-gate * 33857c478bd9Sstevel@tonic-gate * The forceflags are unused. 33867c478bd9Sstevel@tonic-gate */ 33877c478bd9Sstevel@tonic-gate 33887c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 33897c478bd9Sstevel@tonic-gate static int 33907c478bd9Sstevel@tonic-gate hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag) 33917c478bd9Sstevel@tonic-gate { 33927c478bd9Sstevel@tonic-gate page_t *cur_pp = pp; 33937c478bd9Sstevel@tonic-gate hment_t *hm; 33947c478bd9Sstevel@tonic-gate hment_t *prev; 33957c478bd9Sstevel@tonic-gate htable_t *ht; 33967c478bd9Sstevel@tonic-gate uint_t entry; 33977c478bd9Sstevel@tonic-gate level_t level; 33987c478bd9Sstevel@tonic-gate 3399843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 34006c9930aeSJoe Bonasera 34016c9930aeSJoe Bonasera /* 34026c9930aeSJoe Bonasera * prevent recursion due to kmem_free() 34036c9930aeSJoe Bonasera */ 34046c9930aeSJoe Bonasera ++curthread->t_hatdepth; 34056c9930aeSJoe Bonasera ASSERT(curthread->t_hatdepth < 16); 34066c9930aeSJoe Bonasera 3407a5652762Spraks #if defined(__amd64) 3408a5652762Spraks /* 3409a5652762Spraks * clear the vpm ref. 3410a5652762Spraks */ 3411a5652762Spraks if (vpm_enable) { 3412a5652762Spraks pp->p_vpmref = 0; 3413a5652762Spraks } 3414a5652762Spraks #endif 34157c478bd9Sstevel@tonic-gate /* 34167c478bd9Sstevel@tonic-gate * The loop with next_size handles pages with multiple pagesize mappings 34177c478bd9Sstevel@tonic-gate */ 34187c478bd9Sstevel@tonic-gate next_size: 34197c478bd9Sstevel@tonic-gate for (;;) { 34207c478bd9Sstevel@tonic-gate 34217c478bd9Sstevel@tonic-gate /* 34227c478bd9Sstevel@tonic-gate * Get a mapping list entry 34237c478bd9Sstevel@tonic-gate */ 34247c478bd9Sstevel@tonic-gate x86_hm_enter(cur_pp); 34257c478bd9Sstevel@tonic-gate for (prev = NULL; ; prev = hm) { 34267c478bd9Sstevel@tonic-gate hm = hment_walk(cur_pp, &ht, &entry, prev); 34277c478bd9Sstevel@tonic-gate if (hm == NULL) { 34287c478bd9Sstevel@tonic-gate x86_hm_exit(cur_pp); 34297c478bd9Sstevel@tonic-gate 34307c478bd9Sstevel@tonic-gate /* 34317c478bd9Sstevel@tonic-gate * If not part of a larger page, we're done. 34327c478bd9Sstevel@tonic-gate */ 3433ae115bc7Smrj if (cur_pp->p_szc <= pg_szcd) { 34346c9930aeSJoe Bonasera ASSERT(curthread->t_hatdepth > 0); 34356c9930aeSJoe Bonasera --curthread->t_hatdepth; 3436843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 34377c478bd9Sstevel@tonic-gate return (0); 3438ae115bc7Smrj } 34397c478bd9Sstevel@tonic-gate 34407c478bd9Sstevel@tonic-gate /* 34417c478bd9Sstevel@tonic-gate * Else check the next larger page size. 34427c478bd9Sstevel@tonic-gate * hat_page_demote() may decrease p_szc 34437c478bd9Sstevel@tonic-gate * but that's ok we'll just take an extra 34447c478bd9Sstevel@tonic-gate * trip discover there're no larger mappings 34457c478bd9Sstevel@tonic-gate * and return. 
34467c478bd9Sstevel@tonic-gate */ 34477c478bd9Sstevel@tonic-gate ++pg_szcd; 34487c478bd9Sstevel@tonic-gate cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd); 34497c478bd9Sstevel@tonic-gate goto next_size; 34507c478bd9Sstevel@tonic-gate } 34517c478bd9Sstevel@tonic-gate 34527c478bd9Sstevel@tonic-gate /* 34537c478bd9Sstevel@tonic-gate * If this mapping size matches, remove it. 34547c478bd9Sstevel@tonic-gate */ 34557c478bd9Sstevel@tonic-gate level = ht->ht_level; 34567c478bd9Sstevel@tonic-gate if (level == pg_szcd) 34577c478bd9Sstevel@tonic-gate break; 34587c478bd9Sstevel@tonic-gate } 34597c478bd9Sstevel@tonic-gate 34607c478bd9Sstevel@tonic-gate /* 34617c478bd9Sstevel@tonic-gate * Remove the mapping list entry for this page. 34627c478bd9Sstevel@tonic-gate * Note this does the x86_hm_exit() for us. 34637c478bd9Sstevel@tonic-gate */ 34647c478bd9Sstevel@tonic-gate hm = hati_page_unmap(cur_pp, ht, entry); 34657c478bd9Sstevel@tonic-gate if (hm != NULL) 34667c478bd9Sstevel@tonic-gate hment_free(hm); 34677c478bd9Sstevel@tonic-gate } 34687c478bd9Sstevel@tonic-gate } 34697c478bd9Sstevel@tonic-gate 34707c478bd9Sstevel@tonic-gate int 34717c478bd9Sstevel@tonic-gate hat_pageunload(struct page *pp, uint_t forceflag) 34727c478bd9Sstevel@tonic-gate { 34737c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 34747c478bd9Sstevel@tonic-gate return (hati_pageunload(pp, 0, forceflag)); 34757c478bd9Sstevel@tonic-gate } 34767c478bd9Sstevel@tonic-gate 34777c478bd9Sstevel@tonic-gate /* 34787c478bd9Sstevel@tonic-gate * Unload all large mappings to pp and reduce by 1 the p_szc field of every 34797c478bd9Sstevel@tonic-gate * large page level that included pp. 34807c478bd9Sstevel@tonic-gate * 34817c478bd9Sstevel@tonic-gate * pp must be locked EXCL. Even though no other constituent pages are locked 34827c478bd9Sstevel@tonic-gate * it's legal to unload large mappings to pp because all constituent pages of 34837c478bd9Sstevel@tonic-gate * large locked mappings have to be locked SHARED. Therefore, if we have an EXCL 34847c478bd9Sstevel@tonic-gate * lock on one of the constituent pages, none of the large mappings to pp are 34857c478bd9Sstevel@tonic-gate * locked. 34867c478bd9Sstevel@tonic-gate * 34877c478bd9Sstevel@tonic-gate * Change (always decrease) p_szc field starting from the last constituent 34887c478bd9Sstevel@tonic-gate * page and ending with root constituent page so that root's pszc always shows 34897c478bd9Sstevel@tonic-gate * the area where hat_page_demote() may be active. 34907c478bd9Sstevel@tonic-gate * 34917c478bd9Sstevel@tonic-gate * This mechanism is only used for file system pages where it's not always 34927c478bd9Sstevel@tonic-gate * possible to get EXCL locks on all constituent pages to demote the size code 34937c478bd9Sstevel@tonic-gate * (as is done for anonymous or kernel large pages).
34947c478bd9Sstevel@tonic-gate */ 34957c478bd9Sstevel@tonic-gate void 34967c478bd9Sstevel@tonic-gate hat_page_demote(page_t *pp) 34977c478bd9Sstevel@tonic-gate { 34987c478bd9Sstevel@tonic-gate uint_t pszc; 34997c478bd9Sstevel@tonic-gate uint_t rszc; 35007c478bd9Sstevel@tonic-gate uint_t szc; 35017c478bd9Sstevel@tonic-gate page_t *rootpp; 35027c478bd9Sstevel@tonic-gate page_t *firstpp; 35037c478bd9Sstevel@tonic-gate page_t *lastpp; 35047c478bd9Sstevel@tonic-gate pgcnt_t pgcnt; 35057c478bd9Sstevel@tonic-gate 35067c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 35077c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp)); 35087c478bd9Sstevel@tonic-gate ASSERT(page_szc_lock_assert(pp)); 35097c478bd9Sstevel@tonic-gate 35107c478bd9Sstevel@tonic-gate if (pp->p_szc == 0) 35117c478bd9Sstevel@tonic-gate return; 35127c478bd9Sstevel@tonic-gate 35137c478bd9Sstevel@tonic-gate rootpp = PP_GROUPLEADER(pp, 1); 35147c478bd9Sstevel@tonic-gate (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD); 35157c478bd9Sstevel@tonic-gate 35167c478bd9Sstevel@tonic-gate /* 35177c478bd9Sstevel@tonic-gate * All large mappings to pp are gone, 35187c478bd9Sstevel@tonic-gate * and no new ones can be set up since pp is locked exclusively. 35197c478bd9Sstevel@tonic-gate * 35207c478bd9Sstevel@tonic-gate * Lock the root to make sure there's only one hat_page_demote() 35217c478bd9Sstevel@tonic-gate * outstanding within the area of this root's pszc. 35227c478bd9Sstevel@tonic-gate * 35237c478bd9Sstevel@tonic-gate * A second potential hat_page_demote() is already eliminated by the upper 35247c478bd9Sstevel@tonic-gate * VM layer via page_szc_lock(), but we don't rely on it and use our 35257c478bd9Sstevel@tonic-gate * own locking (so that upper layer locking can be changed without 35267c478bd9Sstevel@tonic-gate * the hat having to assume that the upper layer VM prevents multiple 35277c478bd9Sstevel@tonic-gate * hat_page_demote() calls from being issued simultaneously to the same large 35287c478bd9Sstevel@tonic-gate * page). 35297c478bd9Sstevel@tonic-gate */ 35307c478bd9Sstevel@tonic-gate again: 35317c478bd9Sstevel@tonic-gate pszc = pp->p_szc; 35327c478bd9Sstevel@tonic-gate if (pszc == 0) 35337c478bd9Sstevel@tonic-gate return; 35347c478bd9Sstevel@tonic-gate rootpp = PP_GROUPLEADER(pp, pszc); 35357c478bd9Sstevel@tonic-gate x86_hm_enter(rootpp); 35367c478bd9Sstevel@tonic-gate /* 35377c478bd9Sstevel@tonic-gate * If root's p_szc is different from pszc we raced with another 35387c478bd9Sstevel@tonic-gate * hat_page_demote(). Drop the lock and try to find the root again. 35397c478bd9Sstevel@tonic-gate * If root's p_szc is greater than pszc, a previous hat_page_demote() is 35407c478bd9Sstevel@tonic-gate * not done yet. Take and release the mlist lock of root's root to wait 35417c478bd9Sstevel@tonic-gate * for the previous hat_page_demote() to complete.
35427c478bd9Sstevel@tonic-gate */ 35437c478bd9Sstevel@tonic-gate if ((rszc = rootpp->p_szc) != pszc) { 35447c478bd9Sstevel@tonic-gate x86_hm_exit(rootpp); 35457c478bd9Sstevel@tonic-gate if (rszc > pszc) { 35467c478bd9Sstevel@tonic-gate /* p_szc of a locked non free page can't increase */ 35477c478bd9Sstevel@tonic-gate ASSERT(pp != rootpp); 35487c478bd9Sstevel@tonic-gate 35497c478bd9Sstevel@tonic-gate rootpp = PP_GROUPLEADER(rootpp, rszc); 35507c478bd9Sstevel@tonic-gate x86_hm_enter(rootpp); 35517c478bd9Sstevel@tonic-gate x86_hm_exit(rootpp); 35527c478bd9Sstevel@tonic-gate } 35537c478bd9Sstevel@tonic-gate goto again; 35547c478bd9Sstevel@tonic-gate } 35557c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == pszc); 35567c478bd9Sstevel@tonic-gate 35577c478bd9Sstevel@tonic-gate /* 35587c478bd9Sstevel@tonic-gate * Decrement by 1 p_szc of every constituent page of a region that 35597c478bd9Sstevel@tonic-gate * covered pp. For example if original szc is 3 it gets changed to 2 35607c478bd9Sstevel@tonic-gate * everywhere except in region 2 that covered pp. Region 2 that 35617c478bd9Sstevel@tonic-gate * covered pp gets demoted to 1 everywhere except in region 1 that 35627c478bd9Sstevel@tonic-gate * covered pp. The region 1 that covered pp is demoted to region 35637c478bd9Sstevel@tonic-gate * 0. It's done this way because from region 3 we removed level 3 35647c478bd9Sstevel@tonic-gate * mappings, from region 2 that covered pp we removed level 2 mappings 35657c478bd9Sstevel@tonic-gate * and from region 1 that covered pp we removed level 1 mappings. All 35667c478bd9Sstevel@tonic-gate * changes are done from high pfns to low pfns so that roots 35677c478bd9Sstevel@tonic-gate * are changed last allowing one to know the largest region where 35687c478bd9Sstevel@tonic-gate * hat_page_demote() is still active by only looking at the root page. 35697c478bd9Sstevel@tonic-gate * 35707c478bd9Sstevel@tonic-gate * This algorithm is implemented in 2 while loops. First loop changes 35717c478bd9Sstevel@tonic-gate * p_szc of pages to the right of pp's level 1 region and second 35727c478bd9Sstevel@tonic-gate * loop changes p_szc of pages of level 1 region that covers pp 35737c478bd9Sstevel@tonic-gate * and all pages to the left of level 1 region that covers pp. 35747c478bd9Sstevel@tonic-gate * In the first loop p_szc keeps dropping with every iteration 35757c478bd9Sstevel@tonic-gate * and in the second loop it keeps increasing with every iteration. 35767c478bd9Sstevel@tonic-gate * 35777c478bd9Sstevel@tonic-gate * First loop description: Demote pages to the right of pp outside of 35787c478bd9Sstevel@tonic-gate * level 1 region that covers pp. In every iteration of the while 35797c478bd9Sstevel@tonic-gate * loop below find the last page of szc region and the first page of 35807c478bd9Sstevel@tonic-gate * (szc - 1) region that is immediately to the right of (szc - 1) 35817c478bd9Sstevel@tonic-gate * region that covers pp. From last such page to first such page 35827c478bd9Sstevel@tonic-gate * change every page's szc to szc - 1. Decrement szc and continue 35837c478bd9Sstevel@tonic-gate * looping until szc is 1. If pp belongs to the last (szc - 1) region 35847c478bd9Sstevel@tonic-gate * of szc region skip to the next iteration.
35857c478bd9Sstevel@tonic-gate */ 35867c478bd9Sstevel@tonic-gate szc = pszc; 35877c478bd9Sstevel@tonic-gate while (szc > 1) { 35887c478bd9Sstevel@tonic-gate lastpp = PP_GROUPLEADER(pp, szc); 35897c478bd9Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc); 35907c478bd9Sstevel@tonic-gate lastpp += pgcnt - 1; 35917c478bd9Sstevel@tonic-gate firstpp = PP_GROUPLEADER(pp, (szc - 1)); 35927c478bd9Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc - 1); 35937c478bd9Sstevel@tonic-gate if (lastpp - firstpp < pgcnt) { 35947c478bd9Sstevel@tonic-gate szc--; 35957c478bd9Sstevel@tonic-gate continue; 35967c478bd9Sstevel@tonic-gate } 35977c478bd9Sstevel@tonic-gate firstpp += pgcnt; 35987c478bd9Sstevel@tonic-gate while (lastpp != firstpp) { 35997c478bd9Sstevel@tonic-gate ASSERT(lastpp->p_szc == pszc); 36007c478bd9Sstevel@tonic-gate lastpp->p_szc = szc - 1; 36017c478bd9Sstevel@tonic-gate lastpp--; 36027c478bd9Sstevel@tonic-gate } 36037c478bd9Sstevel@tonic-gate firstpp->p_szc = szc - 1; 36047c478bd9Sstevel@tonic-gate szc--; 36057c478bd9Sstevel@tonic-gate } 36067c478bd9Sstevel@tonic-gate 36077c478bd9Sstevel@tonic-gate /* 36087c478bd9Sstevel@tonic-gate * Second loop description: 36097c478bd9Sstevel@tonic-gate * First iteration changes p_szc to 0 of every 36107c478bd9Sstevel@tonic-gate * page of level 1 region that covers pp. 36117c478bd9Sstevel@tonic-gate * Subsequent iterations find last page of szc region 36127c478bd9Sstevel@tonic-gate * immediately to the left of szc region that covered pp 36137c478bd9Sstevel@tonic-gate * and first page of (szc + 1) region that covers pp. 36147c478bd9Sstevel@tonic-gate * From last to first page change p_szc of every page to szc. 36157c478bd9Sstevel@tonic-gate * Increment szc and continue looping until szc is pszc. 36167c478bd9Sstevel@tonic-gate * If pp belongs to the first szc region of (szc + 1) region 36177c478bd9Sstevel@tonic-gate * skip to the next iteration.
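 * Note that rootpp (the pszc-level group leader) is the very last page
 * whose p_szc is updated: the loop below breaks once firstpp == rootpp.
 * That ordering is what lets an observer tell from the root page's
 * p_szc alone whether a demotion is still in progress.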
36187c478bd9Sstevel@tonic-gate * 36197c478bd9Sstevel@tonic-gate */ 36207c478bd9Sstevel@tonic-gate szc = 0; 36217c478bd9Sstevel@tonic-gate while (szc < pszc) { 36227c478bd9Sstevel@tonic-gate firstpp = PP_GROUPLEADER(pp, (szc + 1)); 36237c478bd9Sstevel@tonic-gate if (szc == 0) { 36247c478bd9Sstevel@tonic-gate pgcnt = page_get_pagecnt(1); 36257c478bd9Sstevel@tonic-gate lastpp = firstpp + (pgcnt - 1); 36267c478bd9Sstevel@tonic-gate } else { 36277c478bd9Sstevel@tonic-gate lastpp = PP_GROUPLEADER(pp, szc); 36287c478bd9Sstevel@tonic-gate if (firstpp == lastpp) { 36297c478bd9Sstevel@tonic-gate szc++; 36307c478bd9Sstevel@tonic-gate continue; 36317c478bd9Sstevel@tonic-gate } 36327c478bd9Sstevel@tonic-gate lastpp--; 36337c478bd9Sstevel@tonic-gate pgcnt = page_get_pagecnt(szc); 36347c478bd9Sstevel@tonic-gate } 36357c478bd9Sstevel@tonic-gate while (lastpp != firstpp) { 36367c478bd9Sstevel@tonic-gate ASSERT(lastpp->p_szc == pszc); 36377c478bd9Sstevel@tonic-gate lastpp->p_szc = szc; 36387c478bd9Sstevel@tonic-gate lastpp--; 36397c478bd9Sstevel@tonic-gate } 36407c478bd9Sstevel@tonic-gate firstpp->p_szc = szc; 36417c478bd9Sstevel@tonic-gate if (firstpp == rootpp) 36427c478bd9Sstevel@tonic-gate break; 36437c478bd9Sstevel@tonic-gate szc++; 36447c478bd9Sstevel@tonic-gate } 36457c478bd9Sstevel@tonic-gate x86_hm_exit(rootpp); 36467c478bd9Sstevel@tonic-gate } 36477c478bd9Sstevel@tonic-gate 36487c478bd9Sstevel@tonic-gate /* 36497c478bd9Sstevel@tonic-gate * get hw stats from hardware into page struct and reset hw stats 36507c478bd9Sstevel@tonic-gate * returns attributes of page 36517c478bd9Sstevel@tonic-gate * Flags for hat_pagesync, hat_getstat, hat_sync 36527c478bd9Sstevel@tonic-gate * 36537c478bd9Sstevel@tonic-gate * define HAT_SYNC_ZERORM 0x01 36547c478bd9Sstevel@tonic-gate * 36557c478bd9Sstevel@tonic-gate * Additional flags for hat_pagesync 36567c478bd9Sstevel@tonic-gate * 36577c478bd9Sstevel@tonic-gate * define HAT_SYNC_STOPON_REF 0x02 36587c478bd9Sstevel@tonic-gate * define HAT_SYNC_STOPON_MOD 0x04 36597c478bd9Sstevel@tonic-gate * define HAT_SYNC_STOPON_RM 0x06 36607c478bd9Sstevel@tonic-gate * define HAT_SYNC_STOPON_SHARED 0x08 36617c478bd9Sstevel@tonic-gate */ 36627c478bd9Sstevel@tonic-gate uint_t 36637c478bd9Sstevel@tonic-gate hat_pagesync(struct page *pp, uint_t flags) 36647c478bd9Sstevel@tonic-gate { 36657c478bd9Sstevel@tonic-gate hment_t *hm = NULL; 36667c478bd9Sstevel@tonic-gate htable_t *ht; 36677c478bd9Sstevel@tonic-gate uint_t entry; 36687c478bd9Sstevel@tonic-gate x86pte_t old, save_old; 36697c478bd9Sstevel@tonic-gate x86pte_t new; 36707c478bd9Sstevel@tonic-gate uchar_t nrmbits = P_REF|P_MOD|P_RO; 36717c478bd9Sstevel@tonic-gate extern ulong_t po_share; 36727c478bd9Sstevel@tonic-gate page_t *save_pp = pp; 36737c478bd9Sstevel@tonic-gate uint_t pszc = 0; 36747c478bd9Sstevel@tonic-gate 36757c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp) || panicstr); 36767c478bd9Sstevel@tonic-gate 36777c478bd9Sstevel@tonic-gate if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD)) 36787c478bd9Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 36797c478bd9Sstevel@tonic-gate 36807c478bd9Sstevel@tonic-gate if ((flags & HAT_SYNC_ZERORM) == 0) { 36817c478bd9Sstevel@tonic-gate 36827c478bd9Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp)) 36837c478bd9Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 36847c478bd9Sstevel@tonic-gate 36857c478bd9Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp)) 36867c478bd9Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 36877c478bd9Sstevel@tonic-gate 
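		/*
		 * If asked to stop on shared pages and this page has more
		 * than po_share mappings, return the current bits without
		 * walking the mapping list; a read-only page in this state
		 * is treated as referenced.
		 */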
36887c478bd9Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_SHARED) != 0 && 36897c478bd9Sstevel@tonic-gate hat_page_getshare(pp) > po_share) { 36907c478bd9Sstevel@tonic-gate if (PP_ISRO(pp)) 36917c478bd9Sstevel@tonic-gate PP_SETREF(pp); 36927c478bd9Sstevel@tonic-gate return (pp->p_nrm & nrmbits); 36937c478bd9Sstevel@tonic-gate } 36947c478bd9Sstevel@tonic-gate } 36957c478bd9Sstevel@tonic-gate 3696843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 36977c478bd9Sstevel@tonic-gate next_size: 36987c478bd9Sstevel@tonic-gate /* 36997c478bd9Sstevel@tonic-gate * walk thru the mapping list syncing (and clearing) ref/mod bits. 37007c478bd9Sstevel@tonic-gate */ 37017c478bd9Sstevel@tonic-gate x86_hm_enter(pp); 37027c478bd9Sstevel@tonic-gate while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 37037c478bd9Sstevel@tonic-gate if (ht->ht_level < pszc) 37047c478bd9Sstevel@tonic-gate continue; 37057c478bd9Sstevel@tonic-gate old = x86pte_get(ht, entry); 37067c478bd9Sstevel@tonic-gate try_again: 37077c478bd9Sstevel@tonic-gate 37087c478bd9Sstevel@tonic-gate ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum); 37097c478bd9Sstevel@tonic-gate 37107c478bd9Sstevel@tonic-gate if (PTE_GET(old, PT_REF | PT_MOD) == 0) 37117c478bd9Sstevel@tonic-gate continue; 37127c478bd9Sstevel@tonic-gate 37137c478bd9Sstevel@tonic-gate save_old = old; 37147c478bd9Sstevel@tonic-gate if ((flags & HAT_SYNC_ZERORM) != 0) { 37157c478bd9Sstevel@tonic-gate 37167c478bd9Sstevel@tonic-gate /* 37177c478bd9Sstevel@tonic-gate * Need to clear ref or mod bits. Need to demap 37187c478bd9Sstevel@tonic-gate * to make sure any executing TLBs see cleared bits. 37197c478bd9Sstevel@tonic-gate */ 37207c478bd9Sstevel@tonic-gate new = old; 37217c478bd9Sstevel@tonic-gate PTE_CLR(new, PT_REF | PT_MOD); 37227c478bd9Sstevel@tonic-gate old = hati_update_pte(ht, entry, old, new); 37237c478bd9Sstevel@tonic-gate if (old != 0) 37247c478bd9Sstevel@tonic-gate goto try_again; 37257c478bd9Sstevel@tonic-gate 37267c478bd9Sstevel@tonic-gate old = save_old; 37277c478bd9Sstevel@tonic-gate } 37287c478bd9Sstevel@tonic-gate 37297c478bd9Sstevel@tonic-gate /* 37307c478bd9Sstevel@tonic-gate * Sync the PTE 37317c478bd9Sstevel@tonic-gate */ 3732ae115bc7Smrj if (!(flags & HAT_SYNC_ZERORM) && 3733ae115bc7Smrj PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC) 37347c478bd9Sstevel@tonic-gate hati_sync_pte_to_page(pp, old, ht->ht_level); 37357c478bd9Sstevel@tonic-gate 37367c478bd9Sstevel@tonic-gate /* 37377c478bd9Sstevel@tonic-gate * can stop short if we found a ref'd or mod'd page 37387c478bd9Sstevel@tonic-gate */ 37397c478bd9Sstevel@tonic-gate if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) || 37407c478bd9Sstevel@tonic-gate (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) { 37417c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 3742ae115bc7Smrj goto done; 37437c478bd9Sstevel@tonic-gate } 37447c478bd9Sstevel@tonic-gate } 37457c478bd9Sstevel@tonic-gate x86_hm_exit(pp); 37467c478bd9Sstevel@tonic-gate while (pszc < pp->p_szc) { 37477c478bd9Sstevel@tonic-gate page_t *tpp; 37487c478bd9Sstevel@tonic-gate pszc++; 37497c478bd9Sstevel@tonic-gate tpp = PP_GROUPLEADER(pp, pszc); 37507c478bd9Sstevel@tonic-gate if (pp != tpp) { 37517c478bd9Sstevel@tonic-gate pp = tpp; 37527c478bd9Sstevel@tonic-gate goto next_size; 37537c478bd9Sstevel@tonic-gate } 37547c478bd9Sstevel@tonic-gate } 3755ae115bc7Smrj done: 3756843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 37577c478bd9Sstevel@tonic-gate return (save_pp->p_nrm & nrmbits); 37587c478bd9Sstevel@tonic-gate } 37597c478bd9Sstevel@tonic-gate 37607c478bd9Sstevel@tonic-gate /* 
37617c478bd9Sstevel@tonic-gate * returns approx number of mappings to this pp. A return of 0 implies 37627c478bd9Sstevel@tonic-gate * there are no mappings to the page. 37637c478bd9Sstevel@tonic-gate */ 37647c478bd9Sstevel@tonic-gate ulong_t 37657c478bd9Sstevel@tonic-gate hat_page_getshare(page_t *pp) 37667c478bd9Sstevel@tonic-gate { 37677c478bd9Sstevel@tonic-gate uint_t cnt; 37687c478bd9Sstevel@tonic-gate cnt = hment_mapcnt(pp); 3769a5652762Spraks #if defined(__amd64) 3770a5652762Spraks if (vpm_enable && pp->p_vpmref) { 3771a5652762Spraks cnt += 1; 3772a5652762Spraks } 3773a5652762Spraks #endif 37747c478bd9Sstevel@tonic-gate return (cnt); 37757c478bd9Sstevel@tonic-gate } 37767c478bd9Sstevel@tonic-gate 37777c478bd9Sstevel@tonic-gate /* 377805d3dc4bSpaulsan * Return 1 if the number of mappings exceeds sh_thresh. Return 0 377905d3dc4bSpaulsan * otherwise. 378005d3dc4bSpaulsan */ 378105d3dc4bSpaulsan int 378205d3dc4bSpaulsan hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 378305d3dc4bSpaulsan { 378405d3dc4bSpaulsan return (hat_page_getshare(pp) > sh_thresh); 378505d3dc4bSpaulsan } 378605d3dc4bSpaulsan 378705d3dc4bSpaulsan /* 37887c478bd9Sstevel@tonic-gate * hat_softlock isn't supported anymore 37897c478bd9Sstevel@tonic-gate */ 37907c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 37917c478bd9Sstevel@tonic-gate faultcode_t 37927c478bd9Sstevel@tonic-gate hat_softlock( 37937c478bd9Sstevel@tonic-gate hat_t *hat, 37947c478bd9Sstevel@tonic-gate caddr_t addr, 37957c478bd9Sstevel@tonic-gate size_t *len, 37967c478bd9Sstevel@tonic-gate struct page **page_array, 37977c478bd9Sstevel@tonic-gate uint_t flags) 37987c478bd9Sstevel@tonic-gate { 37997c478bd9Sstevel@tonic-gate return (FC_NOSUPPORT); 38007c478bd9Sstevel@tonic-gate } 38017c478bd9Sstevel@tonic-gate 38027c478bd9Sstevel@tonic-gate 38037c478bd9Sstevel@tonic-gate 38047c478bd9Sstevel@tonic-gate /* 38057c478bd9Sstevel@tonic-gate * Routine to expose supported HAT features to platform independent code.
38067c478bd9Sstevel@tonic-gate */ 38077c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 38087c478bd9Sstevel@tonic-gate int 38097c478bd9Sstevel@tonic-gate hat_supported(enum hat_features feature, void *arg) 38107c478bd9Sstevel@tonic-gate { 38117c478bd9Sstevel@tonic-gate switch (feature) { 38127c478bd9Sstevel@tonic-gate 38137c478bd9Sstevel@tonic-gate case HAT_SHARED_PT: /* this is really ISM */ 38147c478bd9Sstevel@tonic-gate return (1); 38157c478bd9Sstevel@tonic-gate 38167c478bd9Sstevel@tonic-gate case HAT_DYNAMIC_ISM_UNMAP: 38177c478bd9Sstevel@tonic-gate return (0); 38187c478bd9Sstevel@tonic-gate 38197c478bd9Sstevel@tonic-gate case HAT_VMODSORT: 38207c478bd9Sstevel@tonic-gate return (1); 38217c478bd9Sstevel@tonic-gate 382205d3dc4bSpaulsan case HAT_SHARED_REGIONS: 382305d3dc4bSpaulsan return (0); 382405d3dc4bSpaulsan 38257c478bd9Sstevel@tonic-gate default: 38267c478bd9Sstevel@tonic-gate panic("hat_supported() - unknown feature"); 38277c478bd9Sstevel@tonic-gate } 38287c478bd9Sstevel@tonic-gate return (0); 38297c478bd9Sstevel@tonic-gate } 38307c478bd9Sstevel@tonic-gate 38317c478bd9Sstevel@tonic-gate /* 38327c478bd9Sstevel@tonic-gate * Called when a thread is exiting and has been switched to the kernel AS 38337c478bd9Sstevel@tonic-gate */ 38347c478bd9Sstevel@tonic-gate void 38357c478bd9Sstevel@tonic-gate hat_thread_exit(kthread_t *thd) 38367c478bd9Sstevel@tonic-gate { 38377c478bd9Sstevel@tonic-gate ASSERT(thd->t_procp->p_as == &kas); 3838843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 38397c478bd9Sstevel@tonic-gate hat_switch(thd->t_procp->p_as->a_hat); 3840843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 38417c478bd9Sstevel@tonic-gate } 38427c478bd9Sstevel@tonic-gate 38437c478bd9Sstevel@tonic-gate /* 38447c478bd9Sstevel@tonic-gate * Setup the given brand new hat structure as the new HAT on this cpu's mmu. 38457c478bd9Sstevel@tonic-gate */ 38467c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 38477c478bd9Sstevel@tonic-gate void 38487c478bd9Sstevel@tonic-gate hat_setup(hat_t *hat, int flags) 38497c478bd9Sstevel@tonic-gate { 3850843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 38517c478bd9Sstevel@tonic-gate kpreempt_disable(); 38527c478bd9Sstevel@tonic-gate 38537c478bd9Sstevel@tonic-gate hat_switch(hat); 38547c478bd9Sstevel@tonic-gate 38557c478bd9Sstevel@tonic-gate kpreempt_enable(); 3856843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 38577c478bd9Sstevel@tonic-gate } 38587c478bd9Sstevel@tonic-gate 38597c478bd9Sstevel@tonic-gate /* 38607c478bd9Sstevel@tonic-gate * Prepare for a CPU private mapping for the given address. 38617c478bd9Sstevel@tonic-gate * 38627c478bd9Sstevel@tonic-gate * The address can only be used from a single CPU and can be remapped 38637c478bd9Sstevel@tonic-gate * using hat_mempte_remap(). Return the address of the PTE. 38647c478bd9Sstevel@tonic-gate * 38657c478bd9Sstevel@tonic-gate * We do the htable_create() if necessary and increment the valid count so 38667c478bd9Sstevel@tonic-gate * the htable can't disappear. We also hat_devload() the page table into 38677c478bd9Sstevel@tonic-gate * kernel so that the PTE is quickly accessed. 
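 * A typical usage pattern (illustrative sketch, not a requirement of the
 * interface): call hat_mempte_setup(addr) once to get the PTE's physical
 * address, then with preemption disabled use hat_mempte_remap(pfn, addr,
 * pte, attr, flags) to point the mapping at successive pages, and finally
 * call hat_mempte_release(addr, pte) when the mapping is no longer needed.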
38687c478bd9Sstevel@tonic-gate */ 3869ae115bc7Smrj hat_mempte_t 3870ae115bc7Smrj hat_mempte_setup(caddr_t addr) 38717c478bd9Sstevel@tonic-gate { 38727c478bd9Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr; 38737c478bd9Sstevel@tonic-gate htable_t *ht; 38747c478bd9Sstevel@tonic-gate uint_t entry; 38757c478bd9Sstevel@tonic-gate x86pte_t oldpte; 3876ae115bc7Smrj hat_mempte_t p; 38777c478bd9Sstevel@tonic-gate 38787c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va)); 38797c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 3880aac11643Sjosephb ++curthread->t_hatdepth; 3881551bc2a6Smrj XPV_DISALLOW_MIGRATE(); 38827c478bd9Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0); 38837c478bd9Sstevel@tonic-gate if (ht == NULL) { 38847c478bd9Sstevel@tonic-gate ht = htable_create(kas.a_hat, va, 0, NULL); 38857c478bd9Sstevel@tonic-gate entry = htable_va2entry(va, ht); 38867c478bd9Sstevel@tonic-gate ASSERT(ht->ht_level == 0); 38877c478bd9Sstevel@tonic-gate oldpte = x86pte_get(ht, entry); 38887c478bd9Sstevel@tonic-gate } 38897c478bd9Sstevel@tonic-gate if (PTE_ISVALID(oldpte)) 38907c478bd9Sstevel@tonic-gate panic("hat_mempte_setup(): address already mapped" 3891903a11ebSrh87107 "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte); 38927c478bd9Sstevel@tonic-gate 38937c478bd9Sstevel@tonic-gate /* 38947c478bd9Sstevel@tonic-gate * increment ht_valid_cnt so that the pagetable can't disappear 38957c478bd9Sstevel@tonic-gate */ 38967c478bd9Sstevel@tonic-gate HTABLE_INC(ht->ht_valid_cnt); 38977c478bd9Sstevel@tonic-gate 38987c478bd9Sstevel@tonic-gate /* 3899ae115bc7Smrj * return the PTE physical address to the caller. 39007c478bd9Sstevel@tonic-gate */ 39017c478bd9Sstevel@tonic-gate htable_release(ht); 3902551bc2a6Smrj XPV_ALLOW_MIGRATE(); 3903ae115bc7Smrj p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry); 3904aac11643Sjosephb --curthread->t_hatdepth; 3905ae115bc7Smrj return (p); 39067c478bd9Sstevel@tonic-gate } 39077c478bd9Sstevel@tonic-gate 39087c478bd9Sstevel@tonic-gate /* 39097c478bd9Sstevel@tonic-gate * Release a CPU private mapping for the given address. 39107c478bd9Sstevel@tonic-gate * We decrement the htable valid count so it might be destroyed. 
39117c478bd9Sstevel@tonic-gate */ 3912ae115bc7Smrj /*ARGSUSED1*/ 39137c478bd9Sstevel@tonic-gate void 3914ae115bc7Smrj hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa) 39157c478bd9Sstevel@tonic-gate { 39167c478bd9Sstevel@tonic-gate htable_t *ht; 39177c478bd9Sstevel@tonic-gate 3918551bc2a6Smrj XPV_DISALLOW_MIGRATE(); 39197c478bd9Sstevel@tonic-gate /* 3920ae115bc7Smrj * invalidate any left over mapping and decrement the htable valid count 39217c478bd9Sstevel@tonic-gate */ 3922843e1988Sjohnlev #ifdef __xpv 3923843e1988Sjohnlev if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0, 3924843e1988Sjohnlev UVMF_INVLPG | UVMF_LOCAL)) 3925843e1988Sjohnlev panic("HYPERVISOR_update_va_mapping() failed"); 3926843e1988Sjohnlev #else 3927ae115bc7Smrj { 3928ae115bc7Smrj x86pte_t *pteptr; 3929ae115bc7Smrj 3930ae115bc7Smrj pteptr = x86pte_mapin(mmu_btop(pte_pa), 3931ae115bc7Smrj (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 39327c478bd9Sstevel@tonic-gate if (mmu.pae_hat) 3933ae115bc7Smrj *pteptr = 0; 39347c478bd9Sstevel@tonic-gate else 39357c478bd9Sstevel@tonic-gate *(x86pte32_t *)pteptr = 0; 39367c478bd9Sstevel@tonic-gate mmu_tlbflush_entry(addr); 3937ae115bc7Smrj x86pte_mapout(); 3938ae115bc7Smrj } 3939843e1988Sjohnlev #endif 3940ae115bc7Smrj 39417c478bd9Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0); 39427c478bd9Sstevel@tonic-gate if (ht == NULL) 39437c478bd9Sstevel@tonic-gate panic("hat_mempte_release(): invalid address"); 39447c478bd9Sstevel@tonic-gate ASSERT(ht->ht_level == 0); 39457c478bd9Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt); 39467c478bd9Sstevel@tonic-gate htable_release(ht); 3947551bc2a6Smrj XPV_ALLOW_MIGRATE(); 39487c478bd9Sstevel@tonic-gate } 39497c478bd9Sstevel@tonic-gate 39507c478bd9Sstevel@tonic-gate /* 39517c478bd9Sstevel@tonic-gate * Apply a temporary CPU private mapping to a page. We flush the TLB only 39527c478bd9Sstevel@tonic-gate * on this CPU, so this ought to have been called with preemption disabled. 39537c478bd9Sstevel@tonic-gate */ 39547c478bd9Sstevel@tonic-gate void 39557c478bd9Sstevel@tonic-gate hat_mempte_remap( 39567c478bd9Sstevel@tonic-gate pfn_t pfn, 39577c478bd9Sstevel@tonic-gate caddr_t addr, 3958ae115bc7Smrj hat_mempte_t pte_pa, 39597c478bd9Sstevel@tonic-gate uint_t attr, 39607c478bd9Sstevel@tonic-gate uint_t flags) 39617c478bd9Sstevel@tonic-gate { 39627c478bd9Sstevel@tonic-gate uintptr_t va = (uintptr_t)addr; 39637c478bd9Sstevel@tonic-gate x86pte_t pte; 39647c478bd9Sstevel@tonic-gate 39657c478bd9Sstevel@tonic-gate /* 39667c478bd9Sstevel@tonic-gate * Remap the given PTE to the new page's PFN. Invalidate only 39677c478bd9Sstevel@tonic-gate * on this CPU. 
39687c478bd9Sstevel@tonic-gate */ 39697c478bd9Sstevel@tonic-gate #ifdef DEBUG 39707c478bd9Sstevel@tonic-gate htable_t *ht; 39717c478bd9Sstevel@tonic-gate uint_t entry; 39727c478bd9Sstevel@tonic-gate 39737c478bd9Sstevel@tonic-gate ASSERT(IS_PAGEALIGNED(va)); 39747c478bd9Sstevel@tonic-gate ASSERT(!IN_VA_HOLE(va)); 39757c478bd9Sstevel@tonic-gate ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0); 39767c478bd9Sstevel@tonic-gate ASSERT(ht != NULL); 39777c478bd9Sstevel@tonic-gate ASSERT(ht->ht_level == 0); 39787c478bd9Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0); 3979ae115bc7Smrj ASSERT(ht->ht_pfn == mmu_btop(pte_pa)); 39807c478bd9Sstevel@tonic-gate htable_release(ht); 39817c478bd9Sstevel@tonic-gate #endif 3982843e1988Sjohnlev XPV_DISALLOW_MIGRATE(); 39837c478bd9Sstevel@tonic-gate pte = hati_mkpte(pfn, attr, 0, flags); 3984843e1988Sjohnlev #ifdef __xpv 3985843e1988Sjohnlev if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL)) 3986843e1988Sjohnlev panic("HYPERVISOR_update_va_mapping() failed"); 3987843e1988Sjohnlev #else 3988ae115bc7Smrj { 3989ae115bc7Smrj x86pte_t *pteptr; 3990ae115bc7Smrj 3991ae115bc7Smrj pteptr = x86pte_mapin(mmu_btop(pte_pa), 3992ae115bc7Smrj (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 39937c478bd9Sstevel@tonic-gate if (mmu.pae_hat) 39947c478bd9Sstevel@tonic-gate *(x86pte_t *)pteptr = pte; 39957c478bd9Sstevel@tonic-gate else 39967c478bd9Sstevel@tonic-gate *(x86pte32_t *)pteptr = (x86pte32_t)pte; 39977c478bd9Sstevel@tonic-gate mmu_tlbflush_entry(addr); 3998ae115bc7Smrj x86pte_mapout(); 3999ae115bc7Smrj } 4000843e1988Sjohnlev #endif 4001843e1988Sjohnlev XPV_ALLOW_MIGRATE(); 40027c478bd9Sstevel@tonic-gate } 40037c478bd9Sstevel@tonic-gate 40047c478bd9Sstevel@tonic-gate 40057c478bd9Sstevel@tonic-gate 40067c478bd9Sstevel@tonic-gate /* 40077c478bd9Sstevel@tonic-gate * Hat locking functions 40087c478bd9Sstevel@tonic-gate * XXX - these two functions are currently being used by hatstats 40097c478bd9Sstevel@tonic-gate * they can be removed by using a per-as mutex for hatstats. 40107c478bd9Sstevel@tonic-gate */ 40117c478bd9Sstevel@tonic-gate void 40127c478bd9Sstevel@tonic-gate hat_enter(hat_t *hat) 40137c478bd9Sstevel@tonic-gate { 40147c478bd9Sstevel@tonic-gate mutex_enter(&hat->hat_mutex); 40157c478bd9Sstevel@tonic-gate } 40167c478bd9Sstevel@tonic-gate 40177c478bd9Sstevel@tonic-gate void 40187c478bd9Sstevel@tonic-gate hat_exit(hat_t *hat) 40197c478bd9Sstevel@tonic-gate { 40207c478bd9Sstevel@tonic-gate mutex_exit(&hat->hat_mutex); 40217c478bd9Sstevel@tonic-gate } 40227c478bd9Sstevel@tonic-gate 40237c478bd9Sstevel@tonic-gate /* 4024ae115bc7Smrj * HAT part of cpu initialization. 40257c478bd9Sstevel@tonic-gate */ 40267c478bd9Sstevel@tonic-gate void 40277c478bd9Sstevel@tonic-gate hat_cpu_online(struct cpu *cpup) 40287c478bd9Sstevel@tonic-gate { 40297c478bd9Sstevel@tonic-gate if (cpup != CPU) { 4030ae115bc7Smrj x86pte_cpu_init(cpup); 40317c478bd9Sstevel@tonic-gate hat_vlp_setup(cpup); 40327c478bd9Sstevel@tonic-gate } 40337c478bd9Sstevel@tonic-gate CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id); 40347c478bd9Sstevel@tonic-gate } 40357c478bd9Sstevel@tonic-gate 40367c478bd9Sstevel@tonic-gate /* 4037ae115bc7Smrj * HAT part of cpu deletion. 4038ae115bc7Smrj * (currently, we only call this after the cpu is safely passivated.) 
4039ae115bc7Smrj */ 4040ae115bc7Smrj void 4041ae115bc7Smrj hat_cpu_offline(struct cpu *cpup) 4042ae115bc7Smrj { 4043ae115bc7Smrj ASSERT(cpup != CPU); 4044ae115bc7Smrj 4045ae115bc7Smrj CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id); 4046ae115bc7Smrj hat_vlp_teardown(cpup); 4047a3114836SGerry Liu x86pte_cpu_fini(cpup); 4048ae115bc7Smrj } 4049ae115bc7Smrj 4050ae115bc7Smrj /* 40517c478bd9Sstevel@tonic-gate * Function called after all CPUs are brought online. 40527c478bd9Sstevel@tonic-gate * Used to remove low address boot mappings. 40537c478bd9Sstevel@tonic-gate */ 40547c478bd9Sstevel@tonic-gate void 40557c478bd9Sstevel@tonic-gate clear_boot_mappings(uintptr_t low, uintptr_t high) 40567c478bd9Sstevel@tonic-gate { 40577c478bd9Sstevel@tonic-gate uintptr_t vaddr = low; 40587c478bd9Sstevel@tonic-gate htable_t *ht = NULL; 40597c478bd9Sstevel@tonic-gate level_t level; 40607c478bd9Sstevel@tonic-gate uint_t entry; 40617c478bd9Sstevel@tonic-gate x86pte_t pte; 40627c478bd9Sstevel@tonic-gate 40637c478bd9Sstevel@tonic-gate /* 40647c478bd9Sstevel@tonic-gate * On 1st CPU we can unload the prom mappings, basically we blow away 4065ae115bc7Smrj * all virtual mappings under _userlimit. 40667c478bd9Sstevel@tonic-gate */ 40677c478bd9Sstevel@tonic-gate while (vaddr < high) { 40687c478bd9Sstevel@tonic-gate pte = htable_walk(kas.a_hat, &ht, &vaddr, high); 40697c478bd9Sstevel@tonic-gate if (ht == NULL) 40707c478bd9Sstevel@tonic-gate break; 40717c478bd9Sstevel@tonic-gate 40727c478bd9Sstevel@tonic-gate level = ht->ht_level; 40737c478bd9Sstevel@tonic-gate entry = htable_va2entry(vaddr, ht); 40747c478bd9Sstevel@tonic-gate ASSERT(level <= mmu.max_page_level); 40757c478bd9Sstevel@tonic-gate ASSERT(PTE_ISPAGE(pte, level)); 40767c478bd9Sstevel@tonic-gate 40777c478bd9Sstevel@tonic-gate /* 40787c478bd9Sstevel@tonic-gate * Unload the mapping from the page tables. 40797c478bd9Sstevel@tonic-gate */ 4080a6a74e0eSMatthew Ahrens (void) x86pte_inval(ht, entry, 0, NULL, B_TRUE); 40817c478bd9Sstevel@tonic-gate ASSERT(ht->ht_valid_cnt > 0); 40827c478bd9Sstevel@tonic-gate HTABLE_DEC(ht->ht_valid_cnt); 40837c478bd9Sstevel@tonic-gate PGCNT_DEC(ht->ht_hat, ht->ht_level); 40847c478bd9Sstevel@tonic-gate 40857c478bd9Sstevel@tonic-gate vaddr += LEVEL_SIZE(ht->ht_level); 40867c478bd9Sstevel@tonic-gate } 40877c478bd9Sstevel@tonic-gate if (ht) 40887c478bd9Sstevel@tonic-gate htable_release(ht); 40897c478bd9Sstevel@tonic-gate } 40907c478bd9Sstevel@tonic-gate 40917c478bd9Sstevel@tonic-gate /* 40927c478bd9Sstevel@tonic-gate * Atomically update a new translation for a single page. If the 40937c478bd9Sstevel@tonic-gate * currently installed PTE doesn't match the value we expect to find, 40947c478bd9Sstevel@tonic-gate * it's not updated and we return the PTE we found. 40957c478bd9Sstevel@tonic-gate * 40967c478bd9Sstevel@tonic-gate * If activating nosync or NOWRITE and the page was modified we need to sync 40977c478bd9Sstevel@tonic-gate * with the page_t. Also sync with page_t if clearing ref/mod bits. 
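 * In other words, when the new PTE would stop ref/mod information from
 * being recorded (or would discard it), any ref/mod state already present
 * in the old PTE must be captured in the page_t first.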
40987c478bd9Sstevel@tonic-gate */ 40997c478bd9Sstevel@tonic-gate static x86pte_t 41007c478bd9Sstevel@tonic-gate hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new) 41017c478bd9Sstevel@tonic-gate { 41027c478bd9Sstevel@tonic-gate page_t *pp; 41037c478bd9Sstevel@tonic-gate uint_t rm = 0; 41047c478bd9Sstevel@tonic-gate x86pte_t replaced; 41057c478bd9Sstevel@tonic-gate 4106ae115bc7Smrj if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC && 41077c478bd9Sstevel@tonic-gate PTE_GET(expected, PT_MOD | PT_REF) && 41087c478bd9Sstevel@tonic-gate (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) || 41097c478bd9Sstevel@tonic-gate !PTE_GET(new, PT_MOD | PT_REF))) { 41107c478bd9Sstevel@tonic-gate 4111ae115bc7Smrj ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level))); 41127c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level)); 41137c478bd9Sstevel@tonic-gate ASSERT(pp != NULL); 41147c478bd9Sstevel@tonic-gate if (PTE_GET(expected, PT_MOD)) 41157c478bd9Sstevel@tonic-gate rm |= P_MOD; 41167c478bd9Sstevel@tonic-gate if (PTE_GET(expected, PT_REF)) 41177c478bd9Sstevel@tonic-gate rm |= P_REF; 41187c478bd9Sstevel@tonic-gate PTE_CLR(new, PT_MOD | PT_REF); 41197c478bd9Sstevel@tonic-gate } 41207c478bd9Sstevel@tonic-gate 41217c478bd9Sstevel@tonic-gate replaced = x86pte_update(ht, entry, expected, new); 41227c478bd9Sstevel@tonic-gate if (replaced != expected) 41237c478bd9Sstevel@tonic-gate return (replaced); 41247c478bd9Sstevel@tonic-gate 41257c478bd9Sstevel@tonic-gate if (rm) { 41267c478bd9Sstevel@tonic-gate /* 41277c478bd9Sstevel@tonic-gate * sync to all constituent pages of a large page 41287c478bd9Sstevel@tonic-gate */ 41297c478bd9Sstevel@tonic-gate pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level); 41307c478bd9Sstevel@tonic-gate ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 41317c478bd9Sstevel@tonic-gate while (pgcnt-- > 0) { 41327c478bd9Sstevel@tonic-gate /* 41337c478bd9Sstevel@tonic-gate * hat_page_demote() can't decrease 41347c478bd9Sstevel@tonic-gate * pszc below this mapping size 41357c478bd9Sstevel@tonic-gate * since large mapping existed after we 41367c478bd9Sstevel@tonic-gate * took mlist lock. 
41377c478bd9Sstevel@tonic-gate */ 41387c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc >= ht->ht_level); 41397c478bd9Sstevel@tonic-gate hat_page_setattr(pp, rm); 41407c478bd9Sstevel@tonic-gate ++pp; 41417c478bd9Sstevel@tonic-gate } 41427c478bd9Sstevel@tonic-gate } 41437c478bd9Sstevel@tonic-gate 41447c478bd9Sstevel@tonic-gate return (0); 41457c478bd9Sstevel@tonic-gate } 41467c478bd9Sstevel@tonic-gate 414705d3dc4bSpaulsan /* ARGSUSED */ 414805d3dc4bSpaulsan void 41497dacfc44Spaulsan hat_join_srd(struct hat *hat, vnode_t *evp) 415005d3dc4bSpaulsan { 415105d3dc4bSpaulsan } 415205d3dc4bSpaulsan 415305d3dc4bSpaulsan /* ARGSUSED */ 415405d3dc4bSpaulsan hat_region_cookie_t 41557dacfc44Spaulsan hat_join_region(struct hat *hat, 415605d3dc4bSpaulsan caddr_t r_saddr, 415705d3dc4bSpaulsan size_t r_size, 415805d3dc4bSpaulsan void *r_obj, 415905d3dc4bSpaulsan u_offset_t r_objoff, 416005d3dc4bSpaulsan uchar_t r_perm, 416105d3dc4bSpaulsan uchar_t r_pgszc, 416205d3dc4bSpaulsan hat_rgn_cb_func_t r_cb_function, 416305d3dc4bSpaulsan uint_t flags) 416405d3dc4bSpaulsan { 416505d3dc4bSpaulsan panic("No shared region support on x86"); 416605d3dc4bSpaulsan return (HAT_INVALID_REGION_COOKIE); 416705d3dc4bSpaulsan } 416805d3dc4bSpaulsan 416905d3dc4bSpaulsan /* ARGSUSED */ 417005d3dc4bSpaulsan void 41717dacfc44Spaulsan hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags) 417205d3dc4bSpaulsan { 417305d3dc4bSpaulsan panic("No shared region support on x86"); 417405d3dc4bSpaulsan } 417505d3dc4bSpaulsan 417605d3dc4bSpaulsan /* ARGSUSED */ 417705d3dc4bSpaulsan void 41787dacfc44Spaulsan hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie) 417905d3dc4bSpaulsan { 418005d3dc4bSpaulsan panic("No shared region support on x86"); 418105d3dc4bSpaulsan } 418205d3dc4bSpaulsan 418305d3dc4bSpaulsan 41847c478bd9Sstevel@tonic-gate /* 41857c478bd9Sstevel@tonic-gate * Kernel Physical Mapping (kpm) facility 41867c478bd9Sstevel@tonic-gate * 41877c478bd9Sstevel@tonic-gate * Most of the routines needed to support segkpm are almost no-ops on the 41887c478bd9Sstevel@tonic-gate * x86 platform. We map in the entire segment when it is created and leave 41897c478bd9Sstevel@tonic-gate * it mapped in, so there is no additional work required to set up and tear 41907c478bd9Sstevel@tonic-gate * down individual mappings. All of these routines were created to support 41917c478bd9Sstevel@tonic-gate * SPARC platforms that have to avoid aliasing in their virtually indexed 41927c478bd9Sstevel@tonic-gate * caches. 41937c478bd9Sstevel@tonic-gate * 41947c478bd9Sstevel@tonic-gate * Most of the routines have sanity checks in them (e.g. verifying that the 41957c478bd9Sstevel@tonic-gate * passed-in page is locked). We don't actually care about most of these 41967c478bd9Sstevel@tonic-gate * checks on x86, but we leave them in place to identify problems in the 41977c478bd9Sstevel@tonic-gate * upper levels. 41987c478bd9Sstevel@tonic-gate */ 41997c478bd9Sstevel@tonic-gate 42007c478bd9Sstevel@tonic-gate /* 42017c478bd9Sstevel@tonic-gate * Map in a locked page and return the vaddr. 
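 * On x86 this is simply the page's kpm address; the kpme argument and the
 * per-mapping accounting needed on sparc are ignored here.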
42027c478bd9Sstevel@tonic-gate */ 42037c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 42047c478bd9Sstevel@tonic-gate caddr_t 42057c478bd9Sstevel@tonic-gate hat_kpm_mapin(struct page *pp, struct kpme *kpme) 42067c478bd9Sstevel@tonic-gate { 42077c478bd9Sstevel@tonic-gate caddr_t vaddr; 42087c478bd9Sstevel@tonic-gate 42097c478bd9Sstevel@tonic-gate #ifdef DEBUG 42107c478bd9Sstevel@tonic-gate if (kpm_enable == 0) { 42117c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n"); 42127c478bd9Sstevel@tonic-gate return ((caddr_t)NULL); 42137c478bd9Sstevel@tonic-gate } 42147c478bd9Sstevel@tonic-gate 42157c478bd9Sstevel@tonic-gate if (pp == NULL || PAGE_LOCKED(pp) == 0) { 42167c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n"); 42177c478bd9Sstevel@tonic-gate return ((caddr_t)NULL); 42187c478bd9Sstevel@tonic-gate } 42197c478bd9Sstevel@tonic-gate #endif 42207c478bd9Sstevel@tonic-gate 42217c478bd9Sstevel@tonic-gate vaddr = hat_kpm_page2va(pp, 1); 42227c478bd9Sstevel@tonic-gate 42237c478bd9Sstevel@tonic-gate return (vaddr); 42247c478bd9Sstevel@tonic-gate } 42257c478bd9Sstevel@tonic-gate 42267c478bd9Sstevel@tonic-gate /* 42277c478bd9Sstevel@tonic-gate * Mapout a locked page. 42287c478bd9Sstevel@tonic-gate */ 42297c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 42307c478bd9Sstevel@tonic-gate void 42317c478bd9Sstevel@tonic-gate hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr) 42327c478bd9Sstevel@tonic-gate { 42337c478bd9Sstevel@tonic-gate #ifdef DEBUG 42347c478bd9Sstevel@tonic-gate if (kpm_enable == 0) { 42357c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n"); 42367c478bd9Sstevel@tonic-gate return; 42377c478bd9Sstevel@tonic-gate } 42387c478bd9Sstevel@tonic-gate 42397c478bd9Sstevel@tonic-gate if (IS_KPM_ADDR(vaddr) == 0) { 42407c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n"); 42417c478bd9Sstevel@tonic-gate return; 42427c478bd9Sstevel@tonic-gate } 42437c478bd9Sstevel@tonic-gate 42447c478bd9Sstevel@tonic-gate if (pp == NULL || PAGE_LOCKED(pp) == 0) { 42457c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n"); 42467c478bd9Sstevel@tonic-gate return; 42477c478bd9Sstevel@tonic-gate } 42487c478bd9Sstevel@tonic-gate #endif 42497c478bd9Sstevel@tonic-gate } 42507c478bd9Sstevel@tonic-gate 42517c478bd9Sstevel@tonic-gate /* 4252d20abfaaSPavel Tatashin * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical 4253d20abfaaSPavel Tatashin * memory addresses that are not described by a page_t. It can 4254d20abfaaSPavel Tatashin * also be used for normal pages that are not locked, but beware 4255d20abfaaSPavel Tatashin * this is dangerous - no locking is performed, so the identity of 4256d20abfaaSPavel Tatashin * the page could change. hat_kpm_mapin_pfn is not supported when 4257d20abfaaSPavel Tatashin * vac_colors > 1, because the chosen va depends on the page identity, 4258d20abfaaSPavel Tatashin * which could change. 4259d20abfaaSPavel Tatashin * The caller must only pass pfn's for valid physical addresses; violation 4260d20abfaaSPavel Tatashin * of this rule will cause panic. 
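 * On x86 the mapping already exists: the returned address is just
 * kpm_vbase + ptob(pfn), and hat_kpm_mapout_pfn() correspondingly has
 * nothing to undo.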
4261d20abfaaSPavel Tatashin */ 4262d20abfaaSPavel Tatashin caddr_t 4263d20abfaaSPavel Tatashin hat_kpm_mapin_pfn(pfn_t pfn) 4264d20abfaaSPavel Tatashin { 4265d20abfaaSPavel Tatashin caddr_t paddr, vaddr; 4266d20abfaaSPavel Tatashin 4267d20abfaaSPavel Tatashin if (kpm_enable == 0) 4268d20abfaaSPavel Tatashin return ((caddr_t)NULL); 4269d20abfaaSPavel Tatashin 4270d20abfaaSPavel Tatashin paddr = (caddr_t)ptob(pfn); 4271d20abfaaSPavel Tatashin vaddr = (uintptr_t)kpm_vbase + paddr; 4272d20abfaaSPavel Tatashin 4273d20abfaaSPavel Tatashin return ((caddr_t)vaddr); 4274d20abfaaSPavel Tatashin } 4275d20abfaaSPavel Tatashin 4276d20abfaaSPavel Tatashin /*ARGSUSED*/ 4277d20abfaaSPavel Tatashin void 4278d20abfaaSPavel Tatashin hat_kpm_mapout_pfn(pfn_t pfn) 4279d20abfaaSPavel Tatashin { 4280d20abfaaSPavel Tatashin /* empty */ 4281d20abfaaSPavel Tatashin } 4282d20abfaaSPavel Tatashin 4283d20abfaaSPavel Tatashin /* 42847c478bd9Sstevel@tonic-gate * Return the kpm virtual address for a specific pfn 42857c478bd9Sstevel@tonic-gate */ 42867c478bd9Sstevel@tonic-gate caddr_t 42877c478bd9Sstevel@tonic-gate hat_kpm_pfn2va(pfn_t pfn) 42887c478bd9Sstevel@tonic-gate { 4289ae115bc7Smrj uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn); 42907c478bd9Sstevel@tonic-gate 4291d2b85481Srscott ASSERT(!pfn_is_foreign(pfn)); 42927c478bd9Sstevel@tonic-gate return ((caddr_t)vaddr); 42937c478bd9Sstevel@tonic-gate } 42947c478bd9Sstevel@tonic-gate 42957c478bd9Sstevel@tonic-gate /* 42967c478bd9Sstevel@tonic-gate * Return the kpm virtual address for the page at pp. 42977c478bd9Sstevel@tonic-gate */ 42987c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 42997c478bd9Sstevel@tonic-gate caddr_t 43007c478bd9Sstevel@tonic-gate hat_kpm_page2va(struct page *pp, int checkswap) 43017c478bd9Sstevel@tonic-gate { 43027c478bd9Sstevel@tonic-gate return (hat_kpm_pfn2va(pp->p_pagenum)); 43037c478bd9Sstevel@tonic-gate } 43047c478bd9Sstevel@tonic-gate 43057c478bd9Sstevel@tonic-gate /* 43067c478bd9Sstevel@tonic-gate * Return the page frame number for the kpm virtual address vaddr. 43077c478bd9Sstevel@tonic-gate */ 43087c478bd9Sstevel@tonic-gate pfn_t 43097c478bd9Sstevel@tonic-gate hat_kpm_va2pfn(caddr_t vaddr) 43107c478bd9Sstevel@tonic-gate { 43117c478bd9Sstevel@tonic-gate pfn_t pfn; 43127c478bd9Sstevel@tonic-gate 43137c478bd9Sstevel@tonic-gate ASSERT(IS_KPM_ADDR(vaddr)); 43147c478bd9Sstevel@tonic-gate 43157c478bd9Sstevel@tonic-gate pfn = (pfn_t)btop(vaddr - kpm_vbase); 43167c478bd9Sstevel@tonic-gate 43177c478bd9Sstevel@tonic-gate return (pfn); 43187c478bd9Sstevel@tonic-gate } 43197c478bd9Sstevel@tonic-gate 43207c478bd9Sstevel@tonic-gate 43217c478bd9Sstevel@tonic-gate /* 43227c478bd9Sstevel@tonic-gate * Return the page for the kpm virtual address vaddr. 43237c478bd9Sstevel@tonic-gate */ 43247c478bd9Sstevel@tonic-gate page_t * 43257c478bd9Sstevel@tonic-gate hat_kpm_vaddr2page(caddr_t vaddr) 43267c478bd9Sstevel@tonic-gate { 43277c478bd9Sstevel@tonic-gate pfn_t pfn; 43287c478bd9Sstevel@tonic-gate 43297c478bd9Sstevel@tonic-gate ASSERT(IS_KPM_ADDR(vaddr)); 43307c478bd9Sstevel@tonic-gate 43317c478bd9Sstevel@tonic-gate pfn = hat_kpm_va2pfn(vaddr); 43327c478bd9Sstevel@tonic-gate 43337c478bd9Sstevel@tonic-gate return (page_numtopp_nolock(pfn)); 43347c478bd9Sstevel@tonic-gate } 43357c478bd9Sstevel@tonic-gate 43367c478bd9Sstevel@tonic-gate /* 43377c478bd9Sstevel@tonic-gate * hat_kpm_fault is called from segkpm_fault when we take a page fault on a 43387c478bd9Sstevel@tonic-gate * KPM page. 
This should never happen on x86 43397c478bd9Sstevel@tonic-gate */ 43407c478bd9Sstevel@tonic-gate int 43417c478bd9Sstevel@tonic-gate hat_kpm_fault(hat_t *hat, caddr_t vaddr) 43427c478bd9Sstevel@tonic-gate { 4343903a11ebSrh87107 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p", 4344903a11ebSrh87107 (void *)hat, (void *)vaddr); 43457c478bd9Sstevel@tonic-gate 43467c478bd9Sstevel@tonic-gate return (0); 43477c478bd9Sstevel@tonic-gate } 43487c478bd9Sstevel@tonic-gate 43497c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 43507c478bd9Sstevel@tonic-gate void 43517c478bd9Sstevel@tonic-gate hat_kpm_mseghash_clear(int nentries) 43527c478bd9Sstevel@tonic-gate {} 43537c478bd9Sstevel@tonic-gate 43547c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 43557c478bd9Sstevel@tonic-gate void 43567c478bd9Sstevel@tonic-gate hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp) 43577c478bd9Sstevel@tonic-gate {} 4358843e1988Sjohnlev 4359a3114836SGerry Liu #ifndef __xpv 4360a3114836SGerry Liu void 4361a3114836SGerry Liu hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs, 4362a3114836SGerry Liu offset_t kpm_pages_off) 4363a3114836SGerry Liu { 4364a3114836SGerry Liu _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off)); 4365a3114836SGerry Liu pfn_t base, end; 4366a3114836SGerry Liu 4367a3114836SGerry Liu /* 4368a3114836SGerry Liu * kphysm_add_memory_dynamic() does not set nkpmpgs 4369a3114836SGerry Liu * when page_t memory is externally allocated. That 4370a3114836SGerry Liu * code must properly calculate nkpmpgs in all cases 4371a3114836SGerry Liu * if nkpmpgs needs to be used at some point. 4372a3114836SGerry Liu */ 4373a3114836SGerry Liu 4374a3114836SGerry Liu /* 4375a3114836SGerry Liu * The meta (page_t) pages for dynamically added memory are allocated 4376a3114836SGerry Liu * either from the incoming memory itself or from existing memory. 4377a3114836SGerry Liu * In the former case the base of the incoming pages will be different 4378a3114836SGerry Liu * than the base of the dynamic segment so call memseg_get_start() to 4379a3114836SGerry Liu * get the actual base of the incoming memory for each case. 4380a3114836SGerry Liu */ 4381a3114836SGerry Liu 4382a3114836SGerry Liu base = memseg_get_start(msp); 4383a3114836SGerry Liu end = msp->pages_end; 4384a3114836SGerry Liu 4385a3114836SGerry Liu hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base), 4386a3114836SGerry Liu mmu_ptob(end - base), base, PROT_READ | PROT_WRITE, 4387a3114836SGerry Liu HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST); 4388a3114836SGerry Liu } 4389a3114836SGerry Liu 4390a3114836SGerry Liu void 4391a3114836SGerry Liu hat_kpm_addmem_mseg_insert(struct memseg *msp) 4392a3114836SGerry Liu { 4393a3114836SGerry Liu _NOTE(ARGUNUSED(msp)); 4394a3114836SGerry Liu } 4395a3114836SGerry Liu 4396a3114836SGerry Liu void 4397a3114836SGerry Liu hat_kpm_addmem_memsegs_update(struct memseg *msp) 4398a3114836SGerry Liu { 4399a3114836SGerry Liu _NOTE(ARGUNUSED(msp)); 4400a3114836SGerry Liu } 4401a3114836SGerry Liu 4402a3114836SGerry Liu /* 4403a3114836SGerry Liu * Return end of metadata for an already setup memseg. 4404a3114836SGerry Liu * X86 platforms don't need per-page meta data to support kpm. 
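 * We simply hand back the address just past the memseg's page_t array,
 * i.e. msp->epages.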
4405a3114836SGerry Liu */ 4406a3114836SGerry Liu caddr_t 4407a3114836SGerry Liu hat_kpm_mseg_reuse(struct memseg *msp) 4408a3114836SGerry Liu { 4409a3114836SGerry Liu return ((caddr_t)msp->epages); 4410a3114836SGerry Liu } 4411a3114836SGerry Liu 4412a3114836SGerry Liu void 4413a3114836SGerry Liu hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp) 4414a3114836SGerry Liu { 4415a3114836SGerry Liu _NOTE(ARGUNUSED(msp, mspp)); 4416a3114836SGerry Liu ASSERT(0); 4417a3114836SGerry Liu } 4418a3114836SGerry Liu 4419a3114836SGerry Liu void 4420a3114836SGerry Liu hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp, 4421a3114836SGerry Liu struct memseg *lo, struct memseg *mid, struct memseg *hi) 4422a3114836SGerry Liu { 4423a3114836SGerry Liu _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi)); 4424a3114836SGerry Liu ASSERT(0); 4425a3114836SGerry Liu } 4426a3114836SGerry Liu 4427a3114836SGerry Liu /* 4428a3114836SGerry Liu * Walk the memsegs chain, applying func to each memseg span. 4429a3114836SGerry Liu */ 4430a3114836SGerry Liu void 4431a3114836SGerry Liu hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg) 4432a3114836SGerry Liu { 4433a3114836SGerry Liu pfn_t pbase, pend; 4434a3114836SGerry Liu void *base; 4435a3114836SGerry Liu size_t size; 4436a3114836SGerry Liu struct memseg *msp; 4437a3114836SGerry Liu 4438a3114836SGerry Liu for (msp = memsegs; msp; msp = msp->next) { 4439a3114836SGerry Liu pbase = msp->pages_base; 4440a3114836SGerry Liu pend = msp->pages_end; 4441a3114836SGerry Liu base = ptob(pbase) + kpm_vbase; 4442a3114836SGerry Liu size = ptob(pend - pbase); 4443a3114836SGerry Liu func(arg, base, size); 4444a3114836SGerry Liu } 4445a3114836SGerry Liu } 4446a3114836SGerry Liu 4447a3114836SGerry Liu #else /* __xpv */ 4448a3114836SGerry Liu 4449843e1988Sjohnlev /* 4450843e1988Sjohnlev * There are specific Hypervisor calls to establish and remove mappings 4451843e1988Sjohnlev * to grant table references and the privcmd driver. We have to ensure 4452843e1988Sjohnlev * that a page table actually exists. 4453843e1988Sjohnlev */ 4454843e1988Sjohnlev void 44557eea693dSMark Johnson hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma) 4456843e1988Sjohnlev { 44577eea693dSMark Johnson maddr_t base_ma; 44587eea693dSMark Johnson htable_t *ht; 44597eea693dSMark Johnson uint_t entry; 44607eea693dSMark Johnson 4461843e1988Sjohnlev ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4462551bc2a6Smrj XPV_DISALLOW_MIGRATE(); 44637eea693dSMark Johnson ht = htable_create(hat, (uintptr_t)addr, 0, NULL); 44647eea693dSMark Johnson 44657eea693dSMark Johnson /* 44667eea693dSMark Johnson * if an address for pte_ma is passed in, return the MA of the pte 44677eea693dSMark Johnson * for this specific address. This address is only valid as long 44687eea693dSMark Johnson * as the htable stays locked. 
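 * The htable_create() call above returns with a hold on the htable and we
 * intentionally do not release it here; hat_release_mapping() drops that
 * hold later (note its ASSERT that ht_busy >= 2).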
44697eea693dSMark Johnson */ 44707eea693dSMark Johnson if (pte_ma != NULL) { 44717eea693dSMark Johnson entry = htable_va2entry((uintptr_t)addr, ht); 44727eea693dSMark Johnson base_ma = pa_to_ma(ptob(ht->ht_pfn)); 44737eea693dSMark Johnson *pte_ma = base_ma + (entry << mmu.pte_size_shift); 44747eea693dSMark Johnson } 4475551bc2a6Smrj XPV_ALLOW_MIGRATE(); 4476843e1988Sjohnlev } 4477843e1988Sjohnlev 4478843e1988Sjohnlev void 4479843e1988Sjohnlev hat_release_mapping(hat_t *hat, caddr_t addr) 4480843e1988Sjohnlev { 4481843e1988Sjohnlev htable_t *ht; 4482843e1988Sjohnlev 4483843e1988Sjohnlev ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4484551bc2a6Smrj XPV_DISALLOW_MIGRATE(); 4485843e1988Sjohnlev ht = htable_lookup(hat, (uintptr_t)addr, 0); 4486843e1988Sjohnlev ASSERT(ht != NULL); 4487843e1988Sjohnlev ASSERT(ht->ht_busy >= 2); 4488843e1988Sjohnlev htable_release(ht); 4489843e1988Sjohnlev htable_release(ht); 4490551bc2a6Smrj XPV_ALLOW_MIGRATE(); 4491843e1988Sjohnlev } 4492a3114836SGerry Liu #endif /* __xpv */ 4493