17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 58bc68872Selowe * Common Development and Distribution License (the "License"). 68bc68872Selowe * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22cb15d5d9SPeter Rival * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved. 23*2c164fafSPatrick Mooney * Copyright 2019 Joyent, Inc. 
247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate 277c478bd9Sstevel@tonic-gate /* 287c478bd9Sstevel@tonic-gate * VM - page locking primitives 297c478bd9Sstevel@tonic-gate */ 307c478bd9Sstevel@tonic-gate #include <sys/param.h> 317c478bd9Sstevel@tonic-gate #include <sys/t_lock.h> 327c478bd9Sstevel@tonic-gate #include <sys/vtrace.h> 337c478bd9Sstevel@tonic-gate #include <sys/debug.h> 347c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 357c478bd9Sstevel@tonic-gate #include <sys/bitmap.h> 367c478bd9Sstevel@tonic-gate #include <sys/lockstat.h> 37d7d93655Sblakej #include <sys/sysmacros.h> 387c478bd9Sstevel@tonic-gate #include <sys/condvar_impl.h> 397c478bd9Sstevel@tonic-gate #include <vm/page.h> 407c478bd9Sstevel@tonic-gate #include <vm/seg_enum.h> 417c478bd9Sstevel@tonic-gate #include <vm/vm_dep.h> 42af4c679fSSean McEnroe #include <vm/seg_kmem.h> 437c478bd9Sstevel@tonic-gate 447c478bd9Sstevel@tonic-gate /* 45cb15d5d9SPeter Rival * This global mutex array is for logical page locking. 467c478bd9Sstevel@tonic-gate * The following fields in the page structure are protected 477c478bd9Sstevel@tonic-gate * by this lock: 487c478bd9Sstevel@tonic-gate * 497c478bd9Sstevel@tonic-gate * p_lckcnt 507c478bd9Sstevel@tonic-gate * p_cowcnt 517c478bd9Sstevel@tonic-gate */ 52cb15d5d9SPeter Rival pad_mutex_t page_llocks[8 * NCPU_P2]; 537c478bd9Sstevel@tonic-gate 547c478bd9Sstevel@tonic-gate /* 557c478bd9Sstevel@tonic-gate * This is a global lock for the logical page free list. The 567c478bd9Sstevel@tonic-gate * logical free list, in this implementation, is maintained as two 577c478bd9Sstevel@tonic-gate * separate physical lists - the cache list and the free list. 
 */
kmutex_t	page_freelock;

/*
 * The hash table, page_hash[], the p_selock fields, and the
 * list of pages associated with vnodes are protected by arrays of mutexes.
 *
 * Unless the hashes are changed radically, the table sizes must be
 * a power of two.  Also, we typically need more mutexes for the
 * vnodes since these locks are occasionally held for long periods.
 * And since there seem to be two special vnodes (kvp and swapvp),
 * we make room for private mutexes for them.
 *
 * The pse_mutex[] array holds the mutexes to protect the p_selock
 * fields of all page_t structures.
 *
 * PAGE_SE_MUTEX(pp) returns the address of the appropriate mutex
 * when given a pointer to a page_t.
 *
 * PIO_TABLE_SIZE must be a power of two.  One could argue that we
 * should go to the trouble of setting it up at run time and base it
 * on memory size rather than the number of compile time CPUs.
 *
 * XX64	We should be using physmem size to calculate PIO_SHIFT.
 *
 *	These might break in 64 bit world.
 */
#define	PIO_SHIFT	7	/* log2(sizeof(page_t)) */
#define	PIO_TABLE_SIZE	128	/* number of io mutexes to have */

pad_mutex_t	ph_mutex[PH_TABLE_SIZE];
kmutex_t	pio_mutex[PIO_TABLE_SIZE];

/* Hash a page_t address (shifted past the struct size) into pio_mutex[]. */
#define	PAGE_IO_MUTEX(pp) \
	    &pio_mutex[(((uintptr_t)pp) >> PIO_SHIFT) & (PIO_TABLE_SIZE - 1)]

/*
 * The pse_mutex[] array is allocated in the platform startup code
 * based on the size of the machine at startup.
 */
extern pad_mutex_t *pse_mutex;		/* Locks protecting pp->p_selock */
extern size_t pse_table_size;		/* Number of mutexes in pse_mutex[] */
extern int pse_shift;			/* log2(pse_table_size) */
#define	PAGE_SE_MUTEX(pp)	&pse_mutex[ \
	((((uintptr_t)(pp) >> pse_shift) ^ ((uintptr_t)(pp))) >> 7) & \
	(pse_table_size - 1)].pad_mutex

#define	PSZC_MTX_TABLE_SIZE	128
#define	PSZC_MTX_TABLE_SHIFT	7

static pad_mutex_t	pszc_mutex[PSZC_MTX_TABLE_SIZE];

/* Fold three shifted copies of the page_t address into pszc_mutex[]. */
#define	PAGE_SZC_MUTEX(_pp) \
	    &pszc_mutex[((((uintptr_t)(_pp) >> PSZC_MTX_TABLE_SHIFT) ^ \
		((uintptr_t)(_pp) >> (PSZC_MTX_TABLE_SHIFT << 1)) ^ \
		((uintptr_t)(_pp) >> (3 * PSZC_MTX_TABLE_SHIFT))) & \
		(PSZC_MTX_TABLE_SIZE - 1))].pad_mutex

/*
 * The vph_mutex[] array holds the mutexes to protect the vnode chains,
 * (i.e., the list of pages anchored by v_pages and connected via p_vpprev
 * and p_vpnext).
 *
 * The page_vnode_mutex(vp) function returns the address of the appropriate
 * mutex from this array given a pointer to a vnode.  It is complicated
 * by the fact that the kernel's vnode and the swapfs vnode are referenced
 * frequently enough to warrant their own mutexes.
 *
 * The VP_HASH_FUNC returns the index into the vph_mutex array given
 * an address of a vnode.
 */

#if defined(_LP64)
#define	VPH_TABLE_SIZE	(8 * NCPU_P2)
#else	/* 32 bits */
#define	VPH_TABLE_SIZE	(2 * NCPU_P2)
#endif

#define	VP_HASH_FUNC(vp) \
	((((uintptr_t)(vp) >> 6) + \
	    ((uintptr_t)(vp) >> 8) + \
	    ((uintptr_t)(vp) >> 10) + \
	    ((uintptr_t)(vp) >> 12)) \
	    & (VPH_TABLE_SIZE - 1))

/*
 * Two slots after VPH_TABLE_SIZE are reserved in vph_mutex for kernel vnodes.
 * The lock for kvp is VPH_TABLE_SIZE + 0, and the lock for zvp is
 * VPH_TABLE_SIZE + 1.
 */

/* "+ 2": the private kvp and zvp slots described above. */
kmutex_t	vph_mutex[VPH_TABLE_SIZE + 2];

/*
 * Initialize the locks used by the Virtual Memory Management system.
1537c478bd9Sstevel@tonic-gate */ 1547c478bd9Sstevel@tonic-gate void 1557c478bd9Sstevel@tonic-gate page_lock_init() 1567c478bd9Sstevel@tonic-gate { 1577c478bd9Sstevel@tonic-gate } 1587c478bd9Sstevel@tonic-gate 1597c478bd9Sstevel@tonic-gate /* 160d7d93655Sblakej * Return a value for pse_shift based on npg (the number of physical pages) 161d7d93655Sblakej * and ncpu (the maximum number of CPUs). This is called by platform startup 162d7d93655Sblakej * code. 163d7d93655Sblakej * 164d7d93655Sblakej * Lockstat data from TPC-H runs showed that contention on the pse_mutex[] 165d7d93655Sblakej * locks grew approximately as the square of the number of threads executing. 166d7d93655Sblakej * So the primary scaling factor used is NCPU^2. The size of the machine in 167d7d93655Sblakej * megabytes is used as an upper bound, particularly for sun4v machines which 168d7d93655Sblakej * all claim to have 256 CPUs maximum, and the old value of PSE_TABLE_SIZE 169d7d93655Sblakej * (128) is used as a minimum. Since the size of the table has to be a power 170d7d93655Sblakej * of two, the calculated size is rounded up to the next power of two. 171d7d93655Sblakej */ 172d7d93655Sblakej /*ARGSUSED*/ 173d7d93655Sblakej int 174d7d93655Sblakej size_pse_array(pgcnt_t npg, int ncpu) 175d7d93655Sblakej { 176d7d93655Sblakej size_t size; 177d7d93655Sblakej pgcnt_t pp_per_mb = (1024 * 1024) / PAGESIZE; 178d7d93655Sblakej 179d7d93655Sblakej size = MAX(128, MIN(npg / pp_per_mb, 2 * ncpu * ncpu)); 180d7d93655Sblakej size += (1 << (highbit(size) - 1)) - 1; 181d7d93655Sblakej return (highbit(size) - 1); 182d7d93655Sblakej } 183d7d93655Sblakej 184d7d93655Sblakej /* 1857c478bd9Sstevel@tonic-gate * At present we only use page ownership to aid debugging, so it's 1867c478bd9Sstevel@tonic-gate * OK if the owner field isn't exact. 
 * In the 32-bit world two thread ids
 * can map to the same owner because we just 'or' in 0x80000000 and
 * then clear the second highest bit, so that (for example) 0x2faced00
 * and 0xafaced00 both map to 0xafaced00.
 * In the 64-bit world, p_selock may not be large enough to hold a full
 * thread pointer.  If we ever need precise ownership (e.g. if we implement
 * priority inheritance for page locks) then p_selock should become a
 * uintptr_t and SE_WRITER should be -((uintptr_t)curthread >> 2).
 */
/*
 * A writer stores its (negative) tagged thread id in p_selock; each shared
 * holder instead adds SE_READER, so p_selock > 0 counts the readers.
 */
#define	SE_WRITER	(((selock_t)(ulong_t)curthread | INT_MIN) & ~SE_EWANTED)
#define	SE_READER	1

/*
 * A page that is deleted must be marked as such using the
 * page_lock_delete() function. The page must be exclusively locked.
 * The SE_DELETED marker is put in p_selock when this function is called.
 * SE_DELETED must be distinct from any SE_WRITER value.
 */
#define	SE_DELETED	(1 | INT_MIN)

/* vph_* counters are not updated anywhere in this file's visible code. */
#ifdef VM_STATS
uint_t	vph_kvp_count;
uint_t	vph_swapfsvp_count;
uint_t	vph_other;
#endif /* VM_STATS */

/* Event counters for the page_lock/page_trylock paths below. */
#ifdef VM_STATS
uint_t	page_lock_count;
uint_t	page_lock_miss;
uint_t	page_lock_miss_lock;
uint_t	page_lock_reclaim;
uint_t	page_lock_bad_reclaim;
uint_t	page_lock_same_page;
uint_t	page_lock_upgrade;
uint_t	page_lock_retired;
uint_t	page_lock_upgrade_failed;
uint_t	page_lock_deleted;

uint_t	page_trylock_locked;
uint_t	page_trylock_failed;
uint_t	page_trylock_missed;

uint_t	page_try_reclaim_upgrade;
#endif /* VM_STATS */

/*
 * Acquire the "shared/exclusive" lock on a page.
 *
 * Returns 1 on success and locks the page appropriately.
 * 0 on failure and does not lock the page.
 *
 * If `lock' is non-NULL, it will be dropped and reacquired in the
 * failure case.
 * This routine can block, and if it does
 * it will always return a failure since the page identity [vp, off]
 * or state may have changed.
 */

int
page_lock(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim)
{
	/* Simple wrapper: page_lock_es() with es == 0 (no special flags). */
	return (page_lock_es(pp, se, lock, reclaim, 0));
}

/*
 * With the addition of reader-writer lock semantics to page_lock_es,
 * callers wanting an exclusive (writer) lock may prevent shared-lock
 * (reader) starvation by setting the es parameter to SE_EXCL_WANTED.
 * In this case, when an exclusive lock cannot be acquired, p_selock's
 * SE_EWANTED bit is set.  Shared-lock (reader) requests are also denied
 * if the page is slated for retirement.
 *
 * The se and es parameters determine if the lock should be granted
 * based on the following decision table:
 *
 * Lock wanted	es flags	p_selock/SE_EWANTED	Action
 * ----------- -------------- ------------------- ---------
 * SE_EXCL	any [1][2]	unlocked/any		grant lock, clear SE_EWANTED
 * SE_EXCL	SE_EWANTED	any lock/any		deny, set SE_EWANTED
 * SE_EXCL	none		any lock/any		deny
 * SE_SHARED	n/a [2]		shared/0		grant
 * SE_SHARED	n/a [2]		unlocked/0		grant
 * SE_SHARED	n/a		shared/1		deny
 * SE_SHARED	n/a		unlocked/1		deny
 * SE_SHARED	n/a		excl/any		deny
 *
 * Notes:
 * [1] The code grants an exclusive lock to the caller and clears the bit
 *     SE_EWANTED whenever p_selock is unlocked, regardless of the SE_EWANTED
 *     bit's value.  This was deemed acceptable as we are not concerned about
 *     exclusive-lock starvation. If this ever becomes an issue, a priority or
 *     fifo mechanism should also be implemented. Meantime, the thread that
 *     set SE_EWANTED should be prepared to catch this condition and reset it
 *
 * [2] Retired pages may not be locked at any time, regardless of the
 *     disposition of se, unless the es parameter has SE_RETIRED flag set.
 *
 * Notes on values of "es":
 *
 * es & 1: page_lookup_create will attempt page relocation
 * es & SE_EXCL_WANTED: caller wants SE_EWANTED set (eg.
 * delete
 * memory thread); this prevents reader-starvation of waiting
 * writer thread(s) by giving priority to writers over readers.
 * es & SE_RETIRED: caller wants to lock pages even if they are
 * retired.  Default is to deny the lock if the page is retired.
 *
 * And yes, we know, the semantics of this function are too complicated.
 * It's on the list to be cleaned up.
 */
int
page_lock_es(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim, int es)
{
	int		retval;
	kmutex_t	*pse = PAGE_SE_MUTEX(pp);
	int		upgraded;
	int		reclaim_it;

	ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);

	VM_STAT_ADD(page_lock_count);

	upgraded = 0;
	reclaim_it = 0;

	mutex_enter(pse);

	/* SE_EXCL_WANTED is only meaningful for exclusive requests. */
	ASSERT(((es & SE_EXCL_WANTED) == 0) ||
	    ((es & SE_EXCL_WANTED) && (se == SE_EXCL)));

	/* Retired pages are only lockable by callers passing SE_RETIRED. */
	if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
		mutex_exit(pse);
		VM_STAT_ADD(page_lock_retired);
		return (0);
	}

	/*
	 * es == 1 means the caller may attempt page relocation (see the
	 * notes above); opportunistically take an unlocked page
	 * exclusively rather than shared.
	 */
	if (se == SE_SHARED && es == 1 && pp->p_selock == 0) {
		se = SE_EXCL;
	}

	if ((reclaim == P_RECLAIM) && (PP_ISFREE(pp))) {

		reclaim_it = 1;
		if (se == SE_SHARED) {
			/*
			 * This is an interesting situation.
			 *
			 * Remember that p_free can only change if
			 * p_selock < 0.
			 * p_free does not depend on our holding `pse'.
			 * And, since we hold `pse', p_selock can not change.
			 * So, if p_free changes on us, the page is already
			 * exclusively held, and we would fail to get p_selock
			 * regardless.
			 *
			 * We want to avoid getting the share
			 * lock on a free page that needs to be reclaimed.
			 * It is possible that some other thread has the share
			 * lock and has left the free page on the cache list.
			 * pvn_vplist_dirty() does this for brief periods.
			 * If the se_share is currently SE_EXCL, we will fail
			 * to acquire p_selock anyway.  Blocking is the
			 * right thing to do.
			 * If we need to reclaim this page, we must get
			 * exclusive access to it, force the upgrade now.
			 * Again, we will fail to acquire p_selock if the
			 * page is not free and block.
			 */
			upgraded = 1;
			se = SE_EXCL;
			VM_STAT_ADD(page_lock_upgrade);
		}
	}

	if (se == SE_EXCL) {
		if (!(es & SE_EXCL_WANTED) && (pp->p_selock & SE_EWANTED)) {
			/*
			 * if the caller wants a writer lock (but did not
			 * specify exclusive access), and there is a pending
			 * writer that wants exclusive access, return failure
			 */
			retval = 0;
		} else if ((pp->p_selock & ~SE_EWANTED) == 0) {
			/* no reader/writer lock held */
			/* this clears our setting of the SE_EWANTED bit */
			pp->p_selock = SE_WRITER;
			retval = 1;
		} else {
			/* page is locked */
			if (es & SE_EXCL_WANTED) {
				/* set the SE_EWANTED bit */
				pp->p_selock |= SE_EWANTED;
			}
			retval = 0;
		}
	} else {
		/* Shared request: grant only if no writer and no SE_EWANTED. */
		retval = 0;
		if (pp->p_selock >= 0) {
			if ((pp->p_selock & SE_EWANTED) == 0) {
				pp->p_selock += SE_READER;
				retval = 1;
			}
		}
	}

	if (retval == 0) {
		/* Deleted pages can never be locked again; fail immediately. */
		if ((pp->p_selock & ~SE_EWANTED) == SE_DELETED) {
			VM_STAT_ADD(page_lock_deleted);
			mutex_exit(pse);
			return (retval);
		}

#ifdef VM_STATS
		VM_STAT_ADD(page_lock_miss);
		if (upgraded) {
			VM_STAT_ADD(page_lock_upgrade_failed);
		}
#endif
		if (lock) {
			VM_STAT_ADD(page_lock_miss_lock);
			mutex_exit(lock);
		}

		/*
		 * Now, wait for the page to be unlocked and
		 * release the lock protecting p_cv and p_selock.
		 */
		cv_wait(&pp->p_cv, pse);
		mutex_exit(pse);

		/*
		 * The page identity may have changed while we were
		 * blocked.  If we are willing to depend on "pp"
		 * still pointing to a valid page structure (i.e.,
		 * assuming page structures are not dynamically allocated
		 * or freed), we could try to lock the page if its
		 * identity hasn't changed.
		 *
		 * This needs to be measured, since we come back from
		 * cv_wait holding pse (the expensive part of this
		 * operation) we might as well try the cheap part.
		 * Though we would also have to confirm that dropping
		 * `lock' did not cause any grief to the callers.
		 */
		if (lock) {
			mutex_enter(lock);
		}
	} else {
		/*
		 * We have the page lock.
		 * If we needed to reclaim the page, and the page
		 * needed reclaiming (ie, it was free), then we
		 * have the page exclusively locked.  We may need
		 * to downgrade the page.
		 */
		ASSERT((upgraded) ?
		    ((PP_ISFREE(pp)) && PAGE_EXCL(pp)) : 1);
		mutex_exit(pse);

		/*
		 * We now hold this page's lock, either shared or
		 * exclusive.  This will prevent its identity from changing.
		 * The page, however, may or may not be free.  If the caller
		 * requested, and it is free, go reclaim it from the
		 * free list.  If the page can't be reclaimed, return failure
		 * so that the caller can start all over again.
		 *
		 * NOTE:page_reclaim() releases the page lock (p_selock)
		 * if it can't be reclaimed.
		 */
		if (reclaim_it) {
			if (!page_reclaim(pp, lock)) {
				VM_STAT_ADD(page_lock_bad_reclaim);
				retval = 0;
			} else {
				VM_STAT_ADD(page_lock_reclaim);
				if (upgraded) {
					page_downgrade(pp);
				}
			}
		}
	}
	return (retval);
}

/*
 * Clear the SE_EWANTED bit from p_selock.  This function allows
 * callers of page_lock_es and page_try_reclaim_lock to clear
 * their setting of this bit if they decide they no longer wish
 * to gain exclusive access to the page.  Currently only
 * delete_memory_thread uses this when the delete memory
 * operation is cancelled.
 */
void
page_lock_clr_exclwanted(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	mutex_enter(pse);
	pp->p_selock &= ~SE_EWANTED;
	/* Readers blocked by SE_EWANTED may now proceed; wake them. */
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}

/*
 * Read the comments inside of page_lock_es() carefully.
 *
 * SE_EXCL callers specifying es == SE_EXCL_WANTED will cause the
 * SE_EWANTED bit of p_selock to be set when the lock cannot be obtained.
 * This is used by threads subject to reader-starvation (eg. memory delete).
 *
 * When a thread using SE_EXCL_WANTED does not obtain the SE_EXCL lock,
 * it is expected that it will retry at a later time.  Threads that will
 * not retry the lock *must* call page_lock_clr_exclwanted to clear the
 * SE_EWANTED bit.  (When a thread using SE_EXCL_WANTED obtains the lock,
 * the bit is cleared.)
 */
int
page_try_reclaim_lock(page_t *pp, se_t se, int es)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;

	/* SE_EXCL_WANTED is only meaningful for exclusive requests. */
	ASSERT(((es & SE_EXCL_WANTED) == 0) ||
	    ((es & SE_EXCL_WANTED) && (se == SE_EXCL)));

	/* Retired pages are only lockable by callers passing SE_RETIRED. */
	if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
		mutex_exit(pse);
		VM_STAT_ADD(page_trylock_failed);
		return (0);
	}

	/*
	 * es == 1 means the caller may attempt page relocation (see
	 * page_lock_es()); opportunistically take an unlocked page
	 * exclusively rather than shared.
	 */
	if (se == SE_SHARED && es == 1 && old == 0) {
		se = SE_EXCL;
	}

	if (se == SE_SHARED) {
		if (!PP_ISFREE(pp)) {
			if (old >= 0) {
				/*
				 * Readers are not allowed when excl wanted
				 */
				if ((old & SE_EWANTED) == 0) {
					pp->p_selock = old + SE_READER;
					mutex_exit(pse);
					return (1);
				}
			}
			mutex_exit(pse);
			return (0);
		}
		/*
		 * The page is free, so we really want SE_EXCL (below)
		 */
		VM_STAT_ADD(page_try_reclaim_upgrade);
	}

	/*
	 * The caller wants a writer lock.  We try for it only if
	 * SE_EWANTED is not set, or if the caller specified
	 * SE_EXCL_WANTED.
	 */
	if (!(old & SE_EWANTED) || (es & SE_EXCL_WANTED)) {
		if ((old & ~SE_EWANTED) == 0) {
			/* no reader/writer lock held */
			/* this clears out our setting of the SE_EWANTED bit */
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	}
	if (es & SE_EXCL_WANTED) {
		/* page is locked, set the SE_EWANTED bit */
		pp->p_selock |= SE_EWANTED;
	}
	mutex_exit(pse);
	return (0);
}

/*
 * Acquire a page's "shared/exclusive" lock, but never block.
 * Returns 1 on success, 0 on failure.
 */
int
page_trylock(page_t *pp, se_t se)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	mutex_enter(pse);
	if (pp->p_selock & SE_EWANTED || PP_RETIRED(pp) ||
	    (se == SE_SHARED && PP_PR_NOSHARE(pp))) {
		/*
		 * Fail immediately if an exclusive waiter has priority
		 * (SE_EWANTED), if the page is retired, or if a shared
		 * lock is requested on a page that PP_PR_NOSHARE() says
		 * may not be shared (page-retire related).
		 */
		mutex_exit(pse);
		VM_STAT_ADD(page_trylock_failed);
		return (0);
	}

	if (se == SE_EXCL) {
		if (pp->p_selock == 0) {
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	} else {
		if (pp->p_selock >= 0) {
			pp->p_selock += SE_READER;
			mutex_exit(pse);
			return (1);
		}
	}
	mutex_exit(pse);
	return (0);
}

/*
 * Variant of page_unlock() specifically for the page freelist
 * code.  The mere existence of this code is a vile hack that
 * has resulted due to the backwards locking order of the page
 * freelist manager; please don't call it.
 */
void
page_unlock_nocapture(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		/* Last reader: drop the shared lock, preserving SE_EWANTED. */
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock_nocapture: page %p is deleted", (void *)pp);
	} else if (old < 0) {
		/*
		 * Negative p_selock is the exclusive (SE_WRITER) state;
		 * release it, keeping only the SE_EWANTED bit.
		 */
		pp->p_selock &= SE_EWANTED;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		/* More than one reader: just drop our share count. */
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock_nocapture: page %p is not locked",
		    (void *)pp);
	}

	mutex_exit(pse);
}

/*
 * Release the page's "shared/exclusive" lock and wake up anyone
 * who might be waiting for it.  Unlike page_unlock_nocapture() above,
 * if the page was slated for capture (PR_CAPTURE) and became fully
 * unlocked here, it is re-locked SE_EXCL and handed off to
 * page_unlock_capture().
 */
void
page_unlock(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		/* Last reader: drop the shared lock, preserving SE_EWANTED. */
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock: page %p is deleted", (void *)pp);
	} else if (old < 0) {
		/*
		 * Negative p_selock is the exclusive (SE_WRITER) state;
		 * release it, keeping only the SE_EWANTED bit.
		 */
		pp->p_selock &= SE_EWANTED;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		/* More than one reader: just drop our share count. */
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock: page %p is not locked", (void *)pp);
	}

	if (pp->p_selock == 0) {
		/*
		 * If the T_CAPTURING bit is set, that means that we should
		 * not try and capture the page again as we could recurse
		 * which could lead to a stack overflow panic or spending a
		 * relatively long time in the kernel making no progress.
		 */
		if ((pp->p_toxic & PR_CAPTURE) &&
		    !(curthread->t_flag & T_CAPTURING) &&
		    !PP_RETIRED(pp)) {
			/*
			 * Take the page exclusively for the capture code;
			 * page_unlock_capture() owns the lock from here.
			 */
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			page_unlock_capture(pp);
		} else {
			mutex_exit(pse);
		}
	} else {
		mutex_exit(pse);
	}
}

/*
 * Try to upgrade the lock on the page from a "shared" to an
 * "exclusive" lock.  Since this upgrade operation is done while
 * holding the mutex protecting this page, no one else can acquire this page's
 * lock and change the page.  Thus, it is safe to drop the "shared"
 * lock and attempt to acquire the "exclusive" lock.
 *
 * Returns 1 on success, 0 on failure.
7007c478bd9Sstevel@tonic-gate */ 7017c478bd9Sstevel@tonic-gate int 7027c478bd9Sstevel@tonic-gate page_tryupgrade(page_t *pp) 7037c478bd9Sstevel@tonic-gate { 7047c478bd9Sstevel@tonic-gate kmutex_t *pse = PAGE_SE_MUTEX(pp); 7057c478bd9Sstevel@tonic-gate 7067c478bd9Sstevel@tonic-gate mutex_enter(pse); 7077c478bd9Sstevel@tonic-gate if (!(pp->p_selock & SE_EWANTED)) { 7087c478bd9Sstevel@tonic-gate /* no threads want exclusive access, try upgrade */ 7097c478bd9Sstevel@tonic-gate if (pp->p_selock == SE_READER) { 7107c478bd9Sstevel@tonic-gate /* convert to exclusive lock */ 7117c478bd9Sstevel@tonic-gate pp->p_selock = SE_WRITER; 7127c478bd9Sstevel@tonic-gate mutex_exit(pse); 7137c478bd9Sstevel@tonic-gate return (1); 7147c478bd9Sstevel@tonic-gate } 7157c478bd9Sstevel@tonic-gate } 7167c478bd9Sstevel@tonic-gate mutex_exit(pse); 7177c478bd9Sstevel@tonic-gate return (0); 7187c478bd9Sstevel@tonic-gate } 7197c478bd9Sstevel@tonic-gate 7207c478bd9Sstevel@tonic-gate /* 7217c478bd9Sstevel@tonic-gate * Downgrade the "exclusive" lock on the page to a "shared" lock 7227c478bd9Sstevel@tonic-gate * while holding the mutex protecting this page's p_selock field. 
7237c478bd9Sstevel@tonic-gate */ 7247c478bd9Sstevel@tonic-gate void 7257c478bd9Sstevel@tonic-gate page_downgrade(page_t *pp) 7267c478bd9Sstevel@tonic-gate { 7277c478bd9Sstevel@tonic-gate kmutex_t *pse = PAGE_SE_MUTEX(pp); 7287c478bd9Sstevel@tonic-gate int excl_waiting; 7297c478bd9Sstevel@tonic-gate 7307c478bd9Sstevel@tonic-gate ASSERT((pp->p_selock & ~SE_EWANTED) != SE_DELETED); 7317c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 7327c478bd9Sstevel@tonic-gate 7337c478bd9Sstevel@tonic-gate mutex_enter(pse); 7347c478bd9Sstevel@tonic-gate excl_waiting = pp->p_selock & SE_EWANTED; 7357c478bd9Sstevel@tonic-gate pp->p_selock = SE_READER | excl_waiting; 7367c478bd9Sstevel@tonic-gate if (CV_HAS_WAITERS(&pp->p_cv)) 7377c478bd9Sstevel@tonic-gate cv_broadcast(&pp->p_cv); 7387c478bd9Sstevel@tonic-gate mutex_exit(pse); 7397c478bd9Sstevel@tonic-gate } 7407c478bd9Sstevel@tonic-gate 7417c478bd9Sstevel@tonic-gate void 7427c478bd9Sstevel@tonic-gate page_lock_delete(page_t *pp) 7437c478bd9Sstevel@tonic-gate { 7447c478bd9Sstevel@tonic-gate kmutex_t *pse = PAGE_SE_MUTEX(pp); 7457c478bd9Sstevel@tonic-gate 7467c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp)); 7477c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL); 7487c478bd9Sstevel@tonic-gate ASSERT(pp->p_offset == (u_offset_t)-1); 7497c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp)); 7507c478bd9Sstevel@tonic-gate 7517c478bd9Sstevel@tonic-gate mutex_enter(pse); 7527c478bd9Sstevel@tonic-gate pp->p_selock = SE_DELETED; 7537c478bd9Sstevel@tonic-gate if (CV_HAS_WAITERS(&pp->p_cv)) 7547c478bd9Sstevel@tonic-gate cv_broadcast(&pp->p_cv); 7557c478bd9Sstevel@tonic-gate mutex_exit(pse); 7567c478bd9Sstevel@tonic-gate } 7577c478bd9Sstevel@tonic-gate 7588b464eb8Smec int 7598b464eb8Smec page_deleted(page_t *pp) 7608b464eb8Smec { 7618b464eb8Smec return (pp->p_selock == SE_DELETED); 7628b464eb8Smec } 7638b464eb8Smec 7647c478bd9Sstevel@tonic-gate /* 7657c478bd9Sstevel@tonic-gate * Implement the io lock for pages 7667c478bd9Sstevel@tonic-gate */ 
7677c478bd9Sstevel@tonic-gate void 7687c478bd9Sstevel@tonic-gate page_iolock_init(page_t *pp) 7697c478bd9Sstevel@tonic-gate { 7707c478bd9Sstevel@tonic-gate pp->p_iolock_state = 0; 7717c478bd9Sstevel@tonic-gate cv_init(&pp->p_io_cv, NULL, CV_DEFAULT, NULL); 7727c478bd9Sstevel@tonic-gate } 7737c478bd9Sstevel@tonic-gate 7747c478bd9Sstevel@tonic-gate /* 7757c478bd9Sstevel@tonic-gate * Acquire the i/o lock on a page. 7767c478bd9Sstevel@tonic-gate */ 7777c478bd9Sstevel@tonic-gate void 7787c478bd9Sstevel@tonic-gate page_io_lock(page_t *pp) 7797c478bd9Sstevel@tonic-gate { 7807c478bd9Sstevel@tonic-gate kmutex_t *pio; 7817c478bd9Sstevel@tonic-gate 7827c478bd9Sstevel@tonic-gate pio = PAGE_IO_MUTEX(pp); 7837c478bd9Sstevel@tonic-gate mutex_enter(pio); 7847c478bd9Sstevel@tonic-gate while (pp->p_iolock_state & PAGE_IO_INUSE) { 7857c478bd9Sstevel@tonic-gate cv_wait(&(pp->p_io_cv), pio); 7867c478bd9Sstevel@tonic-gate } 7877c478bd9Sstevel@tonic-gate pp->p_iolock_state |= PAGE_IO_INUSE; 7887c478bd9Sstevel@tonic-gate mutex_exit(pio); 7897c478bd9Sstevel@tonic-gate } 7907c478bd9Sstevel@tonic-gate 7917c478bd9Sstevel@tonic-gate /* 7927c478bd9Sstevel@tonic-gate * Release the i/o lock on a page. 7937c478bd9Sstevel@tonic-gate */ 7947c478bd9Sstevel@tonic-gate void 7957c478bd9Sstevel@tonic-gate page_io_unlock(page_t *pp) 7967c478bd9Sstevel@tonic-gate { 7977c478bd9Sstevel@tonic-gate kmutex_t *pio; 7987c478bd9Sstevel@tonic-gate 7997c478bd9Sstevel@tonic-gate pio = PAGE_IO_MUTEX(pp); 8007c478bd9Sstevel@tonic-gate mutex_enter(pio); 801a71e32b6Sstans cv_broadcast(&pp->p_io_cv); 8027c478bd9Sstevel@tonic-gate pp->p_iolock_state &= ~PAGE_IO_INUSE; 8037c478bd9Sstevel@tonic-gate mutex_exit(pio); 8047c478bd9Sstevel@tonic-gate } 8057c478bd9Sstevel@tonic-gate 8067c478bd9Sstevel@tonic-gate /* 8077c478bd9Sstevel@tonic-gate * Try to acquire the i/o lock on a page without blocking. 8087c478bd9Sstevel@tonic-gate * Returns 1 on success, 0 on failure. 
8097c478bd9Sstevel@tonic-gate */ 8107c478bd9Sstevel@tonic-gate int 8117c478bd9Sstevel@tonic-gate page_io_trylock(page_t *pp) 8127c478bd9Sstevel@tonic-gate { 8137c478bd9Sstevel@tonic-gate kmutex_t *pio; 8147c478bd9Sstevel@tonic-gate 8157c478bd9Sstevel@tonic-gate if (pp->p_iolock_state & PAGE_IO_INUSE) 8167c478bd9Sstevel@tonic-gate return (0); 8177c478bd9Sstevel@tonic-gate 8187c478bd9Sstevel@tonic-gate pio = PAGE_IO_MUTEX(pp); 8197c478bd9Sstevel@tonic-gate mutex_enter(pio); 8207c478bd9Sstevel@tonic-gate 8217c478bd9Sstevel@tonic-gate if (pp->p_iolock_state & PAGE_IO_INUSE) { 8227c478bd9Sstevel@tonic-gate mutex_exit(pio); 8237c478bd9Sstevel@tonic-gate return (0); 8247c478bd9Sstevel@tonic-gate } 8257c478bd9Sstevel@tonic-gate pp->p_iolock_state |= PAGE_IO_INUSE; 8267c478bd9Sstevel@tonic-gate mutex_exit(pio); 8277c478bd9Sstevel@tonic-gate 8287c478bd9Sstevel@tonic-gate return (1); 8297c478bd9Sstevel@tonic-gate } 8307c478bd9Sstevel@tonic-gate 8317c478bd9Sstevel@tonic-gate /* 832a71e32b6Sstans * Wait until the i/o lock is not held. 833a71e32b6Sstans */ 834a71e32b6Sstans void 835a71e32b6Sstans page_io_wait(page_t *pp) 836a71e32b6Sstans { 837a71e32b6Sstans kmutex_t *pio; 838a71e32b6Sstans 839a71e32b6Sstans pio = PAGE_IO_MUTEX(pp); 840a71e32b6Sstans mutex_enter(pio); 841a71e32b6Sstans while (pp->p_iolock_state & PAGE_IO_INUSE) { 842a71e32b6Sstans cv_wait(&(pp->p_io_cv), pio); 843a71e32b6Sstans } 844a71e32b6Sstans mutex_exit(pio); 845a71e32b6Sstans } 846a71e32b6Sstans 847a71e32b6Sstans /* 848a71e32b6Sstans * Returns 1 on success, 0 on failure. 849a71e32b6Sstans */ 850a71e32b6Sstans int 851a71e32b6Sstans page_io_locked(page_t *pp) 852a71e32b6Sstans { 853a71e32b6Sstans return (pp->p_iolock_state & PAGE_IO_INUSE); 854a71e32b6Sstans } 855a71e32b6Sstans 856a71e32b6Sstans /* 8577c478bd9Sstevel@tonic-gate * Assert that the i/o lock on a page is held. 8587c478bd9Sstevel@tonic-gate * Returns 1 on success, 0 on failure. 
 */
int
page_iolock_assert(page_t *pp)
{
	return (page_io_locked(pp));
}

/*
 * Wrapper exported to kernel routines that are built
 * platform-independent (the macro is platform-dependent;
 * the size of vph_mutex[] is based on NCPU).
 *
 * Note that you can do stress testing on this by setting the
 * variable page_vnode_mutex_stress to something other than
 * zero in a DEBUG kernel in a debugger after loading the kernel.
 * Setting it after the kernel is running may not work correctly.
 */
#ifdef DEBUG
static int page_vnode_mutex_stress = 0;
#endif

/*
 * Return the mutex protecting the vnode page hash chain for vp.
 * kvp and zvp each get a dedicated slot past the end of the hash
 * table; all other vnodes hash into vph_mutex[] by address.  With
 * the DEBUG stress knob set, every vnode maps to vph_mutex[0] to
 * maximize contention.
 */
kmutex_t *
page_vnode_mutex(vnode_t *vp)
{
	if (vp == &kvp)
		return (&vph_mutex[VPH_TABLE_SIZE + 0]);

	if (vp == &zvp)
		return (&vph_mutex[VPH_TABLE_SIZE + 1]);
#ifdef DEBUG
	if (page_vnode_mutex_stress != 0)
		return (&vph_mutex[0]);
#endif

	return (&vph_mutex[VP_HASH_FUNC(vp)]);
}

/*
 * Exported wrapper around the platform-dependent PAGE_SE_MUTEX() macro.
 */
kmutex_t *
page_se_mutex(page_t *pp)
{
	return (PAGE_SE_MUTEX(pp));
}

#ifdef VM_STATS
uint_t pszclck_stat[4];		/* hit counts for the page_szc_lock() cases */
#endif
/*
 * Find, take and return a mutex held by hat_page_demote().
 * Called by page_demote_vp_pages() before hat_page_demote() call and by
 * routines that want to block hat_page_demote() but can't do it
 * via locking all constituent pages.
 *
 * Return NULL if p_szc is 0.
 *
 * It should only be used for pages that can be demoted by hat_page_demote()
 * i.e. non swapfs file system pages.  The logic here is lifted from
 * sfmmu_mlspl_enter() except there's no need to worry about p_szc increase
 * since the page is locked and not free.
 *
 * Hash of the root page is used to find the lock.
 * To find the root in the presence of hat_page_demote() changing the location
 * of the root this routine relies on the fact that hat_page_demote() changes
 * root last.
 *
 * If NULL is returned pp's p_szc is guaranteed to be 0. If non NULL is
 * returned pp's p_szc may be any value.
 */
kmutex_t *
page_szc_lock(page_t *pp)
{
	kmutex_t	*mtx;
	page_t		*rootpp;
	uint_t		szc;
	uint_t		rszc;
	uint_t		pszc = pp->p_szc;	/* snapshot of the size code */

	ASSERT(pp != NULL);
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(!PP_ISFREE(pp));
	ASSERT(pp->p_vnode != NULL);
	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
	ASSERT(!PP_ISKAS(pp));

again:
	if (pszc == 0) {
		VM_STAT_ADD(pszclck_stat[0]);
		return (NULL);
	}

	/* The lock lives in the root page */

	rootpp = PP_GROUPLEADER(pp, pszc);
	mtx = PAGE_SZC_MUTEX(rootpp);
	mutex_enter(mtx);

	/*
	 * since p_szc can only decrease if pp == rootpp
	 * rootpp will be always the same i.e we have the right root
	 * regardless of rootpp->p_szc.
	 * If location of pp's root didn't change after we took
	 * the lock we have the right root. return mutex hashed off it.
	 */
	if (pp == rootpp || (rszc = rootpp->p_szc) == pszc) {
		VM_STAT_ADD(pszclck_stat[1]);
		return (mtx);
	}

	/*
	 * root location changed because page got demoted.
	 * locate the new root.
	 */
	if (rszc < pszc) {
		szc = pp->p_szc;
		ASSERT(szc < pszc);
		mutex_exit(mtx);
		pszc = szc;
		VM_STAT_ADD(pszclck_stat[2]);
		goto again;
	}

	VM_STAT_ADD(pszclck_stat[3]);
	/*
	 * current hat_page_demote not done yet.
	 * wait for it to finish.
	 * The empty enter/exit pair on the (larger) root's mutex simply
	 * blocks until the in-progress demotion drops that lock, then we
	 * retry from the top with a fresh p_szc snapshot.
	 */
	mutex_exit(mtx);
	rootpp = PP_GROUPLEADER(rootpp, rszc);
	mtx = PAGE_SZC_MUTEX(rootpp);
	mutex_enter(mtx);
	mutex_exit(mtx);
	ASSERT(rootpp->p_szc < rszc);
	goto again;
}

/*
 * Return non-zero if the current thread holds pp's size-code mutex
 * (hashed off the root page).
 */
int
page_szc_lock_assert(page_t *pp)
{
	page_t *rootpp = PP_PAGEROOT(pp);
	kmutex_t *mtx = PAGE_SZC_MUTEX(rootpp);

	return (MUTEX_HELD(mtx));
}

/*
 * memseg locking
 */
static krwlock_t memsegslock;

/*
 * memlist (phys_install, phys_avail) locking.
 */
static krwlock_t memlists_lock;

/* Non-blocking acquire of the memsegs lock; returns non-zero on success. */
int
memsegs_trylock(int writer)
{
	return (rw_tryenter(&memsegslock, writer ? RW_WRITER : RW_READER));
}

/* Blocking acquire of the memsegs lock, as reader or writer. */
void
memsegs_lock(int writer)
{
	rw_enter(&memsegslock, writer ? RW_WRITER : RW_READER);
}

/*ARGSUSED*/
void
memsegs_unlock(int writer)
{
	rw_exit(&memsegslock);
}

/* Return non-zero if the memsegs lock is held (by anyone, either mode). */
int
memsegs_lock_held(void)
{
	return (RW_LOCK_HELD(&memsegslock));
}

void
memlist_read_lock(void)
{
	rw_enter(&memlists_lock, RW_READER);
}

void
memlist_read_unlock(void)
{
	rw_exit(&memlists_lock);
}

void
memlist_write_lock(void)
{
	rw_enter(&memlists_lock, RW_WRITER);
}

void
memlist_write_unlock(void)
{
	rw_exit(&memlists_lock);
}