/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>
#include <vm/_vm_phys.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of, or a combination of, locks.  If a
 *	field is annotated with two of these locks then holding either is
 *	sufficient for read access but both are required for write access.
 *	The queue lock for a page depends on the value of its queue field and
 *	is described in detail below.
 *
 *	The following annotations are possible:
 *	(A) the field must be accessed using atomic(9) and may require
 *	    additional synchronization.
 *	(B) the page busy lock.
 *	(C) the field is immutable.
 *	(F) the per-domain lock for the free queues.
 *	(M) Machine dependent, defined by pmap layer.
 *	(O) the object that the page belongs to.
 *	(Q) the page's queue lock.
 *
 *	The busy lock is an embedded reader-writer lock that protects the
 *	page's contents and identity (i.e., its <object, pindex> tuple) as
 *	well as certain valid/dirty modifications.  To avoid bloating the
 *	page structure, the busy lock lacks some of the features available
 *	to the kernel's general-purpose synchronization primitives.  As a
 *	result, busy lock ordering rules are not verified, lock recursion is
 *	not detected, and an attempt to xbusy a busy page or sbusy an xbusy
 *	page will trigger a panic rather than causing the thread to block.
 *	vm_page_sleep_if_busy() can be used to sleep until the page's busy
 *	state changes, after which the caller must re-lookup the page and
 *	re-evaluate its state.  vm_page_busy_acquire() will block until
 *	the lock is acquired.
 *
 *	The valid field is protected by the page busy lock (B) and object
 *	lock (O).  Transitions from invalid to valid are generally done
 *	via I/O or zero filling and do not require the object lock.
 *	These must be protected with the busy lock to prevent page-in or
 *	creation races.  Page invalidation generally happens as a result
 *	of truncate or msync.  When invalidated, pages must not be present
 *	in pmap and must hold the object lock to prevent concurrent
 *	speculative read-only mappings that do not require busy.  I/O
 *	routines may check for validity without a lock if they are prepared
 *	to handle invalidation races with higher level locks (vnode) or are
 *	unconcerned with races so long as they hold a reference to prevent
 *	recycling.  When a valid bit is set while holding a shared busy
 *	lock (A), atomic operations are used to protect against concurrent
 *	modification.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is a mix of machine dependent (M) and busy (B).  In
 *	the machine-independent layer, the page busy must be held to
 *	operate on the field.  However, the pmap layer is permitted to
 *	set all bits within the field without holding that lock.  If the
 *	underlying architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().  An
 *	exclusive busy lock combined with pmap_remove_{write/all}() is the
 *	only way to ensure a page can not become dirty.  I/O generally
 *	removes the page from pmap to ensure exclusive access and atomic
 *	writes.
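 *
 *	For example (an illustrative sketch, not a prescribed usage
 *	pattern), a caller that needs the page's contents and identity to
 *	remain stable can use the non-blocking exclusive-busy interface:
 *
 *		if (vm_page_tryxbusy(m) != 0) {
 *			... contents and <object, pindex> are stable ...
 *			vm_page_xunbusy(m);
 *		}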
 *
 *	The ref_count field tracks references to the page.  References that
 *	prevent the page from being reclaimable are called wirings and are
 *	counted in the low bits of ref_count.  The containing object's
 *	reference, if one exists, is counted using the VPRC_OBJREF bit in the
 *	ref_count field.  Additionally, the VPRC_BLOCKED bit is used to
 *	atomically check for wirings and prevent new wirings via
 *	pmap_extract_and_hold().  When a page belongs to an object, it may be
 *	wired only when the object is locked, or the page is busy, or by
 *	pmap_extract_and_hold().  As a result, if the object is locked and the
 *	page is not busy (or is exclusively busied by the current thread), and
 *	the page is unmapped, its wire count will not increase.  The ref_count
 *	field is updated using atomic operations in most cases, except when it
 *	is known that no other references to the page exist, such as in the
 *	page allocator.  A page may be present in the page queues, or even
 *	actively scanned by the page daemon, without an explicitly counted
 *	reference.  The page daemon must therefore handle the possibility of a
 *	concurrent free of the page.
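 *
 *	As an illustration (a sketch, not normative), a raw ref_count value
 *	is decoded with the VPRC_* macros defined below:
 *
 *		u_int rc = atomic_load_int(&m->ref_count);
 *		bool has_objref = (rc & VPRC_OBJREF) != 0;
 *		u_int wirings = VPRC_WIRE_COUNT(rc);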
 *
 *	The queue state of a page consists of the queue and act_count fields
 *	of its atomically updated state, and the subset of atomic flags
 *	specified by PGA_QUEUE_STATE_MASK.  The queue field contains the
 *	page's page queue index, or PQ_NONE if it does not belong to a page
 *	queue.  To modify the queue field, the page queue lock corresponding
 *	to the old value must be held, unless that value is PQ_NONE, in which
 *	case the queue index must be updated using an atomic RMW operation.
 *	There is one exception to this rule: the page daemon may transition
 *	the queue field from PQ_INACTIVE to PQ_NONE immediately prior to
 *	freeing the page during an inactive queue scan.  At that point the
 *	page is already dequeued and no other references to that vm_page
 *	structure can exist.  The PGA_ENQUEUED flag, when set, indicates that
 *	the page structure is physically inserted into the queue corresponding
 *	to the page's queue index, and may only be set or cleared with the
 *	corresponding page queue lock held.
 *
 *	To avoid contention on page queue locks, page queue operations
 *	(enqueue, dequeue, requeue) are batched using fixed-size per-CPU
 *	queues.  A deferred operation is requested by setting one of the
 *	flags in PGA_QUEUE_OP_MASK and inserting an entry into a batch queue.
 *	When a queue is full, an attempt to insert a new entry will lock the
 *	page queues and trigger processing of the pending entries.  The
 *	type-stability of vm_page structures is crucial to this scheme since
 *	the processing of entries in a given batch queue may be deferred
 *	indefinitely.  In particular, a page may be freed with pending batch
 *	queue entries.  The page queue operation flags must be set using
 *	atomic RMW operations.
 */

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif

typedef union vm_page_astate {
	struct {
		uint16_t flags;
		uint8_t queue;
		uint8_t act_count;
	};
	uint32_t _bits;
} vm_page_astate_t;
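/*
 * For example (an illustrative sketch), the _bits overlay lets a lockless,
 * coherent snapshot of the queue state be taken and examined as a whole:
 *
 *	vm_page_astate_t as = vm_page_astate_load(m);
 *	if (as.queue == PQ_ACTIVE && as.act_count < ACT_MAX)
 *		... the page is active and can gain activity credit ...
 */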
struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
		struct {
			void *slab;
			void *zone;
		} uma;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page (C) */
	struct md_page md;		/* machine dependent stuff */
	u_int ref_count;		/* page references (A) */
	u_int busy_lock;		/* busy owners lock (A) */
	union vm_page_astate a;		/* state accessed atomically (A) */
	uint8_t order;			/* index of the buddy queue (F) */
	uint8_t pool;			/* vm_phys freepool index (F) */
	uint8_t flags;			/* page PG_* flags (P) */
	uint8_t oflags;			/* page VPO_* flags (O) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index (C) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* valid DEV_BSIZE chunk map (O,B) */
	vm_page_bits_t dirty;		/* dirty DEV_BSIZE chunk map (M,B) */
};

/*
 * Special bits used in the ref_count field.
 *
 * ref_count is normally used to count wirings that prevent the page from being
 * reclaimed, but also supports several special types of references that do not
 * prevent reclamation.  Accesses to the ref_count field must be atomic unless
 * the page is unallocated.
 *
 * VPRC_OBJREF is the reference held by the containing object.  It can be set
 * or cleared only when the corresponding object's write lock is held.
 *
 * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
 * attempting to tear down all mappings of a given page.  The page busy lock
 * and object write lock must both be held in order to set or clear this bit.
 */
#define	VPRC_BLOCKED	0x40000000u	/* mappings are being removed */
#define	VPRC_OBJREF	0x80000000u	/* object reference, cleared with (O) */
#define	VPRC_WIRE_COUNT(c)	((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
#define	VPRC_WIRE_COUNT_MAX	(~(VPRC_BLOCKED | VPRC_OBJREF))

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	 indicates that the page is not under PV management but
 *	 otherwise should be treated as a normal page.  Pages not
 *	 under PV management cannot be paged out via the
 *	 object/vm_page_t because there is no knowledge of their pte
 *	 mappings, and such pages are also not on any PQ queue.
 *
 */
#define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, except that support for owner identity is removed
 * because of size constraints.  Checks on lock recursion are therefore
 * not possible, and the effectiveness of the lock assertions is somewhat
 * reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVE	VPB_BIT_EXCLUSIVE
#ifdef INVARIANTS
#define	VPB_CURTHREAD_EXCLUSIVE						\
	(VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
#else
#define	VPB_CURTHREAD_EXCLUSIVE	VPB_SINGLE_EXCLUSIVE
#endif

#define	VPB_UNBUSIED	VPB_SHARERS_WORD(0)

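/*
 * For example (an illustrative sketch), a busy_lock word encoding three
 * shared owners is VPB_SHARERS_WORD(3), and the count is recovered with
 * VPB_SHARERS():
 *
 *	u_int word = VPB_SHARERS_WORD(3);
 *	MPASS(VPB_SHARERS(word) == 3 && (word & VPB_BIT_SHARED) != 0);
 */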
/* Freed lock blocks both shared and exclusive. */
#define	VPB_FREED	(0xffffffff - VPB_BIT_SHARED)

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

#ifdef _KERNEL
extern vm_page_t bogus_page;
#endif	/* _KERNEL */

extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

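/*
 * For example (an illustrative sketch), the physical-address lock macros
 * hash the address to one of the padded pa_lock mutexes; PA_UNLOCK_COND()
 * supports the common "remember what was locked" pattern, unlocking and
 * zeroing only when the saved address is nonzero:
 *
 *	vm_paddr_t locked_pa = 0;
 *
 *	PA_LOCK(pa);
 *	locked_pa = pa;
 *	...
 *	PA_UNLOCK_COND(locked_pa);
 */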
/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 *
 * PGA_NOSYNC must be set and cleared with the page busy lock held.
 *
 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
 * from a page queue, respectively.  It determines whether the plinks.q field
 * of the page is valid.  To set or clear this flag, the page's "queue" field
 * must be a valid queue index, and the corresponding page queue lock must be
 * held.
 *
 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
 * queue, and cleared when the dequeue request is processed.  A page may
 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
 * is requested after the page is scheduled to be enqueued but before it is
 * actually inserted into the page queue.
 *
 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
 * in its page queue.
 *
 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
 * the inactive queue, thus bypassing LRU.
 *
 * The PGA_DEQUEUE, PGA_REQUEUE and PGA_REQUEUE_HEAD flags must be set using
 * an atomic RMW operation to ensure that the "queue" field is a valid queue
 * index, and the corresponding page queue lock must be held when clearing
 * any of the flags.
 *
 * PGA_SWAP_FREE is used to defer freeing swap space to the pageout daemon
 * when the context that dirties the page does not have the object write lock
 * held.
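 *
 * As a sketch (not normative), the MI layer's canonical use of
 * PGA_WRITEABLE is indirect, via the pmap helper:
 *
 *	if (pmap_page_is_write_mapped(m))
 *		vm_page_dirty(m);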
 */
#define	PGA_WRITEABLE	0x0001		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x0002		/* page has been referenced */
#define	PGA_EXECUTABLE	0x0004		/* page may be mapped executable */
#define	PGA_ENQUEUED	0x0008		/* page is enqueued in a page queue */
#define	PGA_DEQUEUE	0x0010		/* page is due to be dequeued */
#define	PGA_REQUEUE	0x0020		/* page is due to be requeued */
#define	PGA_REQUEUE_HEAD 0x0040		/* page requeue should bypass LRU */
#define	PGA_NOSYNC	0x0080		/* do not collect for syncer */
#define	PGA_SWAP_FREE	0x0100		/* page with swap space was dirtied */
#define	PGA_SWAP_SPACE	0x0200		/* page has allocated swap space */

#define	PGA_QUEUE_OP_MASK	(PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_QUEUE_OP_MASK)

/*
 * Page flags.  Updates to these flags are not synchronized, and thus they
 * must be set during page allocation or free to avoid races.
 *
 * The PG_PCPU_CACHE flag is set at allocation time if the page was
 * allocated from a per-CPU cache.  It is cleared the next time that the
 * page is allocated from the physical memory allocator.
 */
#define	PG_PCPU_CACHE	0x01		/* was allocated from per-CPU caches */
#define	PG_FICTITIOUS	0x02		/* physical page doesn't exist */
#define	PG_ZERO		0x04		/* page is zeroed */
#define	PG_MARKER	0x08		/* special queue marker page */
#define	PG_NODUMP	0x10		/* don't include this page in a dump */

/*
 * Misc constants.
 */
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3
#define	ACT_INIT		5
#define	ACT_MAX			64

#ifdef _KERNEL

#include <sys/kassert.h>
#include <machine/atomic.h>

/*
 *	Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs.  The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags, and ignore others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 dedicated for flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (n) - vm_page_alloc_noobj() and vm_page_alloc_freelist() support the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
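 *
 * For example (an illustrative sketch), a caller that intends to allocate
 * eight more pages shortly afterwards might encode that hint as:
 *
 *	m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_COUNT(8));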
 */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acn) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acn) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acgnp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acgnp) Allocate a zeroed page */
#define	VM_ALLOC_NORECLAIM	0x0080	/* (c) Do not reclaim after failure */
#define	VM_ALLOC_AVAIL0		0x0100
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_NOCREAT	0x0400	/* (gp) Don't create a page */
#define	VM_ALLOC_AVAIL1		0x0800
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acgnp) Do not sleep */
#define	VM_ALLOC_COUNT_MAX	0xffff
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT_MASK	(VM_ALLOC_COUNT(VM_ALLOC_COUNT_MAX))
#define	VM_ALLOC_COUNT(count)	({				\
	KASSERT((count) <= VM_ALLOC_COUNT_MAX,			\
	    ("%s: invalid VM_ALLOC_COUNT value", __func__));	\
	(count) << VM_ALLOC_COUNT_SHIFT;			\
})

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT))
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK))
		pflags |= VM_ALLOC_WAITOK;
	if ((malloc_flags & M_NORECLAIM))
		pflags |= VM_ALLOC_NORECLAIM;
	return (pflags);
}
#endif

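/*
 * For example (an illustrative sketch), a consumer holding malloc(9)-style
 * flags can derive page allocation flags and allocate an unnamed page:
 *
 *	pflags = malloc2vm_flags(M_NOWAIT | M_ZERO) | VM_ALLOC_WIRED;
 *	m = vm_page_alloc_noobj(pflags);
 *	if (m == NULL)
 *		... handle allocation failure ...
 */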
/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4

bool vm_page_busy_acquire(vm_page_t m, int allocflags);
void vm_page_busy_downgrade(vm_page_t m);
int vm_page_busy_tryupgrade(vm_page_t m);
bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags);
void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_alloc_freelist_domain(int, int, int);
vm_page_t vm_page_alloc_noobj(int);
vm_page_t vm_page_alloc_noobj_domain(int, int);
vm_page_t vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
    int allocflags, vm_page_t *ma, int count);
int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
    int allocflags);
int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
    vm_pindex_t pindex, int allocflags);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
void vm_page_free_invalid(vm_page_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_invalid(vm_page_t m);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
void vm_page_pqbatch_drain(void);
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
    vm_page_astate_t new);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
int vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
int vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
int vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    int desired_runs);
void vm_page_reference(vm_page_t m);
#define	VPR_TRYFREE	0x01
#define	VPR_NOREUSE	0x02
void vm_page_release(vm_page_t m, int flags);
void vm_page_release_locked(vm_page_t m, int flags);
vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
bool vm_page_remove(vm_page_t);
bool vm_page_remove_xbusy(vm_page_t);
int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mold);
int vm_page_sbusied(vm_page_t m);
vm_page_bits_t vm_page_set_dirty(vm_page_t m);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_remove_all(vm_page_t m);
bool vm_page_try_remove_write(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
int vm_page_tryxbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
void vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
bool vm_page_wire_mapped(vm_page_t m);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_hard_unchecked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
void vm_page_valid(vm_page_t m);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);
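/*
 * For example (an illustrative sketch), the grab interface combines
 * lookup, allocation and busying in one call; a non-sleeping consumer
 * might do:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NOWAIT | VM_ALLOC_WIRED);
 *	VM_OBJECT_WUNLOCK(object);
 *	if (m != NULL) {
 *		... the page is returned exclusively busied and wired;
 *		    the wiring must eventually be released ...
 *		vm_page_xunbusy(m);
 *	}
 */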

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_busy_fetch(m)	atomic_load_int(&(m)->busy_lock)

#define	vm_page_assert_busied(m)					\
	KASSERT(vm_page_busied(m),					\
	    ("vm_page_assert_busied: page %p not busy @ %s:%d",	\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d",\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) !=		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_unbusied: page %p busy_lock %#x owned"	\
	     " by me (%p) @ %s:%d",					\
	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__))

#define	vm_page_assert_xbusied_unchecked(m) do {			\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__));					\
} while (0)
#define	vm_page_assert_xbusied(m) do {					\
	vm_page_assert_xbusied_unchecked(m);				\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) ==		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"	\
	     " by me (%p) @ %s:%d",					\
	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__));	\
} while (0)

#define	vm_page_busied(m)						\
	(vm_page_busy_fetch(m) != VPB_UNBUSIED)

#define	vm_page_xbusied(m)						\
	((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)

#define	vm_page_busy_freed(m)						\
	(vm_page_busy_fetch(m) == VPB_FREED)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
#define	vm_page_xunbusy_unchecked(m) do {				\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard_unchecked(m);			\
} while (0)

#ifdef INVARIANTS
void vm_page_object_busy_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	vm_page_object_busy_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
/*
 * Claim ownership of a page's xbusy state.  In non-INVARIANTS kernels this
 * operation is a no-op since ownership is not tracked.  In particular
 * this macro does not provide any synchronization with the previous owner.
 */
#define	vm_page_xbusy_claim(m) do {					\
	u_int _busy_lock;						\
									\
	vm_page_assert_xbusied_unchecked((m));				\
	do {								\
		_busy_lock = vm_page_busy_fetch(m);			\
	} while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock,	\
	    (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
} while (0)
#else
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#define	vm_page_xbusy_claim(m)
#endif

#if BYTE_ORDER == BIG_ENDIAN
#define	VM_PAGE_AFLAG_SHIFT	16
#else
#define	VM_PAGE_AFLAG_SHIFT	0
#endif

/*
 *	Load a snapshot of a page's 32-bit atomic state.
 */
static inline vm_page_astate_t
vm_page_astate_load(vm_page_t m)
{
	vm_page_astate_t a;

	a._bits = atomic_load_32(&m->a._bits);
	return (a);
}

/*
 *	Atomically compare and set a page's atomic state.
 */
static inline bool
vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
{

	KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
	    ("%s: invalid head requeue request for page %p", __func__, m));
	KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
	    ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
	KASSERT(new._bits != old->_bits,
	    ("%s: bits are unchanged", __func__));

	return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
}

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_set_32(addr, val);
}

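/*
 * For example (an illustrative sketch in the style of the page queue code),
 * a lockless update of the atomic state combines vm_page_astate_load() with
 * a vm_page_astate_fcmpset() loop; note that the difference check must come
 * first, since vm_page_astate_fcmpset() asserts that the new value differs
 * from the old one:
 *
 *	vm_page_astate_t old, new;
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		new = old;
 *		new.act_count = ACT_INIT;
 *	} while (old._bits != new._bits &&
 *	    !vm_page_astate_fcmpset(m, &old, new));
 */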
/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_BUSY_ASSERT(m);
	m->dirty = 0;
}

static inline uint8_t
_vm_page_queue(vm_page_astate_t as)
{

	if ((as.flags & PGA_DEQUEUE) != 0)
		return (PQ_NONE);
	return (as.queue);
}

/*
 *	vm_page_queue:
 *
 *	Return the index of the queue containing m.
 */
static inline uint8_t
vm_page_queue(vm_page_t m)
{

	return (_vm_page_queue(vm_page_astate_load(m)));
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{
	uint8_t queue;

	queue = vm_page_queue(m);
	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
}

/*
 *	vm_page_drop:
 *
 *	Release a reference to a page and return the old reference count.
 */
static inline u_int
vm_page_drop(vm_page_t m, u_int val)
{
	u_int old;

	/*
	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
	 * page structure are visible before it is freed.
	 */
	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(&m->ref_count, -val);
	KASSERT(old != VPRC_BLOCKED,
	    ("vm_page_drop: page %p has an invalid refcount value", m));
	return (old);
}

/*
 *	vm_page_wired:
 *
 *	Perform a racy check to determine whether a reference prevents the page
 *	from being reclaimable.  If the page's object is locked, and the page is
 *	unmapped and exclusively busied by the current thread, no new wirings
 *	may be created.
 */
static inline bool
vm_page_wired(vm_page_t m)
{

	return (VPRC_WIRE_COUNT(m->ref_count) > 0);
}

static inline bool
vm_page_all_valid(vm_page_t m)
{

	return (m->valid == VM_PAGE_BITS_ALL);
}

static inline bool
vm_page_any_valid(vm_page_t m)
{

	return (m->valid != 0);
}

static inline bool
vm_page_none_valid(vm_page_t m)
{

	return (m->valid == 0);
}

static inline int
vm_page_domain(vm_page_t m __numa_used)
{
#ifdef NUMA
	int domn, segind;

	segind = m->segind;
	KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
	domn = vm_phys_segs[segind].domain;
	KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
	return (domn);
#else
	return (0);
#endif
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */