/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>
#include <vm/_vm_phys.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using one of, or a combination of, several locks.  If a
 *	field is annotated with two of these locks then holding either is
 *	sufficient for read access but both are required for write access.
 *	The queue lock for a page depends on the value of its queue field and is
 *	described in detail below.
 *
 *	The following annotations are possible:
 *	(A) the field must be accessed using atomic(9) and may require
 *	    additional synchronization.
 *	(B) the page busy lock.
 *	(C) the field is immutable.
 *	(F) the per-domain lock for the free queues.
 *	(M) Machine dependent, defined by pmap layer.
 *	(O) the object that the page belongs to.
 *	(Q) the page's queue lock.
 *
 *	The busy lock is an embedded reader-writer lock that protects the
 *	page's contents and identity (i.e., its <object, pindex> tuple) as
 *	well as certain valid/dirty modifications.  To avoid bloating the
 *	page structure, the busy lock lacks some of the features available
 *	in the kernel's general-purpose synchronization primitives.  As a
 *	result, busy lock ordering rules are not verified, lock recursion is
 *	not detected, and an attempt to xbusy a busy page or sbusy an xbusy
 *	page will trigger a panic rather than causing the thread to block.
 *	vm_page_sleep_if_busy() can be used to sleep until the page's busy
 *	state changes, after which the caller must re-lookup the page and
 *	re-evaluate its state.  vm_page_busy_acquire() will block until
 *	the lock is acquired.
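 *
 *	As an illustrative sketch (m and the surrounding logic are assumed),
 *	a thread that must modify a page's contents might do:
 *
 *		(void)vm_page_busy_acquire(m, 0);
 *		... modify the page's contents ...
 *		vm_page_xunbusy(m);
 *
 *	where passing 0 requests an exclusive busy and may sleep, while
 *	VM_ALLOC_SBUSY in its place would request a shared busy.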
 *
 *	The valid field is protected by the page busy lock (B) and object
 *	lock (O).  Transitions from invalid to valid are generally done
 *	via I/O or zero filling and do not require the object lock.
 *	These must be protected with the busy lock to prevent page-in or
 *	creation races.  Page invalidation generally happens as a result
 *	of truncate or msync.  When invalidated, pages must not be present
 *	in pmap and must hold the object lock to prevent concurrent
 *	speculative read-only mappings that do not require busy.  I/O
 *	routines may check for validity without a lock if they are prepared
 *	to handle invalidation races with higher level locks (vnode) or are
 *	unconcerned with races so long as they hold a reference to prevent
 *	recycling.  When a valid bit is set while holding a shared busy
 *	lock, (A) atomic operations are used to protect against concurrent
 *	modification.
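 *
 *	For example (sketch only), an I/O completion path that has just
 *	filled an exclusively busied page might mark it fully valid with:
 *
 *		vm_page_assert_busied(m);
 *		vm_page_valid(m);
 *		vm_page_xunbusy(m);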
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is a mix of machine dependent (M) and busy (B).  In
 *	the machine-independent layer, the page busy lock must be held to
 *	operate on the field.  However, the pmap layer is permitted to
 *	set all bits within the field without holding that lock.  If the
 *	underlying architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().  An
 *	exclusive busy lock combined with pmap_remove_{write/all}() is the
 *	only way to ensure a page cannot become dirty.  I/O generally
 *	removes the page from pmap to ensure exclusive access and atomic
 *	writes.
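 *
 *	For instance (sketch), preventing further dirtying of a mapped
 *	page combines the two mechanisms:
 *
 *		(void)vm_page_busy_acquire(m, 0);
 *		if (!vm_page_try_remove_write(m))
 *			... the attempt fails if the page is wired ...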
 *
 *	The ref_count field tracks references to the page.  References that
 *	prevent the page from being reclaimable are called wirings and are
 *	counted in the low bits of ref_count.  The containing object's
 *	reference, if one exists, is counted using the VPRC_OBJREF bit in the
 *	ref_count field.  Additionally, the VPRC_BLOCKED bit is used to
 *	atomically check for wirings and prevent new wirings via
 *	pmap_extract_and_hold().  When a page belongs to an object, it may be
 *	wired only when the object is locked, or the page is busy, or by
 *	pmap_extract_and_hold().  As a result, if the object is locked and the
 *	page is not busy (or is exclusively busied by the current thread), and
 *	the page is unmapped, its wire count will not increase.  The ref_count
 *	field is updated using atomic operations in most cases, except when it
 *	is known that no other references to the page exist, such as in the page
 *	allocator.  A page may be present in the page queues, or even actively
 *	scanned by the page daemon, without an explicitly counted reference.
 *	The page daemon must therefore handle the possibility of a concurrent
 *	free of the page.
 *
 *	The queue state of a page consists of the queue and act_count fields of
 *	its atomically updated state, and the subset of atomic flags specified
 *	by PGA_QUEUE_STATE_MASK.  The queue field contains the page's page queue
 *	index, or PQ_NONE if it does not belong to a page queue.  To modify the
 *	queue field, the page queue lock corresponding to the old value must be
 *	held, unless that value is PQ_NONE, in which case the queue index must
 *	be updated using an atomic RMW operation.  There is one exception to
 *	this rule: the page daemon may transition the queue field from
 *	PQ_INACTIVE to PQ_NONE immediately prior to freeing the page during an
 *	inactive queue scan.  At that point the page is already dequeued and no
 *	other references to that vm_page structure can exist.  The PGA_ENQUEUED
 *	flag, when set, indicates that the page structure is physically inserted
 *	into the queue corresponding to the page's queue index, and may only be
 *	set or cleared with the corresponding page queue lock held.
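 *
 *	A consistent snapshot of this state can be read without any lock
 *	using vm_page_astate_load(); for example (sketch):
 *
 *		vm_page_astate_t as = vm_page_astate_load(m);
 *		if (as.queue != PQ_NONE && (as.flags & PGA_ENQUEUED) != 0)
 *			... the page is linked into a page queue ...
 *
 *	though the state may of course change as soon as it has been read.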
 *
 *	To avoid contention on page queue locks, page queue operations (enqueue,
 *	dequeue, requeue) are batched using fixed-size per-CPU queues.  A
 *	deferred operation is requested by setting one of the flags in
 *	PGA_QUEUE_OP_MASK and inserting an entry into a batch queue.  When a
 *	queue is full, an attempt to insert a new entry will lock the page
 *	queues and trigger processing of the pending entries.  The
 *	type-stability of vm_page structures is crucial to this scheme since the
 *	processing of entries in a given batch queue may be deferred
 *	indefinitely.  In particular, a page may be freed with pending batch
 *	queue entries.  The page queue operation flags must be set using atomic
 *	RMW operations.
 */

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif

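/*
 * Each bit of a vm_page_bits_t tracks one DEV_BSIZE (512-byte) chunk of a
 * page, so the chosen type provides PAGE_SIZE / DEV_BSIZE bits: 8 bits for
 * 4 KB pages, 16 for 8 KB, and so on, with VM_PAGE_BITS_ALL being all of
 * those bits set.
 */
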
typedef union vm_page_astate {
	struct {
		uint16_t flags;
		uint8_t	queue;
		uint8_t act_count;
	};
	uint32_t _bits;
} vm_page_astate_t;

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
		struct {
			void *slab;
			void *zone;
		} uma;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page (C) */
	struct md_page md;		/* machine dependent stuff */
	u_int ref_count;		/* page references (A) */
	u_int busy_lock;		/* busy owners lock (A) */
	union vm_page_astate a;		/* state accessed atomically (A) */
	uint8_t order;			/* index of the buddy queue (F) */
	uint8_t pool;			/* vm_phys freepool index (F) */
	uint8_t flags;			/* page PG_* flags (P) */
	uint8_t oflags;			/* page VPO_* flags (O) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index (C) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* valid DEV_BSIZE chunk map (O,B) */
	vm_page_bits_t dirty;		/* dirty DEV_BSIZE chunk map (M,B) */
};

/*
 * Special bits used in the ref_count field.
 *
 * ref_count is normally used to count wirings that prevent the page from being
 * reclaimed, but also supports several special types of references that do not
 * prevent reclamation.  Accesses to the ref_count field must be atomic unless
 * the page is unallocated.
 *
 * VPRC_OBJREF is the reference held by the containing object.  It can be set
 * or cleared only when the corresponding object's write lock is held.
 *
 * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
 * attempting to tear down all mappings of a given page.  The page busy lock and
 * object write lock must both be held in order to set or clear this bit.
 */
#define	VPRC_BLOCKED	0x40000000u	/* mappings are being removed */
#define	VPRC_OBJREF	0x80000000u	/* object reference, cleared with (O) */
#define	VPRC_WIRE_COUNT(c)	((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
#define	VPRC_WIRE_COUNT_MAX	(~(VPRC_BLOCKED | VPRC_OBJREF))

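/*
 * For illustration, with the object write lock held a caller might test
 * for wirings as follows (sketch):
 *
 *	if (VPRC_WIRE_COUNT(atomic_load_int(&m->ref_count)) != 0)
 *		... the page is wired and must not be reclaimed ...
 *
 * since VPRC_WIRE_COUNT() masks off the VPRC_OBJREF and VPRC_BLOCKED bits,
 * leaving only the wiring count.
 */
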
/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 * 	 indicates that the page is not under PV management but
 * 	 otherwise should be treated as a normal page.  Pages not
 * 	 under PV management cannot be paged out via the
 * 	 object/vm_page_t because there is no knowledge of their pte
 * 	 mappings, and such pages are also not on any PQ queue.
 *
 */
#define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, although support for owner identity has been removed
 * because of size constraints.  Checks for lock recursion are therefore not
 * possible, and the effectiveness of lock assertions is somewhat reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)
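
/*
 * A worked example of the encoding (illustrative only): the low three bits
 * hold the flags and the remaining bits count shared owners, so
 *
 *	VPB_SHARERS_WORD(1) == (1 << 3) | VPB_BIT_SHARED == 0x09
 *	VPB_SHARERS(0x09) == 1
 *
 * and a sharer is added or dropped with a single atomic add or subtract
 * of VPB_ONE_SHARER.
 */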

#define	VPB_SINGLE_EXCLUSIVE	VPB_BIT_EXCLUSIVE
#ifdef INVARIANTS
#define	VPB_CURTHREAD_EXCLUSIVE						\
	(VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
#else
#define	VPB_CURTHREAD_EXCLUSIVE	VPB_SINGLE_EXCLUSIVE
#endif

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)

/* Freed lock blocks both shared and exclusive. */
#define	VPB_FREED		(0xffffffff - VPB_BIT_SHARED)

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

#ifdef _KERNEL
extern vm_page_t bogus_page;
#endif	/* _KERNEL */

extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa) 			\
	do {		   			\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 *
 * PGA_NOSYNC must be set and cleared with the page busy lock held.
 *
 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
 * from a page queue, respectively.  It determines whether the plinks.q field
 * of the page is valid.  To set or clear this flag, the page's "queue" field
 * must be a valid queue index, and the corresponding page queue lock must be
 * held.
 *
 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
 * queue, and cleared when the dequeue request is processed.  A page may
 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
 * is requested after the page is scheduled to be enqueued but before it is
 * actually inserted into the page queue.
 *
 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
 * in its page queue.
 *
 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
 * the inactive queue, thus bypassing LRU.
 *
 * The PGA_DEQUEUE, PGA_REQUEUE and PGA_REQUEUE_HEAD flags must be set using an
 * atomic RMW operation to ensure that the "queue" field is a valid queue index,
 * and the corresponding page queue lock must be held when clearing any of the
 * flags.
 *
 * PGA_SWAP_FREE is used to defer freeing swap space to the pageout daemon
 * when the context that dirties the page does not have the object write lock
 * held.
 */
#define	PGA_WRITEABLE	0x0001		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x0002		/* page has been referenced */
#define	PGA_EXECUTABLE	0x0004		/* page may be mapped executable */
#define	PGA_ENQUEUED	0x0008		/* page is enqueued in a page queue */
#define	PGA_DEQUEUE	0x0010		/* page is due to be dequeued */
#define	PGA_REQUEUE	0x0020		/* page is due to be requeued */
#define	PGA_REQUEUE_HEAD 0x0040		/* page requeue should bypass LRU */
#define	PGA_NOSYNC	0x0080		/* do not collect for syncer */
#define	PGA_SWAP_FREE	0x0100		/* page with swap space was dirtied */
#define	PGA_SWAP_SPACE	0x0200		/* page has allocated swap space */

#define	PGA_QUEUE_OP_MASK	(PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_QUEUE_OP_MASK)
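
/*
 * For example (sketch), a kernel module that wishes to record a reference
 * to a page should call
 *
 *	vm_page_reference(m);
 *
 * rather than setting PGA_REFERENCED directly with vm_page_aflag_set(),
 * per the rules above.
 */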

/*
 * Page flags.  Updates to these flags are not synchronized, and thus they must
 * be set during page allocation or free to avoid races.
 *
 * The PG_PCPU_CACHE flag is set at allocation time if the page was
 * allocated from a per-CPU cache.  It is cleared the next time that the
 * page is allocated from the physical memory allocator.
 */
#define	PG_PCPU_CACHE	0x01		/* was allocated from per-CPU caches */
#define	PG_FICTITIOUS	0x02		/* physical page doesn't exist */
#define	PG_ZERO		0x04		/* page is zeroed */
#define	PG_MARKER	0x08		/* special queue marker page */
#define	PG_NODUMP	0x10		/* don't include this page in a dump */
#define	PG_NOFREE	0x20		/* page should never be freed. */

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL

#include <sys/kassert.h>
#include <machine/atomic.h>
struct pctrie_iter;

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs. The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

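/*
 * For illustration, the intended round trip for the address of a resident
 * page is (sketch):
 *
 *	VM_PAGE_TO_PHYS(PHYS_TO_VM_PAGE(pa)) == trunc_page(pa)
 *
 * since PHYS_TO_VM_PAGE() resolves any address within a page to that
 * page's vm_page structure.
 */
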
/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags and ignore others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 dedicated for flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (n) - vm_page_alloc_noobj() and vm_page_alloc_freelist() support the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acn) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acn) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acgnp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acgnp) Allocate a zeroed page */
#define	VM_ALLOC_NORECLAIM	0x0080	/* (c) Do not reclaim after failure */
#define	VM_ALLOC_NOFREE		0x0100	/* (an) Page will never be released */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_NOCREAT	0x0400	/* (gp) Don't create a page */
#define	VM_ALLOC_AVAIL1		0x0800
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acgnp) Do not sleep */
#define	VM_ALLOC_COUNT_MAX	0xffff
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT_MASK	(VM_ALLOC_COUNT(VM_ALLOC_COUNT_MAX))
#define	VM_ALLOC_COUNT(count)	({				\
	KASSERT((count) <= VM_ALLOC_COUNT_MAX,			\
	    ("%s: invalid VM_ALLOC_COUNT value", __func__));	\
	(count) << VM_ALLOC_COUNT_SHIFT;			\
})
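
/*
 * An illustrative sketch (object, pindex and error handling assumed):
 * allocate a wired page, sleeping as needed, and ensure it is zero-filled:
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
 *	    VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 *
 * For the vm_page_alloc*() functions VM_ALLOC_ZERO only expresses a
 * preference for an already-zeroed page; the PG_ZERO check covers the
 * case where none was available.
 */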

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT))
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK))
		pflags |= VM_ALLOC_WAITOK;
	if ((malloc_flags & M_NORECLAIM))
		pflags |= VM_ALLOC_NORECLAIM;
	if ((malloc_flags & M_NEVERFREED))
		pflags |= VM_ALLOC_NOFREE;
	return (pflags);
}
#endif

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4
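
/*
 * For example (sketch), a superpage promotion path might require that
 * every base page of a candidate superpage is fully valid and unbusied:
 *
 *	if (vm_page_ps_test(m, 1, PS_ALL_VALID | PS_NONE_BUSY, NULL))
 *		... promotion may proceed ...
 *
 * where psind 1 names the first superpage size in pagesizes[].
 */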

bool vm_page_busy_acquire(vm_page_t m, int allocflags);
void vm_page_busy_downgrade(vm_page_t m);
int vm_page_busy_tryupgrade(vm_page_t m);
bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags);
void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags);
void vm_page_free(vm_page_t m);
void vm_page_iter_free(struct pctrie_iter *);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_mpred(vm_object_t, vm_pindex_t);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_noobj(int);
vm_page_t vm_page_alloc_noobj_domain(int, int);
vm_page_t vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
    int allocflags, vm_page_t *ma, int count);
int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
    int allocflags);
int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
    vm_pindex_t pindex, int allocflags);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
vm_page_t vm_page_iter_lookup_ge(struct pctrie_iter *, vm_pindex_t);
void vm_page_free_invalid(vm_page_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_invalid(vm_page_t m);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
void vm_page_iter_init(struct pctrie_iter *, vm_object_t);
void vm_page_iter_limit_init(struct pctrie_iter *, vm_object_t, vm_pindex_t);
vm_page_t vm_page_iter_lookup(struct pctrie_iter *, vm_pindex_t);
vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
void vm_page_pqbatch_drain(void);
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
    vm_page_astate_t new);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
int vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
int vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
int vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    int desired_runs);
void vm_page_reference(vm_page_t m);
#define	VPR_TRYFREE	0x01
#define	VPR_NOREUSE	0x02
void vm_page_release(vm_page_t m, int flags);
void vm_page_release_locked(vm_page_t m, int flags);
vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
bool vm_page_remove(vm_page_t);
bool vm_page_iter_remove(struct pctrie_iter *);
bool vm_page_remove_xbusy(vm_page_t);
int vm_page_rename(struct pctrie_iter *, vm_object_t, vm_pindex_t);
void vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mold);
int vm_page_sbusied(vm_page_t m);
vm_page_bits_t vm_page_set_dirty(vm_page_t m);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_remove_all(vm_page_t m);
bool vm_page_try_remove_write(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
int vm_page_tryxbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
void vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
bool vm_page_wire_mapped(vm_page_t m);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_hard_unchecked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
void vm_page_valid(vm_page_t m);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
int vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_busy_fetch(m)	atomic_load_int(&(m)->busy_lock)

#define	vm_page_assert_busied(m)					\
	KASSERT(vm_page_busied(m),					\
	    ("vm_page_assert_busied: page %p not busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) !=		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_unbusied: page %p busy_lock %#x owned"	\
	     " by me (%p) @ %s:%d",					\
	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__))

#define	vm_page_assert_xbusied_unchecked(m) do {			\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__));					\
} while (0)
#define	vm_page_assert_xbusied(m) do {					\
	vm_page_assert_xbusied_unchecked(m);				\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) ==		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"	\
	     " by me (%p) @ %s:%d",					\
	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__));	\
} while (0)

#define	vm_page_busied(m)						\
	(vm_page_busy_fetch(m) != VPB_UNBUSIED)

#define	vm_page_xbusied(m)						\
	((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)

#define	vm_page_busy_freed(m)						\
	(vm_page_busy_fetch(m) == VPB_FREED)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
#define	vm_page_xunbusy_unchecked(m) do {				\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard_unchecked(m);			\
} while (0)

#ifdef INVARIANTS
void vm_page_object_busy_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	vm_page_object_busy_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
/*
 * Claim ownership of a page's xbusy state.  In non-INVARIANTS kernels this
 * operation is a no-op since ownership is not tracked.  In particular
 * this macro does not provide any synchronization with the previous owner.
 */
#define	vm_page_xbusy_claim(m) do {					\
	u_int _busy_lock;						\
									\
	vm_page_assert_xbusied_unchecked((m));				\
	do {								\
		_busy_lock = vm_page_busy_fetch(m);			\
	} while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock,	\
	    (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
} while (0)
#else
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#define	vm_page_xbusy_claim(m)
#endif

#if BYTE_ORDER == BIG_ENDIAN
#define	VM_PAGE_AFLAG_SHIFT	16
#else
#define	VM_PAGE_AFLAG_SHIFT	0
#endif

81141fd4b94SMark Johnston /*
8126fbaf685SMark Johnston  *	Load a snapshot of a page's 32-bit atomic state.
81341fd4b94SMark Johnston  */
8146fbaf685SMark Johnston static inline vm_page_astate_t
vm_page_astate_load(vm_page_t m)8156fbaf685SMark Johnston vm_page_astate_load(vm_page_t m)
8166fbaf685SMark Johnston {
8176fbaf685SMark Johnston 	vm_page_astate_t a;
81841fd4b94SMark Johnston 
819cbc080b4SMark Johnston 	a._bits = atomic_load_32(&m->a._bits);
8206fbaf685SMark Johnston 	return (a);
8216fbaf685SMark Johnston }
8226fbaf685SMark Johnston 
8236fbaf685SMark Johnston /*
8246fbaf685SMark Johnston  *	Atomically compare and set a page's atomic state.
8256fbaf685SMark Johnston  */
8266fbaf685SMark Johnston static inline bool
vm_page_astate_fcmpset(vm_page_t m,vm_page_astate_t * old,vm_page_astate_t new)8276fbaf685SMark Johnston vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
8286fbaf685SMark Johnston {
8296fbaf685SMark Johnston 
8306fbaf685SMark Johnston 	KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
8316fbaf685SMark Johnston 	    ("%s: invalid head requeue request for page %p", __func__, m));
8326fbaf685SMark Johnston 	KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
8336fbaf685SMark Johnston 	    ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
8346fbaf685SMark Johnston 	KASSERT(new._bits != old->_bits,
8356fbaf685SMark Johnston 	    ("%s: bits are unchanged", __func__));
8366fbaf685SMark Johnston 
8376fbaf685SMark Johnston 	return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
8386fbaf685SMark Johnston }
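
/*
 * Illustrative sketch (hypothetical helper): updates of the atomic state
 * follow a load/modify/fcmpset pattern.  This example clears a page's
 * queue state, using the PGA_QUEUE_STATE_MASK defined earlier in this
 * file; the real page queue code performs additional bookkeeping.
 */
static inline void
example_astate_dequeue(vm_page_t m)
{
	vm_page_astate_t old, new;

	old = vm_page_astate_load(m);
	do {
		new = old;
		new.flags &= ~PGA_QUEUE_STATE_MASK;
		new.queue = PQ_NONE;
		if (new._bits == old._bits)
			return;		/* fcmpset requires that the bits change */
	} while (!vm_page_astate_fcmpset(m, &old, &new));
}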
839369763e3SAlan Cox 
840369763e3SAlan Cox /*
841369763e3SAlan Cox  *	Clear the given aflag bits in the specified page.
842369763e3SAlan Cox  */
843369763e3SAlan Cox static inline void
8443a2ba997SMark Johnston vm_page_aflag_clear(vm_page_t m, uint16_t bits)
845369763e3SAlan Cox {
846369763e3SAlan Cox 	uint32_t *addr, val;
847369763e3SAlan Cox 
848369763e3SAlan Cox 	/*
849369763e3SAlan Cox 	 * Access the whole 32-bit word containing the aflags field with an
850369763e3SAlan Cox 	 * atomic update.  Parallel non-atomic updates to the other fields
851369763e3SAlan Cox 	 * within this word are handled properly by the atomic update.
852369763e3SAlan Cox 	 */
8536fbaf685SMark Johnston 	addr = (void *)&m->a;
8547cdeaf33SMark Johnston 	val = bits << VM_PAGE_AFLAG_SHIFT;
855369763e3SAlan Cox 	atomic_clear_32(addr, val);
856369763e3SAlan Cox }
857369763e3SAlan Cox 
858369763e3SAlan Cox /*
859369763e3SAlan Cox  *	Set the given aflag bits in the specified page.
860369763e3SAlan Cox  */
861369763e3SAlan Cox static inline void
8623a2ba997SMark Johnston vm_page_aflag_set(vm_page_t m, uint16_t bits)
863369763e3SAlan Cox {
864369763e3SAlan Cox 	uint32_t *addr, val;
865369763e3SAlan Cox 
866afb69e6bSKonstantin Belousov 	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);
867369763e3SAlan Cox 
868369763e3SAlan Cox 	/*
869369763e3SAlan Cox 	 * Access the whole 32-bit word containing the aflags field with an
870369763e3SAlan Cox 	 * atomic update.  Parallel non-atomic updates to the other fields
871369763e3SAlan Cox 	 * within this word are handled properly by the atomic update.
872369763e3SAlan Cox 	 */
8736fbaf685SMark Johnston 	addr = (void *)&m->a;
8747cdeaf33SMark Johnston 	val = bits << VM_PAGE_AFLAG_SHIFT;
875369763e3SAlan Cox 	atomic_set_32(addr, val);
876369763e3SAlan Cox }
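
/*
 * Illustrative example (hypothetical helper): the pmap layer records
 * hardware reference bits by setting PGA_REFERENCED, and the page daemon
 * clears the flag once the reference has been accounted for.
 */
static inline void
example_toggle_referenced(vm_page_t m)
{
	vm_page_aflag_set(m, PGA_REFERENCED);	/* e.g. from pmap code */
	vm_page_aflag_clear(m, PGA_REFERENCED);	/* e.g. from the page daemon */
}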
877369763e3SAlan Cox 
878e8bcf696SMark Johnston /*
879eddc9291SAlan Cox  *	vm_page_dirty:
880eddc9291SAlan Cox  *
881eddc9291SAlan Cox  *	Set all bits in the page's dirty field.
882eddc9291SAlan Cox  *
883eddc9291SAlan Cox  *	The object containing the specified page must be locked if the
884eddc9291SAlan Cox  *	call is made from the machine-independent layer.
885eddc9291SAlan Cox  *
886eddc9291SAlan Cox  *	See vm_page_clear_dirty_mask().
887eddc9291SAlan Cox  */
888eddc9291SAlan Cox static __inline void
889eddc9291SAlan Cox vm_page_dirty(vm_page_t m)
890eddc9291SAlan Cox {
891eddc9291SAlan Cox 
892eddc9291SAlan Cox 	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
893f4b36404SMatt Macy #if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
894eddc9291SAlan Cox 	vm_page_dirty_KBI(m);
895eddc9291SAlan Cox #else
896eddc9291SAlan Cox 	m->dirty = VM_PAGE_BITS_ALL;
897eddc9291SAlan Cox #endif
898eddc9291SAlan Cox }
899eddc9291SAlan Cox 
900eddc9291SAlan Cox /*
901f3b676f0SAlan Cox  *	vm_page_undirty:
902f3b676f0SAlan Cox  *
903f3b676f0SAlan Cox  *	Mark the page as not dirty.  Note: this does not clear the pmap's modify bits.
904f3b676f0SAlan Cox  */
905f3b676f0SAlan Cox static __inline void
906f3b676f0SAlan Cox vm_page_undirty(vm_page_t m)
907f3b676f0SAlan Cox {
9083b1025d2SKonstantin Belousov 
909205be21dSJeff Roberson 	VM_PAGE_OBJECT_BUSY_ASSERT(m);
910f3b676f0SAlan Cox 	m->dirty = 0;
911f3b676f0SAlan Cox }
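
/*
 * Illustrative sketch (hypothetical helper, I/O elided): a pager dirties a
 * page before writing it out and undirties it once the write completes,
 * with the page exclusively busied across the operation.
 */
static inline void
example_clean_page(vm_page_t m)
{
	vm_page_assert_xbusied(m);
	vm_page_dirty(m);	/* e.g. after a write fault */
	/* ... write the page's contents to backing store ... */
	vm_page_undirty(m);	/* the copy on disk is now up to date */
}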
912f3b676f0SAlan Cox 
913f3f38e25SMark Johnston static inline uint8_t
914f3f38e25SMark Johnston _vm_page_queue(vm_page_astate_t as)
915f3f38e25SMark Johnston {
916f3f38e25SMark Johnston 
917f3f38e25SMark Johnston 	if ((as.flags & PGA_DEQUEUE) != 0)
918f3f38e25SMark Johnston 		return (PQ_NONE);
919f3f38e25SMark Johnston 	return (as.queue);
920f3f38e25SMark Johnston }
921f3f38e25SMark Johnston 
9221b5c869dSMark Johnston /*
9231b5c869dSMark Johnston  *	vm_page_queue:
9241b5c869dSMark Johnston  *
925f3f38e25SMark Johnston  *	Return the index of the queue containing m, or PQ_NONE if a dequeue is pending.
9261b5c869dSMark Johnston  */
9271b5c869dSMark Johnston static inline uint8_t
9281b5c869dSMark Johnston vm_page_queue(vm_page_t m)
9291b5c869dSMark Johnston {
9301b5c869dSMark Johnston 
931f3f38e25SMark Johnston 	return (_vm_page_queue(vm_page_astate_load(m)));
9321b5c869dSMark Johnston }
9331b5c869dSMark Johnston 
934ebcddc72SAlan Cox static inline bool
935ebcddc72SAlan Cox vm_page_active(vm_page_t m)
936ebcddc72SAlan Cox {
937ebcddc72SAlan Cox 
9381b5c869dSMark Johnston 	return (vm_page_queue(m) == PQ_ACTIVE);
939ebcddc72SAlan Cox }
940ebcddc72SAlan Cox 
941ebcddc72SAlan Cox static inline bool
942ebcddc72SAlan Cox vm_page_inactive(vm_page_t m)
943ebcddc72SAlan Cox {
944ebcddc72SAlan Cox 
9451b5c869dSMark Johnston 	return (vm_page_queue(m) == PQ_INACTIVE);
946ebcddc72SAlan Cox }
947ebcddc72SAlan Cox 
948ebcddc72SAlan Cox static inline bool
949ebcddc72SAlan Cox vm_page_in_laundry(vm_page_t m)
950ebcddc72SAlan Cox {
9511b5c869dSMark Johnston 	uint8_t queue;
952ebcddc72SAlan Cox 
9531b5c869dSMark Johnston 	queue = vm_page_queue(m);
9541b5c869dSMark Johnston 	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
9555cd29d0fSMark Johnston }
9565cd29d0fSMark Johnston 
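/*
 *	vm_page_clearref:
 *
 *	Drop all wirings and transient references to a page, preserving only
 *	the VPRC_BLOCKED and VPRC_OBJREF bits of the reference count.
 */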
95745cde0e4SKonstantin Belousov static inline void
95845cde0e4SKonstantin Belousov vm_page_clearref(vm_page_t m)
95945cde0e4SKonstantin Belousov {
96045cde0e4SKonstantin Belousov 	u_int r;
96145cde0e4SKonstantin Belousov 
96245cde0e4SKonstantin Belousov 	r = m->ref_count;
96345cde0e4SKonstantin Belousov 	while (atomic_fcmpset_int(&m->ref_count, &r, r & (VPRC_BLOCKED |
96445cde0e4SKonstantin Belousov 	    VPRC_OBJREF)) == 0)
96545cde0e4SKonstantin Belousov 		;
96645cde0e4SKonstantin Belousov }
96745cde0e4SKonstantin Belousov 
9685cd29d0fSMark Johnston /*
969fee2a2faSMark Johnston  *	vm_page_drop:
970fee2a2faSMark Johnston  *
971fee2a2faSMark Johnston  *	Release val references to a page and return the old reference count.
972fee2a2faSMark Johnston  */
973fee2a2faSMark Johnston static inline u_int
974fee2a2faSMark Johnston vm_page_drop(vm_page_t m, u_int val)
975fee2a2faSMark Johnston {
97638547d59SMark Johnston 	u_int old;
977fee2a2faSMark Johnston 
978fee2a2faSMark Johnston 	/*
979fee2a2faSMark Johnston 	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
980fee2a2faSMark Johnston 	 * page structure are visible before it is freed.
981fee2a2faSMark Johnston 	 */
982fee2a2faSMark Johnston 	atomic_thread_fence_rel();
98338547d59SMark Johnston 	old = atomic_fetchadd_int(&m->ref_count, -val);
98438547d59SMark Johnston 	KASSERT(old != VPRC_BLOCKED,
98538547d59SMark Johnston 	    ("vm_page_drop: page %p has an invalid refcount value", m));
98638547d59SMark Johnston 	return (old);
987fee2a2faSMark Johnston }
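
/*
 * Illustrative example (hypothetical helper): a caller dropping a single
 * reference can detect that it released the last one, making it
 * responsible for freeing the page.  Real callers must also account for
 * VPRC_OBJREF; this sketch ignores it.
 */
static inline bool
example_drop_last_ref(vm_page_t m)
{
	return (vm_page_drop(m, 1) == 1);
}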
988fee2a2faSMark Johnston 
989fee2a2faSMark Johnston /*
990eeacb3b0SMark Johnston  *	vm_page_wired:
9911d3a1bcfSMark Johnston  *
992fee2a2faSMark Johnston  *	Perform a racy check to determine whether a reference prevents the page
993fee2a2faSMark Johnston  *	from being reclaimable.  If the page's object is locked, and the page is
994a9ea09e5SMark Johnston  *	unmapped and exclusively busied by the current thread, no new wirings
995a9ea09e5SMark Johnston  *	may be created.
9961d3a1bcfSMark Johnston  */
9971d3a1bcfSMark Johnston static inline bool
998d842aa51SMark Johnston vm_page_wired(vm_page_t m)
999d842aa51SMark Johnston {
1000d842aa51SMark Johnston 
1001fee2a2faSMark Johnston 	return (VPRC_WIRE_COUNT(m->ref_count) > 0);
1002d842aa51SMark Johnston }
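
/*
 * Illustrative sketch (hypothetical helper): because vm_page_wired() is
 * racy, reclaim code rechecks it after excluding new wirings.  This sketch
 * assumes the page's object is locked and that vm/pmap.h is included for
 * pmap_remove_all().
 */
static inline bool
example_page_reclaimable(vm_page_t m)
{
	bool reclaimable;

	if (vm_page_wired(m) || !vm_page_tryxbusy(m))
		return (false);
	pmap_remove_all(m);	/* unmap so that no new wirings can appear */
	reclaimable = !vm_page_wired(m);
	vm_page_xunbusy(m);
	return (reclaimable);
}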
1003d842aa51SMark Johnston 
10040012f373SJeff Roberson static inline bool
10050012f373SJeff Roberson vm_page_all_valid(vm_page_t m)
10060012f373SJeff Roberson {
10070012f373SJeff Roberson 
10080012f373SJeff Roberson 	return (m->valid == VM_PAGE_BITS_ALL);
10090012f373SJeff Roberson }
10100012f373SJeff Roberson 
10110012f373SJeff Roberson static inline bool
1012934bfc12SKonstantin Belousov vm_page_any_valid(vm_page_t m)
1013934bfc12SKonstantin Belousov {
1014934bfc12SKonstantin Belousov 
1015934bfc12SKonstantin Belousov 	return (m->valid != 0);
1016934bfc12SKonstantin Belousov }
1017934bfc12SKonstantin Belousov 
1018934bfc12SKonstantin Belousov static inline bool
10190012f373SJeff Roberson vm_page_none_valid(vm_page_t m)
10200012f373SJeff Roberson {
10210012f373SJeff Roberson 
10220012f373SJeff Roberson 	return (m->valid == 0);
10230012f373SJeff Roberson }
10240012f373SJeff Roberson 
1025431fb8abSMark Johnston static inline int
1026cb20a74cSStephen J. Kiernan vm_page_domain(vm_page_t m __numa_used)
1027431fb8abSMark Johnston {
1028431fb8abSMark Johnston #ifdef NUMA
1029431fb8abSMark Johnston 	int domn, segind;
1030431fb8abSMark Johnston 
1031431fb8abSMark Johnston 	segind = m->segind;
1032431fb8abSMark Johnston 	KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
1033431fb8abSMark Johnston 	domn = vm_phys_segs[segind].domain;
1034431fb8abSMark Johnston 	KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
1035431fb8abSMark Johnston 	return (domn);
1036431fb8abSMark Johnston #else
1037431fb8abSMark Johnston 	return (0);
1038431fb8abSMark Johnston #endif
1039431fb8abSMark Johnston }
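
/*
 * Illustrative example: the domain index is typically used to locate the
 * owning NUMA domain structure, e.g. VM_DOMAIN(vm_page_domain(m)) with the
 * VM_DOMAIN() accessor from vm_pagequeue.h.
 */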
1040431fb8abSMark Johnston 
1041c4473420SPeter Wemm #endif				/* _KERNEL */
1042df8bae1dSRodney W. Grimes #endif				/* !_VM_PAGE_ */