/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using one of, or a combination of, the lock on the
 *	object that the page belongs to (O), the page lock (P),
 *	the per-domain lock for the free queues (F), or the page's queue
 *	lock (Q).  The physical address of a page is used to select its page
 *	lock from a pool.  The queue lock for a page depends on the value of
 *	its queue field and is described in detail below.  If a field is
 *	annotated below with two of these locks, then holding either lock is
 *	sufficient for read access, but both locks are required for write
 *	access.  An annotation of (C) indicates that the field is immutable.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 *
 *	The page structure contains two counters which prevent page reuse.
 *	Both counters are protected by the page lock (P).  The hold
 *	counter counts transient references obtained via a pmap lookup, and
 *	is also used to prevent page reclamation in situations where it is
 *	undesirable to block other accesses to the page.  The wire counter
 *	is used to implement mlock(2) and is non-zero for pages containing
 *	kernel memory.  Pages that are wired or held will not be reclaimed
 *	or laundered by the page daemon, but are treated differently during
 *	a page queue scan: held pages remain at their position in the queue,
 *	while wired pages are removed from the queue and must later be
 *	re-enqueued appropriately by the unwiring thread.  It is legal to
 *	call vm_page_free() on a held page; doing so causes it to be removed
 *	from its object and page queue, and the page is released to the
 *	allocator once the last hold reference is dropped.  In contrast,
 *	wired pages may not be freed.
 *
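 *	As an illustrative sketch (not a canonical idiom), a transient hold
 *	that pins a page across a window where the object lock cannot be
 *	held might look like:
 *
 *		vm_page_lock(m);
 *		vm_page_hold(m);
 *		vm_page_unlock(m);
 *		... access the page's contents ...
 *		vm_page_lock(m);
 *		vm_page_unhold(m);
 *		vm_page_unlock(m);
 *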
 *	In some pmap implementations, the wire count of a page table page is
 *	used to track the number of populated entries.
 *
 *	The busy lock is an embedded reader-writer lock which protects the
 *	page's contents and identity (i.e., its <object, pindex> tuple) and
 *	interlocks with the object lock (O).  In particular, a page may be
 *	busied or unbusied only with the object write lock held.  To avoid
 *	bloating the page structure, the busy lock lacks some of the
 *	features available to the kernel's general-purpose synchronization
 *	primitives.  As a result, busy lock ordering rules are not verified,
 *	lock recursion is not detected, and an attempt to xbusy a busy page
 *	or sbusy an xbusy page will trigger a panic rather than causing the
 *	thread to block.  vm_page_sleep_if_busy() can be used to sleep until
 *	the page's busy state changes, after which the caller must re-lookup
 *	the page and re-evaluate its state.
 *
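 *	For example, a hypothetical lookup that waits for a busy page,
 *	assuming that the object "obj" is locked, might be written as:
 *
 *		for (;;) {
 *			m = vm_page_lookup(obj, pindex);
 *			if (m == NULL || !vm_page_sleep_if_busy(m, "pgwait"))
 *				break;
 *			(sleeping dropped the object lock; m is stale)
 *		}
 *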
 *	The queue field is the index of the page queue containing the
 *	page, or PQ_NONE if the page is not enqueued.  The queue lock of a
 *	page is the page queue lock corresponding to the page queue index,
 *	or the page lock (P) for the page if it is not enqueued.  To modify
 *	the queue field, the queue lock for the old value of the field must
 *	be held.  It is invalid for a page's queue field to transition
 *	between two distinct page queue indices.  That is, when updating
 *	the queue field, either the new value or the old value must be
 *	PQ_NONE.
 *
 *	To avoid contention on page queue locks, page queue operations
 *	(enqueue, dequeue, requeue) are batched using per-CPU queues.
 *	A deferred operation is requested by inserting an entry into a
 *	batch queue; the entry is simply a pointer to the page, and the
 *	request type is encoded in the page's aflags field using the values
 *	in PGA_QUEUE_STATE_MASK.  The type-stability of struct vm_page is
 *	crucial to this scheme since the processing of entries in a given
 *	batch queue may be deferred indefinitely.  In particular, a page
 *	may be freed before its pending batch queue entries have been
 *	processed.  The page lock (P) must be held to schedule a batched
 *	queue operation, and the page queue lock must be held in order to
 *	process batch queue entries for the page queue.
 */

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif

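/*
 * As a worked example: with PAGE_SIZE == 4096 and DEV_BSIZE == 512, a page
 * holds eight DEV_BSIZE chunks, so vm_page_bits_t is eight bits wide and
 * VM_PAGE_BITS_ALL is 0xff.  A hypothetical mask covering the first two
 * chunks would be vm_page_bits(0, 2 * DEV_BSIZE) == 0x03.
 */
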
struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
			void *pv;
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page (C) */
	struct md_page md;		/* machine dependent stuff */
	u_int wire_count;		/* wired down maps refs (P) */
	volatile u_int busy_lock;	/* busy owners lock */
	uint16_t hold_count;		/* page hold count (P) */
	uint16_t flags;			/* page PG_* flags (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t oflags;			/* page VPO_* flags (O) */
	uint8_t	queue;			/* page queue index (Q) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index (C) */
	uint8_t	order;			/* index of the buddy queue (F) */
	uint8_t pool;			/* vm_phys freepool index (F) */
	u_char	act_count;		/* page usage count (P) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 * 	 indicates that the page is not under PV management but
 * 	 otherwise should be treated as a normal page.  Pages not
 * 	 under PV management cannot be paged out via the
 * 	 object/vm_page_t because there is no knowledge of their pte
 * 	 mappings, and such pages are also not on any PQ queue.
 *
 */
#define	VPO_UNUSED01	0x01		/* --available-- */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x10		/* do not collect for syncer */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, although support for owner identity is removed because
 * of size constraints.  Checks on lock recursion are therefore not
 * possible, and the effectiveness of the lock assertions is somewhat
 * reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVER	VPB_BIT_EXCLUSIVE

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)

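/*
 * As a worked example of the encoding above, a busy_lock word with three
 * shared owners is VPB_SHARERS_WORD(3) == (3 << 3) | VPB_BIT_SHARED ==
 * 0x19, and VPB_SHARERS(0x19) recovers the count of 3.  VPB_UNBUSIED is
 * thus 0x01: zero sharers with only VPB_BIT_SHARED set.
 */
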
#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

#ifdef _KERNEL
extern vm_page_t bogus_page;
#endif	/* _KERNEL */

extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa) 			\
	do {		   			\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

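/*
 * Illustrative sketch (hypothetical): page locks are selected from the
 * pa_lock pool by physical address, so a caller tracking a "locked_pa"
 * cookie can release it conditionally:
 *
 *	vm_paddr_t locked_pa = 0;
 *
 *	PA_LOCK(pa);
 *	locked_pa = pa;
 *	...
 *	PA_UNLOCK_COND(locked_pa);
 */
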
#ifdef KLD_MODULE
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 *
 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
 * from a page queue, respectively.  It determines whether the plinks.q field
 * of the page is valid.  To set or clear this flag, the queue lock for the
 * page must be held: the page queue lock corresponding to the page's "queue"
 * field if its value is not PQ_NONE, and the page lock otherwise.
 *
 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
 * queue, and cleared when the dequeue request is processed.  A page may
 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
 * is requested after the page is scheduled to be enqueued but before it is
 * actually inserted into the page queue.  The page lock must be held to set
 * this flag, and the queue lock for the page must be held to clear it.
 *
 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
 * in its page queue.  The page lock must be held to set this flag, and the
 * queue lock for the page must be held to clear it.
 *
 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
 * the inactive queue, thus bypassing LRU.  The page lock must be held to
 * set this flag, and the queue lock for the page must be held to clear it.
 */
#define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x02		/* page has been referenced */
#define	PGA_EXECUTABLE	0x04		/* page may be mapped executable */
#define	PGA_ENQUEUED	0x08		/* page is enqueued in a page queue */
#define	PGA_DEQUEUE	0x10		/* page is due to be dequeued */
#define	PGA_REQUEUE	0x20		/* page is due to be requeued */
#define	PGA_REQUEUE_HEAD 0x40		/* page requeue should bypass LRU */

#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_DEQUEUE | PGA_REQUEUE | \
				PGA_REQUEUE_HEAD)

/*
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
#define	PG_ZERO		0x0008		/* page is zeroed */
#define	PG_MARKER	0x0010		/* special queue marker page */
#define	PG_NODUMP	0x0080		/* don't include this page in a dump */
#define	PG_UNHOLDFREE	0x0100		/* delayed free of a held page */

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs.  The correct vm_page_t
 * object is returned even for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/*
 * Page allocation parameters for the vm_page allocation functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags and ignore the others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 are dedicated to flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (f) - vm_page_alloc_freelist() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
#define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)

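/*
 * Illustrative sketch (not a canonical idiom): allocating a wired,
 * zero-filled page with no backing object, failing rather than sleeping
 * when memory is short.  With VM_ALLOC_ZERO, vm_page_alloc() only prefers
 * a prezeroed page, so the caller must still check PG_ZERO:
 *
 *	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 *	    VM_ALLOC_ZERO | VM_ALLOC_NOWAIT);
 *	if (m == NULL)
 *		return (NULL);
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */
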
#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT) != 0)
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK) != 0)
		pflags |= VM_ALLOC_WAITOK;
	return (pflags);
}
#endif

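/*
 * For example, malloc2vm_flags(M_NOWAIT | M_ZERO) yields
 * VM_ALLOC_SYSTEM | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT, since M_USE_RESERVE
 * is not set.
 */
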
/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4

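/*
 * Illustrative sketch (hypothetical): before promoting a run of base pages
 * to a superpage mapping, a caller might require that every base page be
 * valid and unbusied:
 *
 *	if (vm_page_ps_test(m, PS_ALL_VALID | PS_NONE_BUSY, NULL))
 *		...promote...
 */
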
void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_flash(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate(vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_alloc_freelist_domain(int, int, int);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
void vm_page_drain_pqbatch(void);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
bool vm_page_free_prep(vm_page_t m);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert(vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
void vm_page_remove(vm_page_t);
int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
void vm_page_requeue_locked(vm_page_t m);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_to_free(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
bool vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_maybelocked(vm_page_t m);
void vm_page_set_validclean(vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT(!vm_page_busied(m),					\
	    ("vm_page_assert_unbusied: page %p busy @ %s:%d",		\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_xbusied(m)					\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_busied(m)						\
	((m)->busy_lock != VPB_UNBUSIED)

#define	vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__,	\
		    (m));						\
} while (0)

#define	vm_page_tryxbusy(m)						\
	(atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,		\
	    VPB_SINGLE_EXCLUSIVER))

#define	vm_page_xbusied(m)						\
	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)

#define	vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__,	\
		    (m));						\
} while (0)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)

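/*
 * Illustrative sketch (hypothetical): holding a shared busy reference
 * across a read of the page's contents, with the object locked:
 *
 *	if (vm_page_trysbusy(m)) {
 *		...read from the page...
 *		vm_page_sunbusy(m);
 *	}
 */
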
#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
#else
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#endif

/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  In order that we can easily use a 32-bit operation, we
 * require that the aflags field be 32-bit aligned.
 */
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_clear: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_set: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if defined(KLD_MODULE) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_remque:
 *
 *	If the given page is in a page queue, then remove it from that page
 *	queue.
 *
 *	The page must be locked.
 */
static inline void
vm_page_remque(vm_page_t m)
{

	if (m->queue != PQ_NONE)
		vm_page_dequeue(m);
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}

static inline void
vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;

	mret = vm_page_replace(mnew, object, pindex);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));

	/* Unused if !INVARIANTS. */
	(void)mold;
	(void)mret;
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (m->queue == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (m->queue == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{

	return (m->queue == PQ_LAUNDRY || m->queue == PQ_UNSWAPPABLE);
}

/*
 *	vm_page_enqueued:
 *
 *	Return true if the page is logically enqueued and no deferred
 *	dequeue is pending.
 */
static inline bool
vm_page_enqueued(vm_page_t m)
{

	vm_page_assert_locked(m);

	if ((m->aflags & PGA_DEQUEUE) != 0)
		return (false);
	atomic_thread_fence_acq();
	return (m->queue != PQ_NONE);
}

/*
 *	vm_page_held:
 *
 *	Return true if a reference prevents the page from being reclaimable.
 */
static inline bool
vm_page_held(vm_page_t m)
{

	return (m->hold_count > 0 || m->wire_count > 0);
}

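/*
 * For example (illustrative), a page-queue scan might skip any page for
 * which vm_page_held(m) is true, since a hold or wiring prevents the page
 * from being reclaimed:
 *
 *	if (vm_page_held(m))
 *		continue;
 */
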
#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */