xref: /freebsd/sys/vm/vm_page.h (revision a15f7df5deb5861348e8eff04f60cecf5942f595)
160727d8bSWarner Losh /*-
2df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  *
5df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
6df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
7df8bae1dSRodney W. Grimes  *
8df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
9df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
10df8bae1dSRodney W. Grimes  * are met:
11df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
12df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
13df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
15df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
16df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
17df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
18df8bae1dSRodney W. Grimes  *    without specific prior written permission.
19df8bae1dSRodney W. Grimes  *
20df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
31df8bae1dSRodney W. Grimes  *
323c4dd356SDavid Greenman  *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
33df8bae1dSRodney W. Grimes  *
34df8bae1dSRodney W. Grimes  *
35df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36df8bae1dSRodney W. Grimes  * All rights reserved.
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39df8bae1dSRodney W. Grimes  *
40df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
41df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
42df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
43df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
44df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
45df8bae1dSRodney W. Grimes  *
46df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49df8bae1dSRodney W. Grimes  *
50df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
51df8bae1dSRodney W. Grimes  *
52df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53df8bae1dSRodney W. Grimes  *  School of Computer Science
54df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
55df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
56df8bae1dSRodney W. Grimes  *
57df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
58df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
593c4dd356SDavid Greenman  *
60c3aac50fSPeter Wemm  * $FreeBSD$
61df8bae1dSRodney W. Grimes  */
62df8bae1dSRodney W. Grimes 
63df8bae1dSRodney W. Grimes /*
64df8bae1dSRodney W. Grimes  *	Resident memory system definitions.
65df8bae1dSRodney W. Grimes  */
66df8bae1dSRodney W. Grimes 
67df8bae1dSRodney W. Grimes #ifndef	_VM_PAGE_
68df8bae1dSRodney W. Grimes #define	_VM_PAGE_
69df8bae1dSRodney W. Grimes 
70f919ebdeSDavid Greenman #include <vm/pmap.h>
71069e9bc1SDoug Rabson 
72df8bae1dSRodney W. Grimes /*
73df8bae1dSRodney W. Grimes  *	Management of resident (logical) pages.
74df8bae1dSRodney W. Grimes  *
75df8bae1dSRodney W. Grimes  *	A small structure is kept for each resident
76df8bae1dSRodney W. Grimes  *	page, indexed by page number.  Each structure
77df8bae1dSRodney W. Grimes  *	is an element of several lists:
78df8bae1dSRodney W. Grimes  *
79df8bae1dSRodney W. Grimes  *		A hash table bucket used to quickly
80df8bae1dSRodney W. Grimes  *		perform object/offset lookups
81df8bae1dSRodney W. Grimes  *
82df8bae1dSRodney W. Grimes  *		A list of all pages for a given object,
83df8bae1dSRodney W. Grimes  *		so they can be quickly deactivated at
84df8bae1dSRodney W. Grimes  *		time of deallocation.
85df8bae1dSRodney W. Grimes  *
86df8bae1dSRodney W. Grimes  *		An ordered list of pages due for pageout.
87df8bae1dSRodney W. Grimes  *
88df8bae1dSRodney W. Grimes  *	In addition, the structure contains the object
89df8bae1dSRodney W. Grimes  *	and offset to which this page belongs (for pageout),
90df8bae1dSRodney W. Grimes  *	and sundry status bits.
91df8bae1dSRodney W. Grimes  *
923c76db4cSAlan Cox  *	In general, operations on this structure's mutable fields are
933c76db4cSAlan Cox  *	synchronized using one of, or a combination of, the lock on the
943c76db4cSAlan Cox  *	object that the page belongs to (O), the pool lock for the page (P),
958d220203SAlan Cox  *	or the lock for either the free or paging queue (Q).  If a field is
963c76db4cSAlan Cox  *	annotated below with two of these locks, then holding either lock is
973c76db4cSAlan Cox  *	sufficient for read access, but both locks are required for write
983c76db4cSAlan Cox  *	access.
990ce3ba8cSKip Macy  *
100abb9b935SKonstantin Belousov  *	In contrast, the synchronization of accesses to the page's
101abb9b935SKonstantin Belousov  *	dirty field is machine dependent (M).  In the
102abb9b935SKonstantin Belousov  *	machine-independent layer, the lock on the object that the
103abb9b935SKonstantin Belousov  *	page belongs to must be held in order to operate on the field.
104abb9b935SKonstantin Belousov  *	However, the pmap layer is permitted to set all bits within
105abb9b935SKonstantin Belousov  *	the field without holding that lock.  If the underlying
106abb9b935SKonstantin Belousov  *	architecture does not support atomic read-modify-write
107abb9b935SKonstantin Belousov  *	operations on the field's type, then the machine-independent
1082042bb37SKonstantin Belousov  *	layer uses a 32-bit atomic on the aligned 32-bit word that
109abb9b935SKonstantin Belousov  *	contains the dirty field.  In the machine-independent layer,
110abb9b935SKonstantin Belousov  *	the implementation of read-modify-write operations on the
111abb9b935SKonstantin Belousov  *	field is encapsulated in vm_page_clear_dirty_mask().
112df8bae1dSRodney W. Grimes  */
113df8bae1dSRodney W. Grimes 
114561cc9fcSKonstantin Belousov #if PAGE_SIZE == 4096
115561cc9fcSKonstantin Belousov #define VM_PAGE_BITS_ALL 0xffu
116561cc9fcSKonstantin Belousov typedef uint8_t vm_page_bits_t;
117561cc9fcSKonstantin Belousov #elif PAGE_SIZE == 8192
118561cc9fcSKonstantin Belousov #define VM_PAGE_BITS_ALL 0xffffu
119561cc9fcSKonstantin Belousov typedef uint16_t vm_page_bits_t;
120561cc9fcSKonstantin Belousov #elif PAGE_SIZE == 16384
121561cc9fcSKonstantin Belousov #define VM_PAGE_BITS_ALL 0xffffffffu
122561cc9fcSKonstantin Belousov typedef uint32_t vm_page_bits_t;
123561cc9fcSKonstantin Belousov #elif PAGE_SIZE == 32768
124561cc9fcSKonstantin Belousov #define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
125561cc9fcSKonstantin Belousov typedef uint64_t vm_page_bits_t;
126561cc9fcSKonstantin Belousov #endif
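
/*
 * Each bit in vm_page_bits_t covers one DEV_BSIZE (512-byte) chunk of the
 * page, so PAGE_SIZE / DEV_BSIZE bits are needed: 4096/512 = 8,
 * 8192/512 = 16, 16384/512 = 32 and 32768/512 = 64, matching the type
 * widths chosen above.
 */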
127561cc9fcSKonstantin Belousov 
128df8bae1dSRodney W. Grimes struct vm_page {
1298d220203SAlan Cox 	TAILQ_ENTRY(vm_page) pageq;	/* page queue or free list (Q)	*/
130e3975643SJake Burkholder 	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) 	*/
131df8bae1dSRodney W. Grimes 
1325ac59343SAlan Cox 	vm_object_t object;		/* which object am I in (O,P)*/
13343319c11SAlan Cox 	vm_pindex_t pindex;		/* offset into object (O,P) */
134227f9a1cSJake Burkholder 	vm_paddr_t phys_addr;		/* physical address of page */
1350385347cSPeter Wemm 	struct md_page md;		/* machine dependent stuff */
1367024db1dSAlan Cox 	uint8_t	queue;			/* page queue index (P,Q) */
1372446e4f0SAlan Cox 	int8_t segind;			/* vm_phys segment index */
1383407fefeSKonstantin Belousov 	short hold_count;		/* page hold count (P) */
1392446e4f0SAlan Cox 	uint8_t	order;			/* index of the buddy queue */
1402446e4f0SAlan Cox 	uint8_t pool;			/* vm_phys freepool index */
141b2775308SAlan Cox 	u_short cow;			/* page cow mapping count (P) */
142fd8c28bfSAlan Cox 	u_int wire_count;		/* wired down maps refs (P) */
1433407fefeSKonstantin Belousov 	uint8_t aflags;			/* access is atomic */
144081a4881SAlan Cox 	uint8_t oflags;			/* page VPO_* flags (O) */
145081a4881SAlan Cox 	uint16_t flags;			/* page PG_* flags (P) */
146*a15f7df5SAttilio Rao 	u_char	act_count;		/* page usage count (P) */
14791f7a860SAlan Cox 	u_char	busy;			/* page busy count (O) */
148bd7e5f99SJohn Dyson 	/* NOTE: these fields must provide one bit per DEV_BSIZE chunk in a page, */
149bd7e5f99SJohn Dyson 	/* so on normal x86 kernels they must be at least 8 bits wide. */
150561cc9fcSKonstantin Belousov 	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
151561cc9fcSKonstantin Belousov 	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
152df8bae1dSRodney W. Grimes };
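
/*
 * Illustrative sketch, not part of this header: the machine-independent
 * layer's fallback for clearing bits in the dirty field when the
 * architecture lacks sub-word atomics, as described in the comment at the
 * top of this file.  The real logic lives in vm_page_clear_dirty_mask();
 * the function name and the little-endian shift below are simplifying
 * assumptions.
 */
#if 0
static inline void
vm_page_clear_dirty_sketch(vm_page_t m, vm_page_bits_t pagebits)
{
	uintptr_t addr;
	uint32_t *wordp, mask;
	int shift;

	addr = (uintptr_t)&m->dirty;
	/* Locate the aligned 32-bit word that contains the dirty field. */
	wordp = (uint32_t *)(addr & ~(uintptr_t)(sizeof(uint32_t) - 1));
	/* Position the dirty bits within that word (little-endian case). */
	shift = (addr & (sizeof(uint32_t) - 1)) * NBBY;
	mask = (uint32_t)pagebits << shift;
	atomic_clear_32(wordp, mask);
}
#endif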
153df8bae1dSRodney W. Grimes 
1545786be7cSAlan Cox /*
1555786be7cSAlan Cox  * Page flags stored in oflags:
1565786be7cSAlan Cox  *
1575786be7cSAlan Cox  * Access to these page flags is synchronized by the lock on the object
1585786be7cSAlan Cox  * containing the page (O).
159d98d0ce2SKonstantin Belousov  *
160d98d0ce2SKonstantin Belousov  * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
161d98d0ce2SKonstantin Belousov  * 	 indicates that the page is not under PV management but
162d98d0ce2SKonstantin Belousov  * 	 otherwise should be treated as a normal page.  Pages not
163d98d0ce2SKonstantin Belousov  * 	 under PV management cannot be paged out via the
164d98d0ce2SKonstantin Belousov  * 	 object/vm_page_t because there is no knowledge of their pte
165d98d0ce2SKonstantin Belousov  * 	 mappings, and such pages are also not on any PQ queue.
166d98d0ce2SKonstantin Belousov  *
1675786be7cSAlan Cox  */
168081a4881SAlan Cox #define	VPO_BUSY	0x01		/* page is in transit */
169081a4881SAlan Cox #define	VPO_WANTED	0x02		/* someone is waiting for page */
170081a4881SAlan Cox #define	VPO_UNMANAGED	0x04		/* no PV management for page */
171081a4881SAlan Cox #define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
172081a4881SAlan Cox #define	VPO_NOSYNC	0x10		/* do not collect for syncer */
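
/*
 * Illustrative sketch, not part of this header: oflags, including
 * VPO_UNMANAGED, are read and written under the containing object's lock,
 * per the (O) annotation above.  The helper name is hypothetical.
 */
#if 0
static inline int
vm_page_is_unmanaged_sketch(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	return ((m->oflags & VPO_UNMANAGED) != 0);
}
#endif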
1735786be7cSAlan Cox 
17444e46b9eSAlan Cox #define	PQ_NONE		255
17544e46b9eSAlan Cox #define	PQ_INACTIVE	0
17644e46b9eSAlan Cox #define	PQ_ACTIVE	1
177081a4881SAlan Cox #define	PQ_COUNT	2
178ef39c05bSAlexander Leidinger 
1798d220203SAlan Cox TAILQ_HEAD(pglist, vm_page);
18070c17636SAlan Cox 
1818d220203SAlan Cox struct vm_pagequeue {
1828d220203SAlan Cox 	struct mtx	pq_mutex;
1838d220203SAlan Cox 	struct pglist	pq_pl;
1848d220203SAlan Cox 	int *const	pq_cnt;
1858d220203SAlan Cox 	const char *const pq_name;
1868d220203SAlan Cox } __aligned(CACHE_LINE_SIZE);
1878d220203SAlan Cox 
1888d220203SAlan Cox extern struct vm_pagequeue vm_pagequeues[PQ_COUNT];
1898d220203SAlan Cox 
1908d220203SAlan Cox #define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
1918d220203SAlan Cox #define	vm_pagequeue_init_lock(pq)	mtx_init(&(pq)->pq_mutex,	\
1928d220203SAlan Cox 	    (pq)->pq_name, "vm pagequeue", MTX_DEF | MTX_DUPOK);
1938d220203SAlan Cox #define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
1948d220203SAlan Cox #define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)
195e67e0775SAlan Cox 
1964ceaf45dSAttilio Rao extern struct mtx_padalign vm_page_queue_free_mtx;
1974ceaf45dSAttilio Rao extern struct mtx_padalign pa_lock[];
1982965a453SKip Macy 
1992965a453SKip Macy #if defined(__arm__)
2002965a453SKip Macy #define	PDRSHIFT	PDR_SHIFT
2012965a453SKip Macy #elif !defined(PDRSHIFT)
2022965a453SKip Macy #define PDRSHIFT	21
2032965a453SKip Macy #endif
2042965a453SKip Macy 
2052965a453SKip Macy #define	pa_index(pa)	((pa) >> PDRSHIFT)
2064ceaf45dSAttilio Rao #define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
2072965a453SKip Macy #define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
2082965a453SKip Macy #define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
2092965a453SKip Macy #define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
2102965a453SKip Macy #define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
2112965a453SKip Macy #define	PA_UNLOCK_COND(pa) 			\
2122965a453SKip Macy 	do {		   			\
213567e51e1SAlan Cox 		if ((pa) != 0) {		\
214567e51e1SAlan Cox 			PA_UNLOCK((pa));	\
215567e51e1SAlan Cox 			(pa) = 0;		\
216567e51e1SAlan Cox 		}				\
2172965a453SKip Macy 	} while (0)
2182965a453SKip Macy 
2192965a453SKip Macy #define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
2202965a453SKip Macy 
221cf1911a9SKonstantin Belousov #ifdef KLD_MODULE
222cf1911a9SKonstantin Belousov #define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
223cf1911a9SKonstantin Belousov #define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
224cf1911a9SKonstantin Belousov #define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
225cf1911a9SKonstantin Belousov #if defined(INVARIANTS)
226cf1911a9SKonstantin Belousov #define	vm_page_lock_assert(m, a)		\
227cf1911a9SKonstantin Belousov     vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
228cf1911a9SKonstantin Belousov #else
229cf1911a9SKonstantin Belousov #define	vm_page_lock_assert(m, a)
230cf1911a9SKonstantin Belousov #endif
231cf1911a9SKonstantin Belousov #else	/* !KLD_MODULE */
2322965a453SKip Macy #define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
2332965a453SKip Macy #define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
2342965a453SKip Macy #define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
2352965a453SKip Macy #define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
2362965a453SKip Macy #define	vm_page_lock_assert(m, a)	mtx_assert(vm_page_lockptr((m)), (a))
237cf1911a9SKonstantin Belousov #endif
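
/*
 * Illustrative sketch, not part of this header: vm_page_lock() provides
 * the page lock that protects the fields annotated (P) above, e.g. the
 * hold count.  The helper name is hypothetical and assumes, per the (P)
 * annotation on hold_count, that vm_page_hold() expects the caller to
 * hold the page lock.
 */
#if 0
static inline void
vm_page_hold_locked_sketch(vm_page_t m)
{

	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
}
#endif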
238e67e0775SAlan Cox 
239df8bae1dSRodney W. Grimes /*
240369763e3SAlan Cox  * The vm_page's aflags are updated using atomic operations.  To set or clear
241369763e3SAlan Cox  * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
242369763e3SAlan Cox  * must be used.  Neither these flags nor these functions are part of the KBI.
2433407fefeSKonstantin Belousov  *
2443407fefeSKonstantin Belousov  * PGA_REFERENCED may be cleared only if the object containing the page is
245369763e3SAlan Cox  * locked.  It is set by both the MI and MD VM layers.  However, kernel
246369763e3SAlan Cox  * loadable modules should not directly set this flag.  They should call
247369763e3SAlan Cox  * vm_page_reference() instead.
248ce186587SAlan Cox  *
2493407fefeSKonstantin Belousov  * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().  When it
2506031c68dSAlan Cox  * does so, the page must be VPO_BUSY.  The MI VM layer must never access this
2516031c68dSAlan Cox  * flag directly.  Instead, it should call pmap_page_is_write_mapped().
25257bd5cceSNathan Whitehorn  *
25357bd5cceSNathan Whitehorn  * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
2546031c68dSAlan Cox  * at least one executable mapping.  It is not consumed by the MI VM layer.
255df8bae1dSRodney W. Grimes  */
2563407fefeSKonstantin Belousov #define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
2573407fefeSKonstantin Belousov #define	PGA_REFERENCED	0x02		/* page has been referenced */
25857bd5cceSNathan Whitehorn #define	PGA_EXECUTABLE	0x04		/* page may be mapped executable */
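
/*
 * Illustrative sketch, not part of this header: the comment above asks
 * kernel modules to call vm_page_reference() rather than set
 * PGA_REFERENCED, and MI code to call pmap_page_is_write_mapped() rather
 * than read PGA_WRITEABLE.  The helper name below is hypothetical.
 */
#if 0
static inline int
vm_page_any_dirty_sketch(vm_page_t m)
{

	/*
	 * A page with writable mappings may have been dirtied through the
	 * pmap, so probe the pmap layer instead of reading PGA_WRITEABLE.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_page_test_dirty(m);
	return (m->dirty != 0);
}
#endif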
2593407fefeSKonstantin Belousov 
2603407fefeSKonstantin Belousov /*
2613407fefeSKonstantin Belousov  * Page flags.  If changed at any time other than page allocation or
2623407fefeSKonstantin Belousov  * freeing, the modification must be protected by the vm_page lock.
2633407fefeSKonstantin Belousov  */
264081a4881SAlan Cox #define	PG_CACHED	0x0001		/* page is cached */
265081a4881SAlan Cox #define	PG_FREE		0x0002		/* page is free */
266081a4881SAlan Cox #define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
267081a4881SAlan Cox #define	PG_ZERO		0x0008		/* page is zeroed */
268081a4881SAlan Cox #define	PG_MARKER	0x0010		/* special queue marker page */
269081a4881SAlan Cox #define	PG_SLAB		0x0020		/* object pointer is actually a slab */
270081a4881SAlan Cox #define	PG_WINATCFLS	0x0040		/* flush dirty page on inactive q */
271081a4881SAlan Cox #define	PG_NODUMP	0x0080		/* don't include this page in a dump */
272081a4881SAlan Cox #define	PG_UNHOLDFREE	0x0100		/* delayed free of a held page */
273df8bae1dSRodney W. Grimes 
27424a1cce3SDavid Greenman /*
27524a1cce3SDavid Greenman  * Misc constants.
27624a1cce3SDavid Greenman  */
27724a1cce3SDavid Greenman #define ACT_DECLINE		1
27824a1cce3SDavid Greenman #define ACT_ADVANCE		3
27938efa82bSJohn Dyson #define ACT_INIT		5
2805070c7f8SJohn Dyson #define ACT_MAX			64
281df8bae1dSRodney W. Grimes 
282c4473420SPeter Wemm #ifdef _KERNEL
28304a18977SAlan Cox 
284369763e3SAlan Cox #include <sys/systm.h>
285369763e3SAlan Cox 
286369763e3SAlan Cox #include <machine/atomic.h>
287369763e3SAlan Cox 
288df8bae1dSRodney W. Grimes /*
289969a0af0SAlan Cox  * Each pageable resident page falls into one of four lists:
290df8bae1dSRodney W. Grimes  *
291df8bae1dSRodney W. Grimes  *	free
292df8bae1dSRodney W. Grimes  *		Available for allocation now.
29324a1cce3SDavid Greenman  *
29424a1cce3SDavid Greenman  *	cache
2957bfda801SAlan Cox  *		Almost available for allocation. Still associated with
2967bfda801SAlan Cox  *		an object, but clean and immediately freeable.
29724a1cce3SDavid Greenman  *
2980f752392SAlan Cox  * The following lists are LRU sorted:
2990f752392SAlan Cox  *
300df8bae1dSRodney W. Grimes  *	inactive
3016c5e9bbdSMike Pritchard  *		Low activity, candidates for reclamation.
302df8bae1dSRodney W. Grimes  *		This is the list of pages that should be
303df8bae1dSRodney W. Grimes  *		paged out next.
30424a1cce3SDavid Greenman  *
305df8bae1dSRodney W. Grimes  *	active
30624a1cce3SDavid Greenman  *		Pages that are "active", i.e., they have been
30724a1cce3SDavid Greenman  *		recently referenced.
30810ad4d48SJohn Dyson  *
309df8bae1dSRodney W. Grimes  */
310df8bae1dSRodney W. Grimes 
311a316d390SJohn Dyson extern int vm_page_zero_count;
312a316d390SJohn Dyson 
3130d94caffSDavid Greenman extern vm_page_t vm_page_array;		/* First resident page in table */
31413a0b7bcSKonstantin Belousov extern long vm_page_array_size;		/* number of vm_page_t's */
3150d94caffSDavid Greenman extern long first_page;			/* first physical page number */
3160d94caffSDavid Greenman 
3172446e4f0SAlan Cox #define	VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)
3182446e4f0SAlan Cox 
319df8bae1dSRodney W. Grimes #define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)
320df8bae1dSRodney W. Grimes 
321b6de32bdSKonstantin Belousov vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
322df8bae1dSRodney W. Grimes 
323827b2fa0SAlan Cox /* page allocation classes: */
3246d40c3d3SDavid Greenman #define VM_ALLOC_NORMAL		0
3256d40c3d3SDavid Greenman #define VM_ALLOC_INTERRUPT	1
3266d40c3d3SDavid Greenman #define VM_ALLOC_SYSTEM		2
327827b2fa0SAlan Cox #define	VM_ALLOC_CLASS_MASK	3
328827b2fa0SAlan Cox /* page allocation flags: */
329026aa839SJeff Roberson #define	VM_ALLOC_WIRED		0x0020	/* non pageable */
330026aa839SJeff Roberson #define	VM_ALLOC_ZERO		0x0040	/* Try to obtain a zeroed page */
3311d9e77f6SKonstantin Belousov #define	VM_ALLOC_RETRY		0x0080	/* Mandatory with vm_page_grab() */
332026aa839SJeff Roberson #define	VM_ALLOC_NOOBJ		0x0100	/* No associated object */
3330f9f9bcbSAlan Cox #define	VM_ALLOC_NOBUSY		0x0200	/* Do not busy the page */
3347bfda801SAlan Cox #define	VM_ALLOC_IFCACHED	0x0400	/* Fail if the page is not cached */
3357bfda801SAlan Cox #define	VM_ALLOC_IFNOTCACHED	0x0800	/* Fail if the page is cached */
3365f195aa3SKonstantin Belousov #define	VM_ALLOC_IGN_SBUSY	0x1000	/* vm_page_grab() only */
337263811f7SKip Macy #define	VM_ALLOC_NODUMP		0x2000	/* don't include in dump */
3385f195aa3SKonstantin Belousov 
3395f195aa3SKonstantin Belousov #define	VM_ALLOC_COUNT_SHIFT	16
3405f195aa3SKonstantin Belousov #define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
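
/*
 * Illustrative sketch, not part of this header: a typical allocation
 * request combines one allocation class with zero or more flag bits.  The
 * helper name is hypothetical; vm_page_alloc() expects the object, if
 * any, to be locked by the caller.
 */
#if 0
static inline vm_page_t
vm_page_alloc_wired_sketch(vm_object_t object, vm_pindex_t pindex)
{

	/*
	 * VM_ALLOC_ZERO only prefers a pre-zeroed page; the caller must
	 * still check PG_ZERO and zero the page itself when it is clear.
	 */
	return (vm_page_alloc(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO));
}
#endif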
3410d94caffSDavid Greenman 
342b32ecf44SKonstantin Belousov #ifdef M_NOWAIT
343b32ecf44SKonstantin Belousov static inline int
344b32ecf44SKonstantin Belousov malloc2vm_flags(int malloc_flags)
345b32ecf44SKonstantin Belousov {
346b32ecf44SKonstantin Belousov 	int pflags;
347b32ecf44SKonstantin Belousov 
348962b064aSKonstantin Belousov 	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
349962b064aSKonstantin Belousov 	    (malloc_flags & M_NOWAIT) != 0,
350962b064aSKonstantin Belousov 	    ("M_USE_RESERVE requires M_NOWAIT"));
351b32ecf44SKonstantin Belousov 	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
352b32ecf44SKonstantin Belousov 	    VM_ALLOC_SYSTEM;
353b32ecf44SKonstantin Belousov 	if ((malloc_flags & M_ZERO) != 0)
354b32ecf44SKonstantin Belousov 		pflags |= VM_ALLOC_ZERO;
355b32ecf44SKonstantin Belousov 	if ((malloc_flags & M_NODUMP) != 0)
356b32ecf44SKonstantin Belousov 		pflags |= VM_ALLOC_NODUMP;
357b32ecf44SKonstantin Belousov 	return (pflags);
358b32ecf44SKonstantin Belousov }
359b32ecf44SKonstantin Belousov #endif
360b32ecf44SKonstantin Belousov 
3611b40f8c0SMatthew Dillon void vm_page_busy(vm_page_t m);
3621b40f8c0SMatthew Dillon void vm_page_flash(vm_page_t m);
3631b40f8c0SMatthew Dillon void vm_page_io_start(vm_page_t m);
3641b40f8c0SMatthew Dillon void vm_page_io_finish(vm_page_t m);
3651b40f8c0SMatthew Dillon void vm_page_hold(vm_page_t mem);
3661b40f8c0SMatthew Dillon void vm_page_unhold(vm_page_t mem);
3671b40f8c0SMatthew Dillon void vm_page_free(vm_page_t m);
3681b40f8c0SMatthew Dillon void vm_page_free_zero(vm_page_t m);
3691b40f8c0SMatthew Dillon void vm_page_wakeup(vm_page_t m);
3701b40f8c0SMatthew Dillon 
3711b40f8c0SMatthew Dillon void vm_page_activate (vm_page_t);
3721b40f8c0SMatthew Dillon vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
373fbd80bd0SAlan Cox vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
374fbd80bd0SAlan Cox     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
375fbd80bd0SAlan Cox     vm_paddr_t boundary, vm_memattr_t memattr);
376aa546366SJayachandran C. vm_page_t vm_page_alloc_freelist(int, int);
3771b40f8c0SMatthew Dillon vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
378461c7860SAlan Cox void vm_page_cache(vm_page_t);
379c9444914SAlan Cox void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
3807bfda801SAlan Cox void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
3811b40f8c0SMatthew Dillon int vm_page_try_to_cache (vm_page_t);
3821b40f8c0SMatthew Dillon int vm_page_try_to_free (vm_page_t);
383461c7860SAlan Cox void vm_page_dontneed(vm_page_t);
3841b40f8c0SMatthew Dillon void vm_page_deactivate (vm_page_t);
3858d220203SAlan Cox void vm_page_dequeue(vm_page_t m);
3868d220203SAlan Cox void vm_page_dequeue_locked(vm_page_t m);
387b382c10aSKonstantin Belousov vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
38810cf2560SAlan Cox vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
389e461aae7SKonstantin Belousov void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
3901b40f8c0SMatthew Dillon void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
3911c8279e4SAlan Cox boolean_t vm_page_is_cached(vm_object_t object, vm_pindex_t pindex);
3921b40f8c0SMatthew Dillon vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
39391b4f427SAlan Cox vm_page_t vm_page_next(vm_page_t m);
3942965a453SKip Macy int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
39591b4f427SAlan Cox vm_page_t vm_page_prev(vm_page_t m);
39610cf2560SAlan Cox void vm_page_putfake(vm_page_t m);
397b6c00483SKonstantin Belousov void vm_page_readahead_finish(vm_page_t m);
3983407fefeSKonstantin Belousov void vm_page_reference(vm_page_t m);
3991b40f8c0SMatthew Dillon void vm_page_remove (vm_page_t);
4001b40f8c0SMatthew Dillon void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
4018d220203SAlan Cox void vm_page_requeue(vm_page_t m);
4028d220203SAlan Cox void vm_page_requeue_locked(vm_page_t m);
403dc874f98SKonstantin Belousov void vm_page_set_valid_range(vm_page_t m, int base, int size);
404eb4bbba8SAlan Cox void vm_page_sleep(vm_page_t m, const char *msg);
405889eb0fcSAlan Cox vm_offset_t vm_page_startup(vm_offset_t vaddr);
4068c22654dSAlan Cox void vm_page_unhold_pages(vm_page_t *ma, int count);
4071b40f8c0SMatthew Dillon void vm_page_unwire (vm_page_t, int);
40810cf2560SAlan Cox void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
4091b40f8c0SMatthew Dillon void vm_page_wire (vm_page_t);
4101b40f8c0SMatthew Dillon void vm_page_set_validclean (vm_page_t, int, int);
4111b40f8c0SMatthew Dillon void vm_page_clear_dirty (vm_page_t, int, int);
4121b40f8c0SMatthew Dillon void vm_page_set_invalid (vm_page_t, int, int);
4131b40f8c0SMatthew Dillon int vm_page_is_valid (vm_page_t, int, int);
4141b40f8c0SMatthew Dillon void vm_page_test_dirty (vm_page_t);
415561cc9fcSKonstantin Belousov vm_page_bits_t vm_page_bits(int base, int size);
4168d17e694SJulian Elischer void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
417faa273d5SMatthew Dillon void vm_page_free_toq(vm_page_t m);
4183516c025SPeter Wemm void vm_page_zero_idle_wakeup(void);
41998cb733cSKenneth D. Merry void vm_page_cowfault (vm_page_t);
420641e2829SKonstantin Belousov int vm_page_cowsetup(vm_page_t);
42198cb733cSKenneth D. Merry void vm_page_cowclear (vm_page_t);
42298cb733cSKenneth D. Merry 
423eddc9291SAlan Cox void vm_page_dirty_KBI(vm_page_t m);
424cf1911a9SKonstantin Belousov void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
425cf1911a9SKonstantin Belousov void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
426cf1911a9SKonstantin Belousov int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
427cf1911a9SKonstantin Belousov #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
428cf1911a9SKonstantin Belousov void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
429cf1911a9SKonstantin Belousov #endif
430cf1911a9SKonstantin Belousov 
4313b1025d2SKonstantin Belousov #ifdef INVARIANTS
4323b1025d2SKonstantin Belousov void vm_page_object_lock_assert(vm_page_t m);
4333b1025d2SKonstantin Belousov #define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
4343b1025d2SKonstantin Belousov #else
4353b1025d2SKonstantin Belousov #define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
4363b1025d2SKonstantin Belousov #endif
4373b1025d2SKonstantin Belousov 
438f3b676f0SAlan Cox /*
439369763e3SAlan Cox  * We want to use atomic updates for the aflags field, which is 8 bits wide.
440369763e3SAlan Cox  * However, not all architectures support atomic operations on 8-bit
441369763e3SAlan Cox  * destinations.  In order that we can easily use a 32-bit operation, we
442369763e3SAlan Cox  * require that the aflags field be 32-bit aligned.
443369763e3SAlan Cox  */
444369763e3SAlan Cox CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);
445369763e3SAlan Cox 
446369763e3SAlan Cox /*
447369763e3SAlan Cox  *	Clear the given bits in the specified page.
448369763e3SAlan Cox  */
449369763e3SAlan Cox static inline void
450369763e3SAlan Cox vm_page_aflag_clear(vm_page_t m, uint8_t bits)
451369763e3SAlan Cox {
452369763e3SAlan Cox 	uint32_t *addr, val;
453369763e3SAlan Cox 
454369763e3SAlan Cox 	/*
455369763e3SAlan Cox 	 * The PGA_REFERENCED flag can only be cleared if the object
456369763e3SAlan Cox 	 * containing the page is locked.
457369763e3SAlan Cox 	 */
458369763e3SAlan Cox 	if ((bits & PGA_REFERENCED) != 0)
459369763e3SAlan Cox 		VM_PAGE_OBJECT_LOCK_ASSERT(m);
460369763e3SAlan Cox 
461369763e3SAlan Cox 	/*
462369763e3SAlan Cox 	 * Access the whole 32-bit word containing the aflags field with an
463369763e3SAlan Cox 	 * atomic update.  Parallel non-atomic updates to the other fields
464369763e3SAlan Cox 	 * within this word are handled properly by the atomic update.
465369763e3SAlan Cox 	 */
466369763e3SAlan Cox 	addr = (void *)&m->aflags;
467369763e3SAlan Cox 	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
468369763e3SAlan Cox 	    ("vm_page_aflag_clear: aflags is misaligned"));
469369763e3SAlan Cox 	val = bits;
470369763e3SAlan Cox #if BYTE_ORDER == BIG_ENDIAN
471369763e3SAlan Cox 	val <<= 24;
472369763e3SAlan Cox #endif
473369763e3SAlan Cox 	atomic_clear_32(addr, val);
474369763e3SAlan Cox }
475369763e3SAlan Cox 
476369763e3SAlan Cox /*
477369763e3SAlan Cox  *	Set the given bits in the specified page.
478369763e3SAlan Cox  */
479369763e3SAlan Cox static inline void
480369763e3SAlan Cox vm_page_aflag_set(vm_page_t m, uint8_t bits)
481369763e3SAlan Cox {
482369763e3SAlan Cox 	uint32_t *addr, val;
483369763e3SAlan Cox 
484369763e3SAlan Cox 	/*
485369763e3SAlan Cox 	 * The PGA_WRITEABLE flag can only be set if the page is managed and
486369763e3SAlan Cox 	 * VPO_BUSY.  Currently, this flag is only set by pmap_enter().
487369763e3SAlan Cox 	 */
488369763e3SAlan Cox 	KASSERT((bits & PGA_WRITEABLE) == 0 ||
489369763e3SAlan Cox 	    (m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == VPO_BUSY,
490369763e3SAlan Cox 	    ("vm_page_aflag_set: PGA_WRITEABLE and !VPO_BUSY"));
491369763e3SAlan Cox 
492369763e3SAlan Cox 	/*
493369763e3SAlan Cox 	 * Access the whole 32-bit word containing the aflags field with an
494369763e3SAlan Cox 	 * atomic update.  Parallel non-atomic updates to the other fields
495369763e3SAlan Cox 	 * within this word are handled properly by the atomic update.
496369763e3SAlan Cox 	 */
497369763e3SAlan Cox 	addr = (void *)&m->aflags;
498369763e3SAlan Cox 	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
499369763e3SAlan Cox 	    ("vm_page_aflag_set: aflags is misaligned"));
500369763e3SAlan Cox 	val = bits;
501369763e3SAlan Cox #if BYTE_ORDER == BIG_ENDIAN
502369763e3SAlan Cox 	val <<= 24;
503369763e3SAlan Cox #endif
504369763e3SAlan Cox 	atomic_set_32(addr, val);
505369763e3SAlan Cox }
506369763e3SAlan Cox 
507369763e3SAlan Cox /*
508eddc9291SAlan Cox  *	vm_page_dirty:
509eddc9291SAlan Cox  *
510eddc9291SAlan Cox  *	Set all bits in the page's dirty field.
511eddc9291SAlan Cox  *
512eddc9291SAlan Cox  *	The object containing the specified page must be locked if the
513eddc9291SAlan Cox  *	call is made from the machine-independent layer.
514eddc9291SAlan Cox  *
515eddc9291SAlan Cox  *	See vm_page_clear_dirty_mask().
516eddc9291SAlan Cox  */
517eddc9291SAlan Cox static __inline void
518eddc9291SAlan Cox vm_page_dirty(vm_page_t m)
519eddc9291SAlan Cox {
520eddc9291SAlan Cox 
521eddc9291SAlan Cox 	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
522eddc9291SAlan Cox #if defined(KLD_MODULE) || defined(INVARIANTS)
523eddc9291SAlan Cox 	vm_page_dirty_KBI(m);
524eddc9291SAlan Cox #else
525eddc9291SAlan Cox 	m->dirty = VM_PAGE_BITS_ALL;
526eddc9291SAlan Cox #endif
527eddc9291SAlan Cox }
528eddc9291SAlan Cox 
529eddc9291SAlan Cox /*
5308d220203SAlan Cox  *	vm_page_remque:
5318d220203SAlan Cox  *
5328d220203SAlan Cox  *	If the given page is in a page queue, then remove it from that page
5338d220203SAlan Cox  *	queue.
5348d220203SAlan Cox  *
5358d220203SAlan Cox  *	The page must be locked.
5368d220203SAlan Cox  */
5378d220203SAlan Cox static inline void
5388d220203SAlan Cox vm_page_remque(vm_page_t m)
5398d220203SAlan Cox {
5408d220203SAlan Cox 
5418d220203SAlan Cox 	if (m->queue != PQ_NONE)
5428d220203SAlan Cox 		vm_page_dequeue(m);
5438d220203SAlan Cox }
5448d220203SAlan Cox 
5458d220203SAlan Cox /*
546eb4bbba8SAlan Cox  *	vm_page_sleep_if_busy:
547eb4bbba8SAlan Cox  *
5489af80719SAlan Cox  *	Sleep and release the page queues lock if VPO_BUSY is set or,
549eb4bbba8SAlan Cox  *	if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
550eb4bbba8SAlan Cox  *	thread slept and the page queues lock was released.
551eb4bbba8SAlan Cox  *	Otherwise, retains the page queues lock and returns FALSE.
552eb4bbba8SAlan Cox  *
553eb4bbba8SAlan Cox  *	The object containing the given page must be locked.
554eb4bbba8SAlan Cox  */
555eb4bbba8SAlan Cox static __inline int
556eb4bbba8SAlan Cox vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
557eb4bbba8SAlan Cox {
558eb4bbba8SAlan Cox 
5599af80719SAlan Cox 	if ((m->oflags & VPO_BUSY) || (also_m_busy && m->busy)) {
560eb4bbba8SAlan Cox 		vm_page_sleep(m, msg);
561eb4bbba8SAlan Cox 		return (TRUE);
562eb4bbba8SAlan Cox 	}
563eb4bbba8SAlan Cox 	return (FALSE);
564eb4bbba8SAlan Cox }
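
/*
 * Illustrative sketch, not part of this header: the usual retry pattern
 * around vm_page_sleep_if_busy().  Because the sleep drops locks, the
 * lookup must be repeated once the thread wakes up.  The helper name and
 * wait message are hypothetical.
 */
#if 0
static inline vm_page_t
vm_page_lookup_unbusied_sketch(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	for (;;) {
		m = vm_page_lookup(object, pindex);
		if (m == NULL || !vm_page_sleep_if_busy(m, TRUE, "pgwait"))
			return (m);
		/* Slept; the page may have changed or been freed. */
	}
}
#endif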
565eb4bbba8SAlan Cox 
566eb4bbba8SAlan Cox /*
567f3b676f0SAlan Cox  *	vm_page_undirty:
568f3b676f0SAlan Cox  *
569f3b676f0SAlan Cox  *	Set page to not be dirty.  Note: does not clear pmap modify bits
570f3b676f0SAlan Cox  */
571f3b676f0SAlan Cox static __inline void
572f3b676f0SAlan Cox vm_page_undirty(vm_page_t m)
573f3b676f0SAlan Cox {
5743b1025d2SKonstantin Belousov 
5753b1025d2SKonstantin Belousov 	VM_PAGE_OBJECT_LOCK_ASSERT(m);
576f3b676f0SAlan Cox 	m->dirty = 0;
577f3b676f0SAlan Cox }
578f3b676f0SAlan Cox 
579c4473420SPeter Wemm #endif				/* _KERNEL */
580df8bae1dSRodney W. Grimes #endif				/* !_VM_PAGE_ */
581