xref: /freebsd/sys/vm/vm_page.h (revision 675aae732d3dd2ec0f12f39df3a1fbfe693a4ebd)
1 /*-
2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
35  *
36  *
37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38  * All rights reserved.
39  *
40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  *
62  * $FreeBSD$
63  */
64 
65 /*
66  *	Resident memory system definitions.
67  */
68 
69 #ifndef	_VM_PAGE_
70 #define	_VM_PAGE_
71 
72 #include <sys/_bitset.h>
73 #include <sys/bitset.h>
74 #include <vm/pmap.h>
75 
76 /*
77  *	Management of resident (logical) pages.
78  *
79  *	A small structure is kept for each resident
80  *	page, indexed by page number.  Each structure
81  *	is an element of several collections:
82  *
83  *		A radix tree used to quickly
84  *		perform object/offset lookups
85  *
86  *		A list of all pages for a given object,
87  *		so they can be quickly deactivated at
88  *		time of deallocation.
89  *
90  *		An ordered list of pages due for pageout.
91  *
92  *	In addition, the structure contains the object
93  *	and offset to which this page belongs (for pageout),
94  *	and sundry status bits.
95  *
96  *	In general, operations on this structure's mutable fields are
97  *	synchronized using one of, or a combination of, several locks.  If a
98  *	field is annotated with two of these locks, then holding either is
99  *	sufficient for read access but both are required for write access.
100  *	The queue lock for a page depends on the value of its queue field and is
101  *	described in detail below.
102  *
103  *	The following annotations are possible:
104  *	(A) the field must be accessed using atomic(9) and may require
105  *	    additional synchronization.
106  *	(B) the page busy lock.
107  *	(C) the field is immutable.
108  *	(F) the per-domain lock for the free queues.
109  *	(M) Machine dependent, defined by pmap layer.
110  *	(O) the object that the page belongs to.
111  *	(Q) the page's queue lock.
112  *
113  *	The busy lock is an embedded reader-writer lock that protects the
114  *	page's contents and identity (i.e., its <object, pindex> tuple) as
115  *	well as certain valid/dirty modifications.  To avoid bloating
116  *	the page structure, the busy lock lacks some of the features available
117  *	in the kernel's general-purpose synchronization primitives.  As a result,
118  *	busy lock ordering rules are not verified, lock recursion is not
119  *	detected, and an attempt to xbusy a busy page or sbusy an xbusy page
120  *	will trigger a panic rather than causing the thread to block.
121  *	vm_page_sleep_if_busy() can be used to sleep until the page's busy
122  *	state changes, after which the caller must re-lookup the page and
123  *	re-evaluate its state.  vm_page_busy_acquire() will block until
124  *	the lock is acquired.
125  *
126  *	The valid field is protected by the page busy lock (B) and object
127  *	lock (O).  Transitions from invalid to valid are generally done
128  *	via I/O or zero filling and do not require the object lock.
129  *	These must be protected with the busy lock to prevent page-in or
130  *	creation races.  Page invalidation generally happens as a result
131  *	of truncate or msync.  When invalidated, pages must not be present
132  *	in pmap, and the object lock must be held to prevent concurrent
133  *	speculative read-only mappings that do not require busy.  I/O
134  *	routines may check for validity without a lock if they are prepared
135  *	to handle invalidation races with higher level locks (vnode) or are
136  *	unconcerned with races so long as they hold a reference to prevent
137  *	recycling.  When a valid bit is set while holding a shared busy
138  *	lock (A), atomic operations are used to protect against concurrent
139  *	modification.
140  *
141  *	In contrast, the synchronization of accesses to the page's
142  *	dirty field is a mix of machine dependent (M) and busy (B).  In
143  *	the machine-independent layer, the page busy lock must be held to
144  *	operate on the field.  However, the pmap layer is permitted to
145  *	set all bits within the field without holding that lock.  If the
146  *	underlying architecture does not support atomic read-modify-write
147  *	operations on the field's type, then the machine-independent
148  *	layer uses a 32-bit atomic on the aligned 32-bit word that
149  *	contains the dirty field.  In the machine-independent layer,
150  *	the implementation of read-modify-write operations on the
151  *	field is encapsulated in vm_page_clear_dirty_mask().  An
152  *	exclusive busy lock combined with pmap_remove_{write/all}() is the
153  *	only way to ensure a page cannot become dirty.  I/O generally
154  *	removes the page from pmap to ensure exclusive access and atomic
155  *	writes.
156  *
157  *	The ref_count field tracks references to the page.  References that
158  *	prevent the page from being reclaimable are called wirings and are
159  *	counted in the low bits of ref_count.  The containing object's
160  *	reference, if one exists, is counted using the VPRC_OBJREF bit in the
161  *	ref_count field.  Additionally, the VPRC_BLOCKED bit is used to
162  *	atomically check for wirings and prevent new wirings via
163  *	pmap_extract_and_hold().  When a page belongs to an object, it may be
164  *	wired only when the object is locked, or the page is busy, or by
165  *	pmap_extract_and_hold().  As a result, if the object is locked and the
166  *	page is not busy (or is exclusively busied by the current thread), and
167  *	the page is unmapped, its wire count will not increase.  The ref_count
168  *	field is updated using atomic operations in most cases, except when it
169  *	is known that no other references to the page exist, such as in the page
170  *	allocator.  A page may be present in the page queues, or even actively
171  *	scanned by the page daemon, without an explicitly counted reference.
172  *	The page daemon must therefore handle the possibility of a concurrent
173  *	free of the page.
174  *
175  *	The queue state of a page consists of the queue and act_count fields of
176  *	its atomically updated state, and the subset of atomic flags specified
177  *	by PGA_QUEUE_STATE_MASK.  The queue field contains the page's page queue
178  *	index, or PQ_NONE if it does not belong to a page queue.  To modify the
179  *	queue field, the page queue lock corresponding to the old value must be
180  *	held, unless that value is PQ_NONE, in which case the queue index must
181  *	be updated using an atomic RMW operation.  There is one exception to
182  *	this rule: the page daemon may transition the queue field from
183  *	PQ_INACTIVE to PQ_NONE immediately prior to freeing the page during an
184  *	inactive queue scan.  At that point the page is already dequeued and no
185  *	other references to that vm_page structure can exist.  The PGA_ENQUEUED
186  *	flag, when set, indicates that the page structure is physically inserted
187  *	into the queue corresponding to the page's queue index, and may only be
188  *	set or cleared with the corresponding page queue lock held.
189  *
190  *	To avoid contention on page queue locks, page queue operations (enqueue,
191  *	dequeue, requeue) are batched using fixed-size per-CPU queues.  A
192  *	deferred operation is requested by setting one of the flags in
193  *	PGA_QUEUE_OP_MASK and inserting an entry into a batch queue.  When a
194  *	queue is full, an attempt to insert a new entry will lock the page
195  *	queues and trigger processing of the pending entries.  The
196  *	type-stability of vm_page structures is crucial to this scheme since the
197  *	processing of entries in a given batch queue may be deferred
198  *	indefinitely.  In particular, a page may be freed with pending batch
199  *	queue entries.  The page queue operation flags must be set using atomic
200  *	RMW operations.
201  */
202 
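/*
 * Example (illustrative sketch): the lookup-and-busy pattern described above.
 * A thread that needs a stable view of a page's identity and contents
 * exclusive-busies it; if the page is already busy, the thread sleeps and must
 * then re-lookup the page, since its identity may have changed while the
 * thread slept.  VM_OBJECT_WLOCK()/VM_OBJECT_WUNLOCK() come from
 * vm/vm_object.h and are shown only for context; "object" and "pindex" are
 * placeholders.
 *
 *	vm_page_t m;
 *
 *	VM_OBJECT_WLOCK(object);
 *	for (;;) {
 *		m = vm_page_lookup(object, pindex);
 *		if (m == NULL || vm_page_tryxbusy(m) != 0)
 *			break;
 *		(void)vm_page_sleep_if_busy(m, "pgbusy");
 *	}
 *	if (m != NULL) {
 *		... operate on the exclusively busied page ...
 *		vm_page_xunbusy(m);
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 */
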
203 #if PAGE_SIZE == 4096
204 #define VM_PAGE_BITS_ALL 0xffu
205 typedef uint8_t vm_page_bits_t;
206 #elif PAGE_SIZE == 8192
207 #define VM_PAGE_BITS_ALL 0xffffu
208 typedef uint16_t vm_page_bits_t;
209 #elif PAGE_SIZE == 16384
210 #define VM_PAGE_BITS_ALL 0xffffffffu
211 typedef uint32_t vm_page_bits_t;
212 #elif PAGE_SIZE == 32768
213 #define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
214 typedef uint64_t vm_page_bits_t;
215 #endif
216 
217 typedef union vm_page_astate {
218 	struct {
219 		uint16_t flags;
220 		uint8_t	queue;
221 		uint8_t act_count;
222 	};
223 	uint32_t _bits;
224 } vm_page_astate_t;
225 
226 struct vm_page {
227 	union {
228 		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
229 		struct {
230 			SLIST_ENTRY(vm_page) ss; /* private slists */
231 		} s;
232 		struct {
233 			u_long p;
234 			u_long v;
235 		} memguard;
236 		struct {
237 			void *slab;
238 			void *zone;
239 		} uma;
240 	} plinks;
241 	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
242 	vm_object_t object;		/* which object am I in (O) */
243 	vm_pindex_t pindex;		/* offset into object (O,P) */
244 	vm_paddr_t phys_addr;		/* physical address of page (C) */
245 	struct md_page md;		/* machine dependent stuff */
246 	u_int ref_count;		/* page references (A) */
247 	u_int busy_lock;		/* busy owners lock (A) */
248 	union vm_page_astate a;		/* state accessed atomically (A) */
249 	uint8_t order;			/* index of the buddy queue (F) */
250 	uint8_t pool;			/* vm_phys freepool index (F) */
251 	uint8_t flags;			/* page PG_* flags (P) */
252 	uint8_t oflags;			/* page VPO_* flags (O) */
253 	int8_t psind;			/* pagesizes[] index (O) */
254 	int8_t segind;			/* vm_phys segment index (C) */
255 	/* NOTE that these must support one bit per DEV_BSIZE in a page */
256 	/* so, on normal X86 kernels, they must be at least 8 bits wide */
257 	vm_page_bits_t valid;		/* valid DEV_BSIZE chunk map (O,B) */
258 	vm_page_bits_t dirty;		/* dirty DEV_BSIZE chunk map (M,B) */
259 };
260 
261 /*
262  * Special bits used in the ref_count field.
263  *
264  * ref_count is normally used to count wirings that prevent the page from being
265  * reclaimed, but also supports several special types of references that do not
266  * prevent reclamation.  Accesses to the ref_count field must be atomic unless
267  * the page is unallocated.
268  *
269  * VPRC_OBJREF is the reference held by the containing object.  It can be set
270  * or cleared only when the corresponding object's write lock is held.
271  *
272  * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
273  * attempting to tear down all mappings of a given page.  The page busy lock and
274  * object write lock must both be held in order to set or clear this bit.
275  */
276 #define	VPRC_BLOCKED	0x40000000u	/* mappings are being removed */
277 #define	VPRC_OBJREF	0x80000000u	/* object reference, cleared with (O) */
278 #define	VPRC_WIRE_COUNT(c)	((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
279 #define	VPRC_WIRE_COUNT_MAX	(~(VPRC_BLOCKED | VPRC_OBJREF))
280 
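/*
 * Example (illustrative sketch): decoding a ref_count snapshot.  The flag
 * bits must be masked off, via VPRC_WIRE_COUNT(), before the remaining bits
 * are interpreted as a wiring count.
 *
 *	u_int rc;
 *
 *	rc = atomic_load_int(&m->ref_count);
 *	if ((rc & VPRC_OBJREF) != 0)
 *		... the containing object still holds its reference ...
 *	if (VPRC_WIRE_COUNT(rc) > 0)
 *		... at least one wiring prevents reclamation ...
 */
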
281 /*
282  * Page flags stored in oflags:
283  *
284  * Access to these page flags is synchronized by the lock on the object
285  * containing the page (O).
286  *
287  * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
288  * 	 indicates that the page is not under PV management but
289  * 	 otherwise should be treated as a normal page.  Pages not
290  * 	 under PV management cannot be paged out via the
291  * 	 object/vm_page_t because there is no knowledge of their pte
292  * 	 mappings, and such pages are also not on any PQ queue.
293  *
294  */
295 #define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
296 #define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
297 #define	VPO_UNMANAGED	0x04		/* no PV management for page */
298 #define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
299 
300 /*
301  * Busy page implementation details.
302  * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
303  * implementations, although support for owner identity is omitted because of
304  * size constraints.  Checks on lock recursion are therefore not possible, and
305  * the effectiveness of the lock assertions is somewhat reduced.
306  */
307 #define	VPB_BIT_SHARED		0x01
308 #define	VPB_BIT_EXCLUSIVE	0x02
309 #define	VPB_BIT_WAITERS		0x04
310 #define	VPB_BIT_FLAGMASK						\
311 	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)
312 
313 #define	VPB_SHARERS_SHIFT	3
314 #define	VPB_SHARERS(x)							\
315 	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
316 #define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
317 #define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)
318 
319 #define	VPB_SINGLE_EXCLUSIVE	VPB_BIT_EXCLUSIVE
320 #ifdef INVARIANTS
321 #define	VPB_CURTHREAD_EXCLUSIVE						\
322 	(VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
323 #else
324 #define	VPB_CURTHREAD_EXCLUSIVE	VPB_SINGLE_EXCLUSIVE
325 #endif
326 
327 #define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)
328 
329 /* Freed lock blocks both shared and exclusive. */
330 #define	VPB_FREED		(0xffffffff - VPB_BIT_SHARED)
331 
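/*
 * Worked example (illustrative): VPB_SHARERS_WORD(n) encodes a busy_lock
 * word with n shared holders, so VPB_UNBUSIED is simply VPB_SHARERS_WORD(0),
 * and acquiring or dropping one shared holder adds or subtracts
 * VPB_ONE_SHARER:
 *
 *	VPB_SHARERS_WORD(2) + VPB_ONE_SHARER == VPB_SHARERS_WORD(3)
 *	VPB_SHARERS(VPB_SHARERS_WORD(3)) == 3
 *
 * An exclusive holder stores VPB_CURTHREAD_EXCLUSIVE (VPB_BIT_EXCLUSIVE plus,
 * under INVARIANTS, bits of the owning thread pointer), and VPB_BIT_WAITERS
 * is set while any thread sleeps waiting for the busy state to change.
 */
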
332 #define	PQ_NONE		255
333 #define	PQ_INACTIVE	0
334 #define	PQ_ACTIVE	1
335 #define	PQ_LAUNDRY	2
336 #define	PQ_UNSWAPPABLE	3
337 #define	PQ_COUNT	4
338 
339 #ifndef VM_PAGE_HAVE_PGLIST
340 TAILQ_HEAD(pglist, vm_page);
341 #define VM_PAGE_HAVE_PGLIST
342 #endif
343 SLIST_HEAD(spglist, vm_page);
344 
345 #ifdef _KERNEL
346 extern vm_page_t bogus_page;
347 #endif	/* _KERNEL */
348 
349 extern struct mtx_padalign pa_lock[];
350 
351 #if defined(__arm__)
352 #define	PDRSHIFT	PDR_SHIFT
353 #elif !defined(PDRSHIFT)
354 #define PDRSHIFT	21
355 #endif
356 
357 #define	pa_index(pa)	((pa) >> PDRSHIFT)
358 #define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
359 #define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
360 #define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
361 #define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
362 #define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
363 #define	PA_UNLOCK_COND(pa) 			\
364 	do {		   			\
365 		if ((pa) != 0) {		\
366 			PA_UNLOCK((pa));	\
367 			(pa) = 0;		\
368 		}				\
369 	} while (0)
370 
371 #define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
372 
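/*
 * Example (illustrative sketch): the pa_lock array is keyed on a page's
 * physical address, so code that may or may not have taken the lock commonly
 * tracks the locked address and releases it with PA_UNLOCK_COND().
 *
 *	vm_paddr_t locked_pa;
 *
 *	locked_pa = VM_PAGE_TO_PHYS(m);
 *	PA_LOCK(locked_pa);
 *	... work that requires the physical-address lock ...
 *	PA_UNLOCK_COND(locked_pa);
 */
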
373 #if defined(KLD_MODULE) && !defined(KLD_TIED)
374 #define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
375 #define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
376 #define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
377 #else	/* !KLD_MODULE */
378 #define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
379 #define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
380 #define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
381 #define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
382 #endif
383 #if defined(INVARIANTS)
384 #define	vm_page_assert_locked(m)		\
385     vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
386 #define	vm_page_lock_assert(m, a)		\
387     vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
388 #else
389 #define	vm_page_assert_locked(m)
390 #define	vm_page_lock_assert(m, a)
391 #endif
392 
393 /*
394  * The vm_page's aflags are updated using atomic operations.  To set or clear
395  * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
396  * must be used.  Neither these flags nor these functions are part of the KBI.
397  *
398  * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
399  * both the MI and MD VM layers.  However, kernel loadable modules should not
400  * directly set this flag.  They should call vm_page_reference() instead.
401  *
402  * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
403  * When it does so, the object must be locked, or the page must be
404  * exclusive busied.  The MI VM layer must never access this flag
405  * directly.  Instead, it should call pmap_page_is_write_mapped().
406  *
407  * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
408  * at least one executable mapping.  It is not consumed by the MI VM layer.
409  *
410  * PGA_NOSYNC must be set and cleared with the page busy lock held.
411  *
412  * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
413  * from a page queue, respectively.  It determines whether the plinks.q field
414  * of the page is valid.  To set or clear this flag, the page's "queue" field
415  * must be a valid queue index, with the corresponding page queue lock held.
416  *
417  * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
418  * queue, and cleared when the dequeue request is processed.  A page may
419  * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
420  * is requested after the page is scheduled to be enqueued but before it is
421  * actually inserted into the page queue.
422  *
423  * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
424  * in its page queue.
425  *
426  * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
427  * the inactive queue, thus bypassing LRU.
428  *
429  * The PGA_DEQUEUE, PGA_REQUEUE and PGA_REQUEUE_HEAD flags must be set using an
430  * atomic RMW operation to ensure that the "queue" field is a valid queue index,
431  * and the corresponding page queue lock must be held when clearing any of the
432  * flags.
433  *
434  * PGA_SWAP_FREE is used to defer freeing swap space to the pageout daemon
435  * when the context that dirties the page does not have the object write lock
436  * held.
437  */
438 #define	PGA_WRITEABLE	0x0001		/* page may be mapped writeable */
439 #define	PGA_REFERENCED	0x0002		/* page has been referenced */
440 #define	PGA_EXECUTABLE	0x0004		/* page may be mapped executable */
441 #define	PGA_ENQUEUED	0x0008		/* page is enqueued in a page queue */
442 #define	PGA_DEQUEUE	0x0010		/* page is due to be dequeued */
443 #define	PGA_REQUEUE	0x0020		/* page is due to be requeued */
444 #define	PGA_REQUEUE_HEAD 0x0040		/* page requeue should bypass LRU */
445 #define	PGA_NOSYNC	0x0080		/* do not collect for syncer */
446 #define	PGA_SWAP_FREE	0x0100		/* page with swap space was dirtied */
447 #define	PGA_SWAP_SPACE	0x0200		/* page has allocated swap space */
448 
449 #define	PGA_QUEUE_OP_MASK	(PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
450 #define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_QUEUE_OP_MASK)
451 
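/*
 * Example (illustrative sketch): machine-independent code checks for the
 * possibility of writable mappings through pmap_page_is_write_mapped()
 * rather than by reading PGA_WRITEABLE directly, e.g. before deciding
 * whether a page can still be dirtied through existing mappings:
 *
 *	if (!pmap_page_is_write_mapped(m))
 *		... no pmap mapping can dirty the page behind our back ...
 */
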
452 /*
453  * Page flags.  Updates to these flags are not synchronized, and thus they must
454  * be set during page allocation or freeing to avoid races.
455  *
456  * The PG_PCPU_CACHE flag is set at allocation time if the page was
457  * allocated from a per-CPU cache.  It is cleared the next time that the
458  * page is allocated from the physical memory allocator.
459  */
460 #define	PG_PCPU_CACHE	0x01		/* was allocated from per-CPU caches */
461 #define	PG_FICTITIOUS	0x02		/* physical page doesn't exist */
462 #define	PG_ZERO		0x04		/* page is zeroed */
463 #define	PG_MARKER	0x08		/* special queue marker page */
464 #define	PG_NODUMP	0x10		/* don't include this page in a dump */
465 
466 /*
467  * Misc constants.
468  */
469 #define ACT_DECLINE		1
470 #define ACT_ADVANCE		3
471 #define ACT_INIT		5
472 #define ACT_MAX			64
473 
474 #ifdef _KERNEL
475 
476 #include <sys/systm.h>
477 
478 #include <machine/atomic.h>
479 
480 /*
481  * Each pageable resident page falls into one of five lists:
482  *
483  *	free
484  *		Available for allocation now.
485  *
486  *	inactive
487  *		Low activity, candidates for reclamation.
488  *		This list is approximately LRU ordered.
489  *
490  *	laundry
491  *		This is the list of pages that should be
492  *		paged out next.
493  *
494  *	unswappable
495  *		Dirty anonymous pages that cannot be paged
496  *		out because no swap device is configured.
497  *
498  *	active
499  *		Pages that are "active", i.e., they have been
500  *		recently referenced.
501  *
502  */
503 
504 extern vm_page_t vm_page_array;		/* First resident page in table */
505 extern long vm_page_array_size;		/* number of vm_page_t's */
506 extern long first_page;			/* first physical page number */
507 
508 #define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)
509 
510 /*
511  * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
512  * page to which the given physical address belongs. The correct vm_page_t
513  * object is returned for addresses that are not page-aligned.
514  */
515 vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
516 
517 /*
518  * Page allocation parameters for the vm_page functions
519  * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
520  * vm_page_alloc_freelist().  Some functions support only a subset
521  * of the flags and ignore the others; see the flags legend.
522  *
523  * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
524  * and the vm_page_grab*() functions.  See these functions for details.
525  *
526  * Bits 0 - 1 define class.
527  * Bits 2 - 15 dedicated for flags.
528  * Legend:
529  * (a) - vm_page_alloc() supports the flag.
530  * (c) - vm_page_alloc_contig() supports the flag.
531  * (f) - vm_page_alloc_freelist() supports the flag.
532  * (g) - vm_page_grab() supports the flag.
533  * (p) - vm_page_grab_pages() supports the flag.
534  * Bits above 15 define the count of additional pages that the caller
535  * intends to allocate.
536  */
537 #define VM_ALLOC_NORMAL		0
538 #define VM_ALLOC_INTERRUPT	1
539 #define VM_ALLOC_SYSTEM		2
540 #define	VM_ALLOC_CLASS_MASK	3
541 #define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
542 #define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
543 #define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
544 #define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
545 #define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
546 #define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
547 #define	VM_ALLOC_NOCREAT	0x0400	/* (gp) Don't create a page */
548 #define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
549 #define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
550 #define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
551 #define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
552 #define	VM_ALLOC_COUNT_SHIFT	16
553 #define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
554 
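/*
 * Example (illustrative sketch): allocating a wired page for an object with a
 * request to prefer a pre-zeroed page.  VM_ALLOC_ZERO does not guarantee that
 * the returned page is zeroed; PG_ZERO reports whether it was.  The page is
 * returned exclusive busied unless VM_ALLOC_NOBUSY is passed.  "object" and
 * "pindex" are placeholders, and VM_OBJECT_WLOCK() comes from vm/vm_object.h.
 *
 *	vm_page_t m;
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	VM_OBJECT_WUNLOCK(object);
 *	if (m == NULL)
 *		... allocation failed; retry, sleep, or bail out ...
 *	else if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */
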
555 #ifdef M_NOWAIT
556 static inline int
557 malloc2vm_flags(int malloc_flags)
558 {
559 	int pflags;
560 
561 	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
562 	    (malloc_flags & M_NOWAIT) != 0,
563 	    ("M_USE_RESERVE requires M_NOWAIT"));
564 	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
565 	    VM_ALLOC_SYSTEM;
566 	if ((malloc_flags & M_ZERO) != 0)
567 		pflags |= VM_ALLOC_ZERO;
568 	if ((malloc_flags & M_NODUMP) != 0)
569 		pflags |= VM_ALLOC_NODUMP;
570 	if ((malloc_flags & M_NOWAIT))
571 		pflags |= VM_ALLOC_NOWAIT;
572 	if ((malloc_flags & M_WAITOK))
573 		pflags |= VM_ALLOC_WAITOK;
574 	return (pflags);
575 }
576 #endif
577 
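/*
 * Worked example (illustrative): malloc2vm_flags() maps malloc(9) flags onto
 * this namespace so that page-backed allocators can pass the result on to the
 * page allocator, e.g.:
 *
 *	malloc2vm_flags(M_WAITOK | M_ZERO) ==
 *	    (VM_ALLOC_SYSTEM | VM_ALLOC_WAITOK | VM_ALLOC_ZERO)
 */
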
578 /*
579  * Predicates supported by vm_page_ps_test():
580  *
581  *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
582  *	However, it can be spuriously false when the (super)page has become
583  *	dirty in the pmap but that information has not been propagated to the
584  *	machine-independent layer.
585  */
586 #define	PS_ALL_DIRTY	0x1
587 #define	PS_ALL_VALID	0x2
588 #define	PS_NONE_BUSY	0x4
589 
590 extern struct bitset *vm_page_dump;
591 extern long vm_page_dump_pages;
592 extern vm_paddr_t dump_avail[];
593 
594 static inline void
595 dump_add_page(vm_paddr_t pa)
596 {
597 	vm_pindex_t adj;
598 	int i;
599 
600 	adj = 0;
601 	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
602 		if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) {
603 			BIT_SET_ATOMIC(vm_page_dump_pages,
604 			    (pa >> PAGE_SHIFT) - (dump_avail[i] >> PAGE_SHIFT) +
605 			    adj, vm_page_dump);
606 			return;
607 		}
608 		adj += howmany(dump_avail[i + 1], PAGE_SIZE) -
609 		    dump_avail[i] / PAGE_SIZE;
610 	}
611 }
612 
613 static inline void
614 dump_drop_page(vm_paddr_t pa)
615 {
616 	vm_pindex_t adj;
617 	int i;
618 
619 	adj = 0;
620 	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
621 		if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) {
622 			BIT_CLR_ATOMIC(vm_page_dump_pages,
623 			    (pa >> PAGE_SHIFT) - (dump_avail[i] >> PAGE_SHIFT) +
624 			    adj, vm_page_dump);
625 			return;
626 		}
627 		adj += howmany(dump_avail[i + 1], PAGE_SIZE) -
628 		    dump_avail[i] / PAGE_SIZE;
629 	}
630 }
631 
632 static inline vm_paddr_t
633 vm_page_dump_index_to_pa(int bit)
634 {
635 	int i, tot;
636 
637 	for (i = 0; dump_avail[i + 1] != 0; i += 2) {
638 		tot = howmany(dump_avail[i + 1], PAGE_SIZE) -
639 		    dump_avail[i] / PAGE_SIZE;
640 		if (bit < tot)
641 			return ((vm_paddr_t)bit * PAGE_SIZE +
642 			    dump_avail[i] & ~PAGE_MASK);
643 		bit -= tot;
644 	}
645 	return ((vm_paddr_t)NULL);
646 }
647 
648 #define VM_PAGE_DUMP_FOREACH(pa)						\
649 	for (vm_pindex_t __b = BIT_FFS(vm_page_dump_pages, vm_page_dump);	\
650 	    (pa) = vm_page_dump_index_to_pa(__b - 1), __b != 0;			\
651 	    __b = BIT_FFS_AT(vm_page_dump_pages, vm_page_dump, __b))
652 
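/*
 * Example (illustrative sketch): iterating over every page currently marked
 * for inclusion in a kernel dump.  The iteration variable is a physical
 * address, one per set bit in vm_page_dump.
 *
 *	vm_paddr_t pa;
 *
 *	VM_PAGE_DUMP_FOREACH(pa) {
 *		... write the PAGE_SIZE bytes at physical address pa ...
 *	}
 */
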
653 bool vm_page_busy_acquire(vm_page_t m, int allocflags);
654 void vm_page_busy_downgrade(vm_page_t m);
655 int vm_page_busy_tryupgrade(vm_page_t m);
656 void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
657 void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
658     vm_pindex_t pindex, const char *wmesg, bool nonshared);
659 void vm_page_free(vm_page_t m);
660 void vm_page_free_zero(vm_page_t m);
661 
662 void vm_page_activate (vm_page_t);
663 void vm_page_advise(vm_page_t m, int advice);
664 vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
665 vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
666 vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
667 vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
668     vm_page_t);
669 vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
670     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
671     vm_paddr_t boundary, vm_memattr_t memattr);
672 vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
673     vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
674     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
675     vm_memattr_t memattr);
676 vm_page_t vm_page_alloc_freelist(int, int);
677 vm_page_t vm_page_alloc_freelist_domain(int, int, int);
678 void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
679 bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
680 vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
681 vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
682 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
683     vm_page_t *ma, int count);
684 int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
685     int allocflags, vm_page_t *ma, int count);
686 int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
687     int allocflags);
688 int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
689     vm_pindex_t pindex, int allocflags);
690 void vm_page_deactivate(vm_page_t);
691 void vm_page_deactivate_noreuse(vm_page_t);
692 void vm_page_dequeue(vm_page_t m);
693 void vm_page_dequeue_deferred(vm_page_t m);
694 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
695 void vm_page_free_invalid(vm_page_t);
696 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
697 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
698 void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
699 int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
700 void vm_page_invalid(vm_page_t m);
701 void vm_page_launder(vm_page_t m);
702 vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
703 vm_page_t vm_page_next(vm_page_t m);
704 void vm_page_pqbatch_drain(void);
705 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
706 bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
707     vm_page_astate_t new);
708 vm_page_t vm_page_prev(vm_page_t m);
709 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
710 void vm_page_putfake(vm_page_t m);
711 void vm_page_readahead_finish(vm_page_t m);
712 bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
713     vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
714 bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
715     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
716 void vm_page_reference(vm_page_t m);
717 #define	VPR_TRYFREE	0x01
718 #define	VPR_NOREUSE	0x02
719 void vm_page_release(vm_page_t m, int flags);
720 void vm_page_release_locked(vm_page_t m, int flags);
721 vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
722 bool vm_page_remove(vm_page_t);
723 bool vm_page_remove_xbusy(vm_page_t);
724 int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
725 void vm_page_replace(vm_page_t mnew, vm_object_t object,
726     vm_pindex_t pindex, vm_page_t mold);
727 int vm_page_sbusied(vm_page_t m);
728 vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
729     vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
730 vm_page_bits_t vm_page_set_dirty(vm_page_t m);
731 void vm_page_set_valid_range(vm_page_t m, int base, int size);
732 int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
733 int vm_page_sleep_if_xbusy(vm_page_t m, const char *msg);
734 vm_offset_t vm_page_startup(vm_offset_t vaddr);
735 void vm_page_sunbusy(vm_page_t m);
736 bool vm_page_try_remove_all(vm_page_t m);
737 bool vm_page_try_remove_write(vm_page_t m);
738 int vm_page_trysbusy(vm_page_t m);
739 int vm_page_tryxbusy(vm_page_t m);
740 void vm_page_unhold_pages(vm_page_t *ma, int count);
741 void vm_page_unswappable(vm_page_t m);
742 void vm_page_unwire(vm_page_t m, uint8_t queue);
743 bool vm_page_unwire_noq(vm_page_t m);
744 void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
745 void vm_page_wire(vm_page_t);
746 bool vm_page_wire_mapped(vm_page_t m);
747 void vm_page_xunbusy_hard(vm_page_t m);
748 void vm_page_xunbusy_hard_unchecked(vm_page_t m);
749 void vm_page_set_validclean (vm_page_t, int, int);
750 void vm_page_clear_dirty(vm_page_t, int, int);
751 void vm_page_set_invalid(vm_page_t, int, int);
752 void vm_page_valid(vm_page_t m);
753 int vm_page_is_valid(vm_page_t, int, int);
754 void vm_page_test_dirty(vm_page_t);
755 vm_page_bits_t vm_page_bits(int base, int size);
756 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
757 void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);
758 
759 void vm_page_dirty_KBI(vm_page_t m);
760 void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
761 void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
762 int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
763 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
764 void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
765 void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
766 #endif
767 
768 #define	vm_page_busy_fetch(m)	atomic_load_int(&(m)->busy_lock)
769 
770 #define	vm_page_assert_busied(m)					\
771 	KASSERT(vm_page_busied(m),					\
772 	    ("vm_page_assert_busied: page %p not busy @ %s:%d", \
773 	    (m), __FILE__, __LINE__))
774 
775 #define	vm_page_assert_sbusied(m)					\
776 	KASSERT(vm_page_sbusied(m),					\
777 	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
778 	    (m), __FILE__, __LINE__))
779 
780 #define	vm_page_assert_unbusied(m)					\
781 	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) !=		\
782 	    VPB_CURTHREAD_EXCLUSIVE,					\
783 	    ("vm_page_assert_unbusied: page %p busy_lock %#x owned"	\
784             " by me @ %s:%d",						\
785 	    (m), (m)->busy_lock, __FILE__, __LINE__));
786 
787 #define	vm_page_assert_xbusied_unchecked(m) do {			\
788 	KASSERT(vm_page_xbusied(m),					\
789 	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
790 	    (m), __FILE__, __LINE__));					\
791 } while (0)
792 #define	vm_page_assert_xbusied(m) do {					\
793 	vm_page_assert_xbusied_unchecked(m);				\
794 	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) ==		\
795 	    VPB_CURTHREAD_EXCLUSIVE,					\
796 	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"	\
797             " by me @ %s:%d",						\
798 	    (m), (m)->busy_lock, __FILE__, __LINE__));			\
799 } while (0)
800 
801 #define	vm_page_busied(m)						\
802 	(vm_page_busy_fetch(m) != VPB_UNBUSIED)
803 
804 #define	vm_page_sbusy(m) do {						\
805 	if (!vm_page_trysbusy(m))					\
806 		panic("%s: page %p failed shared busying", __func__,	\
807 		    (m));						\
808 } while (0)
809 
810 #define	vm_page_xbusied(m)						\
811 	((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)
812 
813 #define	vm_page_busy_freed(m)						\
814 	(vm_page_busy_fetch(m) == VPB_FREED)
815 
816 #define	vm_page_xbusy(m) do {						\
817 	if (!vm_page_tryxbusy(m))					\
818 		panic("%s: page %p failed exclusive busying", __func__,	\
819 		    (m));						\
820 } while (0)
821 
822 /* Note: page m's lock must not be owned by the caller. */
823 #define	vm_page_xunbusy(m) do {						\
824 	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
825 	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
826 		vm_page_xunbusy_hard(m);				\
827 } while (0)
828 #define	vm_page_xunbusy_unchecked(m) do {				\
829 	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
830 	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
831 		vm_page_xunbusy_hard_unchecked(m);			\
832 } while (0)
833 
834 #ifdef INVARIANTS
835 void vm_page_object_busy_assert(vm_page_t m);
836 #define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	vm_page_object_busy_assert(m)
837 void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
838 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
839 	vm_page_assert_pga_writeable(m, bits)
840 /*
841  * Claim ownership of a page's xbusy state.  In non-INVARIANTS kernels this
842  * operation is a no-op since ownership is not tracked.  In particular
843  * this macro does not provide any synchronization with the previous owner.
844  */
845 #define	vm_page_xbusy_claim(m) do {					\
846 	u_int _busy_lock;						\
847 									\
848 	vm_page_assert_xbusied_unchecked((m));				\
849 	do {								\
850 		_busy_lock = vm_page_busy_fetch(m);			\
851 	} while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock,	\
852 	    (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
853 } while (0)
854 #else
855 #define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
856 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
857 #define	vm_page_xbusy_claim(m)
858 #endif
859 
860 #if BYTE_ORDER == BIG_ENDIAN
861 #define	VM_PAGE_AFLAG_SHIFT	16
862 #else
863 #define	VM_PAGE_AFLAG_SHIFT	0
864 #endif
865 
866 /*
867  *	Load a snapshot of a page's 32-bit atomic state.
868  */
869 static inline vm_page_astate_t
870 vm_page_astate_load(vm_page_t m)
871 {
872 	vm_page_astate_t a;
873 
874 	a._bits = atomic_load_32(&m->a._bits);
875 	return (a);
876 }
877 
878 /*
879  *	Atomically compare and set a page's atomic state.
880  */
881 static inline bool
882 vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
883 {
884 
885 	KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
886 	    ("%s: invalid head requeue request for page %p", __func__, m));
887 	KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
888 	    ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
889 	KASSERT(new._bits != old->_bits,
890 	    ("%s: bits are unchanged", __func__));
891 
892 	return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
893 }
894 
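/*
 * Example (illustrative sketch): the usual way to update the atomic state is
 * a load/modify/commit loop.  This sketch nudges a page toward PQ_ACTIVE; it
 * only mirrors, in simplified form, what vm_page_activate() does internally,
 * and assumes the caller holds a reference that keeps the page allocated.
 *
 *	vm_page_astate_t old, new;
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		if (old.queue == PQ_ACTIVE || (old.flags & PGA_DEQUEUE) != 0)
 *			break;
 *		new = old;
 *		new.act_count = ACT_INIT;
 *		new.flags |= PGA_REQUEUE;
 *		new.queue = PQ_ACTIVE;
 *	} while (!vm_page_pqstate_commit(m, &old, new));
 */
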
895 /*
896  *	Clear the given bits in the specified page.
897  */
898 static inline void
899 vm_page_aflag_clear(vm_page_t m, uint16_t bits)
900 {
901 	uint32_t *addr, val;
902 
903 	/*
904 	 * Access the whole 32-bit word containing the aflags field with an
905 	 * atomic update.  Parallel non-atomic updates to the other fields
906 	 * within this word are handled properly by the atomic update.
907 	 */
908 	addr = (void *)&m->a;
909 	val = bits << VM_PAGE_AFLAG_SHIFT;
910 	atomic_clear_32(addr, val);
911 }
912 
913 /*
914  *	Set the given bits in the specified page.
915  */
916 static inline void
917 vm_page_aflag_set(vm_page_t m, uint16_t bits)
918 {
919 	uint32_t *addr, val;
920 
921 	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);
922 
923 	/*
924 	 * Access the whole 32-bit word containing the aflags field with an
925 	 * atomic update.  Parallel non-atomic updates to the other fields
926 	 * within this word are handled properly by the atomic update.
927 	 */
928 	addr = (void *)&m->a;
929 	val = bits << VM_PAGE_AFLAG_SHIFT;
930 	atomic_set_32(addr, val);
931 }
932 
933 /*
934  *	vm_page_dirty:
935  *
936  *	Set all bits in the page's dirty field.
937  *
938  *	The object containing the specified page must be locked if the
939  *	call is made from the machine-independent layer.
940  *
941  *	See vm_page_clear_dirty_mask().
942  */
943 static __inline void
944 vm_page_dirty(vm_page_t m)
945 {
946 
947 	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
948 #if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
949 	vm_page_dirty_KBI(m);
950 #else
951 	m->dirty = VM_PAGE_BITS_ALL;
952 #endif
953 }
954 
955 /*
956  *	vm_page_undirty:
957  *
958  *	Set page to not be dirty.  Note: does not clear pmap modify bits
959  */
960 static __inline void
961 vm_page_undirty(vm_page_t m)
962 {
963 
964 	VM_PAGE_OBJECT_BUSY_ASSERT(m);
965 	m->dirty = 0;
966 }
967 
968 static inline uint8_t
969 _vm_page_queue(vm_page_astate_t as)
970 {
971 
972 	if ((as.flags & PGA_DEQUEUE) != 0)
973 		return (PQ_NONE);
974 	return (as.queue);
975 }
976 
977 /*
978  *	vm_page_queue:
979  *
980  *	Return the index of the queue containing m.
981  */
982 static inline uint8_t
983 vm_page_queue(vm_page_t m)
984 {
985 
986 	return (_vm_page_queue(vm_page_astate_load(m)));
987 }
988 
989 static inline bool
990 vm_page_active(vm_page_t m)
991 {
992 
993 	return (vm_page_queue(m) == PQ_ACTIVE);
994 }
995 
996 static inline bool
997 vm_page_inactive(vm_page_t m)
998 {
999 
1000 	return (vm_page_queue(m) == PQ_INACTIVE);
1001 }
1002 
1003 static inline bool
1004 vm_page_in_laundry(vm_page_t m)
1005 {
1006 	uint8_t queue;
1007 
1008 	queue = vm_page_queue(m);
1009 	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
1010 }
1011 
1012 /*
1013  *	vm_page_drop:
1014  *
1015  *	Release a reference to a page and return the old reference count.
1016  */
1017 static inline u_int
1018 vm_page_drop(vm_page_t m, u_int val)
1019 {
1020 	u_int old;
1021 
1022 	/*
1023 	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
1024 	 * page structure are visible before it is freed.
1025 	 */
1026 	atomic_thread_fence_rel();
1027 	old = atomic_fetchadd_int(&m->ref_count, -val);
1028 	KASSERT(old != VPRC_BLOCKED,
1029 	    ("vm_page_drop: page %p has an invalid refcount value", m));
1030 	return (old);
1031 }
1032 
1033 /*
1034  *	vm_page_wired:
1035  *
1036  *	Perform a racy check to determine whether a reference prevents the page
1037  *	from being reclaimable.  If the page's object is locked, and the page is
1038  *	unmapped and exclusively busied by the current thread, no new wirings
1039  *	may be created.
1040  */
1041 static inline bool
1042 vm_page_wired(vm_page_t m)
1043 {
1044 
1045 	return (VPRC_WIRE_COUNT(m->ref_count) > 0);
1046 }
1047 
1048 static inline bool
1049 vm_page_all_valid(vm_page_t m)
1050 {
1051 
1052 	return (m->valid == VM_PAGE_BITS_ALL);
1053 }
1054 
1055 static inline bool
1056 vm_page_none_valid(vm_page_t m)
1057 {
1058 
1059 	return (m->valid == 0);
1060 }
1061 
1062 #endif				/* _KERNEL */
1063 #endif				/* !_VM_PAGE_ */
1064