/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of or a combination of the lock on the
 *	object that the page belongs to (O), the pool lock for the page (P),
 *	or the lock for either the free or paging queue (Q).  If a field is
 *	annotated below with two of these locks, then holding either lock is
 *	sufficient for read access, but both locks are required for write
 *	access.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 */

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif
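
/*
 * Worked example (illustrative only): with PAGE_SIZE 4096 and DEV_BSIZE 512
 * there are 8 DEV_BSIZE chunks per page, so the valid and dirty bitmaps in
 * struct vm_page below fit in a uint8_t.  A byte range [base, base + size)
 * maps onto the bitmap essentially as in the kernel's vm_page_bits(),
 * declared near the end of this file:
 *
 *	first = base / DEV_BSIZE;
 *	last = (base + size - 1) / DEV_BSIZE;
 *	bits = ((vm_page_bits_t)2 << last) - ((vm_page_bits_t)1 << first);
 *
 * For example, bytes [512, 1536) cover chunks 1 and 2 and yield the mask
 * 0x06, while a fully valid 4 KB page yields VM_PAGE_BITS_ALL (0xff).
 */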

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
			void *pv;
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_int wire_count;		/* wired down maps refs (P) */
	volatile u_int busy_lock;	/* busy owners lock */
	uint16_t hold_count;		/* page hold count (P) */
	uint16_t flags;			/* page PG_* flags (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t oflags;			/* page VPO_* flags (O) */
	uint8_t	queue;			/* page queue index (P,Q) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;
	uint8_t	order;			/* index of the buddy queue */
	uint8_t pool;
	u_char	act_count;		/* page usage count (P) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};
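
/*
 * Usage sketch (illustrative only, not part of the KPI): the listq linkage
 * above chains together all resident pages of an object and is protected by
 * the object lock (O).  Assuming a kernel context with vm/vm_object.h
 * available, an MI consumer could walk an object's pages as shown below;
 * note that the pmap may still set dirty bits concurrently, so the count is
 * only advisory:
 *
 *	static int
 *	count_dirty_pages(vm_object_t object)
 *	{
 *		vm_page_t m;
 *		int ndirty;
 *
 *		ndirty = 0;
 *		VM_OBJECT_RLOCK(object);
 *		TAILQ_FOREACH(m, &object->memq, listq) {
 *			if (m->dirty != 0)
 *				ndirty++;
 *		}
 *		VM_OBJECT_RUNLOCK(object);
 *		return (ndirty);
 *	}
 */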

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 * 	 indicates that the page is not under PV management but
 * 	 otherwise should be treated as a normal page.  Pages not
 * 	 under PV management cannot be paged out via the
 * 	 object/vm_page_t because there is no knowledge of their pte
 * 	 mappings, and such pages are also not on any PQ queue.
 *
 */
#define	VPO_UNUSED01	0x01		/* --available-- */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x10		/* do not collect for syncer */

186  * Busy page implementation details.
187  * The algorithm is taken mostly by rwlock(9) and sx(9) locks implementation,
188  * even if the support for owner identity is removed because of size
189  * constraints.  Checks on lock recursion are then not possible, while the
190  * lock assertions effectiveness is someway reduced.
191  */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVER	VPB_BIT_EXCLUSIVE

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)
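
/*
 * Worked example (illustrative only): busy_lock packs the sharer count and
 * the flag bits into a single word.  VPB_UNBUSIED is a "shared" word with
 * zero sharers (0x01), a single exclusive owner is VPB_SINGLE_EXCLUSIVER
 * (0x02), and three shared owners are encoded as VPB_SHARERS_WORD(3) == 0x19,
 * from which VPB_SHARERS() recovers the count 3.  Shared busying therefore
 * amounts to a compare-and-set that bumps the sharer count, roughly:
 *
 *	x = m->busy_lock;
 *	if ((x & VPB_BIT_SHARED) != 0 && (x & VPB_BIT_WAITERS) == 0 &&
 *	    atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
 *		... the page is now shared busied ...
 */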

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	u_int		* const pq_vcnt;
	const char	* const pq_name;
} __aligned(CACHE_LINE_SIZE);


struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	u_int vmd_page_count;
	u_int vmd_free_count;
	long vmd_segs;	/* bitmask of the segments */
	boolean_t vmd_oom;
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_laundry_marker;
	struct vm_page vmd_marker; /* marker for pagedaemon private use */
	struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
};

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#ifdef _KERNEL
extern vm_page_t bogus_page;

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

#ifdef notyet
	vm_pagequeue_assert_locked(pq);
#endif
	pq->pq_cnt += addend;
	atomic_add_int(pq->pq_vcnt, addend);
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
#endif	/* _KERNEL */
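
/*
 * Usage sketch (illustrative only): the page daemon and vm_page.c manipulate
 * these queues while holding the corresponding queue lock (and the page
 * lock), keeping pq_cnt in step with the linked list.  Assuming a page m
 * destined for the inactive queue of domain 0, an enqueue looks roughly
 * like:
 *
 *	struct vm_pagequeue *pq;
 *
 *	pq = &vm_dom[0].vmd_pagequeues[PQ_INACTIVE];
 *	vm_pagequeue_lock(pq);
 *	m->queue = PQ_INACTIVE;
 *	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
 *	vm_pagequeue_cnt_inc(pq);
 *	vm_pagequeue_unlock(pq);
 */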

extern struct mtx_padalign vm_page_queue_free_mtx;
extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa) 			\
	do {		   			\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

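/*
 * Worked example (illustrative only): page locks are not stored in struct
 * vm_page; instead the page's physical address is hashed onto one of
 * PA_LOCK_COUNT padded mutexes (PA_LOCK_COUNT comes from machine/param.h).
 * With the default PDRSHIFT of 21, pa_index() groups physical memory into
 * 2 MB regions, so any two pages within the same 2 MB region share a lock:
 *
 *	pa_index(0x12345000) == pa_index(0x12346000) == 0x91
 *
 * and vm_page_lock() on either page resolves to the same mutex.  This is
 * also why lock-switching helpers such as vm_page_change_lock() take a
 * struct mtx **.
 */
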
#ifdef KLD_MODULE
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 */
#define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x02		/* page has been referenced */
#define	PGA_EXECUTABLE	0x04		/* page may be mapped executable */

/*
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
#define	PG_ZERO		0x0008		/* page is zeroed */
#define	PG_MARKER	0x0010		/* special queue marker page */
#define	PG_NODUMP	0x0080		/* don't include this page in a dump */
#define	PG_UNHOLDFREE	0x0100		/* delayed free of a held page */

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs. The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

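/*
 * Worked example (illustrative only): in the common VM_PHYSSEG_DENSE
 * configuration this lookup is just array indexing, roughly
 *
 *	m = &vm_page_array[atop(pa) - first_page];
 *
 * Because atop() truncates, a non-page-aligned address such as 0x1234 (with
 * 4 KB pages) yields the same vm_page_t as 0x1000, which is why unaligned
 * addresses are handled correctly.
 */
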
/*
 * Page allocation parameters for the vm_page allocation functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags and ignore the others; see the flags legend below.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define the allocation class.
 * Bits 2 - 15 are dedicated to flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (f) - vm_page_alloc_freelist() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
#define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)

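/*
 * Worked example (illustrative only): a request word combines a class, flag
 * bits and an optional look-ahead count.  For instance, allocating a wired,
 * pre-zeroed page for an object while hinting that seven more allocations
 * will follow could look roughly like
 *
 *	m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_ZERO |
 *	    VM_ALLOC_COUNT(7));
 *
 * The allocator recovers the class with (req & VM_ALLOC_CLASS_MASK) and the
 * count with (req >> VM_ALLOC_COUNT_SHIFT).
 */
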
#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT))
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK))
		pflags |= VM_ALLOC_WAITOK;
	return (pflags);
}
#endif
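
/*
 * Usage sketch (illustrative only): kernel-memory allocators translate
 * malloc(9) flags into a vm_page allocation request and then allocate pages
 * that have no backing VM object, roughly
 *
 *	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
 *	m = vm_page_alloc(NULL, 0, pflags);
 *
 * so that M_ZERO, M_NODUMP and the wait mode of the original request are
 * honored by the page allocator.
 */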

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4

void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_flash(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
void vm_page_free_phys_pglist(struct pglist *tq);
bool vm_page_free_prep(vm_page_t m, bool pagequeue_locked);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
void vm_page_remove (vm_page_t);
int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
void vm_page_requeue_locked(vm_page_t m);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_to_free(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
boolean_t vm_page_unwire(vm_page_t m, uint8_t queue);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_maybelocked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT(!vm_page_busied(m),					\
	    ("vm_page_assert_unbusied: page %p busy @ %s:%d",		\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_xbusied(m)					\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_busied(m)						\
	((m)->busy_lock != VPB_UNBUSIED)

#define	vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__,	\
		    (m));						\
} while (0)

#define	vm_page_tryxbusy(m)						\
	(atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,		\
	    VPB_SINGLE_EXCLUSIVER))

#define	vm_page_xbusied(m)						\
	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)

#define	vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__,	\
		    (m));						\
} while (0)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)

#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
#else
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#endif

/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  In order that we can easily use a 32-bit operation, we
 * require that the aflags field be 32-bit aligned.
 */
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_clear: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_set: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}
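
/*
 * Worked example (illustrative only): because aflags sits at the start of a
 * 32-bit aligned word (see the CTASSERT above), the byte-order shift in the
 * functions above positions the bits over that byte.  Setting PGA_REFERENCED
 * (0x02) therefore performs atomic_set_32(addr, 0x00000002) on a
 * little-endian machine and atomic_set_32(addr, 0x02000000) on a big-endian
 * one; in both cases only the aflags byte of the containing word is
 * modified.
 */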

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if defined(KLD_MODULE) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_remque:
 *
 *	If the given page is in a page queue, then remove it from that page
 *	queue.
 *
 *	The page must be locked.
 */
static inline void
vm_page_remque(vm_page_t m)
{

	if (m->queue != PQ_NONE)
		vm_page_dequeue(m);
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}

static inline void
vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;

	mret = vm_page_replace(mnew, object, pindex);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));

	/* Unused if !INVARIANTS. */
	(void)mold;
	(void)mret;
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (m->queue == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (m->queue == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{

	return (m->queue == PQ_LAUNDRY || m->queue == PQ_UNSWAPPABLE);
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */