/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>
#include <vm/_vm_phys.h>
/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using one of, or a combination of, the locks listed
 *	below.  If a field is annotated with two of these locks then holding
 *	either is sufficient for read access but both are required for write
 *	access.  The queue lock for a page depends on the value of its queue
 *	field and is described in detail below.
 *
 *	The following annotations are possible:
 *	(A) the field must be accessed using atomic(9) and may require
 *	    additional synchronization.
 *	(B) the page busy lock.
 *	(C) the field is immutable.
 *	(F) the per-domain lock for the free queues.
 *	(M) Machine dependent, defined by pmap layer.
 *	(O) the object that the page belongs to.
 *	(Q) the page's queue lock.
 *
 *	The busy lock is an embedded reader-writer lock that protects the
 *	page's contents and identity (i.e., its <object, pindex> tuple) as
 *	well as certain valid/dirty modifications.  To avoid bloating the
 *	page structure, the busy lock lacks some of the features available
 *	in the kernel's general-purpose synchronization primitives.  As a
 *	result, busy lock ordering rules are not verified, lock recursion is
 *	not detected, and an attempt to xbusy a busy page or sbusy an xbusy
 *	page will trigger a panic rather than causing the thread to block.
 *	vm_page_sleep_if_busy() can be used to sleep until the page's busy
 *	state changes, after which the caller must re-lookup the page and
 *	re-evaluate its state.  vm_page_busy_acquire() will block until
 *	the lock is acquired.
 *
 *	The valid field is protected by the page busy lock (B) and object
 *	lock (O).  Transitions from invalid to valid are generally done
 *	via I/O or zero filling and do not require the object lock.
 *	These must be protected with the busy lock to prevent page-in or
 *	creation races.  Page invalidation generally happens as a result
 *	of truncate or msync.  When invalidated, pages must not be present
 *	in pmap and the caller must hold the object lock to prevent
 *	concurrent speculative read-only mappings that do not require busy.
 *	I/O routines may check for validity without a lock if they are
 *	prepared to handle invalidation races with higher level locks
 *	(vnode) or are unconcerned with races so long as they hold a
 *	reference to prevent recycling.  When a valid bit is set while
 *	holding a shared busy lock (A), atomic operations are used to
 *	protect against concurrent modification.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is a mix of machine dependent (M) and busy (B).  In
 *	the machine-independent layer, the page busy lock must be held to
 *	operate on the field.  However, the pmap layer is permitted to
 *	set all bits within the field without holding that lock.  If the
 *	underlying architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().  An
 *	exclusive busy lock combined with pmap_remove_{write/all}() is the
 *	only way to ensure a page cannot become dirty.  I/O generally
 *	removes the page from pmap to ensure exclusive access and atomic
 *	writes.
 *
 *	The ref_count field tracks references to the page.  References that
 *	prevent the page from being reclaimable are called wirings and are
 *	counted in the low bits of ref_count.  The containing object's
 *	reference, if one exists, is counted using the VPRC_OBJREF bit in the
 *	ref_count field.  Additionally, the VPRC_BLOCKED bit is used to
 *	atomically check for wirings and prevent new wirings via
 *	pmap_extract_and_hold().  When a page belongs to an object, it may be
 *	wired only when the object is locked, or the page is busy, or by
 *	pmap_extract_and_hold().  As a result, if the object is locked and the
 *	page is not busy (or is exclusively busied by the current thread), and
 *	the page is unmapped, its wire count will not increase.  The ref_count
 *	field is updated using atomic operations in most cases, except when it
 *	is known that no other references to the page exist, such as in the
 *	page allocator.  A page may be present in the page queues, or even
 *	actively scanned by the page daemon, without an explicitly counted
 *	reference.  The page daemon must therefore handle the possibility of a
 *	concurrent free of the page.
 *
 *	The queue state of a page consists of the queue and act_count fields
 *	of its atomically updated state, and the subset of atomic flags
 *	specified by PGA_QUEUE_STATE_MASK.  The queue field contains the
 *	page's page queue index, or PQ_NONE if it does not belong to a page
 *	queue.  To modify the queue field, the page queue lock corresponding
 *	to the old value must be held, unless that value is PQ_NONE, in which
 *	case the queue index must be updated using an atomic RMW operation.
 *	There is one exception to this rule: the page daemon may transition
 *	the queue field from PQ_INACTIVE to PQ_NONE immediately prior to
 *	freeing the page during an inactive queue scan.  At that point the
 *	page is already dequeued and no other references to that vm_page
 *	structure can exist.  The PGA_ENQUEUED flag, when set, indicates that
 *	the page structure is physically inserted into the queue
 *	corresponding to the page's queue index, and may only be set or
 *	cleared with the corresponding page queue lock held.
 *
 *	To avoid contention on page queue locks, page queue operations
 *	(enqueue, dequeue, requeue) are batched using fixed-size per-CPU
 *	queues.  A deferred operation is requested by setting one of the
 *	flags in PGA_QUEUE_OP_MASK and inserting an entry into a batch queue.
 *	When a queue is full, an attempt to insert a new entry will lock the
 *	page queues and trigger processing of the pending entries.  The
 *	type-stability of vm_page structures is crucial to this scheme since
 *	the processing of entries in a given batch queue may be deferred
 *	indefinitely.  In particular, a page may be freed with pending batch
 *	queue entries.  The page queue operation flags must be set using
 *	atomic RMW operations.
 */
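
/*
 * As a minimal illustration of the busy/valid protocol described above
 * (an informal sketch, not part of any interface contract; error handling
 * is elided and "object"/"pindex" are assumed to name a resident or
 * allocatable page), a caller might populate a page as follows:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 *	VM_OBJECT_WUNLOCK(object);
 *	if (vm_page_none_valid(m)) {
 *		... fill the page via I/O or zeroing ...
 *		vm_page_valid(m);
 *	}
 *	vm_page_xunbusy(m);
 *
 * vm_page_grab() returns the page exclusively busied unless VM_ALLOC_NOBUSY
 * or VM_ALLOC_SBUSY is requested, so the transition to valid here is
 * protected by the busy lock as required.
 */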

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif

typedef union vm_page_astate {
	struct {
		uint16_t flags;
		uint8_t queue;
		uint8_t act_count;
	};
	uint32_t _bits;
} vm_page_astate_t;

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q;	/* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
		struct {
			void *slab;
			void *zone;
		} uma;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page (C) */
	struct md_page md;		/* machine dependent stuff */
	u_int ref_count;		/* page references (A) */
	u_int busy_lock;		/* busy owners lock (A) */
	union vm_page_astate a;		/* state accessed atomically (A) */
	uint8_t order;			/* index of the buddy queue (F) */
	uint8_t pool;			/* vm_phys freepool index (F) */
	uint8_t flags;			/* page PG_* flags (P) */
	uint8_t oflags;			/* page VPO_* flags (O) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index (C) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* valid DEV_BSIZE chunk map (O,B) */
	vm_page_bits_t dirty;		/* dirty DEV_BSIZE chunk map (M,B) */
};

/*
 * Special bits used in the ref_count field.
 *
 * ref_count is normally used to count wirings that prevent the page from being
 * reclaimed, but also supports several special types of references that do not
 * prevent reclamation. Accesses to the ref_count field must be atomic unless
 * the page is unallocated.
 *
 * VPRC_OBJREF is the reference held by the containing object. It can be set
 * or cleared only when the corresponding object's write lock is held.
 *
 * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
 * attempting to tear down all mappings of a given page. The page busy lock and
 * object write lock must both be held in order to set or clear this bit.
 */
#define	VPRC_BLOCKED	0x40000000u	/* mappings are being removed */
#define	VPRC_OBJREF	0x80000000u	/* object reference, cleared with (O) */
#define	VPRC_WIRE_COUNT(c)	((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
#define	VPRC_WIRE_COUNT_MAX	(~(VPRC_BLOCKED | VPRC_OBJREF))
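
/*
 * For example (an illustrative sketch only), a raw ref_count value of
 * (VPRC_OBJREF | 2) describes a page that is referenced by its containing
 * object and has two wirings; the helpers above decompose it as follows:
 *
 *	old = atomic_load_int(&m->ref_count);
 *	wirings = VPRC_WIRE_COUNT(old);		(2 in this example)
 *	has_objref = (old & VPRC_OBJREF) != 0;	(true in this example)
 */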

/*
 *	Page flags stored in oflags:
 *
 *	Access to these page flags is synchronized by the lock on the object
 *	containing the page (O).
 *
 *	Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	indicates that the page is not under PV management but
 *	otherwise should be treated as a normal page. Pages not
 *	under PV management cannot be paged out via the
 *	object/vm_page_t because there is no knowledge of their pte
 *	mappings, and such pages are also not on any PQ queue.
 *
 */
#define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, but support for owner identity is removed because of size
 * constraints. Checks on lock recursion are therefore not possible, and the
 * effectiveness of the lock assertions is somewhat reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVE	VPB_BIT_EXCLUSIVE
#ifdef INVARIANTS
#define	VPB_CURTHREAD_EXCLUSIVE						\
	(VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
#else
#define	VPB_CURTHREAD_EXCLUSIVE	VPB_SINGLE_EXCLUSIVE
#endif

#define	VPB_UNBUSIED	VPB_SHARERS_WORD(0)

/* Freed lock blocks both shared and exclusive. */
#define	VPB_FREED	(0xffffffff - VPB_BIT_SHARED)
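
/*
 * For illustration (a sketch of the encoding, not additional API), a few
 * busy_lock values under this scheme:
 *
 *	VPB_SHARERS_WORD(0)	== VPB_UNBUSIED		(no owners)
 *	VPB_SHARERS_WORD(1)	one shared owner
 *	VPB_SHARERS_WORD(1) + VPB_ONE_SHARER	two shared owners
 *	VPB_CURTHREAD_EXCLUSIVE	exclusively busied; the owner is recorded
 *				only when INVARIANTS is enabled
 *
 * and VPB_SHARERS(VPB_SHARERS_WORD(n)) recovers n.
 */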

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

#ifdef _KERNEL
extern vm_page_t bogus_page;
#endif	/* _KERNEL */

extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define	PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
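
/*
 * A usage sketch for the physical-address lock macros above (illustrative
 * only; "m" is assumed to be a valid vm_page_t):
 *
 *	vm_paddr_t locked_pa;
 *
 *	locked_pa = VM_PAGE_TO_PHYS(m);
 *	PA_LOCK(locked_pa);
 *	... operate on state protected by the pa lock ...
 *	PA_UNLOCK_COND(locked_pa);	(unlocks and resets locked_pa to 0)
 */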

#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations. To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used. Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked. It is set by
 * both the MI and MD VM layers. However, kernel loadable modules should not
 * directly set this flag. They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied. The MI VM layer must never access this flag
 * directly. Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping. It is not consumed by the MI VM layer.
 *
 * PGA_NOSYNC must be set and cleared with the page busy lock held.
 *
 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
 * from a page queue, respectively. It determines whether the plinks.q field
 * of the page is valid. To set or clear this flag, the page's "queue" field
 * must be a valid queue index, and the corresponding page queue lock must be
 * held.
 *
 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
 * queue, and cleared when the dequeue request is processed. A page may
 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
 * is requested after the page is scheduled to be enqueued but before it is
 * actually inserted into the page queue.
 *
 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
 * in its page queue.
 *
 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
 * the inactive queue, thus bypassing LRU.
 *
 * The PGA_DEQUEUE, PGA_REQUEUE and PGA_REQUEUE_HEAD flags must be set using
 * an atomic RMW operation to ensure that the "queue" field is a valid queue
 * index, and the corresponding page queue lock must be held when clearing
 * any of the flags.
 *
 * PGA_SWAP_FREE is used to defer freeing swap space to the pageout daemon
 * when the context that dirties the page does not have the object write lock
 * held.
 */
#define	PGA_WRITEABLE	0x0001		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x0002		/* page has been referenced */
#define	PGA_EXECUTABLE	0x0004		/* page may be mapped executable */
#define	PGA_ENQUEUED	0x0008		/* page is enqueued in a page queue */
#define	PGA_DEQUEUE	0x0010		/* page is due to be dequeued */
#define	PGA_REQUEUE	0x0020		/* page is due to be requeued */
#define	PGA_REQUEUE_HEAD 0x0040		/* page requeue should bypass LRU */
#define	PGA_NOSYNC	0x0080		/* do not collect for syncer */
#define	PGA_SWAP_FREE	0x0100		/* page with swap space was dirtied */
#define	PGA_SWAP_SPACE	0x0200		/* page has allocated swap space */

#define	PGA_QUEUE_OP_MASK	(PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_QUEUE_OP_MASK)
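
/*
 * For example (an illustrative sketch, assuming the locking that
 * vm_page_dirty() requires), machine-independent code records a use of a
 * page and tests for writeable mappings through the documented entry points
 * rather than by touching the flags directly:
 *
 *	vm_page_reference(m);			(sets PGA_REFERENCED)
 *	if (pmap_page_is_write_mapped(m))	(tests PGA_WRITEABLE)
 *		vm_page_dirty(m);
 */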

/*
 * Page flags. Updates to these flags are not synchronized, and thus they must
 * be set during page allocation or free to avoid races.
 *
 * The PG_PCPU_CACHE flag is set at allocation time if the page was
 * allocated from a per-CPU cache. It is cleared the next time that the
 * page is allocated from the physical memory allocator.
 */
#define	PG_PCPU_CACHE	0x01		/* was allocated from per-CPU caches */
#define	PG_FICTITIOUS	0x02		/* physical page doesn't exist */
#define	PG_ZERO		0x04		/* page is zeroed */
#define	PG_MARKER	0x08		/* special queue marker page */
#define	PG_NODUMP	0x10		/* don't include this page in a dump */
#define	PG_NOFREE	0x20		/* page should never be freed. */

/*
 * Misc constants.
 */
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3
#define	ACT_INIT		5
#define	ACT_MAX			64

#ifdef _KERNEL

#include <sys/kassert.h>
#include <machine/atomic.h>
struct pctrie_iter;

/*
 *	Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs. The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist(). Some functions support only a subset
 * of the flags and ignore the others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions. See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 dedicated for flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (n) - vm_page_alloc_noobj() and vm_page_alloc_freelist() support the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acn) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acn) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acgnp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acgnp) Allocate a zeroed page */
#define	VM_ALLOC_NORECLAIM	0x0080	/* (c) Do not reclaim after failure */
#define	VM_ALLOC_NOFREE		0x0100	/* (an) Page will never be released */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_NOCREAT	0x0400	/* (gp) Don't create a page */
#define	VM_ALLOC_AVAIL1		0x0800
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acgnp) Do not sleep */
#define	VM_ALLOC_COUNT_MAX	0xffff
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT_MASK	(VM_ALLOC_COUNT(VM_ALLOC_COUNT_MAX))
#define	VM_ALLOC_COUNT(count)	({				\
	KASSERT((count) <= VM_ALLOC_COUNT_MAX,			\
	    ("%s: invalid VM_ALLOC_COUNT value", __func__));	\
	(count) << VM_ALLOC_COUNT_SHIFT;			\
})
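
/*
 * A minimal allocation sketch using the request flags above (illustrative
 * only; failure handling is elided and the caller is assumed to hold the
 * object's write lock, as vm_page_alloc() requires):
 *
 *	m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m != NULL && (m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 *
 * Note that for vm_page_alloc() VM_ALLOC_ZERO only expresses a preference;
 * the caller must still check PG_ZERO and zero the page itself when the
 * flag is not set.
 */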

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT))
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK))
		pflags |= VM_ALLOC_WAITOK;
	if ((malloc_flags & M_NORECLAIM))
		pflags |= VM_ALLOC_NORECLAIM;
	if ((malloc_flags & M_NEVERFREED))
		pflags |= VM_ALLOC_NOFREE;
	return (pflags);
}
#endif
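
/*
 * For instance (a hypothetical caller, sketched for illustration), an
 * allocator invoked with (M_NOWAIT | M_ZERO) could translate those flags
 * for the page allocator as follows:
 *
 *	pflags = malloc2vm_flags(M_NOWAIT | M_ZERO) | VM_ALLOC_WIRED;
 *	m = vm_page_alloc_noobj(pflags);
 *	if (m == NULL)
 *		return (NULL);
 */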

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4
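
/*
 * For example (a sketch only; "m" is assumed to be the first base page of a
 * superpage-aligned run within a locked object), promotion code might
 * require that every base page is valid and none is busy:
 *
 *	if (vm_page_ps_test(m, 1, PS_ALL_VALID | PS_NONE_BUSY, NULL))
 *		... the run is eligible for a superpage mapping ...
 */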

bool vm_page_busy_acquire(vm_page_t m, int allocflags);
void vm_page_busy_downgrade(vm_page_t m);
int vm_page_busy_tryupgrade(vm_page_t m);
bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags);
void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_mpred(vm_object_t, vm_pindex_t);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_noobj(int);
vm_page_t vm_page_alloc_noobj_domain(int, int);
vm_page_t vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
    int allocflags, vm_page_t *ma, int count);
int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
    int allocflags);
int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
    vm_pindex_t pindex, int allocflags);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
vm_page_t vm_page_iter_lookup_ge(struct pctrie_iter *, vm_pindex_t);
void vm_page_free_invalid(vm_page_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_invalid(vm_page_t m);
void vm_page_iter_free(struct pctrie_iter *pages, vm_page_t m);
void vm_page_iter_init(struct pctrie_iter *, vm_object_t);
void vm_page_iter_limit_init(struct pctrie_iter *, vm_object_t, vm_pindex_t);
vm_page_t vm_page_iter_lookup(struct pctrie_iter *, vm_pindex_t);
bool vm_page_iter_remove(struct pctrie_iter *pages);
bool vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
    vm_object_t new_object, vm_pindex_t new_pindex);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
void vm_page_pqbatch_drain(void);
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
    vm_page_astate_t new);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
int vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
int vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
int vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    int desired_runs);
void vm_page_reference(vm_page_t m);
#define	VPR_TRYFREE	0x01
#define	VPR_NOREUSE	0x02
void vm_page_release(vm_page_t m, int flags);
void vm_page_release_locked(vm_page_t m, int flags);
vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
bool vm_page_remove(vm_page_t);
bool vm_page_remove_xbusy(vm_page_t);
void vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mold);
int vm_page_sbusied(vm_page_t m);
vm_page_bits_t vm_page_set_dirty(vm_page_t m);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_remove_all(vm_page_t m);
bool vm_page_try_remove_write(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
int vm_page_tryxbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
void vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
bool vm_page_wire_mapped(vm_page_t m);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_hard_unchecked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
void vm_page_valid(vm_page_t m);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
int vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif
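
/*
 * As an illustrative sketch of the wiring interfaces declared above (error
 * handling elided; the object lock prevents the page from being removed
 * while the wiring is acquired):
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL)
 *		vm_page_wire(m);
 *	VM_OBJECT_WUNLOCK(object);
 *	... the page is not reclaimable while wired ...
 *	if (m != NULL)
 *		vm_page_unwire(m, PQ_ACTIVE);
 */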

#define	vm_page_busy_fetch(m)	atomic_load_int(&(m)->busy_lock)

#define	vm_page_assert_busied(m)					\
	KASSERT(vm_page_busied(m),					\
	    ("vm_page_assert_busied: page %p not busy @ %s:%d",	\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) !=		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_unbusied: page %p busy_lock %#x owned"	\
	    " by me (%p) @ %s:%d",					\
	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__))

#define	vm_page_assert_xbusied_unchecked(m) do {			\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__));					\
} while (0)
#define	vm_page_assert_xbusied(m) do {					\
	vm_page_assert_xbusied_unchecked(m);				\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) ==		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"	\
	    " by me (%p) @ %s:%d",					\
	    (m), (m)->busy_lock, curthread, __FILE__, __LINE__));	\
} while (0)

#define	vm_page_busied(m)						\
	(vm_page_busy_fetch(m) != VPB_UNBUSIED)

#define	vm_page_xbusied(m)						\
	((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)

#define	vm_page_busy_freed(m)						\
	(vm_page_busy_fetch(m) == VPB_FREED)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
#define	vm_page_xunbusy_unchecked(m) do {				\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard_unchecked(m);			\
} while (0)
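
/*
 * A hedged example of the busying interfaces above (assuming the caller
 * holds a reference, such as a wiring, that keeps the page allocated):
 * exclusively busy a page without the object lock, then release it.
 * vm_page_busy_acquire() sleeps as needed unless VM_ALLOC_NOWAIT is
 * specified, in which case it may fail:
 *
 *	if (vm_page_busy_acquire(m, VM_ALLOC_NOWAIT)) {
 *		vm_page_assert_xbusied(m);
 *		... the page's identity and contents are now stable ...
 *		vm_page_xunbusy(m);
 *	}
 */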

#ifdef INVARIANTS
void vm_page_object_busy_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	vm_page_object_busy_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
/*
 * Claim ownership of a page's xbusy state. In non-INVARIANTS kernels this
 * operation is a no-op since ownership is not tracked. In particular
 * this macro does not provide any synchronization with the previous owner.
 */
#define	vm_page_xbusy_claim(m) do {					\
	u_int _busy_lock;						\
									\
	vm_page_assert_xbusied_unchecked((m));				\
	do {								\
		_busy_lock = vm_page_busy_fetch(m);			\
	} while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock,	\
	    (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
} while (0)
#else
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#define	vm_page_xbusy_claim(m)
#endif

#if BYTE_ORDER == BIG_ENDIAN
#define	VM_PAGE_AFLAG_SHIFT	16
#else
#define	VM_PAGE_AFLAG_SHIFT	0
#endif

/*
 *	Load a snapshot of a page's 32-bit atomic state.
 */
static inline vm_page_astate_t
vm_page_astate_load(vm_page_t m)
{
	vm_page_astate_t a;

	a._bits = atomic_load_32(&m->a._bits);
	return (a);
}

/*
 *	Atomically compare and set a page's atomic state.
 */
static inline bool
vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
{

	KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
	    ("%s: invalid head requeue request for page %p", __func__, m));
	KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
	    ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
	KASSERT(new._bits != old->_bits,
	    ("%s: bits are unchanged", __func__));

	return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
}
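
/*
 * A typical update loop built on the two helpers above (an illustrative
 * sketch of the pattern; real consumers live elsewhere in the VM code),
 * bumping act_count while preserving the rest of the atomic state:
 *
 *	vm_page_astate_t old, new;
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		if (old.act_count == ACT_MAX)
 *			break;
 *		new = old;
 *		new.act_count++;
 *	} while (!vm_page_astate_fcmpset(m, &old, new));
 *
 * On failure vm_page_astate_fcmpset() reloads *old, so the loop always
 * operates on a fresh snapshot.
 */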

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update. Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update. Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_set_32(addr, val);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty. Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_BUSY_ASSERT(m);
	m->dirty = 0;
}

static inline uint8_t
_vm_page_queue(vm_page_astate_t as)
{

	if ((as.flags & PGA_DEQUEUE) != 0)
		return (PQ_NONE);
	return (as.queue);
}

/*
 *	vm_page_queue:
 *
 *	Return the index of the queue containing m.
 */
static inline uint8_t
vm_page_queue(vm_page_t m)
{

	return (_vm_page_queue(vm_page_astate_load(m)));
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{
	uint8_t queue;

	queue = vm_page_queue(m);
	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
}

static inline void
vm_page_clearref(vm_page_t m)
{
	u_int r;

	r = m->ref_count;
	while (atomic_fcmpset_int(&m->ref_count, &r, r & (VPRC_BLOCKED |
	    VPRC_OBJREF)) == 0)
		;
}

/*
 *	vm_page_drop:
 *
 *	Release a reference to a page and return the old reference count.
 */
static inline u_int
vm_page_drop(vm_page_t m, u_int val)
{
	u_int old;

	/*
	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
	 * page structure are visible before it is freed.
	 */
	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(&m->ref_count, -val);
	KASSERT(old != VPRC_BLOCKED,
	    ("vm_page_drop: page %p has an invalid refcount value", m));
	return (old);
}

/*
 *	vm_page_wired:
 *
 *	Perform a racy check to determine whether a reference prevents the page
 *	from being reclaimable. If the page's object is locked, and the page is
 *	unmapped and exclusively busied by the current thread, no new wirings
 *	may be created.
 */
static inline bool
vm_page_wired(vm_page_t m)
{

	return (VPRC_WIRE_COUNT(m->ref_count) > 0);
}

static inline bool
vm_page_all_valid(vm_page_t m)
{

	return (m->valid == VM_PAGE_BITS_ALL);
}

static inline bool
vm_page_any_valid(vm_page_t m)
{

	return (m->valid != 0);
}

static inline bool
vm_page_none_valid(vm_page_t m)
{

	return (m->valid == 0);
}

static inline int
vm_page_domain(vm_page_t m __numa_used)
{
#ifdef NUMA
	int domn, segind;

	segind = m->segind;
	KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
	domn = vm_phys_segs[segind].domain;
	KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
	return (domn);
#else
	return (0);
#endif
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */