/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of or a combination of locks.  If a
 *	field is annotated with two of these locks then holding either is
 *	sufficient for read access but both are required for write access.
 *	The physical address of a page is used to select its page lock from
 *	a pool.  The queue lock for a page depends on the value of its queue
 *	field and is described in detail below.
 *
 *	The following annotations are possible:
 *		(A) the field is atomic and may require additional synchronization.
 *		(B) the page busy lock.
 *		(C) the field is immutable.
 *		(F) the per-domain lock for the free queues.
 *		(M) machine dependent, defined by the pmap layer.
 *		(O) the object that the page belongs to.
 *		(P) the page lock.
 *		(Q) the page's queue lock.
 *
 *	The busy lock is an embedded reader-writer lock that protects the
 *	page's contents and identity (i.e., its <object, pindex> tuple) as
 *	well as certain valid/dirty modifications.  To avoid bloating the
 *	page structure, the busy lock lacks some of the features available
 *	in the kernel's general-purpose synchronization primitives.  As a
 *	result, busy lock ordering rules are not verified, lock recursion is
 *	not detected, and an attempt to xbusy a busy page or sbusy an xbusy
 *	page will trigger a panic rather than causing the thread to block.
 *	vm_page_sleep_if_busy() can be used to sleep until the page's busy
 *	state changes, after which the caller must re-lookup the page and
 *	re-evaluate its state.  vm_page_busy_acquire() will block until
 *	the lock is acquired.
 *
 *	The valid field is protected by the page busy lock (B) and object
 *	lock (O).  Transitions from invalid to valid are generally done
 *	via I/O or zero filling and do not require the object lock.
 *	These must be protected with the busy lock to prevent page-in or
 *	creation races.  Page invalidation generally happens as a result
 *	of truncate or msync.  When invalidated, pages must not be present
 *	in pmap and must hold the object lock to prevent concurrent
 *	speculative read-only mappings that do not require busy.  I/O
 *	routines may check for validity without a lock if they are prepared
 *	to handle invalidation races with higher level locks (vnode) or are
 *	unconcerned with races so long as they hold a reference to prevent
 *	recycling.  When a valid bit is set while holding a shared busy
 *	lock (A), atomic operations are used to protect against concurrent
 *	modification.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is a mix of machine dependent (M) and busy (B).  In
 *	the machine-independent layer, the page busy lock must be held to
 *	operate on the field.  However, the pmap layer is permitted to
 *	set all bits within the field without holding that lock.  If the
 *	underlying architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.
 *	In the machine-independent layer, the implementation of
 *	read-modify-write operations on the field is encapsulated in
 *	vm_page_clear_dirty_mask().  An exclusive busy lock combined with
 *	pmap_remove_{write/all}() is the only way to ensure a page can not
 *	become dirty.  I/O generally removes the page from pmap to ensure
 *	exclusive access and atomic writes.
 *
 *	The ref_count field tracks references to the page.  References that
 *	prevent the page from being reclaimable are called wirings and are
 *	counted in the low bits of ref_count.  The containing object's
 *	reference, if one exists, is counted using the VPRC_OBJREF bit in the
 *	ref_count field.  Additionally, the VPRC_BLOCKED bit is used to
 *	atomically check for wirings and prevent new wirings via
 *	pmap_extract_and_hold().  When a page belongs to an object, it may be
 *	wired only when the object is locked, or the page is busy, or by
 *	pmap_extract_and_hold().  As a result, if the object is locked and the
 *	page is not busy (or is exclusively busied by the current thread), and
 *	the page is unmapped, its wire count will not increase.  The ref_count
 *	field is updated using atomic operations in most cases, except when it
 *	is known that no other references to the page exist, such as in the
 *	page allocator.  A page may be present in the page queues, or even
 *	actively scanned by the page daemon, without an explicitly counted
 *	reference.  The page daemon must therefore handle the possibility of a
 *	concurrent free of the page.
 *
 *	The queue field is the index of the page queue containing the page,
 *	or PQ_NONE if the page is not enqueued.  The queue lock of a page is
 *	the page queue lock corresponding to the page queue index, or the
 *	page lock (P) for the page if it is not enqueued.  To modify the
 *	queue field, the queue lock for the old value of the field must be
 *	held.  There is one exception to this rule: the page daemon may
 *	transition the queue field from PQ_INACTIVE to PQ_NONE immediately
 *	prior to freeing a page during an inactive queue scan.  At that
 *	point the page has already been physically dequeued and no other
 *	references to that vm_page structure exist.
 *
 *	To avoid contention on page queue locks, page queue operations
 *	(enqueue, dequeue, requeue) are batched using per-CPU queues.  A
 *	deferred operation is requested by inserting an entry into a batch
 *	queue; the entry is simply a pointer to the page, and the request
 *	type is encoded in the page's aflags field using the values in
 *	PGA_QUEUE_STATE_MASK.  The type-stability of struct vm_page is
 *	crucial to this scheme since the processing of entries in a given
 *	batch queue may be deferred indefinitely.  In particular, a page may
 *	be freed before its pending batch queue entries have been processed.
 *	The page lock (P) must be held to schedule a batched queue
 *	operation, and the page queue lock must be held in order to process
 *	batch queue entries for the page queue.  There is one exception to
 *	this rule: the thread freeing a page may schedule a dequeue without
 *	holding the page lock.  In this scenario the only other thread which
 *	may hold a reference to the page is the page daemon, which is
 *	careful to avoid modifying the page's queue state once the dequeue
 *	has been requested by setting PGA_DEQUEUE.
 */
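
/*
 * An illustrative sketch of the re-lookup pattern described above; this is
 * not a definitive recipe.  It assumes the caller holds the object's write
 * lock (VM_OBJECT_WLOCK(), from vm/vm_object.h), that "pindex" identifies
 * the page of interest, and that "vmpbsy" is an arbitrary wait message:
 *
 *	vm_page_t m;
 *
 * retry:
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_sleep_if_busy(m, "vmpbsy"))
 *		goto retry;
 *
 * vm_page_sleep_if_busy() returns non-zero only if it slept, in which case
 * the object lock was dropped and reacquired and the lookup must be redone
 * because the page's identity may have changed.  Alternatively,
 * vm_page_busy_acquire() simply blocks until the busy lock is acquired.
 */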

#if PAGE_SIZE == 4096
#define	VM_PAGE_BITS_ALL	0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define	VM_PAGE_BITS_ALL	0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define	VM_PAGE_BITS_ALL	0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define	VM_PAGE_BITS_ALL	0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
		struct {
			void *slab;
			void *zone;
		} uma;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page (C) */
	struct md_page md;		/* machine dependent stuff */
	u_int ref_count;		/* page references (A) */
	volatile u_int busy_lock;	/* busy owners lock */
	uint16_t aflags;		/* atomic flags (A) */
	uint8_t queue;			/* page queue index (Q) */
	uint8_t act_count;		/* page usage count (P) */
	uint8_t order;			/* index of the buddy queue (F) */
	uint8_t pool;			/* vm_phys freepool index (F) */
	uint8_t flags;			/* page PG_* flags (P) */
	uint8_t oflags;			/* page VPO_* flags (O) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index (C) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* valid DEV_BSIZE chunk map (O,B) */
	vm_page_bits_t dirty;		/* dirty DEV_BSIZE chunk map (M,B) */
};

/*
 * Special bits used in the ref_count field.
 *
 * ref_count is normally used to count wirings that prevent the page from being
 * reclaimed, but also supports several special types of references that do not
 * prevent reclamation.  Accesses to the ref_count field must be atomic unless
 * the page is unallocated.
 *
 * VPRC_OBJREF is the reference held by the containing object.  It can be set
 * or cleared only when the corresponding object's write lock is held.
 *
 * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
 * attempting to tear down all mappings of a given page.  The page lock and
 * object write lock must both be held in order to set or clear this bit.
 */
#define	VPRC_BLOCKED	0x40000000u	/* mappings are being removed */
#define	VPRC_OBJREF	0x80000000u	/* object reference, cleared with (O) */
#define	VPRC_WIRE_COUNT(c)	((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
#define	VPRC_WIRE_COUNT_MAX	(~(VPRC_BLOCKED | VPRC_OBJREF))
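
/*
 * A worked reading of the ref_count encoding above (illustrative only; any
 * such test is racy unless the caller holds the synchronization described
 * for ref_count):
 *
 *	VPRC_WIRE_COUNT(m->ref_count) > 0	the page has one or more wirings
 *	(m->ref_count & VPRC_OBJREF) != 0	the containing object holds its
 *						reference
 *	m->ref_count == VPRC_OBJREF		only the object references the page
 *
 * vm_page_wired(), defined later in this file, performs exactly the first,
 * racy wire-count check.
 */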

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	 indicates that the page is not under PV management but
 *	 otherwise should be treated as a normal page.  Pages not
 *	 under PV management cannot be paged out via the
 *	 object/vm_page_t because there is no knowledge of their pte
 *	 mappings, and such pages are also not on any PQ queue.
 */
#define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, although support for owner identity is removed because
 * of size constraints.  Checks on lock recursion are therefore not
 * possible, and the effectiveness of lock assertions is somewhat reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVE	VPB_BIT_EXCLUSIVE
#ifdef INVARIANTS
#define	VPB_CURTHREAD_EXCLUSIVE						\
	(VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
#else
#define	VPB_CURTHREAD_EXCLUSIVE	VPB_SINGLE_EXCLUSIVE
#endif

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

#ifdef _KERNEL
extern vm_page_t bogus_page;
#endif	/* _KERNEL */

extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define	PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif
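
/*
 * Illustrative sketch: fields annotated (P), such as act_count, are protected
 * by the page lock selected by the page's physical address.  Assuming "m" is
 * a managed page (ACT_INIT is defined under "Misc constants" below):
 *
 *	vm_page_lock(m);
 *	m->act_count = ACT_INIT;
 *	vm_page_unlock(m);
 *
 * The PA_LOCK()/PA_UNLOCK() macros above take the same lock keyed directly by
 * a physical address, for callers that do not have the vm_page_t at hand.
 */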

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 *
 * PGA_NOSYNC must be set and cleared with the page busy lock held.
 *
 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
 * from a page queue, respectively.  It determines whether the plinks.q field
 * of the page is valid.  To set or clear this flag, the queue lock for the
 * page must be held: the page queue lock corresponding to the page's "queue"
 * field if its value is not PQ_NONE, and the page lock otherwise.
 *
 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
 * queue, and cleared when the dequeue request is processed.  A page may
 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
 * is requested after the page is scheduled to be enqueued but before it is
 * actually inserted into the page queue.  For allocated pages, the page lock
 * must be held to set this flag, but it may be set by vm_page_free_prep()
 * without the page lock held.  The page queue lock must be held to clear the
 * PGA_DEQUEUE flag.
 *
 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
 * in its page queue.  The page lock must be held to set this flag, and the
 * queue lock for the page must be held to clear it.
 *
 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
 * the inactive queue, thus bypassing LRU.  The page lock must be held to
 * set this flag, and the queue lock for the page must be held to clear it.
 */
#define	PGA_WRITEABLE	0x0001		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x0002		/* page has been referenced */
#define	PGA_EXECUTABLE	0x0004		/* page may be mapped executable */
#define	PGA_ENQUEUED	0x0008		/* page is enqueued in a page queue */
#define	PGA_DEQUEUE	0x0010		/* page is due to be dequeued */
#define	PGA_REQUEUE	0x0020		/* page is due to be requeued */
#define	PGA_REQUEUE_HEAD 0x0040		/* page requeue should bypass LRU */
#define	PGA_NOSYNC	0x0080		/* do not collect for syncer */

#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_DEQUEUE | PGA_REQUEUE | \
				PGA_REQUEUE_HEAD)
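
/*
 * Because PGA_WRITEABLE is owned by the pmap layer, MI code that needs to
 * know whether a managed page may have a writeable mapping should go through
 * the pmap interface instead of testing the flag directly.  An illustrative
 * sketch of the common "might be dirty" check:
 *
 *	if (pmap_page_is_write_mapped(m))
 *		vm_page_dirty(m);
 *
 * pmap_page_is_write_mapped() is provided by vm/pmap.h, included above.
 */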

/*
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 *
 * The PG_PCPU_CACHE flag is set at allocation time if the page was
 * allocated from a per-CPU cache.  It is cleared the next time that the
 * page is allocated from the physical memory allocator.
 */
#define	PG_PCPU_CACHE	0x01		/* was allocated from per-CPU caches */
#define	PG_FICTITIOUS	0x02		/* physical page doesn't exist */
#define	PG_ZERO		0x04		/* page is zeroed */
#define	PG_MARKER	0x08		/* special queue marker page */
#define	PG_NODUMP	0x10		/* don't include this page in a dump */

/*
 * Misc constants.
 */
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3
#define	ACT_INIT		5
#define	ACT_MAX			64

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs.  The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags, and ignore others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 dedicated for flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (f) - vm_page_alloc_freelist() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
#define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_NOCREAT	0x0400	/* (gp) Don't create a page */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
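
/*
 * An illustrative composition of these flags (a sketch, not a prescription):
 * request a wired, preferably pre-zeroed page with no backing object, failing
 * rather than sleeping under memory pressure:
 *
 *	m = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT);
 *	if (m != NULL && (m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 *
 * For vm_page_alloc(), VM_ALLOC_ZERO only expresses a preference; PG_ZERO in
 * m->flags reports whether the returned page is actually zeroed.
 */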

#ifdef M_NOWAIT
/*
 * Translate malloc(9) flags (M_*) into vm_page allocation flags (VM_ALLOC_*).
 */
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT))
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK))
		pflags |= VM_ALLOC_WAITOK;
	return (pflags);
}
#endif
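
/*
 * A usage sketch (illustrative, not the only pattern): callers that already
 * hold malloc(9) flags can translate them rather than spelling out VM_ALLOC_*
 * values, much as the kernel memory allocators do:
 *
 *	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 *	m = vm_page_alloc(object, pindex, pflags);
 */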

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4

int vm_page_busy_acquire(vm_page_t m, int allocflags);
void vm_page_busy_downgrade(vm_page_t m);
int vm_page_busy_tryupgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_alloc_freelist_domain(int, int, int);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
    int allocflags);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
bool vm_page_free_prep(vm_page_t m);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_invalid(vm_page_t m);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
void vm_page_pqbatch_drain(void);
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
#define	VPR_TRYFREE	0x01
#define	VPR_NOREUSE	0x02
void vm_page_release(vm_page_t m, int flags);
void vm_page_release_locked(vm_page_t m, int flags);
bool vm_page_remove(vm_page_t);
int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
int vm_page_sleep_if_xbusy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
void vm_page_swapqueue(vm_page_t m, uint8_t oldq, uint8_t newq);
bool vm_page_try_remove_all(vm_page_t m);
bool vm_page_try_remove_write(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
int vm_page_tryxbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
void vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
bool vm_page_wire_mapped(vm_page_t m);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_hard_unchecked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
void vm_page_valid(vm_page_t m);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_assert_busied(m)					\
	KASSERT(vm_page_busied(m),					\
	    ("vm_page_assert_busied: page %p not busy @ %s:%d",	\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT(!vm_page_busied(m),					\
	    ("vm_page_assert_unbusied: page %p busy @ %s:%d",		\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_xbusied_unchecked(m) do {			\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__));					\
} while (0)
#define	vm_page_assert_xbusied(m) do {					\
	vm_page_assert_xbusied_unchecked(m);				\
	KASSERT((m->busy_lock & ~VPB_BIT_WAITERS) ==			\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"	\
	    " by me @ %s:%d",						\
	    (m), (m)->busy_lock, __FILE__, __LINE__));			\
} while (0)

#define	vm_page_busied(m)						\
	((m)->busy_lock != VPB_UNBUSIED)

#define	vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__,	\
		    (m));						\
} while (0)

#define	vm_page_xbusied(m)						\
	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVE) != 0)

#define	vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__,\
		    (m));						\
} while (0)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
#define	vm_page_xunbusy_unchecked(m) do {				\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard_unchecked(m);			\
} while (0)

#ifdef INVARIANTS
void vm_page_object_busy_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	vm_page_object_busy_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
#else
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#endif

/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  In order that we can easily use a 32-bit operation, we
 * require that the aflags field be 32-bit aligned.
 */
_Static_assert(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0,
    "aflags field is not 32-bit aligned");

/*
 * We want to be able to update the aflags and queue fields atomically in
 * the same operation.
 */
_Static_assert(offsetof(struct vm_page, aflags) / sizeof(uint32_t) ==
    offsetof(struct vm_page, queue) / sizeof(uint32_t),
    "aflags and queue fields do not belong to the same 32-bit word");
_Static_assert(offsetof(struct vm_page, queue) % sizeof(uint32_t) == 2,
    "queue field is at an unexpected offset");
_Static_assert(sizeof(((struct vm_page *)NULL)->queue) == 1,
    "queue field has an unexpected size");

#if BYTE_ORDER == LITTLE_ENDIAN
#define	VM_PAGE_AFLAG_SHIFT	0
#define	VM_PAGE_QUEUE_SHIFT	16
#else
#define	VM_PAGE_AFLAG_SHIFT	16
#define	VM_PAGE_QUEUE_SHIFT	8
#endif
#define	VM_PAGE_QUEUE_MASK	(0xff << VM_PAGE_QUEUE_SHIFT)

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_set_32(addr, val);
}

/*
 * Atomically update the queue state of the page.  The operation fails if
 * any of the queue flags in "fflags" are set or if the "queue" field of
 * the page does not match the expected value; if the operation is
 * successful, the flags in "nflags" are set and all other queue state
 * flags are cleared.
 */
static inline bool
vm_page_pqstate_cmpset(vm_page_t m, uint32_t oldq, uint32_t newq,
    uint32_t fflags, uint32_t nflags)
{
	uint32_t *addr, nval, oval, qsmask;

	fflags <<= VM_PAGE_AFLAG_SHIFT;
	nflags <<= VM_PAGE_AFLAG_SHIFT;
	newq <<= VM_PAGE_QUEUE_SHIFT;
	oldq <<= VM_PAGE_QUEUE_SHIFT;
	qsmask = ((PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD) <<
	    VM_PAGE_AFLAG_SHIFT) | VM_PAGE_QUEUE_MASK;

	addr = (void *)&m->aflags;
	oval = atomic_load_32(addr);
	do {
		if ((oval & fflags) != 0)
			return (false);
		if ((oval & VM_PAGE_QUEUE_MASK) != oldq)
			return (false);
		nval = (oval & ~qsmask) | nflags | newq;
	} while (!atomic_fcmpset_32(addr, &oval, nval));

	return (true);
}
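
/*
 * A hedged sketch of a queue-state transition composed from the contract
 * above: move a page's queue index from PQ_INACTIVE to PQ_LAUNDRY provided no
 * dequeue is pending, while requesting a deferred requeue.  The real callers
 * live in vm_page.c (vm_page_swapqueue(), declared above, performs this kind
 * of transition); the snippet is illustrative only:
 *
 *	if (vm_page_pqstate_cmpset(m, PQ_INACTIVE, PQ_LAUNDRY,
 *	    PGA_DEQUEUE, PGA_REQUEUE))
 *		vm_page_pqbatch_submit(m, PQ_LAUNDRY);
 *
 * On success the queue field reads PQ_LAUNDRY and PGA_REQUEUE is set; the
 * batched queue entry later performs the physical move between queues.
 */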

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_BUSY_ASSERT(m);
	m->dirty = 0;
}

static inline void
vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;

	mret = vm_page_replace(mnew, object, pindex);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));

	/* Unused if !INVARIANTS. */
	(void)mold;
	(void)mret;
}

/*
 *	vm_page_queue:
 *
 *	Return the index of the queue containing m.  This index is guaranteed
 *	not to change while the page lock is held.
 */
static inline uint8_t
vm_page_queue(vm_page_t m)
{

	vm_page_assert_locked(m);

	if ((m->aflags & PGA_DEQUEUE) != 0)
		return (PQ_NONE);
	atomic_thread_fence_acq();
	return (m->queue);
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{
	uint8_t queue;

	queue = vm_page_queue(m);
	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
}

/*
 *	vm_page_drop:
 *
 *	Release a reference to a page and return the old reference count.
 */
static inline u_int
vm_page_drop(vm_page_t m, u_int val)
{
	u_int old;

	/*
	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
	 * page structure are visible before it is freed.
	 */
	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(&m->ref_count, -val);
	KASSERT(old != VPRC_BLOCKED,
	    ("vm_page_drop: page %p has an invalid refcount value", m));
	return (old);
}

/*
 *	vm_page_wired:
 *
 *	Perform a racy check to determine whether a reference prevents the page
 *	from being reclaimable.  If the page's object is locked, and the page is
 *	unmapped and unbusied or exclusively busied by the current thread, no
 *	new wirings may be created.
 */
static inline bool
vm_page_wired(vm_page_t m)
{

	return (VPRC_WIRE_COUNT(m->ref_count) > 0);
}

static inline bool
vm_page_all_valid(vm_page_t m)
{

	return (m->valid == VM_PAGE_BITS_ALL);
}

static inline bool
vm_page_none_valid(vm_page_t m)
{

	return (m->valid == 0);
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */