/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>
#include <vm/_vm_phys.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of or a combination of locks.  If a
 *	field is annotated with two of these locks then holding either is
 *	sufficient for read access but both are required for write access.
 *	The queue lock for a page depends on the value of its queue field and
 *	is described in detail below.
 *
 *	The following annotations are possible:
 *	(A) the field must be accessed using atomic(9) and may require
 *	    additional synchronization.
 *	(B) the page busy lock.
 *	(C) the field is immutable.
 *	(F) the per-domain lock for the free queues.
 *	(M) Machine dependent, defined by pmap layer.
 *	(O) the object that the page belongs to.
 *	(Q) the page's queue lock.
 *
 *	The busy lock is an embedded reader-writer lock that protects the
 *	page's contents and identity (i.e., its <object, pindex> tuple) as
 *	well as certain valid/dirty modifications.  To avoid bloating the
 *	page structure, the busy lock lacks some of the features available
 *	in the kernel's general-purpose synchronization primitives.  As a
 *	result, busy lock ordering rules are not verified, lock recursion is
 *	not detected, and an attempt to xbusy a busy page or sbusy an xbusy
 *	page will trigger a panic rather than causing the thread to block.
 *	vm_page_sleep_if_busy() can be used to sleep until the page's busy
 *	state changes, after which the caller must re-lookup the page and
 *	re-evaluate its state.  vm_page_busy_acquire() will block until
 *	the lock is acquired.
 *
 *	The valid field is protected by the page busy lock (B) and object
 *	lock (O).  Transitions from invalid to valid are generally done
 *	via I/O or zero filling and do not require the object lock.
 *	These must be protected with the busy lock to prevent page-in or
 *	creation races.  Page invalidation generally happens as a result
 *	of truncate or msync.  When invalidated, pages must not be present
 *	in pmap and must hold the object lock to prevent concurrent
 *	speculative read-only mappings that do not require busy.  I/O
 *	routines may check for validity without a lock if they are prepared
 *	to handle invalidation races with higher level locks (vnode) or are
 *	unconcerned with races so long as they hold a reference to prevent
 *	recycling.  When a valid bit is set while holding a shared busy
 *	lock (A), atomic operations are used to protect against concurrent
 *	modification.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is a mix of machine dependent (M) and busy (B).  In
 *	the machine-independent layer, the page busy lock must be held to
 *	operate on the field.  However, the pmap layer is permitted to
 *	set all bits within the field without holding that lock.  If the
 *	underlying architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 *	An exclusive busy lock combined with pmap_remove_{write/all}() is the
 *	only way to ensure a page can not become dirty.  I/O generally
 *	removes the page from pmap to ensure exclusive access and atomic
 *	writes.
 *
 *	The ref_count field tracks references to the page.  References that
 *	prevent the page from being reclaimable are called wirings and are
 *	counted in the low bits of ref_count.  The containing object's
 *	reference, if one exists, is counted using the VPRC_OBJREF bit in the
 *	ref_count field.  Additionally, the VPRC_BLOCKED bit is used to
 *	atomically check for wirings and prevent new wirings via
 *	pmap_extract_and_hold().  When a page belongs to an object, it may be
 *	wired only when the object is locked, or the page is busy, or by
 *	pmap_extract_and_hold().  As a result, if the object is locked and the
 *	page is not busy (or is exclusively busied by the current thread), and
 *	the page is unmapped, its wire count will not increase.  The ref_count
 *	field is updated using atomic operations in most cases, except when it
 *	is known that no other references to the page exist, such as in the
 *	page allocator.  A page may be present in the page queues, or even
 *	actively scanned by the page daemon, without an explicitly counted
 *	reference.  The page daemon must therefore handle the possibility of a
 *	concurrent free of the page.
 *
 *	The queue state of a page consists of the queue and act_count fields
 *	of its atomically updated state, and the subset of atomic flags
 *	specified by PGA_QUEUE_STATE_MASK.  The queue field contains the
 *	page's page queue index, or PQ_NONE if it does not belong to a page
 *	queue.  To modify the queue field, the page queue lock corresponding
 *	to the old value must be held, unless that value is PQ_NONE, in which
 *	case the queue index must be updated using an atomic RMW operation.
 *	There is one exception to this rule: the page daemon may transition
 *	the queue field from PQ_INACTIVE to PQ_NONE immediately prior to
 *	freeing the page during an inactive queue scan.  At that point the
 *	page is already dequeued and no other references to that vm_page
 *	structure can exist.  The PGA_ENQUEUED flag, when set, indicates that
 *	the page structure is physically inserted into the queue corresponding
 *	to the page's queue index, and may only be set or cleared with the
 *	corresponding page queue lock held.
 *
 *	To avoid contention on page queue locks, page queue operations
 *	(enqueue, dequeue, requeue) are batched using fixed-size per-CPU
 *	queues.  A deferred operation is requested by setting one of the flags
 *	in PGA_QUEUE_OP_MASK and inserting an entry into a batch queue.  When
 *	a queue is full, an attempt to insert a new entry will lock the page
 *	queues and trigger processing of the pending entries.  The
 *	type-stability of vm_page structures is crucial to this scheme since
 *	the processing of entries in a given batch queue may be deferred
 *	indefinitely.  In particular, a page may be freed with pending batch
 *	queue entries.  The page queue operation flags must be set using
 *	atomic RMW operations.
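 *
 *	As an added illustration (a simplified sketch, not part of the
 *	original comment; identity and error rechecks are omitted), a caller
 *	that needs exclusive access to a page's contents might do:
 *
 *		VM_OBJECT_WLOCK(object);
 *		m = vm_page_lookup(object, pindex);
 *		if (m != NULL)
 *			(void)vm_page_busy_acquire(m, 0);
 *		VM_OBJECT_WUNLOCK(object);
 *		... read or modify the exclusively busied page ...
 *		vm_page_xunbusy(m);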
 */

#if PAGE_SIZE == 4096
#define	VM_PAGE_BITS_ALL	0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define	VM_PAGE_BITS_ALL	0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define	VM_PAGE_BITS_ALL	0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define	VM_PAGE_BITS_ALL	0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif

typedef union vm_page_astate {
	struct {
		uint16_t flags;
		uint8_t queue;
		uint8_t act_count;
	};
	uint32_t _bits;
} vm_page_astate_t;

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q;	/* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
		struct {
			void *slab;
			void *zone;
		} uma;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page (C) */
	struct md_page md;		/* machine dependent stuff */
	u_int ref_count;		/* page references (A) */
	u_int busy_lock;		/* busy owners lock (A) */
	union vm_page_astate a;		/* state accessed atomically (A) */
	uint8_t order;			/* index of the buddy queue (F) */
	uint8_t pool;			/* vm_phys freepool index (F) */
	uint8_t flags;			/* page PG_* flags (P) */
	uint8_t oflags;			/* page VPO_* flags (O) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index (C) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* valid DEV_BSIZE chunk map (O,B) */
	vm_page_bits_t dirty;		/* dirty DEV_BSIZE chunk map (M,B) */
};

/*
 * Special bits used in the ref_count field.
 *
 * ref_count is normally used to count wirings that prevent the page from
 * being reclaimed, but also supports several special types of references
 * that do not prevent reclamation.  Accesses to the ref_count field must be
 * atomic unless the page is unallocated.
 *
 * VPRC_OBJREF is the reference held by the containing object.  It can be set
 * or cleared only when the corresponding object's write lock is held.
 *
 * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
 * attempting to tear down all mappings of a given page.  The page busy lock
 * and object write lock must both be held in order to set or clear this bit.
 */
#define	VPRC_BLOCKED	0x40000000u	/* mappings are being removed */
#define	VPRC_OBJREF	0x80000000u	/* object reference, cleared with (O) */
#define	VPRC_WIRE_COUNT(c)	((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
#define	VPRC_WIRE_COUNT_MAX	(~(VPRC_BLOCKED | VPRC_OBJREF))

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	 indicates that the page is not under PV management but
 *	 otherwise should be treated as a normal page.  Pages not
 *	 under PV management cannot be paged out via the
 *	 object/vm_page_t because there is no knowledge of their pte
 *	 mappings, and such pages are also not on any PQ queue.
 *
 */
#define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, except that support for owner identity is removed because
 * of size constraints.  Checks on lock recursion are therefore not possible,
 * and the effectiveness of lock assertions is somewhat reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVE	VPB_BIT_EXCLUSIVE
#ifdef INVARIANTS
#define	VPB_CURTHREAD_EXCLUSIVE						\
	(VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
#else
#define	VPB_CURTHREAD_EXCLUSIVE	VPB_SINGLE_EXCLUSIVE
#endif

#define	VPB_UNBUSIED	VPB_SHARERS_WORD(0)

/* Freed lock blocks both shared and exclusive. */
#define	VPB_FREED	(0xffffffff - VPB_BIT_SHARED)

#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

#ifdef _KERNEL
extern vm_page_t bogus_page;
#endif	/* _KERNEL */

extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define	PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic
 * operations.  To set or clear these flags, the functions vm_page_aflag_set()
 * and vm_page_aflag_clear() must be used.  Neither these flags nor these
 * functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 *
 * PGA_NOSYNC must be set and cleared with the page busy lock held.
 *
 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
 * from a page queue, respectively.  It determines whether the plinks.q field
 * of the page is valid.  To set or clear this flag, the page's "queue" field
 * must be a valid queue index, and the corresponding page queue lock must be
 * held.
 *
 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
 * queue, and cleared when the dequeue request is processed.  A page may
 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
 * is requested after the page is scheduled to be enqueued but before it is
 * actually inserted into the page queue.
 *
 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
 * in its page queue.
 *
 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
 * the inactive queue, thus bypassing LRU.
 *
 * The PGA_DEQUEUE, PGA_REQUEUE and PGA_REQUEUE_HEAD flags must be set using
 * an atomic RMW operation to ensure that the "queue" field is a valid queue
 * index, and the corresponding page queue lock must be held when clearing
 * any of the flags.
 *
 * PGA_SWAP_FREE is used to defer freeing swap space to the pageout daemon
 * when the context that dirties the page does not have the object write lock
 * held.
 */
#define	PGA_WRITEABLE	0x0001		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x0002		/* page has been referenced */
#define	PGA_EXECUTABLE	0x0004		/* page may be mapped executable */
#define	PGA_ENQUEUED	0x0008		/* page is enqueued in a page queue */
#define	PGA_DEQUEUE	0x0010		/* page is due to be dequeued */
#define	PGA_REQUEUE	0x0020		/* page is due to be requeued */
#define	PGA_REQUEUE_HEAD 0x0040		/* page requeue should bypass LRU */
#define	PGA_NOSYNC	0x0080		/* do not collect for syncer */
#define	PGA_SWAP_FREE	0x0100		/* page with swap space was dirtied */
#define	PGA_SWAP_SPACE	0x0200		/* page has allocated swap space */

#define	PGA_QUEUE_OP_MASK	(PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_QUEUE_OP_MASK)

/*
 * Page flags.  Updates to these flags are not synchronized, and thus they
 * must be set during page allocation or free to avoid races.
 *
 * The PG_PCPU_CACHE flag is set at allocation time if the page was
 * allocated from a per-CPU cache.  It is cleared the next time that the
 * page is allocated from the physical memory allocator.
 */
#define	PG_PCPU_CACHE	0x01		/* was allocated from per-CPU caches */
#define	PG_FICTITIOUS	0x02		/* physical page doesn't exist */
#define	PG_ZERO		0x04		/* page is zeroed */
#define	PG_MARKER	0x08		/* special queue marker page */
#define	PG_NODUMP	0x10		/* don't include this page in a dump */

/*
 * Misc constants.
 */
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3
#define	ACT_INIT		5
#define	ACT_MAX			64

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs.  The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags, and ignore others, see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 dedicated for flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (f) - vm_page_alloc_freelist() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
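 *
 * As an added illustration (a sketch, not part of the original comment), a
 * caller that wants a wired page and is willing to sleep might request, with
 * the object write-locked:
 *
 *	m = vm_page_alloc(obj, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 *
 * For the vm_page_alloc*() functions, VM_ALLOC_ZERO only expresses a
 * preference for a prezeroed page; whether the page is actually zeroed is
 * reported via PG_ZERO, as in the sketch above.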
 */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
#define	VM_ALLOC_NORECLAIM	0x0080	/* (c) Do not reclaim after failure */
#define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_NOCREAT	0x0400	/* (gp) Don't create a page */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT))
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK))
		pflags |= VM_ALLOC_WAITOK;
	if ((malloc_flags & M_NORECLAIM))
		pflags |= VM_ALLOC_NORECLAIM;
	return (pflags);
}
#endif

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
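 *
 *	As an added illustration (a sketch, not part of the original comment),
 *	code deciding whether a superpage's base pages may all be used without
 *	further page-in might require, with the object locked:
 *
 *		if (vm_page_ps_test(m, PS_ALL_VALID | PS_NONE_BUSY, NULL))
 *			... every base page is fully valid and none is busy ...
 *
 *	The predicate flags may be combined, and the skip_m argument names a
 *	single page to exclude from the busy check (NULL here).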
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4

bool vm_page_busy_acquire(vm_page_t m, int allocflags);
void vm_page_busy_downgrade(vm_page_t m);
int vm_page_busy_tryupgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, bool nonshared);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_alloc_freelist_domain(int, int, int);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
    int allocflags, vm_page_t *ma, int count);
int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
    int allocflags);
int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
    vm_pindex_t pindex, int allocflags);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
void vm_page_free_invalid(vm_page_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_invalid(vm_page_t m);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
void vm_page_pqbatch_drain(void);
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
    vm_page_astate_t new);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
#define	VPR_TRYFREE	0x01
#define	VPR_NOREUSE	0x02
void vm_page_release(vm_page_t m, int flags);
void vm_page_release_locked(vm_page_t m, int flags);
vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
bool vm_page_remove(vm_page_t);
bool vm_page_remove_xbusy(vm_page_t);
int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mold);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
vm_page_bits_t vm_page_set_dirty(vm_page_t m);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
int vm_page_sleep_if_xbusy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_remove_all(vm_page_t m);
bool vm_page_try_remove_write(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
int vm_page_tryxbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
void vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
bool vm_page_wire_mapped(vm_page_t m);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_hard_unchecked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
void vm_page_valid(vm_page_t m);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_busy_fetch(m)	atomic_load_int(&(m)->busy_lock)

#define	vm_page_assert_busied(m)					\
	KASSERT(vm_page_busied(m),					\
	    ("vm_page_assert_busied: page %p not busy @ %s:%d",	\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) !=		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_unbusied: page %p busy_lock %#x owned"	\
	    " by me @ %s:%d",						\
	    (m), (m)->busy_lock, __FILE__, __LINE__))

#define	vm_page_assert_xbusied_unchecked(m) do {			\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__));					\
} while (0)
#define	vm_page_assert_xbusied(m) do {					\
	vm_page_assert_xbusied_unchecked(m);				\
	KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) ==		\
	    VPB_CURTHREAD_EXCLUSIVE,					\
	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"	\
	    " by me @ %s:%d",						\
	    (m), (m)->busy_lock, __FILE__, __LINE__));			\
} while (0)

#define	vm_page_busied(m)						\
	(vm_page_busy_fetch(m) != VPB_UNBUSIED)

#define	vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__,	\
		    (m));						\
} while (0)

#define	vm_page_xbusied(m)						\
	((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)

#define	vm_page_busy_freed(m)						\
	(vm_page_busy_fetch(m) == VPB_FREED)

#define	vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__, \
		    (m));						\
} while (0)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
#define	vm_page_xunbusy_unchecked(m) do {				\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard_unchecked(m);			\
} while (0)

#ifdef INVARIANTS
void vm_page_object_busy_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	vm_page_object_busy_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
/*
 * Claim ownership of a page's xbusy state.  In non-INVARIANTS kernels this
 * operation is a no-op since ownership is not tracked.  In particular
 * this macro does not provide any synchronization with the previous owner.
 */
#define	vm_page_xbusy_claim(m) do {					\
	u_int _busy_lock;						\
									\
	vm_page_assert_xbusied_unchecked((m));				\
	do {								\
		_busy_lock = vm_page_busy_fetch(m);			\
	} while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock,	\
	    (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
} while (0)
#else
#define	VM_PAGE_OBJECT_BUSY_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#define	vm_page_xbusy_claim(m)
#endif

#if BYTE_ORDER == BIG_ENDIAN
#define	VM_PAGE_AFLAG_SHIFT	16
#else
#define	VM_PAGE_AFLAG_SHIFT	0
#endif

/*
 *	Load a snapshot of a page's 32-bit atomic state.
 */
static inline vm_page_astate_t
vm_page_astate_load(vm_page_t m)
{
	vm_page_astate_t a;

	a._bits = atomic_load_32(&m->a._bits);
	return (a);
}

/*
 *	Atomically compare and set a page's atomic state.
 */
static inline bool
vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
{

	KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
	    ("%s: invalid head requeue request for page %p", __func__, m));
	KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
	    ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
	KASSERT(new._bits != old->_bits,
	    ("%s: bits are unchanged", __func__));

	return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
}
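
/*
 * Illustrative note (an addition, not from the original header): callers that
 * must update several fields of the atomic state as a single unit typically
 * use a load/fcmpset loop; a hedged sketch, assuming the caller wants to bump
 * act_count:
 *
 *	vm_page_astate_t old, new;
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		if (old.act_count == ACT_MAX)
 *			break;
 *		new = old;
 *		new.act_count++;
 *	} while (!vm_page_astate_fcmpset(m, &old, new));
 *
 * On failure vm_page_astate_fcmpset() reloads *old, so the loop recomputes
 * the new state from the current value.
 */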

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_set_32(addr, val);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_BUSY_ASSERT(m);
	m->dirty = 0;
}

static inline uint8_t
_vm_page_queue(vm_page_astate_t as)
{

	if ((as.flags & PGA_DEQUEUE) != 0)
		return (PQ_NONE);
	return (as.queue);
}

/*
 *	vm_page_queue:
 *
 *	Return the index of the queue containing m.
 */
static inline uint8_t
vm_page_queue(vm_page_t m)
{

	return (_vm_page_queue(vm_page_astate_load(m)));
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{
	uint8_t queue;

	queue = vm_page_queue(m);
	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
}

/*
 *	vm_page_drop:
 *
 *	Release a reference to a page and return the old reference count.
 */
static inline u_int
vm_page_drop(vm_page_t m, u_int val)
{
	u_int old;

	/*
	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
	 * page structure are visible before it is freed.
	 */
	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(&m->ref_count, -val);
	KASSERT(old != VPRC_BLOCKED,
	    ("vm_page_drop: page %p has an invalid refcount value", m));
	return (old);
}
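
/*
 * Illustrative sketch (an addition, not part of the original header): a
 * caller that releases its own wiring conceptually does something like
 *
 *	old = vm_page_drop(m, 1);
 *	if (VPRC_WIRE_COUNT(old) == 1 && (old & VPRC_OBJREF) == 0)
 *		vm_page_free(m);	(the last reference was just dropped)
 *
 * In practice callers use vm_page_unwire() or vm_page_unwire_noq(), which
 * encapsulate this logic together with the related page queue handling.
 */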

/*
 *	vm_page_wired:
 *
 *	Perform a racy check to determine whether a reference prevents the
 *	page from being reclaimable.  If the page's object is locked, and the
 *	page is unmapped and exclusively busied by the current thread, no new
 *	wirings may be created.
 */
static inline bool
vm_page_wired(vm_page_t m)
{

	return (VPRC_WIRE_COUNT(m->ref_count) > 0);
}

static inline bool
vm_page_all_valid(vm_page_t m)
{

	return (m->valid == VM_PAGE_BITS_ALL);
}

static inline bool
vm_page_none_valid(vm_page_t m)
{

	return (m->valid == 0);
}

static inline int
vm_page_domain(vm_page_t m)
{
#ifdef NUMA
	int domn, segind;

	segind = m->segind;
	KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
	domn = vm_phys_segs[segind].domain;
	KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
	return (domn);
#else
	return (0);
#endif
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */