/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups.
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using one of, or a combination of, the lock on the
 *	object that the page belongs to (O), the pool lock for the page (P),
 *	or the lock for either the free or paging queue (Q).  If a field is
 *	annotated below with two of these locks, then holding either lock is
 *	sufficient for read access, but both locks are required for write
 *	access.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 */

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif
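/*
 * Worked sizing check (illustrative only; assumes the standard 512-byte
 * DEV_BSIZE).  The valid and dirty maps below carry one bit per DEV_BSIZE
 * chunk of a page, so vm_page_bits_t must be at least
 * PAGE_SIZE / DEV_BSIZE bits wide:
 *
 *	 4096 / 512 =  8 bits -> uint8_t,  VM_PAGE_BITS_ALL == 0xff
 *	 8192 / 512 = 16 bits -> uint16_t, VM_PAGE_BITS_ALL == 0xffff
 *
 * and so on; each doubling of PAGE_SIZE doubles the chunk count, which is
 * why the table above widens the type in lock step.  The implied invariant
 * is sizeof(vm_page_bits_t) * NBBY == PAGE_SIZE / DEV_BSIZE.
 */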
struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
			void *pv;
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	uint8_t	queue;			/* page queue index (P,Q) */
	int8_t segind;
	short hold_count;		/* page hold count (P) */
	uint8_t	order;			/* index of the buddy queue */
	uint8_t pool;
	u_short cow;			/* page cow mapping count (P) */
	u_int wire_count;		/* wired down maps refs (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t oflags;			/* page VPO_* flags (O) */
	uint16_t flags;			/* page PG_* flags (P) */
	u_char	act_count;		/* page usage count (P) */
	u_char	__pad0;			/* unused padding */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
	volatile u_int busy_lock;	/* busy owners lock */
};

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	 indicates that the page is not under PV management but
 *	 otherwise should be treated as a normal page.  Pages not
 *	 under PV management cannot be paged out via the
 *	 object/vm_page_t because there is no knowledge of their pte
 *	 mappings, and such pages are also not placed on any paging
 *	 queue.
 */
#define	VPO_UNUSED01	0x01		/* --available-- */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x10		/* do not collect for syncer */

/*
 * Busy page implementation details.
 * The algorithm is borrowed mostly from the rwlock(9) and sx(9) lock
 * implementations, although support for owner identity is dropped because
 * of size constraints.  Consequently, checks for lock recursion are not
 * possible, and the effectiveness of the lock assertions is somewhat
 * reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVER	VPB_BIT_EXCLUSIVE

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)
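/*
 * Worked example of the busy word encoding (illustrative only).  With
 * three shared holders the word is
 *
 *	VPB_SHARERS_WORD(3) == (3 << VPB_SHARERS_SHIFT) | VPB_BIT_SHARED
 *			    == 0x19
 *
 * and VPB_SHARERS(0x19) recovers the count 3.  Taking one more shared
 * hold is an atomic add of VPB_ONE_SHARER (0x08) to the word, while
 * VPB_UNBUSIED is simply VPB_SHARERS_WORD(0) == VPB_BIT_SHARED, i.e.
 * "shared with zero sharers".
 */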
#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_COUNT	2

TAILQ_HEAD(pglist, vm_page);
SLIST_HEAD(spglist, vm_page);

struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	int		* const pq_vcnt;
	const char	* const pq_name;
} __aligned(CACHE_LINE_SIZE);

struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	u_int vmd_page_count;
	u_int vmd_free_count;
	long vmd_segs;	/* bitmask of the segments */
	boolean_t vmd_oom;
	int vmd_pass;	/* local pagedaemon pass */
	struct vm_page vmd_marker; /* marker for pagedaemon private use */
};

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#ifdef _KERNEL
static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

#ifdef notyet
	vm_pagequeue_assert_locked(pq);
#endif
	pq->pq_cnt += addend;
	atomic_add_int(pq->pq_vcnt, addend);
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
#endif	/* _KERNEL */

extern struct mtx_padalign vm_page_queue_free_mtx;
extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define	PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
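/*
 * Usage sketch (illustrative only, not a prescribed pattern): the page
 * locks are keyed by physical address, one mutex per pa_lock[] slot, so
 * code that holds only a vm_paddr_t can take the page lock without a
 * vm_page_t in hand.  PA_UNLOCK_COND() pairs naturally with a "locked pa"
 * cursor that is reset to zero on release, as in the retry loops built
 * around vm_page_pa_tryrelock() (declared below):
 *
 *	vm_paddr_t locked_pa = 0;
 *
 *	PA_LOCK(pa);
 *	locked_pa = pa;
 *	... examine the page at pa ...
 *	PA_UNLOCK_COND(locked_pa);	// unlocks and zeroes locked_pa
 */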
#ifdef KLD_MODULE
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().  When it
 * does so, the page must be exclusive busied.  The MI VM layer must never
 * access this flag directly.  Instead, it should call
 * pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 */
#define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x02		/* page has been referenced */
#define	PGA_EXECUTABLE	0x04		/* page may be mapped executable */
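/*
 * Illustrative sketch (not a definitive pattern): setting PGA_REFERENCED
 * requires no lock, but clearing it does, and vm_page_aflag_clear()
 * asserts as much (see its definition below).  A typical
 * set-then-later-clear sequence therefore looks like:
 *
 *	vm_page_aflag_set(m, PGA_REFERENCED);	// lock-free atomic set
 *	...
 *	vm_page_lock(m);
 *	vm_page_aflag_clear(m, PGA_REFERENCED);	// page lock required
 *	vm_page_unlock(m);
 *
 * Loadable modules should use vm_page_reference() for the first step,
 * since neither the aflags nor their accessors are part of the KBI.
 */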
/*
 * Page flags.  If changed at any time other than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define	PG_CACHED	0x0001		/* page is cached */
#define	PG_FREE		0x0002		/* page is free */
#define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
#define	PG_ZERO		0x0008		/* page is zeroed */
#define	PG_MARKER	0x0010		/* special queue marker page */
#define	PG_SLAB		0x0020		/* object pointer is actually a slab */
#define	PG_WINATCFLS	0x0040		/* flush dirty page on inactive q */
#define	PG_NODUMP	0x0080		/* don't include this page in a dump */
#define	PG_UNHOLDFREE	0x0100		/* delayed free of a held page */

/*
 * Misc constants.
 */
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3
#define	ACT_INIT		5
#define	ACT_MAX			64

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of four lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	cache
 *		Almost available for allocation.  Still associated with
 *		an object, but clean and immediately freeable.
 *
 * The following lists are LRU sorted:
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/* page allocation classes: */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
/* page allocation flags: */
#define	VM_ALLOC_WIRED		0x0020	/* non pageable */
#define	VM_ALLOC_ZERO		0x0040	/* Try to obtain a zeroed page */
#define	VM_ALLOC_RETRY		0x0080	/* Mandatory with vm_page_grab() */
#define	VM_ALLOC_NOOBJ		0x0100	/* No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* Do not busy the page */
#define	VM_ALLOC_IFCACHED	0x0400	/* Fail if the page is not cached */
#define	VM_ALLOC_IFNOTCACHED	0x0800	/* Fail if the page is cached */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* vm_page_grab() only */
#define	VM_ALLOC_NODUMP		0x2000	/* don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* Shared busy the page */

#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	return (pflags);
}
#endif
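/*
 * Allocation sketch (illustrative only; the error handling is hypothetical
 * and pmap_zero_page() comes from the pmap layer, not this header): obtain
 * a wired page with no backing object, asking for a pre-zeroed one.
 * VM_ALLOC_ZERO is only a hint, so the caller checks PG_ZERO and zeroes
 * the page itself when the hint was not satisfied:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m == NULL)
 *		return (ENOMEM);
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */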
void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg);
void vm_page_flash(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
boolean_t vm_page_is_cached(vm_object_t object, vm_pindex_t pindex);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
void vm_page_reference(vm_page_t m);
void vm_page_remove (vm_page_t);
int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
void vm_page_requeue_locked(vm_page_t m);
int vm_page_sbusied(vm_page_t m);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unwire (vm_page_t, int);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);
void vm_page_cowfault (vm_page_t);
int vm_page_cowsetup(vm_page_t);
void vm_page_cowclear (vm_page_t);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (void *)m, __FILE__, __LINE__));

#define	vm_page_assert_unbusied(m)					\
	KASSERT(!vm_page_busied(m),					\
	    ("vm_page_assert_unbusied: page %p busy @ %s:%d",		\
	    (void *)m, __FILE__, __LINE__));

#define	vm_page_assert_xbusied(m)					\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (void *)m, __FILE__, __LINE__));

#define	vm_page_busied(m)						\
	((m)->busy_lock != VPB_UNBUSIED)

#define	vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__, m); \
} while (0)

#define	vm_page_tryxbusy(m)						\
	(atomic_cmpset_acq_int(&m->busy_lock, VPB_UNBUSIED,		\
	    VPB_SINGLE_EXCLUSIVER))

#define	vm_page_xbusied(m)						\
	((m->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)

#define	vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__, \
		    m);							\
} while (0)

#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
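/*
 * Busying sketch (illustrative only; VM_OBJECT_WLOCK()/WUNLOCK() are the
 * object lock macros from vm_object.h).  A caller holding the object lock
 * typically exclusive busies a page for the duration of an operation and
 * then drops the busy state; vm_page_xunbusy() takes the fast-path
 * compare-and-set, falling back to vm_page_xunbusy_hard() when
 * VPB_BIT_WAITERS is set and sleepers must be woken:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_tryxbusy(m)) {
 *		... operate on the exclusively busied page ...
 *		vm_page_xunbusy(m);
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 */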
#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
#else
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#endif

/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  So that we can easily use a 32-bit operation instead, we
 * require that the aflags field be 32-bit aligned.
 */
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_clear: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_WRITEABLE flag can only be set if the page is managed and
	 * exclusive busied.  Currently, this flag is only set by pmap_enter().
	 */
	KASSERT((bits & PGA_WRITEABLE) == 0 ||
	    ((m->oflags & VPO_UNMANAGED) == 0 && vm_page_xbusied(m)),
	    ("vm_page_aflag_set: PGA_WRITEABLE and not exclusive busy"));

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_set: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if defined(KLD_MODULE) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_remque:
 *
 *	If the given page is in a page queue, then remove it from that page
 *	queue.
 *
 *	The page must be locked.
 */
static inline void
vm_page_remque(vm_page_t m)
{

	if (m->queue != PQ_NONE)
		vm_page_dequeue(m);
}

/*
 *	vm_page_undirty:
 *
 *	Set the page to not be dirty.  Note: this does not clear
 *	pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */