/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	struct vm_page *left;		/* splay tree link (O) */
	struct vm_page *right;		/* splay tree link (O) */

	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	uint8_t	queue;			/* page queue index */
	int8_t	segind;
	u_short	flags;			/* see below */
	uint8_t	order;			/* index of the buddy queue */
	uint8_t	pool;
	u_short	cow;			/* page cow mapping count */
	u_int	wire_count;		/* wired down maps refs (P) */
	short	hold_count;		/* page hold count */
	u_short	oflags;			/* page flags (O) */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count (O) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 16384
	u_int	valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_int	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 32768
	u_long	valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_long	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
};
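
/*
 * Illustration (a sketch only, not part of the interface): the valid and
 * dirty fields above hold one bit per DEV_BSIZE chunk of the page, so the
 * bits covering a byte range [base, base + size) within a page can be
 * computed roughly as follows; see vm_page_bits(), declared below, for the
 * real computation:
 *
 *	first = base >> DEV_BSHIFT;
 *	last  = (base + size - 1) >> DEV_BSHIFT;
 *	bits  = (2 << last) - (1 << first);	-- sets bits first..last
 */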

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 */
#define	VPO_BUSY	0x0001	/* page is in transit */
#define	VPO_WANTED	0x0002	/* someone is waiting for page */
#define	VPO_CLEANCHK	0x0100	/* page will be checked for cleaning */
#define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x0400	/* do not collect for syncer */

#define	PQ_NONE		0
#define	PQ_INACTIVE	1
#define	PQ_ACTIVE	2
#define	PQ_HOLD		3
#define	PQ_COUNT	4

/* Returns the real queue a page is on. */
#define	VM_PAGE_GETQUEUE(m)	((m)->queue)

/* Returns the well known queue a page is on. */
#define	VM_PAGE_GETKNOWNQUEUE2(m)	VM_PAGE_GETQUEUE(m)

/* Returns true if the page is in the named well known queue. */
#define	VM_PAGE_INQUEUE2(m, q)	(VM_PAGE_GETKNOWNQUEUE2(m) == (q))

/* Sets the queue a page is on. */
#define	VM_PAGE_SETQUEUE2(m, q)	(VM_PAGE_GETQUEUE(m) = (q))

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
extern struct mtx vm_page_queue_free_mtx;
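
/*
 * Illustration (a sketch only, not a declaration from this file): a page
 * is placed on one of the paging queues by recording the queue index and
 * linking it onto the corresponding vm_page_queues[] list while the page
 * queues lock is held, roughly:
 *
 *	struct vpgqueues *vpq = &vm_page_queues[queue];
 *
 *	VM_PAGE_SETQUEUE2(m, queue);
 *	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
 *	++*vpq->cnt;
 *
 * vm_pageq_remove(), declared below, performs the inverse operation.
 */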

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_CACHED	0x0001		/* page is cached */
#define	PG_FREE		0x0002		/* page is free */
#define	PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define	PG_REFERENCED	0x0080		/* page has been referenced */
#define	PG_UNMANAGED	0x0800		/* No PV management for page */
#define	PG_MARKER	0x1000		/* special queue marker page */
#define	PG_SLAB		0x2000		/* object pointer is actually a slab */

/*
 * Misc constants.
 */
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3
#define	ACT_INIT		5
#define	ACT_MAX			64

#ifdef _KERNEL

#include <vm/vm_param.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	cache
 *		Almost available for allocation. Still associated with
 *		an object, but clean and immediately freeable.
 *
 *	hold
 *		Will become free after a pending I/O operation
 *		completes.
 *
 * The following lists are LRU sorted:
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active" i.e. they have been
 *		recently referenced.
 *
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);

static __inline vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

static __inline vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
#ifdef VM_PHYSSEG_SPARSE
	return (vm_phys_paddr_to_vm_page(pa));
#elif defined(VM_PHYSSEG_DENSE)
	return (&vm_page_array[atop(pa) - first_page]);
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}
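
/*
 * Illustration (a sketch only): for pages that are part of vm_page_array,
 * VM_PAGE_TO_PHYS() and PHYS_TO_VM_PAGE() are inverses, so a page's
 * structure can be recovered from its physical address, e.g.:
 *
 *	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 *	vm_page_t  n  = PHYS_TO_VM_PAGE(pa);	-- n == m for such pages
 */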

extern struct mtx vm_page_queue_mtx;
#define	vm_page_lock_queues()	mtx_lock(&vm_page_queue_mtx)
#define	vm_page_unlock_queues()	mtx_unlock(&vm_page_queue_mtx)

#if PAGE_SIZE == 4096
#define	VM_PAGE_BITS_ALL 0xffu
#elif PAGE_SIZE == 8192
#define	VM_PAGE_BITS_ALL 0xffffu
#elif PAGE_SIZE == 16384
#define	VM_PAGE_BITS_ALL 0xffffffffu
#elif PAGE_SIZE == 32768
#define	VM_PAGE_BITS_ALL 0xfffffffffffffffflu
#endif

/* page allocation classes: */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
/* page allocation flags: */
#define	VM_ALLOC_WIRED		0x0020	/* non pageable */
#define	VM_ALLOC_ZERO		0x0040	/* Try to obtain a zeroed page */
#define	VM_ALLOC_RETRY		0x0080	/* vm_page_grab() only */
#define	VM_ALLOC_NOOBJ		0x0100	/* No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* Do not busy the page */
#define	VM_ALLOC_IFCACHED	0x0400	/* Fail if the page is not cached */
#define	VM_ALLOC_IFNOTCACHED	0x0800	/* Fail if the page is cached */

void vm_page_flag_set(vm_page_t m, unsigned short bits);
void vm_page_flag_clear(vm_page_t m, unsigned short bits);
void vm_page_busy(vm_page_t m);
void vm_page_flash(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);
void vm_page_dirty(vm_page_t m);
void vm_page_wakeup(vm_page_t m);

void vm_pageq_remove(vm_page_t m);

void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_page_cache_remove(vm_page_t);
void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed(vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_requeue(vm_page_t m);
void vm_page_set_valid(vm_page_t m, int base, int size);
void vm_page_sleep(vm_page_t m, const char *msg);
vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);
void vm_page_cowfault (vm_page_t);
int vm_page_cowsetup(vm_page_t);
void vm_page_cowclear (vm_page_t);
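
/*
 * Illustration (a sketch only, assuming the caller holds the lock on
 * "object"; object and pindex stand for the caller's own values): a typical
 * allocation combines one allocation class with optional flags, e.g. a
 * pageable, pre-zeroed page inserted into an object:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m == NULL)
 *		-- no free pages; the caller typically sleeps and retries
 *	...
 *	vm_page_wakeup(m);	-- clear VPO_BUSY once the page is ready
 */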

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if VPO_BUSY is set or,
 *	if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
 *	thread slept and the page queues lock was released.
 *	Otherwise, retains the page queues lock and returns FALSE.
 *
 *	The object containing the given page must be locked.
 */
static __inline int
vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
{

	if ((m->oflags & VPO_BUSY) || (also_m_busy && m->busy)) {
		vm_page_sleep(m, msg);
		return (TRUE);
	}
	return (FALSE);
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif /* _KERNEL */
#endif /* !_VM_PAGE_ */