/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_page.h,v 1.45 1998/09/01 17:12:19 wollman Exp $
 */

/*
 * Resident memory system definitions.
 */

#ifndef _VM_PAGE_
#define _VM_PAGE_

#include "opt_vmpage.h"

#include <vm/pmap.h>
#include <machine/atomic.h>
/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident
 * page, indexed by page number.  Each structure
 * is an element of several lists:
 *
 *	A hash table bucket used to quickly
 *	perform object/offset lookups.
 *
 *	A list of all pages for a given object,
 *	so they can be quickly deactivated at
 *	time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object
 * and offset to which this page belongs (for pageout),
 * and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the
 * object that the page belongs to (O) or by the lock on the page
 * queues (P).
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	TAILQ_ENTRY(vm_page) hashq;	/* hash table links (O) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */

	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_offset_t phys_addr;		/* physical address of page */
	u_short	queue;			/* page queue index */
	u_short	flags,			/* see below */
		pc;			/* page color */
	u_short	wire_count;		/* wired down maps refs (P) */
	short	hold_count;		/* page hold count */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */
	/*
	 * NOTE: the valid and dirty maps must support one bit per
	 * DEV_BSIZE chunk in a page, so on normal x86 kernels they
	 * must be at least 8 bits wide.
	 */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
};
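/*
 * Illustrative sketch (not part of this interface): with 4K pages and
 * DEV_BSIZE == 512, the valid/dirty maps carry one bit per 512-byte
 * chunk, so bit i covers bytes [i * DEV_BSIZE, (i + 1) * DEV_BSIZE).
 * A mask for a byte range [base, base + size) can be built roughly the
 * way vm_page_bits() (declared below) is expected to work:
 *
 *	int first = base / DEV_BSIZE;
 *	int last  = (base + size - 1) / DEV_BSIZE;
 *	int mask  = ((2 << last) - 1) & ~((1 << first) - 1);
 *
 * e.g. base = 512, size = 1024 covers chunks 1 and 2, giving 0x06;
 * the whole page gives 0xff, which is VM_PAGE_BITS_ALL below.
 */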
/*
 * Page coloring parameters.
 */
/* Each of PQ_FREE, PQ_ZERO and PQ_CACHE has PQ_L2_SIZE entries */

/* Define one of the following */
#if defined(PQ_HUGECACHE)
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 17	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 256	/* Number of colors, optimized for a 1M cache */
#define PQ_L1_SIZE 4	/* Four page L1 cache */
#endif

/* Define one of the following */
#if defined(PQ_LARGECACHE)
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 17	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 128	/* Number of colors, optimized for a 512K cache */
#define PQ_L1_SIZE 4	/* Four page L1 cache (for PII) */
#endif


/*
 * Use 'options PQ_NOOPT' to disable page coloring
 */
#if defined(PQ_NOOPT)
#define PQ_PRIME1 1
#define PQ_PRIME2 1
#define PQ_PRIME3 1
#define PQ_L2_SIZE 1
#define PQ_L1_SIZE 1
#endif

#if defined(PQ_NORMALCACHE)
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 11	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */
#define PQ_L1_SIZE 2	/* Two page L1 cache */
#endif

#if defined(PQ_MEDIUMCACHE) || !defined(PQ_L2_SIZE)
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 64	/* Number of colors, optimized for a 256K cache */
#define PQ_L1_SIZE 2	/* Two page L1 cache */
#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_ZERO		(1 + PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_ACTIVE	(2 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(3 + 2*PQ_L2_SIZE)
#define PQ_COUNT	(3 + 3*PQ_L2_SIZE)

extern struct vpgqueues {
	struct pglist *pl;
	int	*cnt;
	int	*lcnt;
} vm_page_queues[PQ_COUNT];
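/*
 * Illustrative sketch (an assumption, not a definition from this file):
 * PQ_FREE, PQ_ZERO and PQ_CACHE are the base indices of runs of
 * PQ_L2_SIZE per-color queues inside vm_page_queues[], so the queue
 * holding a free page m is selected by the page's color:
 *
 *	struct vpgqueues *vpq = &vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)];
 *	TAILQ_INSERT_TAIL(vpq->pl, m, pageq);
 *
 * PQ_INACTIVE and PQ_ACTIVE occupy single slots and take no color
 * offset, which is why PQ_COUNT works out to 3 + 3*PQ_L2_SIZE.
 */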
/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_FILLED and PG_DIRTY are added for the filesystems.
 */
#define	PG_BUSY		0x01	/* page is in transit (O) */
#define	PG_WANTED	0x02	/* someone is waiting for page (O) */
#define	PG_TABLED	0x04	/* page is in an object (O) */
#define	PG_FICTITIOUS	0x08	/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x10	/* page is mapped writeable */
#define	PG_MAPPED	0x20	/* page is mapped */
#define	PG_ZERO		0x40	/* page is zeroed */
#define	PG_REFERENCED	0x80	/* page has been referenced */
#define	PG_CLEANCHK	0x100	/* page will be checked for cleaning */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
#define PFCLUSTER_BEHIND	3
#define PFCLUSTER_AHEAD		3

#ifdef KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern struct pglist vm_page_queue_free[PQ_L2_SIZE];	/* memory free queue */
extern struct pglist vm_page_queue_zero[PQ_L2_SIZE];	/* zeroed memory free queue */
extern struct pglist vm_page_queue_active;	/* active memory queue */
extern struct pglist vm_page_queue_inactive;	/* inactive memory queue */
extern struct pglist vm_page_queue_cache[PQ_L2_SIZE];	/* cache memory queue */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long first_page;			/* first physical page number */
					/* ... represented in vm_page_array */
extern long last_page;			/* last physical page number */
					/* ... represented in vm_page_array */
					/* [INCLUSIVE] */
extern vm_offset_t first_phys_addr;	/* physical address for first_page */
extern vm_offset_t last_phys_addr;	/* physical address for last_page */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define IS_VM_PHYSADDR(pa) \
	((pa) >= first_phys_addr && (pa) <= last_phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
	(&vm_page_array[atop(pa) - first_page])

/*
 * Small helpers, implemented as inline functions (historically macros).
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&m->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&m->flags, bits);
}

#if 0
static __inline void
vm_page_assert_wait(vm_page_t m, int interruptible)
{
	vm_page_flag_set(m, PG_WANTED);
	assert_wait((int) m, interruptible);
}
#endif

static __inline void
vm_page_busy(vm_page_t m)
{
	vm_page_flag_set(m, PG_BUSY);
}

/*
 * Clear PG_BUSY and wake up anyone sleeping on the page.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
	vm_page_flag_clear(m, PG_BUSY);
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&m->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if ((m->flags & PG_WANTED) && m->busy == 0) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}
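/*
 * Illustrative sketch (an assumption, not a definition from this file):
 * a typical consumer marks a page busy across an operation that must
 * not race with other users of the page, and waiters use
 * vm_page_sleep() (declared below) to wait for PG_BUSY to clear:
 *
 *	vm_page_busy(m);
 *	... operate on the page ...
 *	vm_page_wakeup(m);
 *
 * For I/O, vm_page_io_start()/vm_page_io_finish() bump and drop the
 * m->busy count instead, and the last finisher wakes any waiter that
 * set PG_WANTED.
 */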

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define VM_ALLOC_ZERO		3
#define VM_ALLOC_RETRY		0x80

void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
vm_page_t vm_page_grab __P((vm_object_t, vm_pindex_t, int));
void vm_page_cache __P((register vm_page_t));
static __inline void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_free_zero __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
void vm_page_unqueue __P((vm_page_t));
void vm_page_unqueue_nowakeup __P((vm_page_t));
void vm_page_set_validclean __P((vm_page_t, int, int));
void vm_page_set_invalid __P((vm_page_t, int, int));
static __inline boolean_t vm_page_zero_fill __P((vm_page_t));
int vm_page_is_valid __P((vm_page_t, int, int));
void vm_page_test_dirty __P((vm_page_t));
int vm_page_bits __P((int, int));
vm_page_t vm_page_list_find __P((int, int));
int vm_page_queue_index __P((vm_offset_t, int));
vm_page_t vm_page_select __P((vm_object_t, vm_pindex_t, int));
int vm_page_sleep __P((vm_page_t, char *, char *));

/*
 * Keep a page from being freed by the page daemon.  This has much the
 * same effect as wiring, but at much lower overhead; it should be used
 * only for *very* temporary holding ("wiring").
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}

static __inline void
vm_page_unhold(vm_page_t mem)
{
#ifdef DIAGNOSTIC
	if (--mem->hold_count < 0)
		panic("vm_page_unhold: hold count < 0!!!");
#else
	--mem->hold_count;
#endif
}

static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}

/*
 * vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
static __inline boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * vm_page_copy:
 *
 *	Copy one page to another.
 */
static __inline void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

#endif				/* KERNEL */
#endif				/* !_VM_PAGE_ */