/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _VM_PAGEQUEUE_
#define	_VM_PAGEQUEUE_

#ifdef _KERNEL
struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	const char	* const pq_name;
	uint64_t	pq_pdpages;
} __aligned(CACHE_LINE_SIZE);

#if __SIZEOF_LONG__ == 8
#define	VM_BATCHQUEUE_SIZE	63
#else
#define	VM_BATCHQUEUE_SIZE	15
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
	int		bq_cnt;
} __aligned(CACHE_LINE_SIZE);
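
/*
 * Editor's note: VM_BATCHQUEUE_SIZE appears to be chosen so that the
 * structure densely fills whole cache lines: on LP64, 63 page pointers plus
 * bq_cnt occupy 508 bytes, which the alignment above pads to 512, while on
 * ILP32 the 15-entry array plus bq_cnt fill a 64-byte line exactly.
 */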

#include <vm/uma.h>
#include <sys/_blockcount.h>
#include <sys/pidctrl.h>
struct sysctl_oid;

/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and accounting.
 *
 * Lock Key:
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * d	vm_domainset_lock
 * a	atomic
 * c	const after boot
 * q	page queue lock
 *
 * A unique page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages.  To decide how many pages to
 * process, it uses thresholds derived from the number of pages in the domain:
 *
 *  vmd_page_count
 *  ---
 *  |
 *  |-> vmd_inactive_target (~3%)
 *  |   - The active queue scan target is given by
 *  |     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *  |
 *  |-> vmd_free_target (~2%)
 *  |   - Target for page reclamation.
 *  |
 *  |-> vmd_pageout_wakeup_thresh (~1.8%)
 *  |   - Threshold for waking up the page daemon.
 *  |
 *  |-> vmd_free_min (~0.5%)
 *  |   - First low memory threshold.
 *  |   - Causes per-CPU caching to be lazily disabled in UMA.
 *  |   - vm_wait() sleeps below this threshold.
 *  |
 *  |-> vmd_free_severe (~0.25%)
 *  |   - Second low memory threshold.
 *  |   - Triggers aggressive UMA reclamation, disables delayed buffer
 *  |     writes.
 *  |
 *  |-> vmd_free_reserved (~0.13%)
 *  |   - Minimum for VM_ALLOC_NORMAL page allocations.
 *  |-> vmd_pageout_free_min (32 + 2 pages)
 *  |   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *  |-> vmd_interrupt_free_min (2 pages)
 *  |   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *  ---
 *
 *--
 * Free page count regulation:
 *
 * The page daemon attempts to ensure that the free page count is above the
 * free target.  It wakes up periodically (every 100ms) to input the current
 * free page shortage (free_target - free_count) to a PID controller, which in
 * response outputs the number of pages to attempt to reclaim.  The shortage's
 * current magnitude, rate of change, and cumulative value are together used
 * to determine the controller's output.  The page daemon target thus adapts
 * dynamically to the system's demand for free pages, resulting in less
 * burstiness than a simple hysteresis loop.
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps
 * ensure that the system responds promptly to a large instantaneous free page
 * shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that
 * it can respond promptly to a sudden free page shortage.  In particular, the
 * page daemon thread aggressively scans active pages so long as the following
 * condition holds:
 *
 *	len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history.  Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
 *
 * The per-domain laundry thread periodically launders dirty pages based on
 * the number of clean pages freed by the page daemon since the last
 * laundering.  If the page daemon fails to meet its scan target (i.e., the
 * PID controller output) because of a shortage of clean inactive pages, the
 * laundry thread attempts to launder enough pages to meet the free page
 * target.
 *
 *--
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation
 * can claim any free page.  This priority is used in the pmap layer when
 * attempting to allocate a page for the kernel page tables; in such cases an
 * allocation failure will usually result in a kernel panic.  The system
 * priority is used for most other kernel memory allocations, for instance by
 * UMA's slab allocator or the buffer cache.  Such allocations will fail if
 * the free count is below interrupt_free_min.  All other allocations occur at
 * the normal priority, which is typically used for allocation of user pages,
 * for instance in the page fault handler or when allocating page table pages
 * or pv_entry structures for user pmaps.  Such allocations fail if the free
 * count is below the free_reserved threshold.
 *
 *--
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count
 * rises above the free_min threshold; the page daemon and laundry threads are
 * given priority and will wake up once free_count reaches the (much smaller)
 * pageout_free_min threshold.
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where
 * the free page count is above the free_min threshold.  This means that given
 * the choice between two NUMA domains, one above the free_min threshold and
 * one below, the former will be used to satisfy the allocation request
 * regardless of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure
 * (i.e., vmd_free_count < vmd_free_target).  This allows kernel subsystems to
 * register for notifications of free page shortages, upon which they may
 * shrink their caches.  Following a vm_lowmem event, UMA's caches are pruned
 * to ensure that they do not contain an excess of unused memory.  When a
 * domain is below the free_min threshold, UMA limits the population of
 * per-CPU caches.  When a domain falls below the free_severe threshold, UMA's
 * caches are completely drained.
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in
 * a last-ditch attempt to free up some pages.  Either of the two following
 * conditions will activate the OOM killer:
 *
 *  1. The page daemons collectively fail to reclaim any pages during their
 *     inactive queue scans.  After vm_pageout_oom_seq consecutive scans fail,
 *     the page daemon thread votes for an OOM kill, and an OOM kill is
 *     triggered when all page daemons have voted.  This heuristic is strict
 *     and may fail to trigger even when the system is effectively deadlocked.
 *
 *  2. Threads in the user fault handler are repeatedly unable to make
 *     progress while allocating a page to satisfy the fault.  After
 *     vm_pfault_oom_attempts page allocation failures with intervening
 *     vm_wait() calls, the faulting thread will trigger an OOM kill.
 */
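
/*
 * Editor's illustrative sketch, not part of the kernel API: the aggressive
 * active queue scan condition described above, written out as code.  The
 * function and parameter names are hypothetical; the page daemon derives the
 * real values from the page queues and the thresholds in struct vm_domain
 * below.
 */
static inline bool
vm_example_active_scan_needed(int inactive_len, int laundry_len,
    int free_target, int free_count, int inactive_target)
{

	return (inactive_len + laundry_len + free_target - free_count <
	    inactive_target);
}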

struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	struct vm_pgcache {
		int domain;
		int pool;
		uma_zone_t zone;
	} vmd_pgcache[VM_NFREEPOOL];
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	struct vmem *vmd_kernel_nofree_arena; /* (c) per-domain kva NOFREE arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	struct vm_nofreeq {
		vm_page_t ma;
		int offs;
	} vmd_nofreeq;			/* (f) NOFREE page bump allocator. */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

	/* Paging control variables, used within single threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	boolean_t vmd_oom;
	u_int vmd_inactive_threads;
	u_int vmd_inactive_shortage;	/* Per-thread shortage. */
	blockcount_t vmd_inactive_running; /* Number of inactive threads. */
	blockcount_t vmd_inactive_starting; /* Number of threads started. */
	volatile u_int vmd_addl_shortage; /* Shortage accumulator. */
	volatile u_int vmd_inactive_freed; /* Successful inactive frees. */
	volatile u_int vmd_inactive_us;	/* Microseconds for above. */
	u_int vmd_inactive_pps;		/* Exponential decay frees/second. */
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2];	/* markers for active queue scan */

	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;

	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) pages desired free */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */

	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);
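
/*
 * Editor's illustrative sketch, not part of the kernel API: the free-count
 * floor enforced for each allocation priority, as described in the "Page
 * allocation priorities" section above.  The VM_ALLOC_* request classes come
 * from vm/vm_page.h, on which this header already depends; the authoritative
 * policy lives in vm_domain_allocate().
 */
static inline u_int
vm_example_alloc_floor(struct vm_domain *vmd, int req)
{

	switch (req & VM_ALLOC_CLASS_MASK) {
	case VM_ALLOC_INTERRUPT:
		return (0);		/* May take any free page. */
	case VM_ALLOC_SYSTEM:
		return (vmd->vmd_interrupt_free_min);
	default:			/* VM_ALLOC_NORMAL */
		return (vmd->vmd_free_reserved);
	}
}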

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)		(&vm_dom[(n)])
#define	VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#define	vm_domain_free_assert_locked(n)					\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define	vm_domain_free_assert_unlocked(n)				\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_free_lock(d)						\
	    mtx_lock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_lockptr(d)					\
	    (&(d)->vmd_free_mtx)
#define	vm_domain_free_trylock(d)					\
	    mtx_trylock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_unlock(d)					\
	    mtx_unlock(vm_domain_free_lockptr((d)))

#define	vm_domain_pageout_lockptr(d)					\
	    (&(d)->vmd_pageout_mtx)
#define	vm_domain_pageout_assert_locked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define	vm_domain_pageout_assert_unlocked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_pageout_lock(d)					\
	    mtx_lock(vm_domain_pageout_lockptr((d)))
#define	vm_domain_pageout_unlock(d)					\
	    mtx_unlock(vm_domain_pageout_lockptr((d)))

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}

static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}

static inline bool
vm_batchqueue_empty(const struct vm_batchqueue *bq)
{

	return (bq->bq_cnt == 0);
}

/*
 * Stage a page in the batch queue.  Returns the number of slots that were
 * free prior to the insertion, i.e., zero if and only if the queue was
 * already full and the page was not inserted.
 */
static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{
	int slots_free;

	slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
	if (slots_free > 0)
		bq->bq_pa[bq->bq_cnt++] = m;
	return (slots_free);
}

static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}
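
/*
 * Editor's illustrative sketch, not part of the kernel API: typical batch
 * queue usage.  Pages are staged with vm_batchqueue_insert(); once the queue
 * fills, the caller drains it (here through a hypothetical "flush" callback)
 * and retries, which cannot fail on an empty queue.  Real consumers drain a
 * batch into a page queue while holding its lock once for the whole batch.
 */
static inline void
vm_example_batch_page(struct vm_batchqueue *bq, vm_page_t m,
    void (*flush)(vm_page_t))
{
	vm_page_t bm;

	if (vm_batchqueue_insert(bq, m) == 0) {
		while ((bm = vm_batchqueue_pop(bq)) != NULL)
			flush(bm);
		(void)vm_batchqueue_insert(bq, m);
	}
}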

void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);

/*
 * vm_pagequeue_domain:
 *
 *	Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_page_domain(m)));
}

/*
 * Return the number of pages we need to free up.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);
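
/*
 * Editor's illustrative sketch, not part of the kernel API: how an
 * allocation path can combine the helpers above, kicking the page daemon
 * once the free count crosses the wakeup threshold.  vm_domain_allocate()
 * implements the real policy.
 */
static inline void
vm_example_post_alloc_check(struct vm_domain *vmd, u_int free_count)
{

	if (vm_paging_needed(vmd, free_count))
		pagedaemon_wakeup(vmd->vmd_domain);
}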

static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;
	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}

#endif	/* _KERNEL */
#endif	/* !_VM_PAGEQUEUE_ */