1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
55 * Pittsburgh PA 15213-3890
103 * inactive pages and aging active pages. To decide how many pages to process,
107 * ---
109 * |-> vmd_inactive_target (~3%)
110 * | - The active queue scan target is given by
111 * | (vmd_inactive_target + vmd_free_target - vmd_free_count).
114 * |-> vmd_free_target (~2%)
115 * | - Target for page reclamation.
117 * |-> vmd_pageout_wakeup_thresh (~1.8%)
118 * | - Threshold for waking up the page daemon.
121 * |-> vmd_free_min (~0.5%)
122 * | - First low memory threshold.
123 * | - Causes per-CPU caching to be lazily disabled in UMA.
124 * | - vm_wait() sleeps below this threshold.
126 * |-> vmd_free_severe (~0.25%)
127 * | - Second low memory threshold.
128 * | - Triggers aggressive UMA reclamation, disables delayed buffer
131 * |-> vmd_free_reserved (~0.13%)
132 * | - Minimum for VM_ALLOC_NORMAL page allocations.
133 * |-> vmd_pageout_free_min (32 + 2 pages)
134 * | - Minimum for waking a page daemon thread sleeping in vm_wait().
135 * |-> vmd_interrupt_free_min (2 pages)
136 * | - Minimum for VM_ALLOC_SYSTEM page allocations.
137 * ---
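/*
 * A minimal illustrative sketch, not the kernel's actual initialization
 * code: it derives watermarks of roughly the proportions diagrammed above
 * from a domain's managed page count.  The helper name and the literal
 * divisors are assumptions chosen to match the approximate percentages.
 */
static void
example_watermarks(u_int page_count)
{
	u_int free_target, free_min, free_severe, inactive_target;

	free_target = page_count / 50;		/* ~2% */
	free_min = page_count / 200;		/* ~0.5% */
	free_severe = free_min / 2;		/* ~0.25% */
	inactive_target = free_target * 3 / 2;	/* ~3% */
	printf("target %u min %u severe %u inactive %u\n",
	    free_target, free_min, free_severe, inactive_target);
}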
139 *--
144 * page shortage (free_target - free_count) to a PID controller, which in
157 * memory is present in the inactive (I) and laundry (L) page queues, so that it
162 * len(I) + len(L) + free_target - free_count < inactive_target
164 * Otherwise, when the inactive target is met, the page daemon periodically
165 * scans a small portion of the active queue in order to maintain up-to-date
166 * per-page access history. Unreferenced pages in the active queue thus
167 * eventually migrate to the inactive queue.
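/*
 * A sketch restating the inequality quoted above, assuming the
 * vmd_pagequeues[]/PQ_INACTIVE/PQ_LAUNDRY names from vm_pagequeue.h and
 * vm_page.h: the active queue is scanned for reclamation candidates when
 * the inactive and laundry queues, plus the outstanding free-page shortage,
 * cannot cover the inactive target.  This is not the pageout daemon's
 * actual control flow.
 */
static inline bool
example_need_active_scan(const struct vm_domain *vmd)
{
	u_int inact, laundry;
	int shortage;

	inact = vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
	laundry = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
	shortage = (int)(vmd->vmd_free_target - vmd->vmd_free_count);
	if (shortage < 0)
		shortage = 0;
	return (inact + laundry + (u_int)shortage < vmd->vmd_inactive_target);
}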
169 * The per-domain laundry thread periodically launders dirty pages based on the
172 * output) because of a shortage of clean inactive pages, the laundry thread
175 *--
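/*
 * A rough sketch of the laundering policy described above: when inactive
 * queue scans run short of clean pages (a shortfall), the laundry thread
 * launders enough pages to cover the deficit; otherwise it launders a
 * small portion of the laundry queue in the background.  The helper and
 * the pacing divisor are illustrative assumptions, not the laundry
 * thread's actual state machine.
 */
static u_int
example_launder_target(u_int shortfall, u_int laundry_cnt)
{
	if (shortfall > 0)
		return (shortfall);		/* catch up to the deficit */
	return (laundry_cnt / 16);		/* slow background pace */
}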
179 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT. An interrupt-priority allocation can
191 *--
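/*
 * A sketch of the allocation-priority floors noted in the diagram above,
 * assuming the standard request classes from vm_page.h: a VM_ALLOC_NORMAL
 * request must leave vmd_free_reserved pages free, a VM_ALLOC_SYSTEM
 * request must leave vmd_interrupt_free_min pages free, and an
 * interrupt-priority request may consume the remaining pages.  This
 * illustrates the thresholds only; it is not the allocator's actual
 * admission check.
 */
static inline bool
example_alloc_allowed(const struct vm_domain *vmd, int req_class)
{
	u_int limit;

	if (req_class == VM_ALLOC_INTERRUPT)
		limit = 0;
	else if (req_class == VM_ALLOC_SYSTEM)
		limit = vmd->vmd_interrupt_free_min;
	else
		limit = vmd->vmd_free_reserved;
	return (vmd->vmd_free_count > limit);
}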
195 * back-pressure and give the page daemon a chance to recover. When a page
215 * free_min threshold, UMA limits the population of per-CPU caches. When a
220 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in a
221 * last-ditch attempt to free up some pages. Either of the two following
225 * inactive queue scans. After vm_pageout_oom_seq consecutive scans fail,
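/*
 * A sketch of the OOM trigger described above: every consecutive inactive
 * queue scan that fails to reclaim enough pages bumps a counter, and once
 * the counter reaches the vm_pageout_oom_seq setting the OOM killer would
 * be invoked.  The helper and its parameters are illustrative; the real
 * bookkeeping lives in the pageout daemon.
 */
static bool
example_note_scan_result(bool reclaimed_enough, int *failed_scans, int oom_seq)
{
	if (reclaimed_enough) {
		*failed_scans = 0;		/* progress resets the count */
		return (false);
	}
	return (++*failed_scans >= oom_seq);	/* true: trigger the OOM killer */
}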
244 struct vmem *vmd_kernel_arena; /* (c) per-domain kva R/W arena. */
245 struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
246 struct vmem *vmd_kernel_nofree_arena; /* (c) per-domain kva NOFREE arena. */
256 uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];
262 u_int vmd_inactive_shortage; /* Per-thread shortage. */
263 blockcount_t vmd_inactive_running; /* Number of inactive threads. */
266 volatile u_int vmd_inactive_freed; /* Successful inactive frees. */
272 struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
291 u_int vmd_inactive_target; /* (c) pages desired inactive */
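/*
 * A sketch of how the bookkeeping fields above cooperate when several
 * threads scan the inactive queue in parallel: the domain-wide shortage is
 * split into a per-thread share (vmd_inactive_shortage), the number of
 * running scanners is tracked in vmd_inactive_running, and each thread
 * adds the pages it frees to vmd_inactive_freed.  The rounded-up division
 * below is an illustrative assumption, not the pageout daemon's exact
 * arithmetic; it presumes nthreads >= 1.
 */
static inline u_int
example_per_thread_shortage(u_int domain_shortage, u_int nthreads)
{
	return (howmany(domain_shortage, nthreads));
}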
307 #define vm_pagequeue_assert_locked(pq) mtx_assert(&(pq)->pq_mutex, MA_OWNED)
308 #define vm_pagequeue_lock(pq) mtx_lock(&(pq)->pq_mutex)
309 #define vm_pagequeue_lockptr(pq) (&(pq)->pq_mutex)
310 #define vm_pagequeue_trylock(pq) mtx_trylock(&(pq)->pq_mutex)
311 #define vm_pagequeue_unlock(pq) mtx_unlock(&(pq)->pq_mutex)
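/*
 * A hedged usage sketch for the locking macros above: the page queue mutex
 * is held around any manipulation of pq_pl and pq_cnt.  The helper name is
 * an assumption; vm_pagequeue_cnt_dec() is defined further down in this
 * file.
 */
static inline vm_page_t
example_dequeue_head(struct vm_pagequeue *pq)
{
	vm_page_t m;

	vm_pagequeue_lock(pq);
	m = TAILQ_FIRST(&pq->pq_pl);
	if (m != NULL) {
		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
		vm_pagequeue_cnt_dec(pq);
	}
	vm_pagequeue_unlock(pq);
	return (m);
}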
320 (&(d)->vmd_free_mtx)
327 (&(d)->vmd_pageout_mtx)
342 pq->pq_cnt += addend;
345 #define vm_pagequeue_cnt_dec(pq) vm_pagequeue_cnt_add((pq), -1)
351 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
359 bq->bq_cnt = 0;
365 return (bq->bq_cnt == 0);
373 slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
375 bq->bq_pa[bq->bq_cnt++] = m;
385 if (bq->bq_cnt == 0)
387 return (bq->bq_pa[--bq->bq_cnt]);
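/*
 * A hedged usage sketch for the vm_batchqueue helpers above: pages are
 * staged in a fixed-size batch and handed off in bulk once
 * vm_batchqueue_insert() returns 0 (batch already full, page not added).
 * example_flush() stands in for whatever bulk operation the caller
 * performs, e.g. requeueing under a single page queue lock; it is not a
 * real function.
 */
static void example_flush(vm_page_t m);		/* hypothetical bulk hand-off */

static void
example_batch(vm_page_t *pages, int npages)
{
	struct vm_batchqueue bq;
	vm_page_t m;
	int i;

	vm_batchqueue_init(&bq);
	for (i = 0; i < npages; i++) {
		if (vm_batchqueue_insert(&bq, pages[i]) == 0) {
			/* Batch is full: drain it, then retry this page. */
			while ((m = vm_batchqueue_pop(&bq)) != NULL)
				example_flush(m);
			i--;
		}
	}
	while ((m = vm_batchqueue_pop(&bq)) != NULL)
		example_flush(m);
}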
407 * Return the number of pages we need to free up or cache
414 return (vmd->vmd_free_target - vmd->vmd_free_count);
424 return (free_count < vmd->vmd_pageout_wakeup_thresh);
434 return (vmd->vmd_free_min > vmd->vmd_free_count);
444 return (vmd->vmd_free_severe > vmd->vmd_free_count);
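/*
 * A hedged usage sketch for the predicates above, assuming the
 * (vmd, free_count) parameters suggested by the body of vm_paging_needed():
 * a caller wakes the page daemon once free pages dip below
 * vmd_pageout_wakeup_thresh, and vm_paging_target() then gives the daemon
 * its reclamation goal.  pagedaemon_wakeup() is the existing interface from
 * vm_pageout.h; the helper itself is illustrative.
 */
static inline void
example_check_free(struct vm_domain *vmd)
{
	if (vm_paging_needed(vmd, vmd->vmd_free_count))
		pagedaemon_wakeup(vmd->vmd_domain);
}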
465 old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
468 * Only update bitsets on transitions. Notice we short-circuit the
471 if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
472 (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
473 (old < vmd->vmd_pageout_free_min &&
474 new >= vmd->vmd_pageout_free_min)))
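/*
 * A simplified restatement of the transition test above for a single
 * threshold: sleepers and the domain bitsets only need attention when the
 * free count crosses a watermark from below, not on every increment.  Not
 * kernel code; purely illustrative.
 */
static inline bool
example_crossed_threshold(u_int old, u_int new, u_int threshold)
{
	return (old < threshold && new >= threshold);
}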