/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 * ...
 * The Mach Operating System project at Carnegie-Mellon University.
 * ...
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * ...
 * Pittsburgh PA 15213-3890
 * ...
 */
/*
 * Free page thresholds, ordered from most to least plentiful (approximate
 * defaults, as a fraction of the domain's pages, are given in parentheses):
 *
 *	---
 *	|
 *	|-> vmd_inactive_target (~3%)
 *	|   - The active queue scan target is given by
 *	|     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *	|
 *	|-> vmd_free_target (~2%)
 *	|   - Target for page reclamation.
 *	|
 *	|-> vmd_pageout_wakeup_thresh (~1.8%)
 *	|   - Threshold for waking up the page daemon.
 *	|
 *	|-> vmd_free_min (~0.5%)
 *	|   - First low memory threshold.
 *	|   - Causes per-CPU caching to be lazily disabled in UMA.
 *	|   - vm_wait() sleeps below this threshold.
 *	|
 *	|-> vmd_free_severe (~0.25%)
 *	|   - Second low memory threshold.
 *	|   - Triggers aggressive UMA reclamation, disables delayed buffer
 *	|     writes.
 *	|
 *	|-> vmd_free_reserved (~0.13%)
 *	|   - Minimum for VM_ALLOC_NORMAL page allocations.
 *	|
 *	|-> vmd_pageout_free_min (32 + 2 pages)
 *	|   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *	|
 *	|-> vmd_interrupt_free_min (2 pages)
 *	|   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *	---
 */
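/*
 * Illustrative sketch only, not the actual initialization code (which lives
 * in vm_pageout.c): one way per-domain thresholds with the approximate
 * magnitudes shown above could be derived from the domain's page count.
 * example_set_thresholds() is a hypothetical helper.
 */
static inline void
example_set_thresholds(struct vm_domain *vmd, u_int pages)
{
	vmd->vmd_free_severe = pages / 400;		  /* ~0.25% */
	vmd->vmd_free_min = pages / 200;		  /* ~0.5%  */
	vmd->vmd_free_reserved = pages / 768;		  /* ~0.13% */
	vmd->vmd_pageout_wakeup_thresh = pages * 9 / 500; /* ~1.8%  */
	vmd->vmd_free_target = pages / 50;		  /* ~2%    */
	vmd->vmd_inactive_target = pages * 3 / 100;	  /* ~3%    */
}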
/*--
 * The page daemon attempts to keep the number of free pages above the free
 * target.  It wakes up periodically (every 100ms) to input the current free
 * page shortage (free_target - free_count) to a PID controller, which in
 * turn computes a scan target for the inactive queue.
 * ...
 * When an allocation drives the free count below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps ensure
 * that reclamation begins before the shortage becomes severe.
 * ...
 * The active queue is scanned to refill the inactive queue whenever
 *
 *	len(I) + len(L) + free_target - free_count < inactive_target
 *
 * holds, where I and L denote the inactive and laundry queues.  The page
 * daemon also periodically scans a small portion of the active queue in order
 * to maintain up-to-date per-page access history.  Unreferenced pages in the
 * active queue thus eventually migrate to the inactive queue.
 * ...
 * The per-domain laundry thread periodically launders dirty pages based on the
 * ...
 */
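/*
 * A minimal, self-contained sketch of the PID idea described above: the free
 * page shortage (free_target - free_count) is the controller's error input,
 * and the output is used as an inactive queue scan target.  This is not
 * FreeBSD's pidctrl(9) implementation; struct example_pid and
 * example_pid_update() are hypothetical.
 */
struct example_pid {
	int	ep_kp, ep_ki, ep_kd;	/* gains, scaled by ep_scale */
	int	ep_scale;		/* common divisor for the gains */
	int	ep_integral;		/* accumulated error (I term) */
	int	ep_prev;		/* previous error (D term) */
};

static inline int
example_pid_update(struct example_pid *ep, struct vm_domain *vmd)
{
	int error, output;

	/* Positive error means we are short of the free target. */
	error = (int)(vmd->vmd_free_target - vmd->vmd_free_count);
	ep->ep_integral += error;
	output = (ep->ep_kp * error + ep->ep_ki * ep->ep_integral +
	    ep->ep_kd * (error - ep->ep_prev)) / ep->ep_scale;
	ep->ep_prev = error;
	return (output > 0 ? output : 0);	/* pages to scan */
}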
/*--
 * Page allocations are performed at one of three priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation can
 * consume the domain's last free pages, driving the free count all the way to
 * zero, while system-priority allocations stop at vmd_interrupt_free_min and
 * normal-priority allocations stop at vmd_free_reserved.
 * ...
 */
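/*
 * Hypothetical helper (not in the original header) making the allocation
 * floors above concrete: the free count may only be drawn down to the floor
 * corresponding to the request's priority.  The VM_ALLOC_* request classes
 * are real; example_alloc_floor() is illustrative.
 */
static inline u_int
example_alloc_floor(const struct vm_domain *vmd, int req_class)
{
	switch (req_class) {
	case VM_ALLOC_INTERRUPT:
		return (0);			/* may take the last page */
	case VM_ALLOC_SYSTEM:
		return (vmd->vmd_interrupt_free_min);
	default:				/* VM_ALLOC_NORMAL */
		return (vmd->vmd_free_reserved);
	}
}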
/*--
 * Threads that sleep in vm_wait() when memory is scarce apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * daemon thread itself must sleep in vm_wait(), it does so at a higher
 * priority and will wake up once free_count reaches the (much smaller)
 * vmd_pageout_free_min threshold, so that reclamation can resume quickly.
 * ...
 * Below the free_min threshold, UMA limits the population of per-CPU caches.
 * When a severe shortage persists, the kernel falls back to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in a
 * last-ditch attempt to free up some pages.  Either of the two following
 * conditions can trigger it:
 * ...
 */
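/*
 * Illustrative only: the free count at which a thread sleeping in vm_wait()
 * is awakened depends on who is sleeping, as described above.  A page daemon
 * thread is awakened at the much lower vmd_pageout_free_min mark so that
 * reclamation can resume as soon as a handful of pages become free.
 * example_wait_thresh() is a hypothetical helper.
 */
static inline u_int
example_wait_thresh(const struct vm_domain *vmd, bool pagedaemon_thread)
{
	return (pagedaemon_thread ?
	    vmd->vmd_pageout_free_min : vmd->vmd_free_min);
}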
struct vm_domain {
	/* ... */
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	struct vmem *vmd_kernel_nofree_arena; /* (c) per-domain kva NOFREE arena. */
	/* ... */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];
	/* ... */
	u_int vmd_inactive_shortage;	/* Per-thread shortage. */
	/* ... */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
	/* ... */
};
#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)
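/*
 * Illustrative usage (not part of the original header): manipulations of a
 * page queue's list and count must be bracketed by the queue lock.
 * example_requeue_tail() is a hypothetical helper that moves a page to the
 * tail of its queue.
 */
static inline void
example_requeue_tail(struct vm_pagequeue *pq, vm_page_t m)
{
	vm_pagequeue_lock(pq);
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_unlock(pq);
}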
#define	vm_domain_free_lockptr(d)					\
	(&(d)->vmd_free_mtx)
/* ... */
#define	vm_domain_pageout_lockptr(d)					\
	(&(d)->vmd_pageout_mtx)
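/*
 * Illustrative usage (not from the source): the lockptr accessors above are
 * normally consumed through mtx_lock()/mtx_unlock().
 * example_with_free_lock() is a hypothetical helper.
 */
static inline void
example_with_free_lock(struct vm_domain *vmd)
{
	mtx_lock(vm_domain_free_lockptr(vmd));
	/* ... manipulate the domain's free-page state ... */
	mtx_unlock(vm_domain_free_lockptr(vmd));
}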
static inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{
	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}

#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}
static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{
	bq->bq_cnt = 0;
}

static inline bool
vm_batchqueue_empty(struct vm_batchqueue *bq)
{
	return (bq->bq_cnt == 0);
}
/*
 * Attempt to add a page to the batch; return the number of slots that were
 * free, so a return value of zero means the batch was full and the page was
 * not inserted.
 */
static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{
	int slots_free;

	slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
	if (slots_free > 0)
		bq->bq_pa[bq->bq_cnt++] = m;
	return (slots_free);
}
static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{
	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}
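/*
 * Illustrative usage (not from the original header): a batch queue is a small
 * fixed-size LIFO of pages that lets callers amortize page queue lock
 * acquisitions.  example_flush() is a hypothetical routine that would push
 * the accumulated batch to a page queue under its lock.
 */
static void example_flush(struct vm_batchqueue *bq);	/* hypothetical */

static inline void
example_batchqueue_usage(struct vm_batchqueue *bq, vm_page_t m)
{
	vm_batchqueue_init(bq);
	if (vm_batchqueue_insert(bq, m) == 0) {
		/* The batch was full: flush it, then retry the insert. */
		example_flush(bq);
		(void)vm_batchqueue_insert(bq, m);
	}
	while (!vm_batchqueue_empty(bq))
		(void)vm_batchqueue_pop(bq);
}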
/*
 * Return the number of pages we need to free up or cache.  A positive result
 * indicates a shortage relative to the free target.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{
	return (vmd->vmd_free_target - vmd->vmd_free_count);
}
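/*
 * Worked example (illustrative numbers): with vmd_free_target = 2000 and
 * vmd_free_count = 1500, vm_paging_target() returns 500, i.e. the domain is
 * 500 pages short of its target; a negative return value means the domain
 * has a surplus and no reclamation is needed.
 */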
/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{
	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}
/*
 * Returns TRUE if the domain is below the min paging threshold.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{
	return (vmd->vmd_free_min > vmd->vmd_free_count);
}
/*
 * Returns TRUE if the domain is below the severe paging threshold.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{
	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}
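/*
 * Illustrative only: how a memory consumer might map the predicates above to
 * increasingly drastic responses, mirroring the UMA behavior described in
 * the comments earlier in this file.  example_pressure_level() is a
 * hypothetical helper.
 */
static inline int
example_pressure_level(struct vm_domain *vmd)
{
	if (vm_paging_severe(vmd))
		return (2);	/* reclaim caches aggressively */
	if (vm_paging_min(vmd))
		return (1);	/* stop filling per-CPU caches */
	if (vm_paging_needed(vmd, vmd->vmd_free_count))
		return (0);	/* page daemon should be running */
	return (-1);		/* no memory pressure */
}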
static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;

	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}
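/*
 * Worked example of the transition check above (illustrative numbers): with
 * vmd_free_min = 100, old = 98 and adj = 5, new = 103 crosses vmd_free_min,
 * so the domain's bits in the low-memory bitsets must be updated and waiters
 * signalled; a later increment from 103 to 108 starts at or above
 * vmd_free_min and short-circuits the whole test, keeping the common case a
 * single atomic add.
 */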