Lines Matching +full:int +full:- +full:threshold
1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
55 * Pittsburgh PA 15213-3890
68 int pq_cnt;
81 int bq_cnt;
107 * ---
109 * |-> vmd_inactive_target (~3%)
110 * | - The active queue scan target is given by
111 * | (vmd_inactive_target + vmd_free_target - vmd_free_count).
114 * |-> vmd_free_target (~2%)
115 * | - Target for page reclamation.
117 * |-> vmd_pageout_wakeup_thresh (~1.8%)
118 * | - Threshold for waking up the page daemon.
121 * |-> vmd_free_min (~0.5%)
122 * | - First low memory threshold.
123 * | - Causes per-CPU caching to be lazily disabled in UMA.
124 * | - vm_wait() sleeps below this threshold.
126 * |-> vmd_free_severe (~0.25%)
127 * | - Second low memory threshold.
128 * | - Triggers aggressive UMA reclamation, disables delayed buffer writes.
131 * |-> vmd_free_reserved (~0.13%)
132 * | - Minimum for VM_ALLOC_NORMAL page allocations.
133 * |-> vmd_pageout_free_min (32 + 2 pages)
134 * | - Minimum for waking a page daemon thread sleeping in vm_wait().
135 * |-> vmd_interrupt_free_min (2 pages)
136 * | - Minimum for VM_ALLOC_SYSTEM page allocations.
137 * ---
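
Read from the bottom of the diagram up, the reserves nest: each threshold sits at or below the one listed above it. As a quick illustration (not part of the header; the function name is invented), a check of that ordering over the fields named in the diagram could look like this:

/* Illustrative only: the ordering implied by the diagram above. */
static inline int
vmd_thresholds_ordered(const struct vm_domain *vmd)
{
        return (vmd->vmd_interrupt_free_min <= vmd->vmd_pageout_free_min &&
            vmd->vmd_pageout_free_min <= vmd->vmd_free_reserved &&
            vmd->vmd_free_reserved <= vmd->vmd_free_severe &&
            vmd->vmd_free_severe <= vmd->vmd_free_min &&
            vmd->vmd_free_min <= vmd->vmd_pageout_wakeup_thresh &&
            vmd->vmd_pageout_wakeup_thresh <= vmd->vmd_free_target &&
            vmd->vmd_free_target <= vmd->vmd_inactive_target);
}
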
139 *--
144 * page shortage (free_target - free_count) to a PID controller, which in
151 * When the free page count drops below the wakeup threshold,
162 * len(I) + len(L) + free_target - free_count < inactive_target
165 * scans a small portion of the active queue in order to maintain up-to-date
166 * per-page access history. Unreferenced pages in the active queue thus
169 * The per-domain laundry thread periodically launders dirty pages based on the
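
The inequality quoted at line 162 can be transcribed directly into a predicate: it compares the inactive and laundry queue lengths, plus the pages still owed to the free target, against the inactive target. A literal sketch follows (the helper name and parameter list are invented; len(I) and len(L) become plain counts):

/* len(I) + len(L) + (free_target - free_count) < inactive_target */
static inline int
inactive_below_target(int inact_cnt, int laundry_cnt, int free_target,
    int free_count, int inactive_target)
{
        return (inact_cnt + laundry_cnt + (free_target - free_count) <
            inactive_target);
}
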
175 *--
179 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT. An interrupt-priority allocation can
189 * the free_reserved threshold.
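
Lines 132 and 136 of the diagram pin down two of the three allocation classes: VM_ALLOC_NORMAL allocations stop at vmd_free_reserved and VM_ALLOC_SYSTEM allocations at vmd_interrupt_free_min. A rough sketch of that mapping follows; it is not the real allocator, and letting VM_ALLOC_INTERRUPT run the reserve down to zero is an assumption based on the truncated sentence above:

/*
 * Hypothetical: the lowest free count each allocation class may leave
 * behind, per the reserves in the diagram.
 */
static inline u_int
alloc_class_reserve(const struct vm_domain *vmd, int req_class)
{
        switch (req_class) {
        case VM_ALLOC_INTERRUPT:
                return (0);             /* assumption: may take the last pages */
        case VM_ALLOC_SYSTEM:
                return (vmd->vmd_interrupt_free_min);
        default:                        /* VM_ALLOC_NORMAL */
                return (vmd->vmd_free_reserved);
        }
}
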
191 *--
195 * back-pressure and give the page daemon a chance to recover. When a page
199 * above the free_min threshold; the page daemon and laundry threads are given
201 * pageout_free_min threshold.
204 * free page count is above the free_min threshold. This means that given the
205 * choice between two NUMA domains, one above the free_min threshold and one
215 * free_min threshold, UMA limits the population of per-CPU caches. When a
216 * domain falls below the free_severe threshold, UMA's caches are completely
220 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in a
221 * last-ditch attempt to free up some pages. Either of the two following
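
One concrete reading of the NUMA note around lines 204-205: given a choice of domains, an iterator should pass over one that has fallen below free_min when another has not. Below is a toy selector using the vm_paging_min() predicate defined later in this header; the selector itself is invented:

/* Hypothetical: prefer a domain that is not below free_min. */
static inline struct vm_domain *
vmd_prefer_unstressed(struct vm_domain *a, struct vm_domain *b)
{
        if (!vm_paging_min(a))
                return (a);
        if (!vm_paging_min(b))
                return (b);
        return (a);             /* both are below free_min */
}
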
240 int domain;
241 int pool;
244 struct vmem *vmd_kernel_arena; /* (c) per-domain kva R/W arena. */
245 struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
246 struct vmem *vmd_kernel_nofree_arena; /* (c) per-domain kva NOFREE arena. */
252 int offs;
256 uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];
263 u_int vmd_inactive_shortage; /* Per-thread shortage. */
270 int vmd_oom_seq;
271 int vmd_last_active_scan;
273 struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
276 int vmd_pageout_wanted; /* (a, p) pageout daemon wait channel */
277 int vmd_pageout_pages_needed; /* (d) page daemon waiting for pages? */
295 u_int vmd_interrupt_free_min; /* (c) reserved pages for int code */
308 #define vm_pagequeue_assert_locked(pq) mtx_assert(&(pq)->pq_mutex, MA_OWNED)
309 #define vm_pagequeue_lock(pq) mtx_lock(&(pq)->pq_mutex)
310 #define vm_pagequeue_lockptr(pq) (&(pq)->pq_mutex)
311 #define vm_pagequeue_trylock(pq) mtx_trylock(&(pq)->pq_mutex)
312 #define vm_pagequeue_unlock(pq) mtx_unlock(&(pq)->pq_mutex)
321 #define vm_domain_free_lockptr(d) (&(d)->vmd_free_mtx)
328 #define vm_domain_pageout_lockptr(d) (&(d)->vmd_pageout_mtx)
339 vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend) in vm_pagequeue_cnt_add()
343 pq->pq_cnt += addend; in vm_pagequeue_cnt_add()
346 #define vm_pagequeue_cnt_dec(pq) vm_pagequeue_cnt_add((pq), -1)
352 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q); in vm_pagequeue_remove()
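
A short usage sketch tying the locking macros at lines 308-312 to the helpers at lines 339-352: the assert macro suggests pq_cnt is protected by the queue mutex, so an insertion would hold the lock across both the TAILQ update and the counter adjustment. The enqueue function below is invented for illustration; pq_pl and plinks.q are taken from the removal helper above:

static void
example_enqueue(struct vm_pagequeue *pq, vm_page_t m)
{
        vm_pagequeue_lock(pq);
        TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
        vm_pagequeue_cnt_add(pq, 1);
        vm_pagequeue_unlock(pq);
}
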
360 bq->bq_cnt = 0; in vm_batchqueue_init()
366 return (bq->bq_cnt == 0); in vm_batchqueue_empty()
369 static inline int
372 int slots_free; in vm_batchqueue_insert()
374 slots_free = nitems(bq->bq_pa) - bq->bq_cnt; in vm_batchqueue_insert()
376 bq->bq_pa[bq->bq_cnt++] = m; in vm_batchqueue_insert()
386 if (bq->bq_cnt == 0) in vm_batchqueue_pop()
388 return (bq->bq_pa[--bq->bq_cnt]); in vm_batchqueue_pop()
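
The batch queue is a small, fixed-size LIFO of page pointers: vm_batchqueue_insert() computes the remaining capacity (slots_free) before storing the page, and vm_batchqueue_pop() hands back the most recently inserted page. A drain loop might look like the sketch below; the assumption that pop returns NULL once bq_cnt reaches zero is inferred from the check at line 386, and process_page() is a placeholder rather than a real routine:

/* Placeholder for whatever per-page work the caller wants to do. */
static void
process_page(vm_page_t m)
{
        (void)m;
}

static void
example_drain(struct vm_batchqueue *bq)
{
        vm_page_t m;

        /* Entries come back in LIFO order until the batch is empty. */
        while ((m = vm_batchqueue_pop(bq)) != NULL)
                process_page(m);
}
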
393 int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);
408 * Return the number of pages we need to free up or cache
411 static inline int
415 return (vmd->vmd_free_target - vmd->vmd_free_count); in vm_paging_target()
421 static inline int
425 return (free_count < vmd->vmd_pageout_wakeup_thresh); in vm_paging_needed()
431 static inline int
435 return (vmd->vmd_free_min > vmd->vmd_free_count); in vm_paging_min()
441 static inline int
445 return (vmd->vmd_free_severe > vmd->vmd_free_count); in vm_paging_severe()
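
Because the thresholds nest (free_severe below free_min below the wakeup threshold), these predicates nest as well: vm_paging_severe() implies vm_paging_min(), which in turn implies vm_paging_needed() for the current free count. A caller could therefore classify a domain by testing from the deepest deficit outward; the enum and helper below are illustrative and not part of the header:

enum vmd_pressure { VMD_OK, VMD_WAKEUP, VMD_MIN, VMD_SEVERE };

static inline enum vmd_pressure
vmd_pressure_level(struct vm_domain *vmd)
{
        if (vm_paging_severe(vmd))
                return (VMD_SEVERE);
        if (vm_paging_min(vmd))
                return (VMD_MIN);
        if (vm_paging_needed(vmd, vmd->vmd_free_count))
                return (VMD_WAKEUP);
        return (VMD_OK);
}
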
452 static inline int
459 void pagedaemon_wakeup(int domain);
462 vm_domain_freecnt_inc(struct vm_domain *vmd, int adj) in vm_domain_freecnt_inc()
466 old = atomic_fetchadd_int(&vmd->vmd_free_count, adj); in vm_domain_freecnt_inc()
469 * Only update bitsets on transitions. Notice we short-circuit the rest of the checks when the count started at or above free_min. in vm_domain_freecnt_inc()
472 if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min || in vm_domain_freecnt_inc()
473 (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) || in vm_domain_freecnt_inc()
474 (old < vmd->vmd_pageout_free_min && in vm_domain_freecnt_inc()
475 new >= vmd->vmd_pageout_free_min))) in vm_domain_freecnt_inc()
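
In words: the guarded (and here elided) bitset update runs only when an increment crosses one of the low-memory thresholds from below while the domain was still short of free_min. With made-up numbers, say free_min = 100 and free_severe = 50: an adjustment from 90 to 120 satisfies old < free_min and new >= free_min, so any sleepers tracked for the domain can be woken; an adjustment from 120 to 150 fails the old < free_min test and skips the remaining comparisons, which is the short-circuit the comment refers to.
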