/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _VM_PAGEQUEUE_
#define _VM_PAGEQUEUE_

#ifdef _KERNEL
struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	const char	* const pq_name;
	uint64_t	pq_pdpages;
} __aligned(CACHE_LINE_SIZE);

#if __SIZEOF_LONG__ == 8
#define	VM_BATCHQUEUE_SIZE	63
#else
#define	VM_BATCHQUEUE_SIZE	15
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
	int		bq_cnt;
} __aligned(CACHE_LINE_SIZE);

#include <vm/uma.h>
#include <sys/_blockcount.h>
#include <sys/pidctrl.h>
struct sysctl_oid;

/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and accounting.
 *
 * Lock Key:
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * d	vm_domainset_lock
 * a	atomic
 * c	const after boot
 * q	page queue lock
 *
 * A dedicated page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages.  To decide how many pages to process,
 * it uses thresholds derived from the number of pages in the domain:
 *
 *  vmd_page_count
 *       ---
 *        |
 *        |-> vmd_inactive_target (~3%)
 *        |   - The active queue scan target is given by
 *        |     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *        |
 *        |
 *        |-> vmd_free_target (~2%)
 *        |   - Target for page reclamation.
 *        |
 *        |-> vmd_pageout_wakeup_thresh (~1.8%)
 *        |   - Threshold for waking up the page daemon.
 *        |
 *        |
 *        |-> vmd_free_min (~0.5%)
 *        |   - First low memory threshold.
 *        |   - Causes per-CPU caching to be lazily disabled in UMA.
 *        |   - vm_wait() sleeps below this threshold.
 *        |
 *        |-> vmd_free_severe (~0.25%)
 *        |   - Second low memory threshold.
 *        |   - Triggers aggressive UMA reclamation, disables delayed buffer
 *        |     writes.
 *        |
 *        |-> vmd_free_reserved (~0.13%)
 *        |   - Minimum for VM_ALLOC_NORMAL page allocations.
 *        |-> vmd_pageout_free_min (32 + 2 pages)
 *        |   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *        |-> vmd_interrupt_free_min (2 pages)
 *        |   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *       ---
 *
 *--
 * Free page count regulation:
 *
 * The page daemon attempts to ensure that the free page count is above the free
 * target.  It wakes up periodically (every 100ms) to input the current free
 * page shortage (free_target - free_count) to a PID controller, which in
 * response outputs the number of pages to attempt to reclaim.  The shortage's
 * current magnitude, rate of change, and cumulative value are together used to
 * determine the controller's output.  The page daemon target thus adapts
 * dynamically to the system's demand for free pages, resulting in less
 * burstiness than a simple hysteresis loop.
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps ensure
 * that the system responds promptly to a large instantaneous free page
 * shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that it
 * can respond promptly to a sudden free page shortage.  In particular, the page
 * daemon thread aggressively scans active pages so long as the following
 * condition holds (sketched after struct vm_domain below):
 *
 *      len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history.  Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
 *
 * The per-domain laundry thread periodically launders dirty pages based on the
 * number of clean pages freed by the page daemon since the last laundering.  If
 * the page daemon fails to meet its scan target (i.e., the PID controller
 * output) because of a shortage of clean inactive pages, the laundry thread
 * attempts to launder enough pages to meet the free page target.
 *
 *--
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation can
 * claim any free page.  This priority is used in the pmap layer when attempting
 * to allocate a page for the kernel page tables; in such cases an allocation
 * failure will usually result in a kernel panic.  The system priority is used
 * for most other kernel memory allocations, for instance by UMA's slab
 * allocator or the buffer cache.  Such allocations will fail if the free count
 * is below interrupt_free_min.  All other allocations occur at the normal
 * priority, which is typically used for allocation of user pages, for instance
 * in the page fault handler or when allocating page table pages or pv_entry
 * structures for user pmaps.  Such allocations fail if the free count is below
 * the free_reserved threshold.
 *
 *--
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count rises
 * above the free_min threshold; the page daemon and laundry threads are given
 * priority and will wake up once free_count reaches the (much smaller)
 * pageout_free_min threshold.
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where the
 * free page count is above the free_min threshold.  This means that given the
 * choice between two NUMA domains, one above the free_min threshold and one
 * below, the former will be used to satisfy the allocation request regardless
 * of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure (i.e.,
 * vmd_free_count < vmd_free_target).  This allows kernel subsystems to register
 * for notifications of free page shortages, upon which they may shrink their
 * caches.  Following a vm_lowmem event, UMA's caches are pruned to ensure that
 * they do not contain an excess of unused memory.  When a domain is below the
 * free_min threshold, UMA limits the population of per-CPU caches.  When a
 * domain falls below the free_severe threshold, UMA's caches are completely
 * drained.
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in a
 * last-ditch attempt to free up some pages.  Either of the two following
 * conditions will activate the OOM killer:
 *
 *  1. The page daemons collectively fail to reclaim any pages during their
 *     inactive queue scans.  After vm_pageout_oom_seq consecutive scans fail,
 *     the page daemon thread votes for an OOM kill, and an OOM kill is
 *     triggered when all page daemons have voted.  This heuristic is strict and
 *     may fail to trigger even when the system is effectively deadlocked.
 *
 *  2. Threads in the user fault handler are repeatedly unable to make progress
 *     while allocating a page to satisfy the fault.  After
 *     vm_pfault_oom_attempts page allocation failures with intervening
 *     vm_wait() calls, the faulting thread will trigger an OOM kill.
 */
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	struct vm_pgcache {
		int domain;
		int pool;
		uma_zone_t zone;
	} vmd_pgcache[VM_NFREEPOOL];
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	struct vmem *vmd_kernel_nofree_arena; /* (c) per-domain kva NOFREE arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	struct vm_nofreeq {
		vm_page_t ma;
		int offs;
	} vmd_nofreeq;			/* (f) NOFREE page bump allocator. */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

	/* Paging control variables, used by the single-threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	bool vmd_oom;			/* An OOM kill was requested. */
	bool vmd_helper_threads_enabled;/* Use multiple threads to scan. */
	u_int vmd_inactive_threads;	/* Number of extra helper threads. */
	u_int vmd_inactive_shortage;	/* Per-thread shortage. */
	blockcount_t vmd_inactive_running; /* Number of inactive threads. */
	blockcount_t vmd_inactive_starting; /* Number of threads started. */
	volatile u_int vmd_addl_shortage; /* Shortage accumulator. */
	volatile u_int vmd_inactive_freed; /* Successful inactive frees. */
	volatile u_int vmd_inactive_us;	/* Microseconds for above. */
	u_int vmd_inactive_pps;		/* Exponential decay frees/second. */
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2];	/* markers for active queue scan */

	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;

	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) minimum pages desired free */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */

	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);
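
/*
 * Illustration only: a minimal sketch of the active-scan trigger condition
 * described in the comment above.  The helper name is hypothetical; the real
 * test lives in the page daemon (vm_pageout.c).  Not compiled.
 */
#if 0
static inline bool
example_active_scan_needed(struct vm_domain *vmd)
{
	int inact, laund, shortage;

	inact = vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
	laund = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
	shortage = vmd->vmd_free_target - vmd->vmd_free_count;

	/* len(I) + len(L) + free_target - free_count < inactive_target */
	return (inact + laund + shortage < (int)vmd->vmd_inactive_target);
}
#endif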

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)		(&vm_dom[(n)])
#define	VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#define	vm_domain_free_assert_locked(n)		\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define	vm_domain_free_assert_unlocked(n)	\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_free_lock(d)			\
	    mtx_lock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_lockptr(d)		\
	    (&(d)->vmd_free_mtx)
#define	vm_domain_free_trylock(d)		\
	    mtx_trylock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_unlock(d)		\
	    mtx_unlock(vm_domain_free_lockptr((d)))

#define	vm_domain_pageout_lockptr(d)		\
	    (&(d)->vmd_pageout_mtx)
#define	vm_domain_pageout_assert_locked(n)	\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define	vm_domain_pageout_assert_unlocked(n)	\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_pageout_lock(d)		\
	    mtx_lock(vm_domain_pageout_lockptr((d)))
#define	vm_domain_pageout_unlock(d)		\
	    mtx_unlock(vm_domain_pageout_lockptr((d)))

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}
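
/*
 * Illustration only: the typical pattern for manipulating a page queue under
 * its lock with the helpers above.  Hypothetical fragment; real callers live
 * in vm_page.c and vm_pageout.c.  Not compiled.
 */
#if 0
static void
example_requeue_tail(struct vm_pagequeue *pq, vm_page_t m)
{

	/*
	 * The queue lock covers both pq_pl and pq_cnt;
	 * vm_pagequeue_cnt_add() asserts that it is held.
	 */
	vm_pagequeue_lock(pq);
	vm_pagequeue_remove(pq, m);
	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_inc(pq);
	vm_pagequeue_unlock(pq);
}
#endif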

static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}

static inline bool
vm_batchqueue_empty(const struct vm_batchqueue *bq)
{
	return (bq->bq_cnt == 0);
}

static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{
	int slots_free;

	slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
	if (slots_free > 0)
		bq->bq_pa[bq->bq_cnt++] = m;
	return (slots_free);
}

static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}
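
/*
 * Illustration only: the intended fill/drain pattern for a vm_batchqueue,
 * which defers taking the page queue lock until a whole batch can be
 * processed.  Hypothetical fragment modelled on the batching in vm_page.c;
 * not compiled.
 */
#if 0
static void
example_flush_batch(struct vm_pagequeue *pq, struct vm_batchqueue *bq)
{
	vm_page_t m;

	/* Take the queue lock once per batch rather than once per page. */
	vm_pagequeue_lock(pq);
	while ((m = vm_batchqueue_pop(bq)) != NULL) {
		TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
		vm_pagequeue_cnt_inc(pq);
	}
	vm_pagequeue_unlock(pq);
}
#endif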

void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);
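
/*
 * Illustration only: how the allocation priorities described in the big
 * comment above map onto free-count floors, in the spirit of
 * vm_domain_allocate().  This is a simplified, hypothetical sketch (the real
 * function also charges vmd_free_count atomically and handles further
 * details); it is not compiled.
 */
#if 0
static u_int
example_alloc_floor(struct vm_domain *vmd, int req)
{

	switch (req & VM_ALLOC_CLASS_MASK) {
	case VM_ALLOC_INTERRUPT:
		/* Interrupt-priority allocations may take any free page. */
		return (0);
	case VM_ALLOC_SYSTEM:
		/* System-priority allocations fail below interrupt_free_min. */
		return (vmd->vmd_interrupt_free_min);
	default:
		/* VM_ALLOC_NORMAL allocations fail below free_reserved. */
		return (vmd->vmd_free_reserved);
	}
}
#endif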

/*
 * vm_pagequeue_domain:
 *
 *	Return the vm_domain for the memory domain to which the page belongs.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_page_domain(m)));
}

/*
 * Return the number of pages we need to free up to reach the free target.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}
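
/*
 * Illustration only: roughly how the page daemon turns this shortage into a
 * scan target.  This assumes the pidctrl(9)-style interface declared in
 * <sys/pidctrl.h>, where pidctrl_daemon() consumes the current value and
 * returns a scaled correction toward the setpoint (vmd_free_target here);
 * the real loop lives in vm_pageout.c.  Not compiled.
 */
#if 0
static int
example_scan_target(struct vm_domain *vmd)
{

	/*
	 * The controller folds the shortage's magnitude, rate of change,
	 * and cumulative value into the number of pages to reclaim.
	 */
	return (pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count));
}
#endif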

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);
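
/*
 * Illustration only: how the predicates above might gate an allocating
 * thread.  Hypothetical fragment; the real policy lives in vm_page.c and
 * the domainset iterators.  Not compiled.
 */
#if 0
static bool
example_should_sleep(struct vm_domain *vmd)
{

	/* Kick the page daemon before the shortage becomes severe... */
	if (vm_paging_needed(vmd, vmd->vmd_free_count))
		pagedaemon_wakeup(vmd->vmd_domain);
	/* ...and block the caller only below the min threshold. */
	return (vm_paging_min(vmd));
}
#endif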

static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;
	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}
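
/*
 * Worked example of the transition check above, assuming free_min = 100,
 * free_severe = 50 and pageout_free_min = 34: an increment taking the count
 * from 99 to 100 crosses free_min and calls vm_domain_clear(), as does one
 * taking it from 49 to 50 (crossing free_severe); an increment from 120 to
 * 125 short-circuits immediately, since the domain already sat above every
 * threshold.
 */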

#endif	/* _KERNEL */
#endif	/* !_VM_PAGEQUEUE_ */