/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _VM_PAGEQUEUE_
#define _VM_PAGEQUEUE_

#ifdef _KERNEL
struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	const char	* const pq_name;
	uint64_t	pq_pdpages;
} __aligned(CACHE_LINE_SIZE);

#if __SIZEOF_LONG__ == 8
#define	VM_BATCHQUEUE_SIZE	63
#else
#define	VM_BATCHQUEUE_SIZE	15
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
	int		bq_cnt;
} __aligned(CACHE_LINE_SIZE);

#include <vm/uma.h>
#include <sys/_blockcount.h>
#include <sys/pidctrl.h>
struct sysctl_oid;
/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and accounting.
 *
 * Lock Key:
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * d	vm_domainset_lock
 * a	atomic
 * c	const after boot
 * q	page queue lock
 *
 * A unique page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages.  To decide how many pages to process,
 * it uses thresholds derived from the number of pages in the domain:
 *
 *	vmd_page_count
 *	---
 *	|
 *	|-> vmd_inactive_target (~3%)
 *	|   - The active queue scan target is given by
 *	|     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *	|
 *	|
 *	|-> vmd_free_target (~2%)
 *	|   - Target for page reclamation.
 *	|
 *	|-> vmd_pageout_wakeup_thresh (~1.8%)
 *	|   - Threshold for waking up the page daemon.
 *	|
 *	|
 *	|-> vmd_free_min (~0.5%)
 *	|   - First low memory threshold.
 *	|   - Causes per-CPU caching to be lazily disabled in UMA.
 *	|   - vm_wait() sleeps below this threshold.
 *	|
 *	|-> vmd_free_severe (~0.25%)
 *	|   - Second low memory threshold.
 *	|   - Triggers aggressive UMA reclamation, disables delayed buffer
 *	|     writes.
 *	|
 *	|-> vmd_free_reserved (~0.13%)
 *	|   - Minimum for VM_ALLOC_NORMAL page allocations.
 *	|-> vmd_pageout_free_min (32 + 2 pages)
 *	|   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *	|-> vmd_interrupt_free_min (2 pages)
 *	|   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *	---
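 *
 * As a rough worked example, using the approximate ratios above (the actual
 * values are computed at boot and do not follow these percentages exactly):
 * a domain of 4M pages (16GB with 4KB pages) would have an inactive target
 * of ~126k pages, a free target of ~84k pages, a pageout wakeup threshold
 * of ~75k pages, free_min of ~21k pages, free_severe of ~10k pages, and
 * free_reserved of ~5k pages.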
 *
 *--
 * Free page count regulation:
 *
 * The page daemon attempts to ensure that the free page count is above the
 * free target.  It wakes up periodically (every 100ms) to feed the current
 * free page shortage (free_target - free_count) into a PID controller, which
 * in response outputs the number of pages to attempt to reclaim.  The
 * shortage's current magnitude, rate of change, and cumulative value are
 * together used to determine the controller's output.  The page daemon target
 * thus adapts dynamically to the system's demand for free pages, resulting
 * in less burstiness than a simple hysteresis loop.
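 *
 * As an illustrative sketch only (the real controller, including its gains
 * and output clamping, is implemented by the pidctrl code included above and
 * is not reproduced here), the computation has the familiar PID shape:
 *
 *	error  = free_target - free_count
 *	output = Kp * error + Ki * sum(past errors) + Kd * (error - prev_error)
 *
 * where output is the number of pages the page daemon will attempt to
 * reclaim in the upcoming scan.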
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps
 * ensure that the system responds promptly to a large instantaneous free
 * page shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that
 * it can respond promptly to a sudden free page shortage.  In particular,
 * the page daemon thread aggressively scans active pages so long as the
 * following condition holds:
 *
 *	len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history.  Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
 *
 * The per-domain laundry thread periodically launders dirty pages based on
 * the number of clean pages freed by the page daemon since the last
 * laundering.  If the page daemon fails to meet its scan target (i.e., the
 * PID controller output) because of a shortage of clean inactive pages, the
 * laundry thread attempts to launder enough pages to meet the free page
 * target.
 *
 *--
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation
 * can claim any free page.  This priority is used in the pmap layer when
 * attempting to allocate a page for the kernel page tables; in such cases an
 * allocation failure will usually result in a kernel panic.  The system
 * priority is used for most other kernel memory allocations, for instance by
 * UMA's slab allocator or the buffer cache.  Such allocations will fail if
 * the free count is below interrupt_free_min.  All other allocations occur
 * at the normal priority, which is typically used for allocation of user
 * pages, for instance in the page fault handler or when allocating page
 * table pages or pv_entry structures for user pmaps.  Such allocations fail
 * if the free count is below the free_reserved threshold.
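 *
 * A simplified sketch of the resulting admission check follows; the
 * authoritative logic lives in vm_domain_allocate(), which also handles
 * accounting and page daemon wakeups:
 *
 *	limit = (req == VM_ALLOC_INTERRUPT) ? 0 :
 *	    (req == VM_ALLOC_SYSTEM) ? vmd_interrupt_free_min :
 *	    vmd_free_reserved;
 *
 * and an allocation of npages pages fails when
 * vmd_free_count - npages < limit.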
 *
 *--
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count
 * rises above the free_min threshold; the page daemon and laundry threads
 * are given priority and will wake up once free_count reaches the (much
 * smaller) pageout_free_min threshold.
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where
 * the free page count is above the free_min threshold.  This means that
 * given the choice between two NUMA domains, one above the free_min
 * threshold and one below, the former will be used to satisfy the allocation
 * request regardless of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure
 * (i.e., vmd_free_count < vmd_free_target).  This allows kernel subsystems
 * to register for notifications of free page shortages, upon which they may
 * shrink their caches.  Following a vm_lowmem event, UMA's caches are pruned
 * to ensure that they do not contain an excess of unused memory.  When a
 * domain is below the free_min threshold, UMA limits the population of
 * per-CPU caches.  When a domain falls below the free_severe threshold,
 * UMA's caches are completely drained.
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL
 * in a last-ditch attempt to free up some pages.  Either of the two
 * following conditions will activate the OOM killer:
 *
 * 1. The page daemons collectively fail to reclaim any pages during their
 *    inactive queue scans.  After vm_pageout_oom_seq consecutive scans fail,
 *    the page daemon thread votes for an OOM kill, and an OOM kill is
 *    triggered when all page daemons have voted.  This heuristic is strict
 *    and may fail to trigger even when the system is effectively deadlocked.
 *
 * 2. Threads in the user fault handler are repeatedly unable to make
 *    progress while allocating a page to satisfy the fault.  After
 *    vm_pfault_oom_attempts page allocation failures with intervening
 *    vm_wait() calls, the faulting thread will trigger an OOM kill.
 */
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	struct vm_pgcache {
		int domain;
		int pool;
		uma_zone_t zone;
	} vmd_pgcache[VM_NFREEPOOL];
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	struct vmem *vmd_kernel_nofree_arena; /* (c) per-domain kva NOFREE arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	struct vm_nofreeq {
		vm_page_t ma;
		int offs;
	} vmd_nofreeq;			/* (f) NOFREE page bump allocator. */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

	/* Paging control variables, used within single threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	bool vmd_oom;			/* An OOM kill was requested. */
	bool vmd_helper_threads_enabled;/* Use multiple threads to scan. */
	u_int vmd_inactive_threads;	/* Number of extra helper threads. */
	u_int vmd_inactive_shortage;	/* Per-thread shortage. */
	blockcount_t vmd_inactive_running; /* Number of inactive threads. */
	blockcount_t vmd_inactive_starting; /* Number of threads started. */
	volatile u_int vmd_addl_shortage; /* Shortage accumulator. */
	volatile u_int vmd_inactive_freed; /* Successful inactive frees. */
	volatile u_int vmd_inactive_us;	/* Microseconds for above. */
	u_int vmd_inactive_pps;		/* Exponential decay frees/second. */
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2];	/* markers for active queue scan */

	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;

	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) pages desired free */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */

	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)		(&vm_dom[(n)])
#define	VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)
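
/*
 * An illustrative iteration over all configured domains, skipping domains
 * with no pages; vm_ndomains is the global domain count defined elsewhere
 * in the VM layer:
 *
 *	for (i = 0; i < vm_ndomains; i++) {
 *		if (VM_DOMAIN_EMPTY(i))
 *			continue;
 *		vmd = VM_DOMAIN(i);
 *		...
 *	}
 */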

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#define	vm_domain_free_assert_locked(n)					\
	mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define	vm_domain_free_assert_unlocked(n)				\
	mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_free_lock(d)						\
	mtx_lock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_lockptr(d)					\
	(&(d)->vmd_free_mtx)
#define	vm_domain_free_trylock(d)					\
	mtx_trylock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_unlock(d)					\
	mtx_unlock(vm_domain_free_lockptr((d)))

#define	vm_domain_pageout_lockptr(d)					\
	(&(d)->vmd_pageout_mtx)
#define	vm_domain_pageout_assert_locked(n)				\
	mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define	vm_domain_pageout_assert_unlocked(n)				\
	mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_pageout_lock(d)					\
	mtx_lock(vm_domain_pageout_lockptr((d)))
#define	vm_domain_pageout_unlock(d)					\
	mtx_unlock(vm_domain_pageout_lockptr((d)))

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}

static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}

static inline bool
vm_batchqueue_empty(const struct vm_batchqueue *bq)
{
	return (bq->bq_cnt == 0);
}

static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{
	int slots_free;

	slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
	if (slots_free > 0) {
		bq->bq_pa[bq->bq_cnt++] = m;
		return (slots_free);
	}
	return (slots_free);
}

static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}
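
/*
 * Illustrative use of the batch queue helpers; the authoritative consumers
 * are the page queue batching routines in vm_page.c.  A nonzero return
 * value from vm_batchqueue_insert() means the page was queued, while zero
 * means the batch queue was already full:
 *
 *	if (vm_batchqueue_insert(bq, m) == 0) {
 *		vm_pagequeue_lock(pq);
 *		while ((mt = vm_batchqueue_pop(bq)) != NULL)
 *			... apply the pending operation to mt ...
 *		vm_pagequeue_unlock(pq);
 *		(void)vm_batchqueue_insert(bq, m);
 *	}
 */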

void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);

/*
 * vm_pagequeue_domain:
 *
 *	Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_page_domain(m)));
}

/*
 * Return the number of pages we need to free up or cache.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);

static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;
	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}

#endif	/* _KERNEL */
#endif	/* !_VM_PAGEQUEUE_ */