/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef	_VM_PAGEQUEUE_
#define	_VM_PAGEQUEUE_

#ifdef _KERNEL
struct vm_pagequeue {
	struct mtx	pq_mutex;	/* Lock protecting the queue. */
	struct pglist	pq_pl;		/* List of queued pages. */
	int		pq_cnt;		/* Number of pages in the queue. */
	const char	* const pq_name; /* Queue and lock name. */
	uint64_t	pq_pdpages;	/* Pages processed by the pagedaemon. */
} __aligned(CACHE_LINE_SIZE);

/*
 * Size the batch queue so that struct vm_batchqueue below packs into whole
 * cache lines with minimal padding.
 */
#if __SIZEOF_LONG__ == 8
#define	VM_BATCHQUEUE_SIZE	63
#else
#define	VM_BATCHQUEUE_SIZE	15
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE]; /* Batched pages. */
	int		bq_cnt;		/* Number of valid entries in bq_pa. */
} __aligned(CACHE_LINE_SIZE);

#include <vm/uma.h>
#include <sys/_blockcount.h>
#include <sys/pidctrl.h>
struct sysctl_oid;

/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and accounting.
 *
 * Lock Key:
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * d	vm_domainset_lock
 * a	atomic
 * c	const after boot
 * q	page queue lock
 *
 * A unique page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages.  To decide how many pages to process,
 * it uses thresholds derived from the number of pages in the domain:
 *
 *  vmd_page_count
 *       ---
 *        |
 *        |-> vmd_inactive_target (~3%)
 *        |   - The active queue scan target is given by
 *        |     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *        |
 *        |
 *        |-> vmd_free_target (~2%)
 *        |   - Target for page reclamation.
 *        |
 *        |-> vmd_pageout_wakeup_thresh (~1.8%)
 *        |   - Threshold for waking up the page daemon.
 *        |
 *        |
 *        |-> vmd_free_min (~0.5%)
 *        |   - First low memory threshold.
 *        |   - Causes per-CPU caching to be lazily disabled in UMA.
 *        |   - vm_wait() sleeps below this threshold.
 *        |
 *        |-> vmd_free_severe (~0.25%)
 *        |   - Second low memory threshold.
 *        |   - Triggers aggressive UMA reclamation, disables delayed buffer
 *        |     writes.
 *        |
 *        |-> vmd_free_reserved (~0.13%)
 *        |   - Minimum for VM_ALLOC_NORMAL page allocations.
 *        |-> vmd_pageout_free_min (32 + 2 pages)
 *        |   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *        |-> vmd_interrupt_free_min (2 pages)
 *        |   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *       ---
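 *
 * As a sketch of the first item above (illustrative only; the actual
 * computation lives in the page daemon, not in this header), the active
 * queue scan target is:
 *
 *	scan_target = vmd->vmd_inactive_target + vmd->vmd_free_target -
 *	    vmd->vmd_free_count;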
 *
 *--
 * Free page count regulation:
 *
 * The page daemon attempts to ensure that the free page count is above the free
 * target.  It wakes up periodically (every 100ms) to input the current free
 * page shortage (free_target - free_count) to a PID controller, which in
 * response outputs the number of pages to attempt to reclaim.  The shortage's
 * current magnitude, rate of change, and cumulative value are together used to
 * determine the controller's output.  The page daemon target thus adapts
 * dynamically to the system's demand for free pages, resulting in less
 * burstiness than a simple hysteresis loop.
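 *
 * Schematically, the controller combines those three terms as follows (a
 * generic PID sketch; the real interface lives in sys/pidctrl.h):
 *
 *	error = free_target - free_count;		(magnitude)
 *	integral += error;				(cumulative value)
 *	derivative = error - prev_error;		(rate of change)
 *	target = Kp * error + Ki * integral + Kd * derivative;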
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps ensure
 * that the system responds promptly to a large instantaneous free page
 * shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that it
 * can respond promptly to a sudden free page shortage.  In particular, the page
 * daemon thread aggressively scans active pages so long as the following
 * condition holds:
 *
 *         len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history.  Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
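 *
 * Expressed with the field names defined below, the aggressive-scan
 * condition reads (a sketch, with inact_len and laundry_len standing in for
 * the queue lengths):
 *
 *	inact_len + laundry_len + vmd->vmd_free_target -
 *	    vmd->vmd_free_count < vmd->vmd_inactive_target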
 *
 * The per-domain laundry thread periodically launders dirty pages based on the
 * number of clean pages freed by the page daemon since the last laundering.  If
 * the page daemon fails to meet its scan target (i.e., the PID controller
 * output) because of a shortage of clean inactive pages, the laundry thread
 * attempts to launder enough pages to meet the free page target.
 *
 *--
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation can
 * claim any free page.  This priority is used in the pmap layer when attempting
 * to allocate a page for the kernel page tables; in such cases an allocation
 * failure will usually result in a kernel panic.  The system priority is used
 * for most other kernel memory allocations, for instance by UMA's slab
 * allocator or the buffer cache.  Such allocations will fail if the free count
 * is below interrupt_free_min.  All other allocations occur at the normal
 * priority, which is typically used for allocation of user pages, for instance
 * in the page fault handler or when allocating page table pages or pv_entry
 * structures for user pmaps.  Such allocations fail if the free count is below
 * the free_reserved threshold.
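 *
 * The priorities thus map to per-domain free-count floors.  A sketch of the
 * check (the authoritative version is vm_domain_allocate(), declared below
 * and defined in vm_page.c):
 *
 *	if ((req & VM_ALLOC_CLASS_MASK) == VM_ALLOC_INTERRUPT)
 *		limit = 0;
 *	else if ((req & VM_ALLOC_CLASS_MASK) == VM_ALLOC_SYSTEM)
 *		limit = vmd->vmd_interrupt_free_min;
 *	else
 *		limit = vmd->vmd_free_reserved;
 *	if (vmd->vmd_free_count < limit + npages)
 *		(the allocation fails)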
 *
 *--
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count rises
 * above the free_min threshold; the page daemon and laundry threads are given
 * priority and will wake up once free_count reaches the (much smaller)
 * pageout_free_min threshold.
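 *
 * A thread that cannot tolerate failure typically retries in a loop, for
 * example (an illustrative sketch of the pattern, with locking elided):
 *
 *	while ((m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		vm_wait(obj);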
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where the
 * free page count is above the free_min threshold.  This means that given the
 * choice between two NUMA domains, one above the free_min threshold and one
 * below, the former will be used to satisfy the allocation request regardless
 * of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure (i.e.,
 * vmd_free_count < vmd_free_target).  This allows kernel subsystems to register
 * for notifications of free page shortages, upon which they may shrink their
 * caches.  Following a vm_lowmem event, UMA's caches are pruned to ensure that
 * they do not contain an excess of unused memory.  When a domain is below the
 * free_min threshold, UMA limits the population of per-CPU caches.  When a
 * domain falls below the free_severe threshold, UMA's caches are completely
 * drained.
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in a
 * last-ditch attempt to free up some pages.  Either of the two following
 * conditions will activate the OOM killer:
 *
 *  1. The page daemons collectively fail to reclaim any pages during their
 *     inactive queue scans.  After vm_pageout_oom_seq consecutive scans fail,
 *     the page daemon thread votes for an OOM kill, and an OOM kill is
 *     triggered when all page daemons have voted.  This heuristic is strict and
 *     may fail to trigger even when the system is effectively deadlocked.
 *
 *  2. Threads in the user fault handler are repeatedly unable to make progress
 *     while allocating a page to satisfy the fault.  After
 *     vm_pfault_oom_attempts page allocation failures with intervening
 *     vm_wait() calls, the faulting thread will trigger an OOM kill.
 */
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	struct vm_pgcache {
		int domain;
		int pool;
		uma_zone_t zone;
	} vmd_pgcache[VM_NFREEPOOL];	/* (c) Per-pool free page caches. */
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

	/* Paging control variables, used within the single-threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	boolean_t vmd_oom;		/* Domain voted for an OOM kill. */
	u_int vmd_inactive_threads;	/* Threads scanning the inactive queue. */
	u_int vmd_inactive_shortage;		/* Per-thread shortage. */
	blockcount_t vmd_inactive_running;	/* Number of inactive threads. */
	blockcount_t vmd_inactive_starting;	/* Number of threads started. */
	volatile u_int vmd_addl_shortage;	/* Shortage accumulator. */
	volatile u_int vmd_inactive_freed;	/* Successful inactive frees. */
	volatile u_int vmd_inactive_us;		/* Microseconds for above. */
	u_int vmd_inactive_pps;		/* Exponential decay frees/second. */
	int vmd_oom_seq;		/* Consecutive unproductive scans. */
	int vmd_last_active_scan;	/* Time of last active scan, in ticks. */
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2]; /* markers for active queue scan */

	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;

	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) first low-memory threshold */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */

	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)		(&vm_dom[(n)])
#define	VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#define	vm_domain_free_assert_locked(n)					\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define	vm_domain_free_assert_unlocked(n)				\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_free_lock(d)						\
	    mtx_lock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_lockptr(d)					\
	    (&(d)->vmd_free_mtx)
#define	vm_domain_free_trylock(d)					\
	    mtx_trylock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_unlock(d)					\
	    mtx_unlock(vm_domain_free_lockptr((d)))

#define	vm_domain_pageout_lockptr(d)					\
	    (&(d)->vmd_pageout_mtx)
#define	vm_domain_pageout_assert_locked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define	vm_domain_pageout_assert_unlocked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_pageout_lock(d)					\
	    mtx_lock(vm_domain_pageout_lockptr((d)))
#define	vm_domain_pageout_unlock(d)					\
	    mtx_unlock(vm_domain_pageout_lockptr((d)))

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}

static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}

/*
 * Try to insert the page into the batch queue.  Returns the number of slots
 * that were free before the insertion, so a return value of zero indicates
 * that the queue was full and the page was not inserted.
 */
static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{
	int slots_free;

	slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
	if (slots_free > 0)
		bq->bq_pa[bq->bq_cnt++] = m;
	return (slots_free);
}

static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}

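/*
 * Example of the intended batching pattern (an illustrative sketch, not code
 * from this file): pages are collected with no queue lock held, and the page
 * queue lock is then taken once to flush the entire batch.
 *
 *	struct vm_batchqueue bq;
 *	vm_page_t m;
 *
 *	vm_batchqueue_init(&bq);
 *	while ((m = <next page to enqueue>) != NULL)
 *		if (vm_batchqueue_insert(&bq, m) == 0)
 *			break;			(the batch is full)
 *	vm_pagequeue_lock(pq);
 *	while ((m = vm_batchqueue_pop(&bq)) != NULL) {
 *		TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
 *		vm_pagequeue_cnt_inc(pq);
 *	}
 *	vm_pagequeue_unlock(pq);
 */
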
void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);

/*
 *      vm_pagequeue_domain:
 *
 *      Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_page_domain(m)));
}

/*
 * Return the number of pages we need to free up or cache.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);

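/*
 * A sketch of how allocators pair vm_paging_needed() with
 * pagedaemon_wakeup() (the real call sites are in vm_page.c):
 *
 *	if (vm_paging_needed(vmd, vmd->vmd_free_count))
 *		pagedaemon_wakeup(vmd->vmd_domain);
 */
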
static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;
	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}

#endif	/* _KERNEL */
#endif				/* !_VM_PAGEQUEUE_ */