xref: /freebsd/sys/vm/vm_pageout.c (revision 095f6305772be1dae27e7af9d87db0387625440d)
1 /*-
2  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2005 Yahoo! Technologies Norway AS
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by the University of
27  *	California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *
45  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46  * All rights reserved.
47  *
48  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49  *
50  * Permission to use, copy, modify and distribute this software and
51  * its documentation is hereby granted, provided that both the copyright
52  * notice and this permission notice appear in all copies of the
53  * software, derivative works or modified versions, and any portions
54  * thereof, and that both notices appear in supporting documentation.
55  *
56  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
57  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
58  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59  *
60  * Carnegie Mellon requests users of this software to return to
61  *
62  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
63  *  School of Computer Science
64  *  Carnegie Mellon University
65  *  Pittsburgh PA 15213-3890
66  *
67  * any improvements or extensions that they make and grant Carnegie the
68  * rights to redistribute these changes.
69  */
70 
71 /*
72  *	The proverbial page-out daemon.
73  */
74 
75 #include <sys/cdefs.h>
76 #include "opt_vm.h"
77 
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/kernel.h>
81 #include <sys/blockcount.h>
82 #include <sys/eventhandler.h>
83 #include <sys/limits.h>
84 #include <sys/lock.h>
85 #include <sys/mutex.h>
86 #include <sys/proc.h>
87 #include <sys/kthread.h>
88 #include <sys/ktr.h>
89 #include <sys/mount.h>
90 #include <sys/racct.h>
91 #include <sys/resourcevar.h>
92 #include <sys/sched.h>
93 #include <sys/sdt.h>
94 #include <sys/signalvar.h>
95 #include <sys/smp.h>
96 #include <sys/time.h>
97 #include <sys/vnode.h>
98 #include <sys/vmmeter.h>
99 #include <sys/rwlock.h>
100 #include <sys/sx.h>
101 #include <sys/sysctl.h>
102 
103 #include <vm/vm.h>
104 #include <vm/vm_param.h>
105 #include <vm/vm_object.h>
106 #include <vm/vm_page.h>
107 #include <vm/vm_map.h>
108 #include <vm/vm_pageout.h>
109 #include <vm/vm_pager.h>
110 #include <vm/vm_phys.h>
111 #include <vm/vm_pagequeue.h>
112 #include <vm/vm_radix.h>
113 #include <vm/swap_pager.h>
114 #include <vm/vm_extern.h>
115 #include <vm/uma.h>
116 
117 /*
118  * System initialization
119  */
120 
121 /* The kernel process "vm_pageout". */
122 static void vm_pageout(void);
123 static void vm_pageout_init(void);
124 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
125 static int vm_pageout_cluster(vm_page_t m);
126 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
127     int starting_page_shortage);
128 
129 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
130     NULL);
131 
132 struct proc *pageproc;
133 
134 static struct kproc_desc page_kp = {
135 	"pagedaemon",
136 	vm_pageout,
137 	&pageproc
138 };
139 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
140     &page_kp);
141 
142 SDT_PROVIDER_DEFINE(vm);
143 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
144 
145 /* Pagedaemon activity rates, in subdivisions of one second. */
146 #define	VM_LAUNDER_RATE		10
147 #define	VM_INACT_SCAN_RATE	10
148 
149 static int swapdev_enabled;
150 int vm_pageout_page_count = 32;
151 
152 static int vm_panic_on_oom = 0;
153 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
154     CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
155     "Panic on the given number of out-of-memory errors instead of "
156     "killing the largest process");
157 
158 static int vm_pageout_update_period;
159 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
160     CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
161     "Maximum active LRU update period");
162 
163 static int pageout_cpus_per_thread = 16;
164 SYSCTL_INT(_vm, OID_AUTO, pageout_cpus_per_thread, CTLFLAG_RDTUN,
165     &pageout_cpus_per_thread, 0,
166     "Number of CPUs per pagedaemon worker thread");
167 
168 static int lowmem_period = 10;
169 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
170     "Low memory callback period");
171 
172 static int disable_swap_pageouts;
173 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
174     CTLFLAG_RWTUN, &disable_swap_pageouts, 0,
175     "Disallow swapout of dirty pages");
176 
177 static int pageout_lock_miss;
178 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
179     CTLFLAG_RD, &pageout_lock_miss, 0,
180     "vget() lock misses during pageout");
181 
182 static int vm_pageout_oom_seq = 12;
183 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
184     CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
185     "back-to-back calls to oom detector to start OOM");
186 
187 static int
188 sysctl_laundry_weight(SYSCTL_HANDLER_ARGS)
189 {
190 	int error, val;
191 
192 	val = *(int *)arg1;
193 	error = sysctl_handle_int(oidp, &val, 0, req);
194 	if (error != 0 || req->newptr == NULL)
195 		return (error);
196 	if (val < arg2 || val > 100)
197 		return (EINVAL);
198 	*(int *)arg1 = val;
199 	return (0);
200 }
201 
202 static int act_scan_laundry_weight = 3;
203 SYSCTL_PROC(_vm, OID_AUTO, act_scan_laundry_weight,
204     CTLTYPE_INT | CTLFLAG_RWTUN, &act_scan_laundry_weight, 1,
205     sysctl_laundry_weight, "I",
206     "weight given to clean vs. dirty pages in active queue scans");
207 
208 static int inact_scan_laundry_weight = 1;
209 SYSCTL_PROC(_vm, OID_AUTO, inact_scan_laundry_weight,
210     CTLTYPE_INT | CTLFLAG_RWTUN, &inact_scan_laundry_weight, 0,
211     sysctl_laundry_weight, "I",
212     "weight given to clean vs. dirty pages in inactive queue scans");
213 
214 static u_int vm_background_launder_rate = 4096;
215 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
216     &vm_background_launder_rate, 0,
217     "background laundering rate, in kilobytes per second");
218 
219 static u_int vm_background_launder_max = 20 * 1024;
220 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
221     &vm_background_launder_max, 0,
222     "background laundering cap, in kilobytes");
223 
224 u_long vm_page_max_user_wired;
225 SYSCTL_ULONG(_vm, OID_AUTO, max_user_wired, CTLFLAG_RW,
226     &vm_page_max_user_wired, 0,
227     "system-wide limit to user-wired page count");
228 
229 static u_int isqrt(u_int num);
230 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
231     bool in_shortfall);
232 static void vm_pageout_laundry_worker(void *arg);
233 
234 struct scan_state {
235 	struct vm_batchqueue bq;
236 	struct vm_pagequeue *pq;
237 	vm_page_t	marker;
238 	int		maxscan;
239 	int		scanned;
240 };
241 
242 static void
243 vm_pageout_init_scan(struct scan_state *ss, struct vm_pagequeue *pq,
244     vm_page_t marker, vm_page_t after, int maxscan)
245 {
246 
247 	vm_pagequeue_assert_locked(pq);
248 	KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
249 	    ("marker %p already enqueued", marker));
250 
251 	if (after == NULL)
252 		TAILQ_INSERT_HEAD(&pq->pq_pl, marker, plinks.q);
253 	else
254 		TAILQ_INSERT_AFTER(&pq->pq_pl, after, marker, plinks.q);
255 	vm_page_aflag_set(marker, PGA_ENQUEUED);
256 
257 	vm_batchqueue_init(&ss->bq);
258 	ss->pq = pq;
259 	ss->marker = marker;
260 	ss->maxscan = maxscan;
261 	ss->scanned = 0;
262 	vm_pagequeue_unlock(pq);
263 }
264 
265 static void
266 vm_pageout_end_scan(struct scan_state *ss)
267 {
268 	struct vm_pagequeue *pq;
269 
270 	pq = ss->pq;
271 	vm_pagequeue_assert_locked(pq);
272 	KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
273 	    ("marker %p not enqueued", ss->marker));
274 
275 	TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
276 	vm_page_aflag_clear(ss->marker, PGA_ENQUEUED);
277 	pq->pq_pdpages += ss->scanned;
278 }
279 
280 /*
281  * Add a small number of queued pages to a batch queue for later processing
282  * without the corresponding queue lock held.  The caller must have enqueued a
283  * marker page at the desired start point for the scan.  Pages will be
284  * physically dequeued if the caller so requests.  Otherwise, the returned
285  * batch may contain marker pages, and it is up to the caller to handle them.
286  *
287  * When processing the batch queue, vm_pageout_defer() must be used to
288  * determine whether the page has been logically dequeued since the batch was
289  * collected.
290  */
291 static __always_inline void
292 vm_pageout_collect_batch(struct scan_state *ss, const bool dequeue)
293 {
294 	struct vm_pagequeue *pq;
295 	vm_page_t m, marker, n;
296 
297 	marker = ss->marker;
298 	pq = ss->pq;
299 
300 	KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
301 	    ("marker %p not enqueued", ss->marker));
302 
303 	vm_pagequeue_lock(pq);
304 	for (m = TAILQ_NEXT(marker, plinks.q); m != NULL &&
305 	    ss->scanned < ss->maxscan && ss->bq.bq_cnt < VM_BATCHQUEUE_SIZE;
306 	    m = n, ss->scanned++) {
307 		n = TAILQ_NEXT(m, plinks.q);
308 		if ((m->flags & PG_MARKER) == 0) {
309 			KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
310 			    ("page %p not enqueued", m));
311 			KASSERT((m->flags & PG_FICTITIOUS) == 0,
312 			    ("Fictitious page %p cannot be in page queue", m));
313 			KASSERT((m->oflags & VPO_UNMANAGED) == 0,
314 			    ("Unmanaged page %p cannot be in page queue", m));
315 		} else if (dequeue)
316 			continue;
317 
318 		(void)vm_batchqueue_insert(&ss->bq, m);
319 		if (dequeue) {
320 			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
321 			vm_page_aflag_clear(m, PGA_ENQUEUED);
322 		}
323 	}
324 	TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
325 	if (__predict_true(m != NULL))
326 		TAILQ_INSERT_BEFORE(m, marker, plinks.q);
327 	else
328 		TAILQ_INSERT_TAIL(&pq->pq_pl, marker, plinks.q);
329 	if (dequeue)
330 		vm_pagequeue_cnt_add(pq, -ss->bq.bq_cnt);
331 	vm_pagequeue_unlock(pq);
332 }
333 
334 /*
335  * Return the next page to be scanned, or NULL if the scan is complete.
336  */
337 static __always_inline vm_page_t
338 vm_pageout_next(struct scan_state *ss, const bool dequeue)
339 {
340 
341 	if (ss->bq.bq_cnt == 0)
342 		vm_pageout_collect_batch(ss, dequeue);
343 	return (vm_batchqueue_pop(&ss->bq));
344 }
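
/*
 * A minimal usage sketch (illustrative, not compiled): the scan-state API
 * above is driven as follows, with "dequeue" selecting whether batched pages
 * are physically removed from the queue (as in the inactive scan) or left
 * enqueued (as in the laundry and active scans):
 *
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
 *	while ((m = vm_pageout_next(&ss, false)) != NULL) {
 *		if ((m->flags & PG_MARKER) != 0)
 *			continue;
 *		if (vm_pageout_defer(m, queue, true))
 *			continue;
 *		... process m without the page queue lock held ...
 *	}
 *	vm_pagequeue_lock(pq);
 *	vm_pageout_end_scan(&ss);
 *	vm_pagequeue_unlock(pq);
 */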
345 
346 /*
347  * Determine whether processing of a page should be deferred and ensure that any
348  * outstanding queue operations are processed.
349  */
350 static __always_inline bool
351 vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
352 {
353 	vm_page_astate_t as;
354 
355 	as = vm_page_astate_load(m);
356 	if (__predict_false(as.queue != queue ||
357 	    ((as.flags & PGA_ENQUEUED) != 0) != enqueued))
358 		return (true);
359 	if ((as.flags & PGA_QUEUE_OP_MASK) != 0) {
360 		vm_page_pqbatch_submit(m, queue);
361 		return (true);
362 	}
363 	return (false);
364 }
365 
366 /*
367  * We can cluster only if the page is dirty, unbusied, and unwired, and the
368  * page is in the laundry queue.
369  */
370 static bool
371 vm_pageout_flushable(vm_page_t m)
372 {
373 	if (vm_page_tryxbusy(m) == 0)
374 		return (false);
375 	if (!vm_page_wired(m)) {
376 		vm_page_test_dirty(m);
377 		if (m->dirty != 0 && vm_page_in_laundry(m) &&
378 		    vm_page_try_remove_write(m))
379 			return (true);
380 	}
381 	vm_page_xunbusy(m);
382 	return (false);
383 }
384 
385 /*
386  * Scan for pages at adjacent offsets within the given page's object that are
387  * eligible for laundering, form a cluster of these pages and the given page,
388  * and launder that cluster.
389  */
390 static int
391 vm_pageout_cluster(vm_page_t m)
392 {
393 	struct pctrie_iter pages;
394 	vm_page_t mc[2 * vm_pageout_page_count - 1];
395 	int alignment, page_base, pageout_count;
396 
397 	VM_OBJECT_ASSERT_WLOCKED(m->object);
398 
399 	vm_page_assert_xbusied(m);
400 
401 	vm_page_iter_init(&pages, m->object);
402 	alignment = m->pindex % vm_pageout_page_count;
403 	page_base = nitems(mc) / 2;
404 	pageout_count = 1;
405 	mc[page_base] = m;
406 
407 	/*
408 	 * During heavy mmap/modification loads the pageout
409 	 * daemon can really fragment the underlying file
410 	 * due to flushing pages out of order and not trying to
411 	 * align the clusters (which leaves sporadic out-of-order
412 	 * holes).  To solve this problem we do the reverse scan
413 	 * first and attempt to align our cluster, then do a
414 	 * forward scan if room remains.
415 	 *
416 	 * If we are at an alignment boundary, stop here, and switch directions.
417 	 */
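	/*
	 * Worked example (illustrative, assuming vm_pageout_page_count == 32):
	 * for a page at pindex 70, alignment = 70 % 32 = 6, so the reverse
	 * scan may collect up to 6 pages (pindexes 69 down to 64) to pull the
	 * cluster back to the 32-page boundary at 64; the forward scan then
	 * fills the remaining slots from pindex 71 onward.
	 */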
418 	if (alignment > 0) {
419 		pages.index = mc[page_base]->pindex;
420 		do {
421 			m = vm_radix_iter_prev(&pages);
422 			if (m == NULL || !vm_pageout_flushable(m))
423 				break;
424 			mc[--page_base] = m;
425 		} while (pageout_count++ < alignment);
426 	}
427 	if (pageout_count < vm_pageout_page_count) {
428 		pages.index = mc[page_base + pageout_count - 1]->pindex;
429 		do {
430 			m = vm_radix_iter_next(&pages);
431 			if (m == NULL || !vm_pageout_flushable(m))
432 				break;
433 			mc[page_base + pageout_count] = m;
434 		} while (++pageout_count < vm_pageout_page_count);
435 	}
436 	if (pageout_count < vm_pageout_page_count &&
437 	    alignment == nitems(mc) / 2 - page_base) {
438 		/* Resume the reverse scan. */
439 		pages.index = mc[page_base]->pindex;
440 		do {
441 			m = vm_radix_iter_prev(&pages);
442 			if (m == NULL || !vm_pageout_flushable(m))
443 				break;
444 			mc[--page_base] = m;
445 		} while (++pageout_count < vm_pageout_page_count);
446 	}
447 
448 	return (vm_pageout_flush(&mc[page_base], pageout_count,
449 	    VM_PAGER_PUT_NOREUSE, NULL));
450 }
451 
452 /*
453  * vm_pageout_flush() - launder the given pages
454  *
455  *	The given pages are laundered.  Note that we set up for the start of
456  *	I/O (i.e., busy the page), mark it read-only, and bump the object
457  *	reference count all in here rather than in the parent.  If we want
458  *	the parent to do more sophisticated things we may have to change
459  *	the ordering.
460  *
461  *	If eio is not NULL, returns the count of pages between 0 and the first
462  *	page with status VM_PAGER_AGAIN.  *eio is set to true if the pager
463  *	returned VM_PAGER_ERROR or VM_PAGER_FAIL for any page in that set.
464  *
465  *	Otherwise, returns the number of paged-out pages.
466  */
467 int
468 vm_pageout_flush(vm_page_t *mc, int count, int flags, bool *eio)
469 {
470 	vm_object_t object = mc[0]->object;
471 	int pageout_status[count];
472 	int numpagedout = 0;
473 	int i, runlen;
474 
475 	VM_OBJECT_ASSERT_WLOCKED(object);
476 
477 	/*
478 	 * Initiate I/O.  Mark the pages shared busy and verify that they're
479 	 * valid and read-only.
480 	 *
481 	 * We do not have to fix up the clean/dirty bits here... we can
482 	 * allow the pager to do it after the I/O completes.
483 	 *
484 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
485 	 * edge case with file fragments.
486 	 */
487 	for (i = 0; i < count; i++) {
488 		KASSERT(vm_page_all_valid(mc[i]),
489 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
490 			mc[i], i, count));
491 		KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
492 		    ("vm_pageout_flush: writeable page %p", mc[i]));
493 		vm_page_busy_downgrade(mc[i]);
494 	}
495 	vm_object_pip_add(object, count);
496 
497 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
498 
499 	runlen = count;
500 	if (eio != NULL)
501 		*eio = false;
502 	for (i = 0; i < count; i++) {
503 		vm_page_t mt = mc[i];
504 
505 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
506 		    !pmap_page_is_write_mapped(mt),
507 		    ("vm_pageout_flush: page %p is not write protected", mt));
508 		switch (pageout_status[i]) {
509 		case VM_PAGER_OK:
510 			/*
511 			 * The page may have moved since laundering started, in
512 			 * which case it should be left alone.
513 			 */
514 			if (vm_page_in_laundry(mt))
515 				vm_page_deactivate_noreuse(mt);
516 			/* FALLTHROUGH */
517 		case VM_PAGER_PEND:
518 			numpagedout++;
519 			break;
520 		case VM_PAGER_BAD:
521 			/*
522 			 * The page is outside the object's range.  We pretend
523 			 * that the page out worked and clean the page, so the
524 			 * changes will be lost if the page is reclaimed by
525 			 * the page daemon.
526 			 */
527 			vm_page_undirty(mt);
528 			if (vm_page_in_laundry(mt))
529 				vm_page_deactivate_noreuse(mt);
530 			break;
531 		case VM_PAGER_ERROR:
532 		case VM_PAGER_FAIL:
533 			/*
534 			 * If the page couldn't be paged out to swap because the
535 			 * pager wasn't able to find space, place the page in
536 			 * the PQ_UNSWAPPABLE holding queue.  This is an
537 			 * optimization that prevents the page daemon from
538 			 * wasting CPU cycles on pages that cannot be reclaimed
539 			 * because no swap device is configured.
540 			 *
541 			 * Otherwise, reactivate the page so that it doesn't
542 			 * clog the laundry and inactive queues.  (We will try
543 			 * paging it out again later.)
544 			 */
545 			if ((object->flags & OBJ_SWAP) != 0 &&
546 			    pageout_status[i] == VM_PAGER_FAIL) {
547 				vm_page_unswappable(mt);
548 				numpagedout++;
549 			} else
550 				vm_page_activate(mt);
551 			if (eio != NULL)
552 				*eio = true;
553 			break;
554 		case VM_PAGER_AGAIN:
555 			if (runlen == count)
556 				runlen = i;
557 			break;
558 		}
559 
560 		/*
561 		 * If the operation is still going, leave the page busy to
562 		 * block all other accesses. Also, leave the paging in
563 		 * progress indicator set so that we don't attempt an object
564 		 * collapse.
565 		 */
566 		if (pageout_status[i] != VM_PAGER_PEND) {
567 			vm_object_pip_wakeup(object);
568 			vm_page_sunbusy(mt);
569 		}
570 	}
571 	if (eio != NULL)
572 		return (runlen);
573 	return (numpagedout);
574 }
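
/*
 * Caller's-eye sketch (illustrative): with a non-NULL "eio", the return
 * value is a run length rather than a page count, e.g.:
 *
 *	bool eio;
 *	int runlen;
 *
 *	runlen = vm_pageout_flush(mc, count, flags, &eio);
 *	if (eio)
 *		... some page in mc[0 .. runlen - 1] returned ERROR/FAIL ...
 *	... mc[0 .. runlen - 1] is the run before the first VM_PAGER_AGAIN ...
 */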
575 
576 static void
577 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
578 {
579 
580 	atomic_store_rel_int(&swapdev_enabled, 1);
581 }
582 
583 static void
584 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
585 {
586 
587 	if (swap_pager_nswapdev() == 1)
588 		atomic_store_rel_int(&swapdev_enabled, 0);
589 }
590 
591 /*
592  * Attempt to acquire all of the necessary locks to launder a page and
593  * then call through the clustering layer to PUTPAGES.  Wait a short
594  * time for a vnode lock.
595  *
596  * Requires the page and object lock on entry, releases both before return.
597  * Returns 0 on success and an errno otherwise.
598  */
599 static int
600 vm_pageout_clean(vm_page_t m, int *numpagedout)
601 {
602 	struct vnode *vp;
603 	struct mount *mp;
604 	vm_object_t object;
605 	vm_pindex_t pindex;
606 	int error;
607 
608 	object = m->object;
609 	VM_OBJECT_ASSERT_WLOCKED(object);
610 	error = 0;
611 	vp = NULL;
612 	mp = NULL;
613 
614 	/*
615 	 * The object is already known NOT to be dead.   It
616 	 * is possible for the vget() to block the whole
617 	 * pageout daemon, but the new low-memory handling
618 	 * code should prevent it.
619 	 *
620 	 * We can't wait forever for the vnode lock, we might
621 	 * deadlock due to a vn_read() getting stuck in
622 	 * vm_wait while holding this vnode.  We skip the
623 	 * vnode if we can't get it in a reasonable amount
624 	 * of time.
625 	 */
626 	if (object->type == OBJT_VNODE) {
627 		vm_page_xunbusy(m);
628 		vp = object->handle;
629 		if (vp->v_type == VREG &&
630 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
631 			mp = NULL;
632 			error = EDEADLK;
633 			goto unlock_all;
634 		}
635 		KASSERT(mp != NULL,
636 		    ("vp %p with NULL v_mount", vp));
637 		vm_object_reference_locked(object);
638 		pindex = m->pindex;
639 		VM_OBJECT_WUNLOCK(object);
640 		if (vget(vp, vn_lktype_write(NULL, vp) | LK_TIMELOCK) != 0) {
641 			vp = NULL;
642 			error = EDEADLK;
643 			goto unlock_mp;
644 		}
645 		VM_OBJECT_WLOCK(object);
646 
647 		/*
648 		 * Ensure that the object and vnode were not disassociated
649 		 * while locks were dropped.
650 		 */
651 		if (vp->v_object != object) {
652 			error = ENOENT;
653 			goto unlock_all;
654 		}
655 
656 		/*
657 		 * While the object was unlocked, the page may have been:
658 		 * (1) moved to a different queue,
659 		 * (2) reallocated to a different object,
660 		 * (3) reallocated to a different offset, or
661 		 * (4) cleaned.
662 		 */
663 		if (!vm_page_in_laundry(m) || m->object != object ||
664 		    m->pindex != pindex || m->dirty == 0) {
665 			error = ENXIO;
666 			goto unlock_all;
667 		}
668 
669 		/*
670 		 * The page may have been busied while the object lock was
671 		 * released.
672 		 */
673 		if (vm_page_tryxbusy(m) == 0) {
674 			error = EBUSY;
675 			goto unlock_all;
676 		}
677 	}
678 
679 	/*
680 	 * Remove all writeable mappings, failing if the page is wired.
681 	 */
682 	if (!vm_page_try_remove_write(m)) {
683 		vm_page_xunbusy(m);
684 		error = EBUSY;
685 		goto unlock_all;
686 	}
687 
688 	/*
689 	 * If a page is dirty, then it is either being washed
690 	 * (but not yet cleaned) or it is still in the
691 	 * laundry.  If it is still in the laundry, then we
692 	 * start the cleaning operation.
693 	 */
694 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
695 		error = EIO;
696 
697 unlock_all:
698 	VM_OBJECT_WUNLOCK(object);
699 
700 unlock_mp:
701 	if (mp != NULL) {
702 		if (vp != NULL)
703 			vput(vp);
704 		vm_object_deallocate(object);
705 		vn_finished_write(mp);
706 	}
707 
708 	return (error);
709 }
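
/*
 * A note on the locking strategy above (editorial): the hypothetical deadlock
 * being avoided is a thread in vn_read() that holds the vnode lock and sleeps
 * in vm_wait(), while the page daemon in turn sleeps on that vnode lock.
 * Bounding the wait with LK_TIMELOCK (and using V_NOWAIT for vn_start_write())
 * breaks the cycle at the cost of occasionally skipping a page; such skips
 * are visible via the vm.pageout_lock_miss sysctl defined above.
 */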
710 
711 /*
712  * Attempt to launder the specified number of pages.
713  *
714  * Returns the number of pages successfully laundered.
715  */
716 static int
717 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
718 {
719 	struct scan_state ss;
720 	struct vm_pagequeue *pq;
721 	vm_object_t object;
722 	vm_page_t m, marker;
723 	vm_page_astate_t new, old;
724 	int act_delta, error, numpagedout, queue, refs, starting_target;
725 	int vnodes_skipped;
726 	bool pageout_ok;
727 
728 	object = NULL;
729 	starting_target = launder;
730 	vnodes_skipped = 0;
731 
732 	/*
733 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
734 	 * once the target number of dirty pages have been laundered, or once
735 	 * we've reached the end of the queue.  A single iteration of this loop
736 	 * may cause more than one page to be laundered because of clustering.
737 	 *
738 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
739 	 * swap devices are configured.
740 	 */
741 	if (atomic_load_acq_int(&swapdev_enabled))
742 		queue = PQ_UNSWAPPABLE;
743 	else
744 		queue = PQ_LAUNDRY;
745 
746 scan:
747 	marker = &vmd->vmd_markers[queue];
748 	pq = &vmd->vmd_pagequeues[queue];
749 	vm_pagequeue_lock(pq);
750 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
751 	while (launder > 0 && (m = vm_pageout_next(&ss, false)) != NULL) {
752 		if (__predict_false((m->flags & PG_MARKER) != 0))
753 			continue;
754 
755 		/*
756 		 * Don't touch a page that was removed from the queue after the
757 		 * page queue lock was released.  Otherwise, ensure that any
758 		 * pending queue operations, such as dequeues for wired pages,
759 		 * are handled.
760 		 */
761 		if (vm_pageout_defer(m, queue, true))
762 			continue;
763 
764 		/*
765 		 * Lock the page's object.
766 		 */
767 		if (object == NULL || object != m->object) {
768 			if (object != NULL)
769 				VM_OBJECT_WUNLOCK(object);
770 			object = atomic_load_ptr(&m->object);
771 			if (__predict_false(object == NULL))
772 				/* The page is being freed by another thread. */
773 				continue;
774 
775 			/* Depends on type-stability. */
776 			VM_OBJECT_WLOCK(object);
777 			if (__predict_false(m->object != object)) {
778 				VM_OBJECT_WUNLOCK(object);
779 				object = NULL;
780 				continue;
781 			}
782 		}
783 
784 		if (vm_page_tryxbusy(m) == 0)
785 			continue;
786 
787 		/*
788 		 * Check for wirings now that we hold the object lock and have
789 		 * exclusively busied the page.  If the page is mapped, it may
790 		 * still be wired by pmap lookups.  The call to
791 		 * vm_page_try_remove_all() below atomically checks for such
792 		 * wirings and removes mappings.  If the page is unmapped, the
793 		 * wire count is guaranteed not to increase after this check.
794 		 */
795 		if (__predict_false(vm_page_wired(m)))
796 			goto skip_page;
797 
798 		/*
799 		 * Invalid pages can be easily freed.  They cannot be
800 		 * mapped; vm_page_free() asserts this.
801 		 */
802 		if (vm_page_none_valid(m))
803 			goto free_page;
804 
805 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
806 
807 		for (old = vm_page_astate_load(m);;) {
808 			/*
809 			 * Check to see if the page has been removed from the
810 			 * queue since the first such check.  Leave it alone if
811 			 * so, discarding any references collected by
812 			 * pmap_ts_referenced().
813 			 */
814 			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
815 				goto skip_page;
816 
817 			new = old;
818 			act_delta = refs;
819 			if ((old.flags & PGA_REFERENCED) != 0) {
820 				new.flags &= ~PGA_REFERENCED;
821 				act_delta++;
822 			}
823 			if (act_delta == 0) {
824 				;
825 			} else if (object->ref_count != 0) {
826 				/*
827 				 * Increase the activation count if the page was
828 				 * referenced while in the laundry queue.  This
829 				 * makes it less likely that the page will be
830 				 * returned prematurely to the laundry queue.
831 				 */
832 				new.act_count += ACT_ADVANCE +
833 				    act_delta;
834 				if (new.act_count > ACT_MAX)
835 					new.act_count = ACT_MAX;
836 
837 				new.flags &= ~PGA_QUEUE_OP_MASK;
838 				new.flags |= PGA_REQUEUE;
839 				new.queue = PQ_ACTIVE;
840 				if (!vm_page_pqstate_commit(m, &old, new))
841 					continue;
842 
843 				/*
844 				 * If this was a background laundering, count
845 				 * activated pages towards our target.  The
846 				 * purpose of background laundering is to ensure
847 				 * that pages are eventually cycled through the
848 				 * laundry queue, and an activation is a valid
849 				 * way out.
850 				 */
851 				if (!in_shortfall)
852 					launder--;
853 				VM_CNT_INC(v_reactivated);
854 				goto skip_page;
855 			} else if ((object->flags & OBJ_DEAD) == 0) {
856 				new.flags |= PGA_REQUEUE;
857 				if (!vm_page_pqstate_commit(m, &old, new))
858 					continue;
859 				goto skip_page;
860 			}
861 			break;
862 		}
863 
864 		/*
865 		 * If the page appears to be clean at the machine-independent
866 		 * layer, then remove all of its mappings from the pmap in
867 		 * anticipation of freeing it.  If, however, any of the page's
868 		 * mappings allow write access, then the page may still be
869 		 * modified until the last of those mappings are removed.
870 		 */
871 		if (object->ref_count != 0) {
872 			vm_page_test_dirty(m);
873 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
874 				goto skip_page;
875 		}
876 
877 		/*
878 		 * Clean pages are freed, and dirty pages are paged out unless
879 		 * they belong to a dead object.  Requeueing dirty pages from
880 		 * dead objects is pointless, as they are being paged out and
881 		 * freed by the thread that destroyed the object.
882 		 */
883 		if (m->dirty == 0) {
884 free_page:
885 			/*
886 			 * Now we are guaranteed that no other threads are
887 			 * manipulating the page, check for a last-second
888 			 * reference.
889 			 */
890 			if (vm_pageout_defer(m, queue, true))
891 				goto skip_page;
892 			vm_page_free(m);
893 			VM_CNT_INC(v_dfree);
894 		} else if ((object->flags & OBJ_DEAD) == 0) {
895 			if ((object->flags & OBJ_SWAP) != 0)
896 				pageout_ok = disable_swap_pageouts == 0;
897 			else
898 				pageout_ok = true;
899 			if (!pageout_ok) {
900 				vm_page_launder(m);
901 				goto skip_page;
902 			}
903 
904 			/*
905 			 * Form a cluster with adjacent, dirty pages from the
906 			 * same object, and page out that entire cluster.
907 			 *
908 			 * The adjacent, dirty pages must also be in the
909 			 * laundry.  However, their mappings are not checked
910 			 * for new references.  Consequently, a recently
911 			 * referenced page may be paged out.  However, that
912 			 * page will not be prematurely reclaimed.  After page
913 			 * out, the page will be placed in the inactive queue,
914 			 * where any new references will be detected and the
915 			 * page reactivated.
916 			 */
917 			error = vm_pageout_clean(m, &numpagedout);
918 			if (error == 0) {
919 				launder -= numpagedout;
920 				ss.scanned += numpagedout;
921 			} else if (error == EDEADLK) {
922 				pageout_lock_miss++;
923 				vnodes_skipped++;
924 			}
925 			object = NULL;
926 		} else {
927 skip_page:
928 			vm_page_xunbusy(m);
929 		}
930 	}
931 	if (object != NULL) {
932 		VM_OBJECT_WUNLOCK(object);
933 		object = NULL;
934 	}
935 	vm_pagequeue_lock(pq);
936 	vm_pageout_end_scan(&ss);
937 	vm_pagequeue_unlock(pq);
938 
939 	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
940 		queue = PQ_LAUNDRY;
941 		goto scan;
942 	}
943 
944 	/*
945 	 * Wake up the sync daemon if we skipped a vnode in a writeable object
946 	 * and we didn't launder enough pages.
947 	 */
948 	if (vnodes_skipped > 0 && launder > 0)
949 		(void)speedup_syncer();
950 
951 	return (starting_target - launder);
952 }
953 
954 /*
955  * Compute the integer square root.
956  */
957 static u_int
958 isqrt(u_int num)
959 {
960 	u_int bit, root, tmp;
961 
962 	bit = num != 0 ? (1u << ((fls(num) - 1) & ~1)) : 0;
963 	root = 0;
964 	while (bit != 0) {
965 		tmp = root + bit;
966 		root >>= 1;
967 		if (num >= tmp) {
968 			num -= tmp;
969 			root += bit;
970 		}
971 		bit >>= 2;
972 	}
973 	return (root);
974 }
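
/*
 * Spot checks (illustrative): "bit" starts at the largest power of four not
 * exceeding num, and each iteration decides one bit of the root, e.g.
 * isqrt(27) == 5 (25 <= 27 < 36) and isqrt(127) == 11 (121 <= 127 < 144).
 */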
975 
976 /*
977  * Perform the work of the laundry thread: periodically wake up and determine
978  * whether any pages need to be laundered.  If so, determine the number of pages
979  * that need to be laundered, and launder them.
980  */
981 static void
982 vm_pageout_laundry_worker(void *arg)
983 {
984 	struct vm_domain *vmd;
985 	struct vm_pagequeue *pq;
986 	uint64_t nclean, ndirty, nfreed;
987 	int domain, last_target, launder, shortfall, shortfall_cycle, target;
988 	bool in_shortfall;
989 
990 	domain = (uintptr_t)arg;
991 	vmd = VM_DOMAIN(domain);
992 	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
993 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
994 
995 	shortfall = 0;
996 	in_shortfall = false;
997 	shortfall_cycle = 0;
998 	last_target = target = 0;
999 	nfreed = 0;
1000 
1001 	/*
1002 	 * Calls to these handlers are serialized by the swap syscall lock.
1003 	 */
1004 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
1005 	    EVENTHANDLER_PRI_ANY);
1006 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
1007 	    EVENTHANDLER_PRI_ANY);
1008 
1009 	/*
1010 	 * The pageout laundry worker is never done, so loop forever.
1011 	 */
1012 	for (;;) {
1013 		KASSERT(target >= 0, ("negative target %d", target));
1014 		KASSERT(shortfall_cycle >= 0,
1015 		    ("negative cycle %d", shortfall_cycle));
1016 		launder = 0;
1017 
1018 		/*
1019 		 * First determine whether we need to launder pages to meet a
1020 		 * shortage of free pages.
1021 		 */
1022 		if (shortfall > 0) {
1023 			in_shortfall = true;
1024 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1025 			target = shortfall;
1026 		} else if (!in_shortfall)
1027 			goto trybackground;
1028 		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
1029 			/*
1030 			 * We recently entered shortfall and began laundering
1031 			 * pages.  If we have completed that laundering run
1032 			 * (and we are no longer in shortfall) or we have met
1033 			 * our laundry target through other activity, then we
1034 			 * can stop laundering pages.
1035 			 */
1036 			in_shortfall = false;
1037 			target = 0;
1038 			goto trybackground;
1039 		}
1040 		launder = target / shortfall_cycle--;
1041 		goto dolaundry;
1042 
1043 		/*
1044 		 * There's no immediate need to launder any pages; see if we
1045 		 * meet the conditions to perform background laundering:
1046 		 *
1047 		 * 1. The ratio of dirty to clean inactive pages exceeds the
1048 		 *    background laundering threshold, or
1049 		 * 2. we haven't yet reached the target of the current
1050 		 *    background laundering run.
1051 		 *
1052 		 * The background laundering threshold is not a constant.
1053 		 * Instead, it is a slowly growing function of the number of
1054 		 * clean pages freed by the page daemon since the last
1055 		 * background laundering.  Thus, as the ratio of dirty to
1056 		 * clean inactive pages grows, the amount of memory pressure
1057 		 * required to trigger laundering decreases.  We ensure
1058 		 * that the threshold is non-zero after an inactive queue
1059 		 * scan, even if that scan failed to free a single clean page.
1060 		 */
1061 trybackground:
1062 		nclean = vmd->vmd_free_count +
1063 		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1064 		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1065 		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1066 		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1067 			target = vmd->vmd_background_launder_target;
1068 		}
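		/*
		 * Numeric illustration (hypothetical values): with nfreed == 0
		 * the isqrt() factor is 1, so background laundering starts
		 * only once ndirty >= nclean; if enough clean pages have been
		 * freed that the factor grows to, say, 4, the trigger relaxes
		 * to ndirty >= nclean / 4.
		 */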
1069 
1070 		/*
1071 		 * We have a non-zero background laundering target.  If we've
1072 		 * laundered up to our maximum without observing a page daemon
1073 		 * request, just stop.  This is a safety belt that ensures we
1074 		 * don't launder an excessive amount if memory pressure is low
1075 		 * and the ratio of dirty to clean pages is large.  Otherwise,
1076 		 * proceed at the background laundering rate.
1077 		 */
1078 		if (target > 0) {
1079 			if (nfreed > 0) {
1080 				nfreed = 0;
1081 				last_target = target;
1082 			} else if (last_target - target >=
1083 			    vm_background_launder_max * 1024 / PAGE_SIZE) {
1084 				target = 0;
1085 			}
1086 			launder = vm_background_launder_rate * 1024 / PAGE_SIZE;
1087 			launder /= VM_LAUNDER_RATE;
1088 			if (launder > target)
1089 				launder = target;
1090 		}
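		/*
		 * For example (assuming 4 KB pages and the defaults above),
		 * 4096 KB/s corresponds to 1024 pages per second, so each
		 * 1/VM_LAUNDER_RATE (0.1 s) wakeup launders about 102 pages
		 * until the target is met.
		 */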
1091 
1092 dolaundry:
1093 		if (launder > 0) {
1094 			/*
1095 			 * Because of I/O clustering, the number of laundered
1096 			 * pages could exceed "target" by the maximum size of
1097 			 * a cluster minus one.
1098 			 */
1099 			target -= min(vm_pageout_launder(vmd, launder,
1100 			    in_shortfall), target);
1101 			pause("laundp", hz / VM_LAUNDER_RATE);
1102 		}
1103 
1104 		/*
1105 		 * If we're not currently laundering pages and the page daemon
1106 		 * hasn't posted a new request, sleep until the page daemon
1107 		 * kicks us.
1108 		 */
1109 		vm_pagequeue_lock(pq);
1110 		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1111 			(void)mtx_sleep(&vmd->vmd_laundry_request,
1112 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1113 
1114 		/*
1115 		 * If the pagedaemon has indicated that it's in shortfall, start
1116 		 * a shortfall laundering unless we're already in the middle of
1117 		 * one.  This may preempt a background laundering.
1118 		 */
1119 		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1120 		    (!in_shortfall || shortfall_cycle == 0)) {
1121 			shortfall = vm_laundry_target(vmd) +
1122 			    vmd->vmd_pageout_deficit;
1123 			target = 0;
1124 		} else
1125 			shortfall = 0;
1126 
1127 		if (target == 0)
1128 			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
1129 		nfreed += vmd->vmd_clean_pages_freed;
1130 		vmd->vmd_clean_pages_freed = 0;
1131 		vm_pagequeue_unlock(pq);
1132 	}
1133 }
1134 
1135 /*
1136  * Compute the number of pages we want to try to move from the
1137  * active queue to either the inactive or laundry queue.
1138  *
1139  * When scanning active pages during a shortage, we make clean pages
1140  * count more heavily towards the page shortage than dirty pages.
1141  * This is because dirty pages must be laundered before they can be
1142  * reused and thus have less utility when attempting to quickly
1143  * alleviate a free page shortage.  However, this weighting also
1144  * causes the scan to deactivate dirty pages more aggressively,
1145  * improving the effectiveness of clustering.
1146  */
1147 static int
1148 vm_pageout_active_target(struct vm_domain *vmd)
1149 {
1150 	int shortage;
1151 
1152 	shortage = vmd->vmd_inactive_target + vm_paging_target(vmd) -
1153 	    (vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt +
1154 	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight);
1155 	shortage *= act_scan_laundry_weight;
1156 	return (shortage);
1157 }
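
/*
 * Worked example (hypothetical counts, act_scan_laundry_weight == 3): with
 * vmd_inactive_target + vm_paging_target() == 1000 pages, 400 inactive pages,
 * and 300 laundry pages, the deficit is 1000 - (400 + 300 / 3) = 500, and the
 * returned target is 500 * 3 = 1500; in vm_pageout_scan_active(), each clean
 * deactivation then repays 3 units of that target and each dirty one repays 1.
 */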
1158 
1159 /*
1160  * Scan the active queue.  If there is no shortage of inactive pages, scan a
1161  * small portion of the queue in order to maintain quasi-LRU.
1162  */
1163 static void
1164 vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
1165 {
1166 	struct scan_state ss;
1167 	vm_object_t object;
1168 	vm_page_t m, marker;
1169 	struct vm_pagequeue *pq;
1170 	vm_page_astate_t old, new;
1171 	long min_scan;
1172 	int act_delta, max_scan, ps_delta, refs, scan_tick;
1173 	uint8_t nqueue;
1174 
1175 	marker = &vmd->vmd_markers[PQ_ACTIVE];
1176 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1177 	vm_pagequeue_lock(pq);
1178 
1179 	/*
1180 	 * If we're just idle polling, attempt to visit every
1181 	 * active page within 'update_period' seconds.
1182 	 */
1183 	scan_tick = ticks;
1184 	if (vm_pageout_update_period != 0) {
1185 		min_scan = pq->pq_cnt;
1186 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
1187 		min_scan /= hz * vm_pageout_update_period;
1188 	} else
1189 		min_scan = 0;
1190 	if (min_scan > 0 || (page_shortage > 0 && pq->pq_cnt > 0))
1191 		vmd->vmd_last_active_scan = scan_tick;
1192 
1193 	/*
1194 	 * Scan the active queue for pages that can be deactivated.  Update
1195 	 * the per-page activity counter and use it to identify deactivation
1196 	 * candidates.  Held pages may be deactivated.
1197 	 *
1198 	 * To avoid requeuing each page that remains in the active queue, we
1199 	 * implement the CLOCK algorithm.  To keep the implementation of the
1200 	 * enqueue operation consistent for all page queues, we use two hands,
1201 	 * represented by marker pages. Scans begin at the first hand, which
1202 	 * precedes the second hand in the queue.  When the two hands meet,
1203 	 * they are moved back to the head and tail of the queue, respectively,
1204 	 * and scanning resumes.
1205 	 */
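	/*
	 * Pictorially (illustrative), with the hands denoted [0] and [1]:
	 *
	 *	head -> [0] p1 p2 ... pn [1] -> tail
	 *
	 * A scan begins after [0] and terminates upon reaching [1], at which
	 * point both hands are reset to the queue's head and tail.
	 */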
1206 	max_scan = page_shortage > 0 ? pq->pq_cnt : min_scan;
1207 act_scan:
1208 	vm_pageout_init_scan(&ss, pq, marker, &vmd->vmd_clock[0], max_scan);
1209 	while ((m = vm_pageout_next(&ss, false)) != NULL) {
1210 		if (__predict_false(m == &vmd->vmd_clock[1])) {
1211 			vm_pagequeue_lock(pq);
1212 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1213 			TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[1], plinks.q);
1214 			TAILQ_INSERT_HEAD(&pq->pq_pl, &vmd->vmd_clock[0],
1215 			    plinks.q);
1216 			TAILQ_INSERT_TAIL(&pq->pq_pl, &vmd->vmd_clock[1],
1217 			    plinks.q);
1218 			max_scan -= ss.scanned;
1219 			vm_pageout_end_scan(&ss);
1220 			goto act_scan;
1221 		}
1222 		if (__predict_false((m->flags & PG_MARKER) != 0))
1223 			continue;
1224 
1225 		/*
1226 		 * Don't touch a page that was removed from the queue after the
1227 		 * page queue lock was released.  Otherwise, ensure that any
1228 		 * pending queue operations, such as dequeues for wired pages,
1229 		 * are handled.
1230 		 */
1231 		if (vm_pageout_defer(m, PQ_ACTIVE, true))
1232 			continue;
1233 
1234 		/*
1235 		 * A page's object pointer may be set to NULL before
1236 		 * the object lock is acquired.
1237 		 */
1238 		object = atomic_load_ptr(&m->object);
1239 		if (__predict_false(object == NULL))
1240 			/*
1241 			 * The page has been removed from its object.
1242 			 */
1243 			continue;
1244 
1245 		/* Deferred free of swap space. */
1246 		if ((m->a.flags & PGA_SWAP_FREE) != 0 &&
1247 		    VM_OBJECT_TRYWLOCK(object)) {
1248 			if (m->object == object)
1249 				vm_pager_page_unswapped(m);
1250 			VM_OBJECT_WUNLOCK(object);
1251 		}
1252 
1253 		/*
1254 		 * Check to see "how much" the page has been used.
1255 		 *
1256 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
1257 		 * that a reference from a concurrently destroyed mapping is
1258 		 * observed here and now.
1259 		 *
1260 		 * Perform an unsynchronized object ref count check.  While
1261 		 * the page lock ensures that the page is not reallocated to
1262 		 * another object, in particular, one with unmanaged mappings
1263 		 * that cannot support pmap_ts_referenced(), two races are,
1264 		 * nonetheless, possible:
1265 		 * 1) The count was transitioning to zero, but we saw a non-
1266 		 *    zero value.  pmap_ts_referenced() will return zero
1267 		 *    because the page is not mapped.
1268 		 * 2) The count was transitioning to one, but we saw zero.
1269 		 *    This race delays the detection of a new reference.  At
1270 		 *    worst, we will deactivate and reactivate the page.
1271 		 */
1272 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1273 
1274 		old = vm_page_astate_load(m);
1275 		do {
1276 			/*
1277 			 * Check to see if the page has been removed from the
1278 			 * queue since the first such check.  Leave it alone if
1279 			 * so, discarding any references collected by
1280 			 * pmap_ts_referenced().
1281 			 */
1282 			if (__predict_false(_vm_page_queue(old) == PQ_NONE)) {
1283 				ps_delta = 0;
1284 				break;
1285 			}
1286 
1287 			/*
1288 			 * Advance or decay the act_count based on recent usage.
1289 			 */
1290 			new = old;
1291 			act_delta = refs;
1292 			if ((old.flags & PGA_REFERENCED) != 0) {
1293 				new.flags &= ~PGA_REFERENCED;
1294 				act_delta++;
1295 			}
1296 			if (act_delta != 0) {
1297 				new.act_count += ACT_ADVANCE + act_delta;
1298 				if (new.act_count > ACT_MAX)
1299 					new.act_count = ACT_MAX;
1300 			} else {
1301 				new.act_count -= min(new.act_count,
1302 				    ACT_DECLINE);
1303 			}
1304 
1305 			if (new.act_count > 0) {
1306 				/*
1307 				 * Adjust the activation count and keep the page
1308 				 * in the active queue.  The count might be left
1309 				 * unchanged if it is saturated.  The page may
1310 				 * have been moved to a different queue since we
1311 				 * started the scan, in which case we move it
1312 				 * back.
1313 				 */
1314 				ps_delta = 0;
1315 				if (old.queue != PQ_ACTIVE) {
1316 					new.flags &= ~PGA_QUEUE_OP_MASK;
1317 					new.flags |= PGA_REQUEUE;
1318 					new.queue = PQ_ACTIVE;
1319 				}
1320 			} else {
1321 				/*
1322 				 * When not short for inactive pages, let dirty
1323 				 * pages go through the inactive queue before
1324 				 * moving to the laundry queue.  This gives them
1325 				 * some extra time to be reactivated,
1326 				 * potentially avoiding an expensive pageout.
1327 				 * However, during a page shortage, the inactive
1328 				 * queue is necessarily small, and so dirty
1329 				 * pages would only spend a trivial amount of
1330 				 * time in the inactive queue.  Therefore, we
1331 				 * might as well place them directly in the
1332 				 * laundry queue to reduce queuing overhead.
1333 				 *
1334 				 * Calling vm_page_test_dirty() here would
1335 				 * require acquisition of the object's write
1336 				 * lock.  However, during a page shortage,
1337 				 * directing dirty pages into the laundry queue
1338 				 * is only an optimization and not a
1339 				 * requirement.  Therefore, we simply rely on
1340 				 * the opportunistic updates to the page's dirty
1341 				 * field by the pmap.
1342 				 */
1343 				if (page_shortage <= 0) {
1344 					nqueue = PQ_INACTIVE;
1345 					ps_delta = 0;
1346 				} else if (m->dirty == 0) {
1347 					nqueue = PQ_INACTIVE;
1348 					ps_delta = act_scan_laundry_weight;
1349 				} else {
1350 					nqueue = PQ_LAUNDRY;
1351 					ps_delta = 1;
1352 				}
1353 
1354 				new.flags &= ~PGA_QUEUE_OP_MASK;
1355 				new.flags |= PGA_REQUEUE;
1356 				new.queue = nqueue;
1357 			}
1358 		} while (!vm_page_pqstate_commit(m, &old, new));
1359 
1360 		page_shortage -= ps_delta;
1361 	}
1362 	vm_pagequeue_lock(pq);
1363 	TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_clock[0], plinks.q);
1364 	TAILQ_INSERT_AFTER(&pq->pq_pl, marker, &vmd->vmd_clock[0], plinks.q);
1365 	vm_pageout_end_scan(&ss);
1366 	vm_pagequeue_unlock(pq);
1367 }
1368 
1369 static int
1370 vm_pageout_reinsert_inactive_page(struct vm_pagequeue *pq, vm_page_t marker,
1371     vm_page_t m)
1372 {
1373 	vm_page_astate_t as;
1374 
1375 	vm_pagequeue_assert_locked(pq);
1376 
1377 	as = vm_page_astate_load(m);
1378 	if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
1379 		return (0);
1380 	vm_page_aflag_set(m, PGA_ENQUEUED);
1381 	TAILQ_INSERT_BEFORE(marker, m, plinks.q);
1382 	return (1);
1383 }
1384 
1385 /*
1386  * Re-add stuck pages to the inactive queue.  We will examine them again
1387  * during the next scan.  If the queue state of a page has changed since
1388  * it was physically removed from the page queue in
1389  * vm_pageout_collect_batch(), don't do anything with that page.
1390  */
1391 static void
1392 vm_pageout_reinsert_inactive(struct scan_state *ss, struct vm_batchqueue *bq,
1393     vm_page_t m)
1394 {
1395 	struct vm_pagequeue *pq;
1396 	vm_page_t marker;
1397 	int delta;
1398 
1399 	delta = 0;
1400 	marker = ss->marker;
1401 	pq = ss->pq;
1402 
1403 	if (m != NULL) {
1404 		if (vm_batchqueue_insert(bq, m) != 0)
1405 			return;
1406 		vm_pagequeue_lock(pq);
1407 		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1408 	} else
1409 		vm_pagequeue_lock(pq);
1410 	while ((m = vm_batchqueue_pop(bq)) != NULL)
1411 		delta += vm_pageout_reinsert_inactive_page(pq, marker, m);
1412 	vm_pagequeue_cnt_add(pq, delta);
1413 	vm_pagequeue_unlock(pq);
1414 	vm_batchqueue_init(bq);
1415 }
1416 
1417 static void
1418 vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
1419 {
1420 	struct timeval start, end;
1421 	struct scan_state ss;
1422 	struct vm_batchqueue rq;
1423 	struct vm_page marker_page;
1424 	vm_page_t m, marker;
1425 	struct vm_pagequeue *pq;
1426 	vm_object_t object;
1427 	vm_page_astate_t old, new;
1428 	int act_delta, addl_page_shortage, dirty_count, dirty_thresh;
1429 	int starting_page_shortage, refs;
1430 
1431 	object = NULL;
1432 	vm_batchqueue_init(&rq);
1433 	getmicrouptime(&start);
1434 
1435 	/*
1436 	 * The addl_page_shortage is an estimate of the number of temporarily
1437 	 * stuck pages in the inactive queue.  In other words, the
1438 	 * number of pages from the inactive count that should be
1439 	 * discounted in setting the target for the active queue scan.
1440 	 */
1441 	addl_page_shortage = 0;
1442 
1443 	/*
1444 	 * dirty_count is the number of pages encountered that require
1445 	 * laundering before reclamation is possible.  If we encounter a large
1446 	 * number of dirty pages, we may abort the scan without meeting the page
1447 	 * shortage in the hope that laundering will allow a future scan to meet
1448 	 * the target.
1449 	 */
1450 	dirty_count = 0;
1451 	dirty_thresh = inact_scan_laundry_weight * page_shortage;
1452 	if (dirty_thresh == 0)
1453 		dirty_thresh = INT_MAX;
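	/*
	 * For instance (hypothetical), with inact_scan_laundry_weight == 1 and
	 * page_shortage == 100, the scan gives up after meeting 100 dirty
	 * pages, deferring to the laundry thread rather than scanning the
	 * whole queue for scarce clean pages.
	 */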
1454 
1455 	/*
1456 	 * Start scanning the inactive queue for pages that we can free.  The
1457 	 * scan will stop when we reach the target or we have scanned the
1458 	 * entire queue.  (Note that m->a.act_count is not used to make
1459 	 * decisions for the inactive queue, only for the active queue.)
1460 	 */
1461 	starting_page_shortage = page_shortage;
1462 	marker = &marker_page;
1463 	vm_page_init_marker(marker, PQ_INACTIVE, 0);
1464 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1465 	vm_pagequeue_lock(pq);
1466 	vm_pageout_init_scan(&ss, pq, marker, NULL, pq->pq_cnt);
1467 	while (page_shortage > 0 && dirty_count < dirty_thresh) {
1468 		/*
1469 		 * If we need to refill the scan batch queue, release any
1470 		 * optimistically held object lock.  This gives someone else a
1471 		 * chance to grab the lock, and also avoids holding it while we
1472 		 * do unrelated work.
1473 		 */
1474 		if (object != NULL && vm_batchqueue_empty(&ss.bq)) {
1475 			VM_OBJECT_WUNLOCK(object);
1476 			object = NULL;
1477 		}
1478 
1479 		m = vm_pageout_next(&ss, true);
1480 		if (m == NULL)
1481 			break;
1482 		KASSERT((m->flags & PG_MARKER) == 0,
1483 		    ("marker page %p was dequeued", m));
1484 
1485 		/*
1486 		 * Don't touch a page that was removed from the queue after the
1487 		 * page queue lock was released.  Otherwise, ensure that any
1488 		 * pending queue operations, such as dequeues for wired pages,
1489 		 * are handled.
1490 		 */
1491 		if (vm_pageout_defer(m, PQ_INACTIVE, false))
1492 			continue;
1493 
1494 		/*
1495 		 * Lock the page's object.
1496 		 */
1497 		if (object == NULL || object != m->object) {
1498 			if (object != NULL)
1499 				VM_OBJECT_WUNLOCK(object);
1500 			object = atomic_load_ptr(&m->object);
1501 			if (__predict_false(object == NULL))
1502 				/* The page is being freed by another thread. */
1503 				continue;
1504 
1505 			/* Depends on type-stability. */
1506 			VM_OBJECT_WLOCK(object);
1507 			if (__predict_false(m->object != object)) {
1508 				VM_OBJECT_WUNLOCK(object);
1509 				object = NULL;
1510 				goto reinsert;
1511 			}
1512 		}
1513 
1514 		if (vm_page_tryxbusy(m) == 0) {
1515 			/*
1516 			 * Don't mess with busy pages.  Leave them at
1517 			 * the front of the queue.  Most likely, they
1518 			 * are being paged out and will leave the
1519 			 * queue shortly after the scan finishes.  So,
1520 			 * they ought to be discounted from the
1521 			 * inactive count.
1522 			 */
1523 			addl_page_shortage++;
1524 			goto reinsert;
1525 		}
1526 
1527 		/* Deferred free of swap space. */
1528 		if ((m->a.flags & PGA_SWAP_FREE) != 0)
1529 			vm_pager_page_unswapped(m);
1530 
1531 		/*
1532 		 * Check for wirings now that we hold the object lock and have
1533 		 * exclusively busied the page.  If the page is mapped, it may
1534 		 * still be wired by pmap lookups.  The call to
1535 		 * vm_page_try_remove_all() below atomically checks for such
1536 		 * wirings and removes mappings.  If the page is unmapped, the
1537 		 * wire count is guaranteed not to increase after this check.
1538 		 */
1539 		if (__predict_false(vm_page_wired(m)))
1540 			goto skip_page;
1541 
1542 		/*
1543 		 * Invalid pages can be easily freed.  They cannot be
1544 		 * mapped; vm_page_free() asserts this.
1545 		 */
1546 		if (vm_page_none_valid(m))
1547 			goto free_page;
1548 
1549 		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
1550 
1551 		for (old = vm_page_astate_load(m);;) {
1552 			/*
1553 			 * Check to see if the page has been removed from the
1554 			 * queue since the first such check.  Leave it alone if
1555 			 * so, discarding any references collected by
1556 			 * pmap_ts_referenced().
1557 			 */
1558 			if (__predict_false(_vm_page_queue(old) == PQ_NONE))
1559 				goto skip_page;
1560 
1561 			new = old;
1562 			act_delta = refs;
1563 			if ((old.flags & PGA_REFERENCED) != 0) {
1564 				new.flags &= ~PGA_REFERENCED;
1565 				act_delta++;
1566 			}
1567 			if (act_delta == 0) {
1568 				;
1569 			} else if (object->ref_count != 0) {
1570 				/*
1571 				 * Increase the activation count if the
1572 				 * page was referenced while in the
1573 				 * inactive queue.  This makes it less
1574 				 * likely that the page will be returned
1575 				 * prematurely to the inactive queue.
1576 				 */
1577 				new.act_count += ACT_ADVANCE +
1578 				    act_delta;
1579 				if (new.act_count > ACT_MAX)
1580 					new.act_count = ACT_MAX;
1581 
1582 				new.flags &= ~PGA_QUEUE_OP_MASK;
1583 				new.flags |= PGA_REQUEUE;
1584 				new.queue = PQ_ACTIVE;
1585 				if (!vm_page_pqstate_commit(m, &old, new))
1586 					continue;
1587 
1588 				VM_CNT_INC(v_reactivated);
1589 				goto skip_page;
1590 			} else if ((object->flags & OBJ_DEAD) == 0) {
1591 				new.queue = PQ_INACTIVE;
1592 				new.flags |= PGA_REQUEUE;
1593 				if (!vm_page_pqstate_commit(m, &old, new))
1594 					continue;
1595 				goto skip_page;
1596 			}
1597 			break;
1598 		}
1599 
1600 		/*
1601 		 * If the page appears to be clean at the machine-independent
1602 		 * layer, then remove all of its mappings from the pmap in
1603 		 * anticipation of freeing it.  If, however, any of the page's
1604 		 * mappings allow write access, then the page may still be
1605 		 * modified until the last of those mappings are removed.
1606 		 */
1607 		if (object->ref_count != 0) {
1608 			vm_page_test_dirty(m);
1609 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
1610 				goto skip_page;
1611 		}
1612 
1613 		/*
1614 		 * Clean pages can be freed, but dirty pages must be sent back
1615 		 * to the laundry, unless they belong to a dead object.
1616 		 * Requeueing dirty pages from dead objects is pointless, as
1617 		 * they are being paged out and freed by the thread that
1618 		 * destroyed the object.
1619 		 */
1620 		if (m->dirty == 0) {
1621 free_page:
1622 			/*
1623 			 * Now we are guaranteed that no other threads are
1624 			 * manipulating the page, check for a last-second
1625 			 * reference that would save it from doom.
1626 			 */
1627 			if (vm_pageout_defer(m, PQ_INACTIVE, false))
1628 				goto skip_page;
1629 
1630 			/*
1631 			 * Because we dequeued the page and have already checked
1632 			 * for pending dequeue and enqueue requests, we can
1633 			 * safely disassociate the page from the inactive queue
1634 			 * without holding the queue lock.
1635 			 */
1636 			m->a.queue = PQ_NONE;
1637 			vm_page_free(m);
1638 			page_shortage--;
1639 			continue;
1640 		}
1641 		if ((object->flags & OBJ_DEAD) == 0) {
1642 			vm_page_launder(m);
1643 
1644 			/*
1645 			 * If the page would be paged out to a swap device, and
1646 			 * no devices are configured or they are all nearly
1647 			 * full, then don't count it against our threshold,
1648 			 * since it most likely can't be used to meet our
1649 			 * target.
1650 			 */
1651 			if ((object->flags & OBJ_SWAP) == 0 ||
1652 			    !atomic_load_bool(&swap_pager_almost_full))
1653 				dirty_count++;
1654 		}
1655 skip_page:
1656 		vm_page_xunbusy(m);
1657 		continue;
1658 reinsert:
1659 		vm_pageout_reinsert_inactive(&ss, &rq, m);
1660 	}
1661 	if (object != NULL)
1662 		VM_OBJECT_WUNLOCK(object);
1663 	vm_pageout_reinsert_inactive(&ss, &rq, NULL);
1664 	vm_pageout_reinsert_inactive(&ss, &ss.bq, NULL);
1665 	vm_pagequeue_lock(pq);
1666 	vm_pageout_end_scan(&ss);
1667 	vm_pagequeue_unlock(pq);
1668 
1669 	/*
1670 	 * Record the remaining shortage, the progress made, and the time taken.
1671 	 */
1672 	atomic_add_int(&vmd->vmd_addl_shortage, addl_page_shortage);
1673 	getmicrouptime(&end);
1674 	timevalsub(&end, &start);
1675 	atomic_add_int(&vmd->vmd_inactive_us,
1676 	    end.tv_sec * 1000000 + end.tv_usec);
1677 	atomic_add_int(&vmd->vmd_inactive_freed,
1678 	    starting_page_shortage - page_shortage);
1679 }
1680 
1681 /*
1682  * Dispatch a number of inactive threads according to load and collect the
1683  * results to present a coherent view of paging activity on this domain.
1684  */
1685 static int
1686 vm_pageout_inactive_dispatch(struct vm_domain *vmd, int shortage)
1687 {
1688 	u_int freed, pps, slop, threads, us;
1689 
1690 	vmd->vmd_inactive_shortage = shortage;
1691 	slop = 0;
1692 
1693 	/*
1694 	 * If we have more work than we can do in a quarter of our interval, we
1695 	 * fire off multiple threads to process it.
1696 	 */
1697 	if ((threads = vmd->vmd_inactive_threads) > 1 &&
1698 	    vmd->vmd_helper_threads_enabled &&
1699 	    vmd->vmd_inactive_pps != 0 &&
1700 	    shortage > vmd->vmd_inactive_pps / VM_INACT_SCAN_RATE / 4) {
1701 		vmd->vmd_inactive_shortage /= threads;
1702 		slop = shortage % threads;
1703 		vm_domain_pageout_lock(vmd);
1704 		blockcount_acquire(&vmd->vmd_inactive_starting, threads - 1);
1705 		blockcount_acquire(&vmd->vmd_inactive_running, threads - 1);
1706 		wakeup(&vmd->vmd_inactive_shortage);
1707 		vm_domain_pageout_unlock(vmd);
1708 	}
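	/*
	 * For example (hypothetical numbers): a shortage of 1001 pages
	 * split across four threads gives vmd_inactive_shortage = 250,
	 * and the local scan below picks up the slop of one page.
	 */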
1709 
1710 	/* Run the local thread scan. */
1711 	vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage + slop);
1712 
1713 	/*
1714 	 * Block until helper threads report results and then accumulate
1715 	 * totals.
1716 	 */
1717 	blockcount_wait(&vmd->vmd_inactive_running, NULL, "vmpoid", PVM);
1718 	freed = atomic_readandclear_int(&vmd->vmd_inactive_freed);
1719 	VM_CNT_ADD(v_dfree, freed);
1720 
1721 	/*
1722 	 * Calculate the per-thread paging rate with an exponential decay of
1723 	 * prior results.  Careful to avoid integer rounding errors with large
1724 	 * us values.
1725 	 */
1726 	us = max(atomic_readandclear_int(&vmd->vmd_inactive_us), 1);
1727 	if (us > 1000000)
1728 		/* Keep rounding to tenths */
1729 		pps = (freed * 10) / ((us * 10) / 1000000);
1730 	else
1731 		pps = (1000000 / us) * freed;
1732 	vmd->vmd_inactive_pps = (vmd->vmd_inactive_pps / 2) + (pps / 2);
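	/*
	 * For instance, freeing 5000 pages in 2.5s (us = 2500000) takes
	 * the first branch: (5000 * 10) / ((2500000 * 10) / 1000000) =
	 * 50000 / 25 = 2000 pages/s, which is then averaged with the
	 * previous estimate above.
	 */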
1733 
1734 	return (shortage - freed);
1735 }
1736 
1737 /*
1738  * Attempt to reclaim the requested number of pages from the inactive queue.
1739  * Returns true if the shortage was addressed.
1740  */
1741 static int
1742 vm_pageout_inactive(struct vm_domain *vmd, int shortage, int *addl_shortage)
1743 {
1744 	struct vm_pagequeue *pq;
1745 	u_int addl_page_shortage, deficit, page_shortage;
1746 	u_int starting_page_shortage;
1747 
1748 	/*
1749 	 * vmd_pageout_deficit counts the number of pages requested in
1750 	 * allocations that failed because of a free page shortage.  We assume
1751 	 * that the allocations will be reattempted and thus include the deficit
1752 	 * in our scan target.
1753 	 */
1754 	deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
1755 	starting_page_shortage = shortage + deficit;
1756 
1757 	/*
1758 	 * Run the inactive scan on as many threads as is necessary.
1759 	 */
1760 	page_shortage = vm_pageout_inactive_dispatch(vmd, starting_page_shortage);
1761 	addl_page_shortage = atomic_readandclear_int(&vmd->vmd_addl_shortage);
1762 
1763 	/*
1764 	 * Wake up the laundry thread so that it can perform any needed
1765 	 * laundering.  If we didn't meet our target, we're in shortfall and
1766 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1767 	 * swap devices are configured, the laundry thread has no work to do, so
1768 	 * don't bother waking it up.
1769 	 *
1770 	 * The laundry thread uses the number of inactive queue scans elapsed
1771 	 * since the last laundering to determine whether to launder again, so
1772 	 * keep count.
1773 	 */
1774 	if (starting_page_shortage > 0) {
1775 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1776 		vm_pagequeue_lock(pq);
1777 		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1778 		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1779 			if (page_shortage > 0) {
1780 				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
1781 				VM_CNT_INC(v_pdshortfalls);
1782 			} else if (vmd->vmd_laundry_request !=
1783 			    VM_LAUNDRY_SHORTFALL)
1784 				vmd->vmd_laundry_request =
1785 				    VM_LAUNDRY_BACKGROUND;
1786 			wakeup(&vmd->vmd_laundry_request);
1787 		}
1788 		vmd->vmd_clean_pages_freed +=
1789 		    starting_page_shortage - page_shortage;
1790 		vm_pagequeue_unlock(pq);
1791 	}
1792 
1793 	/*
1794 	 * If the inactive queue scan fails repeatedly to meet its
1795 	 * target, kill the largest process.
1796 	 */
1797 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1798 
1799 	/*
1800 	 * See the description of addl_page_shortage above.
1801 	 */
1802 	*addl_shortage = addl_page_shortage + deficit;
1803 
1804 	return (page_shortage <= 0);
1805 }
1806 
1807 static int vm_pageout_oom_vote;
1808 
1809 /*
1810  * The pagedaemon threads randomly select one to perform the
1811  * OOM kill.  Killing processes before all pagedaemons have
1812  * failed to reach the free target would be premature.
1813  */
1814 static void
1815 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1816     int starting_page_shortage)
1817 {
1818 	int old_vote;
1819 
1820 	/*
1821 	 * Do not trigger an OOM kill if the page daemon is able to make
1822 	 * progress, or if there is no instantaneous shortage.  The latter case
1823 	 * can happen if the PID controller is still reacting to an acute
1824 	 * shortage, and the inactive queue is full of dirty pages.
1825 	 */
1826 	if (starting_page_shortage <= 0 || starting_page_shortage !=
1827 	    page_shortage || !vm_paging_needed(vmd, vmd->vmd_free_count))
1828 		vmd->vmd_oom_seq = 0;
1829 	else
1830 		vmd->vmd_oom_seq++;
1831 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1832 		if (vmd->vmd_oom) {
1833 			vmd->vmd_oom = false;
1834 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1835 		}
1836 		return;
1837 	}
1838 
1839 	/*
1840 	 * Do not restart the OOM call sequence until the OOM
1841 	 * condition has been cleared.
1842 	 */
1843 	vmd->vmd_oom_seq = 0;
1844 
1845 	if (vmd->vmd_oom)
1846 		return;
1847 
1848 	vmd->vmd_oom = true;
1849 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1850 	if (old_vote != vm_ndomains - 1)
1851 		return;
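	/*
	 * E.g., with vm_ndomains == 4 the fetchadd above must observe an
	 * old vote count of 3 before the kill can proceed; earlier voters
	 * return here and rescan instead.
	 */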
1852 
1853 	/*
1854 	 * The current pagedaemon thread is the last in the quorum to
1855 	 * start OOM.  Initiate the selection and signaling of the
1856 	 * victim.
1857 	 */
1858 	vm_pageout_oom(VM_OOM_MEM);
1859 
1860 	/*
1861 	 * After one round of OOM terror, recall our vote.  On the
1862 	 * next pass, current pagedaemon would vote again if the low
1863 	 * memory condition is still there, due to vmd_oom being
1864 	 * false.
1865 	 */
1866 	vmd->vmd_oom = false;
1867 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
1868 }
1869 
1870 /*
1871  * The OOM killer is the page daemon's action of last resort when
1872  * memory allocation requests have been stalled for a prolonged period
1873  * of time because it cannot reclaim memory.  This function computes
1874  * the approximate number of physical pages that could be reclaimed if
1875  * the specified address space is destroyed.
1876  *
1877  * Private, anonymous memory owned by the address space is the
1878  * principal resource that we expect to recover after an OOM kill.
1879  * Since the physical pages mapped by the address space's COW entries
1880  * are typically shared pages, they are unlikely to be released and so
1881  * they are not counted.
1882  *
1883  * To get to the point where the page daemon runs the OOM killer, its
1884  * efforts to write-back vnode-backed pages may have stalled.  This
1885  * could be caused by a memory allocation deadlock in the write path
1886  * that might be resolved by an OOM kill.  Therefore, physical pages
1887  * belonging to vnode-backed objects are counted, because they might
1888  * be freed without being written out first if the address space holds
1889  * the last reference to an unlinked vnode.
1890  *
1891  * Similarly, physical pages belonging to OBJT_PHYS objects are
1892  * counted because the address space might hold the last reference to
1893  * the object.
1894  */
1895 static long
1896 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1897 {
1898 	vm_map_t map;
1899 	vm_map_entry_t entry;
1900 	vm_object_t obj;
1901 	long res;
1902 
1903 	map = &vmspace->vm_map;
1904 	KASSERT(!vm_map_is_system(map), ("system map"));
1905 	sx_assert(&map->lock, SA_LOCKED);
1906 	res = 0;
1907 	VM_MAP_ENTRY_FOREACH(entry, map) {
1908 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1909 			continue;
1910 		obj = entry->object.vm_object;
1911 		if (obj == NULL)
1912 			continue;
1913 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1914 		    obj->ref_count != 1)
1915 			continue;
1916 		if (obj->type == OBJT_PHYS || obj->type == OBJT_VNODE ||
1917 		    (obj->flags & OBJ_SWAP) != 0)
1918 			res += obj->resident_page_count;
1919 	}
1920 	return (res);
1921 }
1922 
1923 static int vm_oom_ratelim_last;
1924 static int vm_oom_pf_secs = 10;
1925 SYSCTL_INT(_vm, OID_AUTO, oom_pf_secs, CTLFLAG_RWTUN, &vm_oom_pf_secs, 0,
1926     "Rate-limit interval for OOM kills triggered by page faults, in seconds");
1927 static struct mtx vm_oom_ratelim_mtx;
1928 
1929 void
1930 vm_pageout_oom(int shortage)
1931 {
1932 	const char *reason;
1933 	struct proc *p, *bigproc;
1934 	vm_offset_t size, bigsize;
1935 	struct thread *td;
1936 	struct vmspace *vm;
1937 	int now;
1938 	bool breakout;
1939 
1940 	/*
1941 	 * For OOM requests originating from vm_fault(), there is a high
1942 	 * chance that a single large process faults simultaneously in
1943 	 * several threads.  Also, on an active system running many
1944 	 * middle-sized processes, such as a buildworld, all of them
1945 	 * could fault almost simultaneously as well.
1946 	 *
1947 	 * To avoid killing too many processes, rate-limit OOMs
1948 	 * initiated by vm_fault() time-outs on the waits for free
1949 	 * pages.
1950 	 */
1951 	mtx_lock(&vm_oom_ratelim_mtx);
1952 	now = ticks;
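	/*
	 * The unsigned subtraction below keeps the rate-limit comparison
	 * correct even after the ticks counter wraps around.
	 */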
1953 	if (shortage == VM_OOM_MEM_PF &&
1954 	    (u_int)(now - vm_oom_ratelim_last) < hz * vm_oom_pf_secs) {
1955 		mtx_unlock(&vm_oom_ratelim_mtx);
1956 		return;
1957 	}
1958 	vm_oom_ratelim_last = now;
1959 	mtx_unlock(&vm_oom_ratelim_mtx);
1960 
1961 	/*
1962 	 * We keep the process bigproc locked once we find it to keep anyone
1963 	 * from messing with it; however, there is a possibility of
1964 	 * deadlock if process B is bigproc and one of its child processes
1965 	 * attempts to propagate a signal to B while we wait for the lock
1966 	 * of some other process on the list.  To avoid this, we don't
1967 	 * block on the process lock but just skip any already-locked one.
1968 	 */
1969 	bigproc = NULL;
1970 	bigsize = 0;
1971 	sx_slock(&allproc_lock);
1972 	FOREACH_PROC_IN_SYSTEM(p) {
1973 		PROC_LOCK(p);
1974 
1975 		/*
1976 		 * If this is a system, protected or killed process, skip it.
1977 		 */
1978 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1979 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1980 		    p->p_pid == 1 || P_KILLED(p) ||
1981 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
1982 			PROC_UNLOCK(p);
1983 			continue;
1984 		}
1985 		/*
1986 		 * If the process is in a non-runnable state, don't touch
1987 		 * it.  Check all of its threads individually.
1988 		 */
1989 		breakout = false;
1990 		FOREACH_THREAD_IN_PROC(p, td) {
1991 			thread_lock(td);
1992 			if (!TD_ON_RUNQ(td) &&
1993 			    !TD_IS_RUNNING(td) &&
1994 			    !TD_IS_SLEEPING(td) &&
1995 			    !TD_IS_SUSPENDED(td)) {
1996 				thread_unlock(td);
1997 				breakout = true;
1998 				break;
1999 			}
2000 			thread_unlock(td);
2001 		}
2002 		if (breakout) {
2003 			PROC_UNLOCK(p);
2004 			continue;
2005 		}
2006 		/*
2007 		 * Get the process size.
2008 		 */
2009 		vm = vmspace_acquire_ref(p);
2010 		if (vm == NULL) {
2011 			PROC_UNLOCK(p);
2012 			continue;
2013 		}
2014 		_PHOLD(p);
2015 		PROC_UNLOCK(p);
2016 		sx_sunlock(&allproc_lock);
2017 		if (!vm_map_trylock_read(&vm->vm_map)) {
2018 			vmspace_free(vm);
2019 			sx_slock(&allproc_lock);
2020 			PRELE(p);
2021 			continue;
2022 		}
2023 		size = vmspace_swap_count(vm);
2024 		if (shortage == VM_OOM_MEM || shortage == VM_OOM_MEM_PF)
2025 			size += vm_pageout_oom_pagecount(vm);
2026 		vm_map_unlock_read(&vm->vm_map);
2027 		vmspace_free(vm);
2028 		sx_slock(&allproc_lock);
2029 
2030 		/*
2031 		 * If this process is bigger than the biggest one,
2032 		 * remember it.
2033 		 */
2034 		if (size > bigsize) {
2035 			if (bigproc != NULL)
2036 				PRELE(bigproc);
2037 			bigproc = p;
2038 			bigsize = size;
2039 		} else {
2040 			PRELE(p);
2041 		}
2042 	}
2043 	sx_sunlock(&allproc_lock);
2044 
2045 	if (bigproc != NULL) {
2046 		switch (shortage) {
2047 		case VM_OOM_MEM:
2048 			reason = "failed to reclaim memory";
2049 			break;
2050 		case VM_OOM_MEM_PF:
2051 			reason = "a thread waited too long to allocate a page";
2052 			break;
2053 		case VM_OOM_SWAPZ:
2054 			reason = "out of swap space";
2055 			break;
2056 		default:
2057 			panic("unknown OOM reason %d", shortage);
2058 		}
2059 		if (vm_panic_on_oom != 0 && --vm_panic_on_oom == 0)
2060 			panic("%s", reason);
2061 		PROC_LOCK(bigproc);
2062 		killproc(bigproc, reason);
2063 		sched_nice(bigproc, PRIO_MIN);
2064 		_PRELE(bigproc);
2065 		PROC_UNLOCK(bigproc);
2066 	}
2067 }
2068 
2069 /*
2070  * Signal a free page shortage to subsystems that have registered an event
2071  * handler.  Reclaim memory from UMA in the event of a severe shortage.
2072  * Return true if the free page count should be re-evaluated.
2073  */
2074 static bool
2075 vm_pageout_lowmem(void)
2076 {
2077 	static int lowmem_ticks = 0;
2078 	int last;
2079 	bool ret;
2080 
2081 	ret = false;
2082 
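	/*
	 * Elect at most one thread per lowmem_period window: a failed
	 * fcmpset refreshes "last" and the loop re-evaluates the elapsed
	 * time, so losers fall through with ret still false.
	 */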
2083 	last = atomic_load_int(&lowmem_ticks);
2084 	while ((u_int)(ticks - last) / hz >= lowmem_period) {
2085 		if (atomic_fcmpset_int(&lowmem_ticks, &last, ticks) == 0)
2086 			continue;
2087 
2088 		/*
2089 		 * Decrease registered cache sizes.
2090 		 */
2091 		SDT_PROBE0(vm, , , vm__lowmem_scan);
2092 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
2093 
2094 		/*
2095 		 * Trim UMA caches explicitly, after the lowmem handlers
2096 		 * above have drained theirs.
2097 		 */
2098 		uma_reclaim(UMA_RECLAIM_TRIM);
2099 		ret = true;
2100 		break;
2101 	}
2102 
2103 	/*
2104 	 * Kick off an asynchronous reclaim of cached memory if one of the
2105 	 * page daemons is failing to keep up with demand.  Use the "severe"
2106 	 * threshold instead of "min" to ensure that we do not blow away the
2107 	 * caches if a subset of the NUMA domains are depleted by kernel memory
2108 	 * allocations; the domainset iterators automatically skip domains
2109 	 * below the "min" threshold on the first pass.
2110 	 *
2111 	 * The UMA reclaim worker has its own rate-limiting mechanism, so
2112 	 * don't worry about waking it too often.
2113 	 */
2114 	if (vm_page_count_severe())
2115 		uma_reclaim_wakeup();
2116 
2117 	return (ret);
2118 }
2119 
2120 static void
2121 vm_pageout_worker(void *arg)
2122 {
2123 	struct vm_domain *vmd;
2124 	u_int ofree;
2125 	int addl_shortage, domain, shortage;
2126 	bool target_met;
2127 
2128 	domain = (uintptr_t)arg;
2129 	vmd = VM_DOMAIN(domain);
2130 	shortage = 0;
2131 	target_met = true;
2132 
2133 	/*
2134 	 * XXXKIB It could be useful to bind pageout daemon threads to
2135 	 * the cores belonging to the domain, from which vm_page_array
2136 	 * is allocated.
2137 	 */
2138 
2139 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
2140 	vmd->vmd_last_active_scan = ticks;
2141 
2142 	/*
2143 	 * The pageout daemon worker is never done, so loop forever.
2144 	 */
2145 	while (TRUE) {
2146 		vm_domain_pageout_lock(vmd);
2147 
2148 		/*
2149 		 * We need to clear wanted before we check the limits.  This
2150 		 * prevents races with wakers who will check wanted after they
2151 		 * reach the limit.
2152 		 */
2153 		atomic_store_int(&vmd->vmd_pageout_wanted, 0);
2154 
2155 		/*
2156 		 * Might the page daemon need to run again?
2157 		 */
2158 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
2159 			/*
2160 			 * Yes.  If the scan failed to produce enough free
2161 			 * pages, sleep uninterruptibly for some time in the
2162 			 * hope that the laundry thread will clean some pages.
2163 			 */
2164 			vm_domain_pageout_unlock(vmd);
2165 			if (!target_met)
2166 				pause("pwait", hz / VM_INACT_SCAN_RATE);
2167 		} else {
2168 			/*
2169 			 * No, sleep until the next wakeup or until pages
2170 			 * need to have their reference stats updated.
2171 			 */
2172 			if (mtx_sleep(&vmd->vmd_pageout_wanted,
2173 			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
2174 			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
2175 				VM_CNT_INC(v_pdwakeups);
2176 		}
2177 
2178 		/* Prevent spurious wakeups by ensuring that wanted is set. */
2179 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2180 
2181 		/*
2182 		 * Use the controller to calculate how many pages to free in
2183 		 * this interval, and scan the inactive queue.  If the lowmem
2184 		 * handlers appear to have freed up some pages, subtract the
2185 		 * difference from the inactive queue scan target.
2186 		 */
2187 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
2188 		if (shortage > 0) {
2189 			ofree = vmd->vmd_free_count;
2190 			if (vm_pageout_lowmem() && vmd->vmd_free_count > ofree)
2191 				shortage -= min(vmd->vmd_free_count - ofree,
2192 				    (u_int)shortage);
2193 			target_met = vm_pageout_inactive(vmd, shortage,
2194 			    &addl_shortage);
2195 		} else
2196 			addl_shortage = 0;
2197 
2198 		/*
2199 		 * Scan the active queue.  A positive value for shortage
2200 		 * indicates that we must aggressively deactivate pages to avoid
2201 		 * a shortfall.
2202 		 */
2203 		shortage = vm_pageout_active_target(vmd) + addl_shortage;
2204 		vm_pageout_scan_active(vmd, shortage);
2205 	}
2206 }
2207 
2208 /*
2209  * vm_pageout_helper runs additional pageout daemons in times of high paging
2210  * activity.
2211  */
2212 static void
2213 vm_pageout_helper(void *arg)
2214 {
2215 	struct vm_domain *vmd;
2216 	int domain;
2217 
2218 	domain = (uintptr_t)arg;
2219 	vmd = VM_DOMAIN(domain);
2220 
2221 	vm_domain_pageout_lock(vmd);
2222 	for (;;) {
2223 		msleep(&vmd->vmd_inactive_shortage,
2224 		    vm_domain_pageout_lockptr(vmd), PVM, "psleep", 0);
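		/*
		 * The dispatcher acquired vmd_inactive_starting once per
		 * helper; drop this helper's reference to record that it
		 * has picked up the shortage target.
		 */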
2225 		blockcount_release(&vmd->vmd_inactive_starting, 1);
2226 
2227 		vm_domain_pageout_unlock(vmd);
2228 		vm_pageout_scan_inactive(vmd, vmd->vmd_inactive_shortage);
2229 		vm_domain_pageout_lock(vmd);
2230 
2231 		/*
2232 		 * Release the running count while the pageout lock is held to
2233 		 * prevent wakeup races.
2234 		 */
2235 		blockcount_release(&vmd->vmd_inactive_running, 1);
2236 	}
2237 }
2238 
2239 static int
2240 get_pageout_threads_per_domain(const struct vm_domain *vmd)
2241 {
2242 	unsigned total_pageout_threads, eligible_cpus, domain_cpus;
2243 
2244 	if (VM_DOMAIN_EMPTY(vmd->vmd_domain))
2245 		return (0);
2246 
2247 	/*
2248 	 * Semi-arbitrarily constrain pagedaemon threads to no more than
2249 	 * about half the total number of CPUs in the system.
2250 	 */
2251 	if (pageout_cpus_per_thread < 2)
2252 		pageout_cpus_per_thread = 2;
2253 	else if (pageout_cpus_per_thread > mp_ncpus)
2254 		pageout_cpus_per_thread = mp_ncpus;
2255 
2256 	total_pageout_threads = howmany(mp_ncpus, pageout_cpus_per_thread);
2257 	domain_cpus = CPU_COUNT(&cpuset_domain[vmd->vmd_domain]);
2258 
2259 	/* Pagedaemons are not run in empty domains. */
2260 	eligible_cpus = mp_ncpus;
2261 	for (unsigned i = 0; i < vm_ndomains; i++)
2262 		if (VM_DOMAIN_EMPTY(i))
2263 			eligible_cpus -= CPU_COUNT(&cpuset_domain[i]);
2264 
2265 	/*
2266 	 * Assign a portion of the total pageout threads to this domain
2267 	 * corresponding to the fraction of pagedaemon-eligible CPUs in the
2268 	 * domain.  In asymmetric NUMA systems, domains with more CPUs may be
2269 	 * allocated more threads than domains with fewer CPUs.
2270 	 */
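	/*
	 * For example (hypothetical numbers): two total pageout threads
	 * and a domain owning 24 of 32 eligible CPUs yields
	 * howmany(2 * 24, 32) = 2 threads for this domain.
	 */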
2271 	return (howmany(total_pageout_threads * domain_cpus, eligible_cpus));
2272 }
2273 
2274 /*
2275  * Initialize basic pageout daemon settings.  See the comment above the
2276  * definition of vm_domain for some explanation of how these thresholds are
2277  * used.
2278  */
2279 static void
2280 vm_pageout_init_domain(int domain)
2281 {
2282 	struct vm_domain *vmd;
2283 	struct sysctl_oid *oid;
2284 
2285 	vmd = VM_DOMAIN(domain);
2286 	vmd->vmd_interrupt_free_min = 2;
2287 
2288 	/*
2289 	 * v_free_reserved needs to include enough for the largest
2290 	 * swap pager structures plus enough for any pv_entry structs
2291 	 * when paging.
2292 	 */
2293 	vmd->vmd_pageout_free_min = 2 * MAXBSIZE / PAGE_SIZE +
2294 	    vmd->vmd_interrupt_free_min;
2295 	vmd->vmd_free_reserved = vm_pageout_page_count +
2296 	    vmd->vmd_pageout_free_min + vmd->vmd_page_count / 768;
2297 	vmd->vmd_free_min = vmd->vmd_page_count / 200;
2298 	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
2299 	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
2300 	vmd->vmd_free_min += vmd->vmd_free_reserved;
2301 	vmd->vmd_free_severe += vmd->vmd_free_reserved;
2302 	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
2303 	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
2304 		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
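	/*
	 * A worked example, assuming 4 KB pages, MAXBSIZE = 64 KB and
	 * vm_pageout_page_count = 32, for a domain of one million pages
	 * (~4 GB): vmd_pageout_free_min = 2 * 16 + 2 = 34,
	 * vmd_free_reserved = 32 + 34 + 1302 = 1368, vmd_free_target =
	 * 4 * 5000 + 1368 = 21368 and vmd_free_min = 5000 + 1368 = 6368.
	 */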
2305 
2306 	/*
2307 	 * Set the default wakeup threshold to be 10% below the paging
2308 	 * target.  This keeps the steady state out of shortfall.
2309 	 */
2310 	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
2311 
2312 	/*
2313 	 * Target amount of memory to move out of the laundry queue during a
2314 	 * background laundering.  This is proportional to the amount of system
2315 	 * memory.
2316 	 */
2317 	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
2318 	    vmd->vmd_free_min) / 10;
2319 
2320 	/* Initialize the pageout daemon pid controller. */
2321 	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
2322 	    vmd->vmd_free_target, PIDCTRL_BOUND,
2323 	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
2324 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
2325 	    "pidctrl", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2326 	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
2327 
2328 	vmd->vmd_inactive_threads = get_pageout_threads_per_domain(vmd);
2329 	SYSCTL_ADD_BOOL(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
2330 	    "pageout_helper_threads_enabled", CTLFLAG_RWTUN,
2331 	    &vmd->vmd_helper_threads_enabled, 0,
2332 	    "Enable multi-threaded inactive queue scanning");
2333 }
2334 
2335 static void
2336 vm_pageout_init(void)
2337 {
2338 	u_long freecount;
2339 	int i;
2340 
2341 	/*
2342 	 * Initialize some paging parameters.
2343 	 */
2344 	freecount = 0;
2345 	for (i = 0; i < vm_ndomains; i++) {
2346 		struct vm_domain *vmd;
2347 
2348 		vm_pageout_init_domain(i);
2349 		vmd = VM_DOMAIN(i);
2350 		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
2351 		vm_cnt.v_free_target += vmd->vmd_free_target;
2352 		vm_cnt.v_free_min += vmd->vmd_free_min;
2353 		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
2354 		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
2355 		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
2356 		vm_cnt.v_free_severe += vmd->vmd_free_severe;
2357 		freecount += vmd->vmd_free_count;
2358 	}
2359 
2360 	/*
2361 	 * Set the interval, in seconds, for the active scan.  We want to
2362 	 * visit each page at least once every ten minutes.  This prevents
2363 	 * worst-case paging behavior caused by a stale active LRU.
2364 	 */
2365 	if (vm_pageout_update_period == 0)
2366 		vm_pageout_update_period = 600;
2367 
2368 	/*
2369 	 * Set the maximum number of user-wired virtual pages.  Historically the
2370 	 * main source of such pages was mlock(2) and mlockall(2).  Hypervisors
2371 	 * may also request user-wired memory.
2372 	 */
2373 	if (vm_page_max_user_wired == 0)
2374 		vm_page_max_user_wired = 4 * freecount / 5;
2375 }
2376 
2377 /*
2378  *	vm_pageout is the high-level pageout daemon.
2379  */
2380 static void
2381 vm_pageout(void)
2382 {
2383 	struct proc *p;
2384 	struct thread *td;
2385 	int error, first, i, j, pageout_threads;
2386 
2387 	p = curproc;
2388 	td = curthread;
2389 
2390 	mtx_init(&vm_oom_ratelim_mtx, "vmoomr", NULL, MTX_DEF);
2391 	swap_pager_swap_init();
2392 	for (first = -1, i = 0; i < vm_ndomains; i++) {
2393 		if (VM_DOMAIN_EMPTY(i)) {
2394 			if (bootverbose)
2395 				printf("domain %d empty; skipping pageout\n",
2396 				    i);
2397 			continue;
2398 		}
2399 		if (first == -1)
2400 			first = i;
2401 		else {
2402 			error = kthread_add(vm_pageout_worker,
2403 			    (void *)(uintptr_t)i, p, NULL, 0, 0, "dom%d", i);
2404 			if (error != 0)
2405 				panic("starting pageout for domain %d: %d\n",
2406 				    i, error);
2407 		}
2408 		pageout_threads = VM_DOMAIN(i)->vmd_inactive_threads;
2409 		for (j = 0; j < pageout_threads - 1; j++) {
2410 			error = kthread_add(vm_pageout_helper,
2411 			    (void *)(uintptr_t)i, p, NULL, 0, 0,
2412 			    "dom%d helper%d", i, j);
2413 			if (error != 0)
2414 				panic("starting pageout helper %d for domain "
2415 				    "%d: %d\n", j, i, error);
2416 		}
2417 		error = kthread_add(vm_pageout_laundry_worker,
2418 		    (void *)(uintptr_t)i, p, NULL, 0, 0, "laundry: dom%d", i);
2419 		if (error != 0)
2420 			panic("starting laundry for domain %d: %d", i, error);
2421 	}
2422 	error = kthread_add(uma_reclaim_worker, NULL, p, NULL, 0, 0, "uma");
2423 	if (error != 0)
2424 		panic("starting uma_reclaim helper, error %d\n", error);
2425 
2426 	snprintf(td->td_name, sizeof(td->td_name), "dom%d", first);
2427 	vm_pageout_worker((void *)(uintptr_t)first);
2428 }
2429 
2430 /*
2431  * Perform an advisory wakeup of the page daemon.
2432  */
2433 void
2434 pagedaemon_wakeup(int domain)
2435 {
2436 	struct vm_domain *vmd;
2437 
2438 	vmd = VM_DOMAIN(domain);
2439 	vm_domain_pageout_assert_unlocked(vmd);
2440 	if (curproc == pageproc)
2441 		return;
2442 
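	/*
	 * The fetchadd acts as a test-and-set: only the caller that
	 * transitions vmd_pageout_wanted from zero takes the pageout lock
	 * and issues the wakeup; concurrent wakers see a nonzero value
	 * and return immediately.
	 */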
2443 	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
2444 		vm_domain_pageout_lock(vmd);
2445 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
2446 		wakeup(&vmd->vmd_pageout_wanted);
2447 		vm_domain_pageout_unlock(vmd);
2448 	}
2449 }
2450