xref: /freebsd/sys/vm/vm_pageout.c (revision 2a2234c0f41da33b8cfc938e46b54a8234b64135)
1 /*-
2  * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  * Copyright (c) 2005 Yahoo! Technologies Norway AS
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * The Mach Operating System project at Carnegie-Mellon University.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by the University of
27  *	California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
45  *
46  *
47  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
48  * All rights reserved.
49  *
50  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
51  *
52  * Permission to use, copy, modify and distribute this software and
53  * its documentation is hereby granted, provided that both the copyright
54  * notice and this permission notice appear in all copies of the
55  * software, derivative works or modified versions, and any portions
56  * thereof, and that both notices appear in supporting documentation.
57  *
58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
61  *
62  * Carnegie Mellon requests users of this software to return to
63  *
64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
65  *  School of Computer Science
66  *  Carnegie Mellon University
67  *  Pittsburgh PA 15213-3890
68  *
69  * any improvements or extensions that they make and grant Carnegie the
70  * rights to redistribute these changes.
71  */
72 
73 /*
74  *	The proverbial page-out daemon.
75  */
76 
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
79 
80 #include "opt_vm.h"
81 
82 #include <sys/param.h>
83 #include <sys/systm.h>
84 #include <sys/kernel.h>
85 #include <sys/eventhandler.h>
86 #include <sys/lock.h>
87 #include <sys/mutex.h>
88 #include <sys/proc.h>
89 #include <sys/kthread.h>
90 #include <sys/ktr.h>
91 #include <sys/mount.h>
92 #include <sys/racct.h>
93 #include <sys/resourcevar.h>
94 #include <sys/sched.h>
95 #include <sys/sdt.h>
96 #include <sys/signalvar.h>
97 #include <sys/smp.h>
98 #include <sys/time.h>
99 #include <sys/vnode.h>
100 #include <sys/vmmeter.h>
101 #include <sys/rwlock.h>
102 #include <sys/sx.h>
103 #include <sys/sysctl.h>
104 
105 #include <vm/vm.h>
106 #include <vm/vm_param.h>
107 #include <vm/vm_object.h>
108 #include <vm/vm_page.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_pageout.h>
111 #include <vm/vm_pager.h>
112 #include <vm/vm_phys.h>
113 #include <vm/vm_pagequeue.h>
114 #include <vm/swap_pager.h>
115 #include <vm/vm_extern.h>
116 #include <vm/uma.h>
117 
118 /*
119  * System initialization
120  */
121 
122 /* the kernel process "vm_pageout" */
123 static void vm_pageout(void);
124 static void vm_pageout_init(void);
125 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
126 static int vm_pageout_cluster(vm_page_t m);
127 static bool vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage);
128 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
129     int starting_page_shortage);
130 
131 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
132     NULL);
133 
134 struct proc *pageproc;
135 
136 static struct kproc_desc page_kp = {
137 	"pagedaemon",
138 	vm_pageout,
139 	&pageproc
140 };
141 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
142     &page_kp);
143 
144 SDT_PROVIDER_DEFINE(vm);
145 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
146 
147 /* Pagedaemon activity rates, in subdivisions of one second. */
148 #define	VM_LAUNDER_RATE		10
149 #define	VM_INACT_SCAN_RATE	10
150 
151 static int vm_pageout_oom_seq = 12;
152 
153 static int vm_pageout_update_period;
154 static int disable_swap_pageouts;
155 static int lowmem_period = 10;
156 static time_t lowmem_uptime;
157 static int swapdev_enabled;
158 
159 static int vm_panic_on_oom = 0;
160 
161 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
162 	CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
163 	"panic on out of memory instead of killing the largest process");
164 
165 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
166 	CTLFLAG_RWTUN, &vm_pageout_update_period, 0,
167 	"Maximum active LRU update period");
168 
169 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RWTUN, &lowmem_period, 0,
170 	"Low memory callback period");
171 
172 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
173 	CTLFLAG_RWTUN, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
174 
175 static int pageout_lock_miss;
176 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
177 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
178 
179 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
180 	CTLFLAG_RWTUN, &vm_pageout_oom_seq, 0,
181 	"back-to-back calls to oom detector to start OOM");
182 
183 static int act_scan_laundry_weight = 3;
184 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RWTUN,
185     &act_scan_laundry_weight, 0,
186     "weight given to clean vs. dirty pages in active queue scans");
187 
188 static u_int vm_background_launder_rate = 4096;
189 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RWTUN,
190     &vm_background_launder_rate, 0,
191     "background laundering rate, in kilobytes per second");
192 
193 static u_int vm_background_launder_max = 20 * 1024;
194 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RWTUN,
195     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
196 
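/*
 * Target size, in pages, of a single cluster built by vm_pageout_cluster()
 * and passed to vm_pageout_flush().
 */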
197 int vm_pageout_page_count = 32;
198 
199 int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
200 SYSCTL_INT(_vm, OID_AUTO, max_wired,
201 	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
202 
203 static u_int isqrt(u_int num);
204 static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
205 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
206     bool in_shortfall);
207 static void vm_pageout_laundry_worker(void *arg);
208 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
209 
210 /*
211  * vm_pageout_fallback_object_lock:
212  *
213  * Lock the vm object currently associated with `m'.  VM_OBJECT_TRYWLOCK is
214  * known to have failed, and the page queue must be either PQ_ACTIVE or
215  * PQ_INACTIVE.  To avoid a lock order violation, unlock the page queue
216  * while locking the vm object.  Use a marker page to detect page queue
217  * changes and maintain the notion of the next page on the queue.  Return
218  * TRUE if no changes were detected, FALSE otherwise.  The vm object is
219  * locked on return.
220  *
221  * This function depends on both the lock portion of struct vm_object
222  * and normal struct vm_page being type stable.
223  */
224 static boolean_t
225 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
226 {
227 	struct vm_page marker;
228 	struct vm_pagequeue *pq;
229 	boolean_t unchanged;
230 	vm_object_t object;
231 	int queue;
232 
233 	queue = m->queue;
234 	vm_page_init_marker(&marker, queue);
235 	pq = vm_page_pagequeue(m);
236 	object = m->object;
237 
238 	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
239 	vm_pagequeue_unlock(pq);
240 	vm_page_unlock(m);
241 	VM_OBJECT_WLOCK(object);
242 	vm_page_lock(m);
243 	vm_pagequeue_lock(pq);
244 
245 	/*
246 	 * The page's object might have changed, and/or the page might
247 	 * have moved from its original position in the queue.  If the
248 	 * page's object has changed, then the caller should abandon
249 	 * processing the page because the wrong object lock was
250 	 * acquired.  Use the marker's plinks.q, not the page's, to
251 	 * determine if the page has been moved.  The state of the
252 	 * page's plinks.q can be indeterminate, whereas the marker's
253 	 * plinks.q must be valid.
254 	 */
255 	*next = TAILQ_NEXT(&marker, plinks.q);
256 	unchanged = m->object == object &&
257 	    m == TAILQ_PREV(&marker, pglist, plinks.q);
258 	KASSERT(!unchanged || m->queue == queue,
259 	    ("page %p queue %d %d", m, queue, m->queue));
260 	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
261 	return (unchanged);
262 }
263 
264 /*
265  * Lock the page while holding the page queue lock.  Use a marker page
266  * to detect page queue changes and maintain the notion of the next page
267  * on the queue.  Return TRUE if no changes were detected, FALSE
268  * otherwise.  The page is locked on return.  The page queue lock might
269  * be dropped and reacquired.
270  *
271  * This function depends on normal struct vm_page being type stable.
272  */
273 static boolean_t
274 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
275 {
276 	struct vm_page marker;
277 	struct vm_pagequeue *pq;
278 	boolean_t unchanged;
279 	int queue;
280 
281 	vm_page_lock_assert(m, MA_NOTOWNED);
282 	if (vm_page_trylock(m))
283 		return (TRUE);
284 
285 	queue = m->queue;
286 	vm_page_init_marker(&marker, queue);
287 	pq = vm_page_pagequeue(m);
288 
289 	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
290 	vm_pagequeue_unlock(pq);
291 	vm_page_lock(m);
292 	vm_pagequeue_lock(pq);
293 
294 	/* Page queue might have changed. */
295 	*next = TAILQ_NEXT(&marker, plinks.q);
296 	unchanged = m == TAILQ_PREV(&marker, pglist, plinks.q);
297 	KASSERT(!unchanged || m->queue == queue,
298 	    ("page %p queue %d %d", m, queue, m->queue));
299 	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
300 	return (unchanged);
301 }
302 
303 /*
304  * Scan for pages at adjacent offsets within the given page's object that are
305  * eligible for laundering, form a cluster of these pages and the given page,
306  * and launder that cluster.
307  */
308 static int
309 vm_pageout_cluster(vm_page_t m)
310 {
311 	vm_object_t object;
312 	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
313 	vm_pindex_t pindex;
314 	int ib, is, page_base, pageout_count;
315 
316 	vm_page_assert_locked(m);
317 	object = m->object;
318 	VM_OBJECT_ASSERT_WLOCKED(object);
319 	pindex = m->pindex;
320 
321 	vm_page_assert_unbusied(m);
322 	KASSERT(!vm_page_held(m), ("page %p is held", m));
323 
324 	pmap_remove_write(m);
325 	vm_page_unlock(m);
326 
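	/*
	 * Place the target page in the middle of mc[] so that the cluster
	 * can grow both backward (tracked by pb/ib) and forward (tracked by
	 * ps/is) from it; page_base records the start of the final run.
	 */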
327 	mc[vm_pageout_page_count] = pb = ps = m;
328 	pageout_count = 1;
329 	page_base = vm_pageout_page_count;
330 	ib = 1;
331 	is = 1;
332 
333 	/*
334 	 * We can cluster only if the page is dirty, is neither busy nor
335 	 * held, and is in the laundry queue.
336 	 *
337 	 * During heavy mmap/modification loads the pageout
338 	 * daemon can really fragment the underlying file
339 	 * due to flushing pages out of order and not trying to
340 	 * align the clusters (which leaves sporadic out-of-order
341 	 * holes).  To solve this problem we do the reverse scan
342 	 * first and attempt to align our cluster, then do a
343 	 * forward scan if room remains.
344 	 */
345 more:
346 	while (ib != 0 && pageout_count < vm_pageout_page_count) {
347 		if (ib > pindex) {
348 			ib = 0;
349 			break;
350 		}
351 		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
352 			ib = 0;
353 			break;
354 		}
355 		vm_page_test_dirty(p);
356 		if (p->dirty == 0) {
357 			ib = 0;
358 			break;
359 		}
360 		vm_page_lock(p);
361 		if (!vm_page_in_laundry(p) || vm_page_held(p)) {
362 			vm_page_unlock(p);
363 			ib = 0;
364 			break;
365 		}
366 		pmap_remove_write(p);
367 		vm_page_unlock(p);
368 		mc[--page_base] = pb = p;
369 		++pageout_count;
370 		++ib;
371 
372 		/*
373 		 * We are at an alignment boundary.  Stop here, and switch
374 		 * directions.  Do not clear ib.
375 		 */
376 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
377 			break;
378 	}
379 	while (pageout_count < vm_pageout_page_count &&
380 	    pindex + is < object->size) {
381 		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
382 			break;
383 		vm_page_test_dirty(p);
384 		if (p->dirty == 0)
385 			break;
386 		vm_page_lock(p);
387 		if (!vm_page_in_laundry(p) || vm_page_held(p)) {
388 			vm_page_unlock(p);
389 			break;
390 		}
391 		pmap_remove_write(p);
392 		vm_page_unlock(p);
393 		mc[page_base + pageout_count] = ps = p;
394 		++pageout_count;
395 		++is;
396 	}
397 
398 	/*
399 	 * If we exhausted our forward scan, continue with the reverse scan
400 	 * when possible, even past an alignment boundary.  This catches
401 	 * boundary conditions.
402 	 */
403 	if (ib != 0 && pageout_count < vm_pageout_page_count)
404 		goto more;
405 
406 	return (vm_pageout_flush(&mc[page_base], pageout_count,
407 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
408 }
409 
410 /*
411  * vm_pageout_flush() - launder the given pages
412  *
413  *	The given pages are laundered.  Note that we set up for the start of
414  *	I/O (i.e., busy the page), mark it read-only, and bump the object
415  *	reference count all in here rather than in the parent.  If we want
416  *	the parent to do more sophisticated things, we may have to change
417  *	the ordering.
418  *
419  *	The returned runlen is the count of pages between mreq and the first
420  *	page after mreq with status VM_PAGER_AGAIN.
421  *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
422  *	VM_PAGER_FAIL for any page in the runlen set.
423  */
424 int
425 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
426     boolean_t *eio)
427 {
428 	vm_object_t object = mc[0]->object;
429 	int pageout_status[count];
430 	int numpagedout = 0;
431 	int i, runlen;
432 
433 	VM_OBJECT_ASSERT_WLOCKED(object);
434 
435 	/*
436 	 * Initiate I/O.  Mark the pages busy and verify that they're valid
437 	 * and read-only.
438 	 *
439 	 * We do not have to fixup the clean/dirty bits here... we can
440 	 * allow the pager to do it after the I/O completes.
441 	 *
442 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
443 	 * edge case with file fragments.
444 	 */
445 	for (i = 0; i < count; i++) {
446 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
447 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
448 			mc[i], i, count));
449 		KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
450 		    ("vm_pageout_flush: writeable page %p", mc[i]));
451 		vm_page_sbusy(mc[i]);
452 	}
453 	vm_object_pip_add(object, count);
454 
455 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
456 
457 	runlen = count - mreq;
458 	if (eio != NULL)
459 		*eio = FALSE;
460 	for (i = 0; i < count; i++) {
461 		vm_page_t mt = mc[i];
462 
463 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
464 		    !pmap_page_is_write_mapped(mt),
465 		    ("vm_pageout_flush: page %p is not write protected", mt));
466 		switch (pageout_status[i]) {
467 		case VM_PAGER_OK:
468 			vm_page_lock(mt);
469 			if (vm_page_in_laundry(mt))
470 				vm_page_deactivate_noreuse(mt);
471 			vm_page_unlock(mt);
472 			/* FALLTHROUGH */
473 		case VM_PAGER_PEND:
474 			numpagedout++;
475 			break;
476 		case VM_PAGER_BAD:
477 			/*
478 			 * The page is outside the object's range.  We pretend
479 			 * that the page out worked and clean the page, so the
480 			 * changes will be lost if the page is reclaimed by
481 			 * the page daemon.
482 			 */
483 			vm_page_undirty(mt);
484 			vm_page_lock(mt);
485 			if (vm_page_in_laundry(mt))
486 				vm_page_deactivate_noreuse(mt);
487 			vm_page_unlock(mt);
488 			break;
489 		case VM_PAGER_ERROR:
490 		case VM_PAGER_FAIL:
491 			/*
492 			 * If the page couldn't be paged out to swap because the
493 			 * pager wasn't able to find space, place the page in
494 			 * the PQ_UNSWAPPABLE holding queue.  This is an
495 			 * optimization that prevents the page daemon from
496 			 * wasting CPU cycles on pages that cannot be reclaimed
497 			 * because no swap device is configured.
498 			 *
499 			 * Otherwise, reactivate the page so that it doesn't
500 			 * clog the laundry and inactive queues.  (We will try
501 			 * paging it out again later.)
502 			 */
503 			vm_page_lock(mt);
504 			if (object->type == OBJT_SWAP &&
505 			    pageout_status[i] == VM_PAGER_FAIL) {
506 				vm_page_unswappable(mt);
507 				numpagedout++;
508 			} else
509 				vm_page_activate(mt);
510 			vm_page_unlock(mt);
511 			if (eio != NULL && i >= mreq && i - mreq < runlen)
512 				*eio = TRUE;
513 			break;
514 		case VM_PAGER_AGAIN:
515 			if (i >= mreq && i - mreq < runlen)
516 				runlen = i - mreq;
517 			break;
518 		}
519 
520 		/*
521 		 * If the operation is still going, leave the page busy to
522 		 * block all other accesses. Also, leave the paging in
523 		 * progress indicator set so that we don't attempt an object
524 		 * collapse.
525 		 */
526 		if (pageout_status[i] != VM_PAGER_PEND) {
527 			vm_object_pip_wakeup(object);
528 			vm_page_sunbusy(mt);
529 		}
530 	}
531 	if (prunlen != NULL)
532 		*prunlen = runlen;
533 	return (numpagedout);
534 }
535 
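/*
 * The swapon/swapoff event handlers below track whether at least one swap
 * device is configured.  The laundry thread consults swapdev_enabled so
 * that it can skip the PQ_UNSWAPPABLE queue when there is no swap device
 * to page out to.
 */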
536 static void
537 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
538 {
539 
540 	atomic_store_rel_int(&swapdev_enabled, 1);
541 }
542 
543 static void
544 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
545 {
546 
547 	if (swap_pager_nswapdev() == 1)
548 		atomic_store_rel_int(&swapdev_enabled, 0);
549 }
550 
551 /*
552  * Attempt to acquire all of the necessary locks to launder a page and
553  * then call through the clustering layer to PUTPAGES.  Wait a short
554  * time for a vnode lock.
555  *
556  * Requires the page and object lock on entry, releases both before return.
557  * Returns 0 on success and an errno otherwise.
558  */
559 static int
560 vm_pageout_clean(vm_page_t m, int *numpagedout)
561 {
562 	struct vnode *vp;
563 	struct mount *mp;
564 	vm_object_t object;
565 	vm_pindex_t pindex;
566 	int error, lockmode;
567 
568 	vm_page_assert_locked(m);
569 	object = m->object;
570 	VM_OBJECT_ASSERT_WLOCKED(object);
571 	error = 0;
572 	vp = NULL;
573 	mp = NULL;
574 
575 	/*
576 	 * The object is already known NOT to be dead.   It
577 	 * is possible for the vget() to block the whole
578 	 * pageout daemon, but the new low-memory handling
579 	 * code should prevent it.
580 	 *
581 	 * We can't wait forever for the vnode lock, we might
582 	 * deadlock due to a vn_read() getting stuck in
583 	 * vm_wait while holding this vnode.  We skip the
584 	 * vnode if we can't get it in a reasonable amount
585 	 * of time.
586 	 */
587 	if (object->type == OBJT_VNODE) {
588 		vm_page_unlock(m);
589 		vp = object->handle;
590 		if (vp->v_type == VREG &&
591 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
592 			mp = NULL;
593 			error = EDEADLK;
594 			goto unlock_all;
595 		}
596 		KASSERT(mp != NULL,
597 		    ("vp %p with NULL v_mount", vp));
598 		vm_object_reference_locked(object);
599 		pindex = m->pindex;
600 		VM_OBJECT_WUNLOCK(object);
601 		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
602 		    LK_SHARED : LK_EXCLUSIVE;
603 		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
604 			vp = NULL;
605 			error = EDEADLK;
606 			goto unlock_mp;
607 		}
608 		VM_OBJECT_WLOCK(object);
609 
610 		/*
611 		 * Ensure that the object and vnode were not disassociated
612 		 * while locks were dropped.
613 		 */
614 		if (vp->v_object != object) {
615 			error = ENOENT;
616 			goto unlock_all;
617 		}
618 		vm_page_lock(m);
619 
620 		/*
621 		 * While the object and page were unlocked, the page
622 		 * may have been:
623 		 * (1) moved to a different queue,
624 		 * (2) reallocated to a different object,
625 		 * (3) reallocated to a different offset, or
626 		 * (4) cleaned.
627 		 */
628 		if (!vm_page_in_laundry(m) || m->object != object ||
629 		    m->pindex != pindex || m->dirty == 0) {
630 			vm_page_unlock(m);
631 			error = ENXIO;
632 			goto unlock_all;
633 		}
634 
635 		/*
636 		 * The page may have been busied or referenced while the object
637 		 * and page locks were released.
638 		 */
639 		if (vm_page_busied(m) || vm_page_held(m)) {
640 			vm_page_unlock(m);
641 			error = EBUSY;
642 			goto unlock_all;
643 		}
644 	}
645 
646 	/*
647 	 * If a page is dirty, then it is either being washed
648 	 * (but not yet cleaned) or it is still in the
649 	 * laundry.  If it is still in the laundry, then we
650 	 * start the cleaning operation.
651 	 */
652 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
653 		error = EIO;
654 
655 unlock_all:
656 	VM_OBJECT_WUNLOCK(object);
657 
658 unlock_mp:
659 	vm_page_lock_assert(m, MA_NOTOWNED);
660 	if (mp != NULL) {
661 		if (vp != NULL)
662 			vput(vp);
663 		vm_object_deallocate(object);
664 		vn_finished_write(mp);
665 	}
666 
667 	return (error);
668 }
669 
670 /*
671  * Attempt to launder the specified number of pages.
672  *
673  * Returns the number of pages successfully laundered.
674  */
675 static int
676 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
677 {
678 	struct vm_pagequeue *pq;
679 	vm_object_t object;
680 	vm_page_t m, marker, next;
681 	int act_delta, error, maxscan, numpagedout, queue, starting_target;
682 	int vnodes_skipped;
683 	bool pageout_ok, queue_locked;
684 
685 	starting_target = launder;
686 	vnodes_skipped = 0;
687 
688 	/*
689 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
690 	 * once the target number of dirty pages have been laundered, or once
691 	 * we've reached the end of the queue.  A single iteration of this loop
692 	 * may cause more than one page to be laundered because of clustering.
693 	 *
694 	 * maxscan ensures that we don't re-examine requeued pages.  Any
695 	 * additional pages written as part of a cluster are subtracted from
696 	 * maxscan since they must be taken from the laundry queue.
697 	 *
698 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
699 	 * swap devices are configured.
700 	 */
701 	if (atomic_load_acq_int(&swapdev_enabled))
702 		queue = PQ_UNSWAPPABLE;
703 	else
704 		queue = PQ_LAUNDRY;
705 
706 scan:
707 	pq = &vmd->vmd_pagequeues[queue];
708 	marker = &vmd->vmd_markers[queue];
709 
710 	vm_pagequeue_lock(pq);
711 	maxscan = pq->pq_cnt;
712 	queue_locked = true;
713 	for (m = TAILQ_FIRST(&pq->pq_pl);
714 	    m != NULL && maxscan-- > 0 && launder > 0;
715 	    m = next) {
716 		vm_pagequeue_assert_locked(pq);
717 		KASSERT(queue_locked, ("unlocked laundry queue"));
718 		KASSERT(vm_page_in_laundry(m),
719 		    ("page %p has an inconsistent queue", m));
720 		next = TAILQ_NEXT(m, plinks.q);
721 		if ((m->flags & PG_MARKER) != 0)
722 			continue;
723 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
724 		    ("PG_FICTITIOUS page %p cannot be in laundry queue", m));
725 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
726 		    ("VPO_UNMANAGED page %p cannot be in laundry queue", m));
727 		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
728 			vm_page_unlock(m);
729 			continue;
730 		}
731 		if (m->wire_count != 0) {
732 			vm_page_dequeue_locked(m);
733 			vm_page_unlock(m);
734 			continue;
735 		}
736 		object = m->object;
737 		if ((!VM_OBJECT_TRYWLOCK(object) &&
738 		    (!vm_pageout_fallback_object_lock(m, &next) ||
739 		    vm_page_held(m))) || vm_page_busied(m)) {
740 			VM_OBJECT_WUNLOCK(object);
741 			if (m->wire_count != 0 && vm_page_pagequeue(m) == pq)
742 				vm_page_dequeue_locked(m);
743 			vm_page_unlock(m);
744 			continue;
745 		}
746 
747 		/*
748 		 * Unlock the laundry queue, invalidating the 'next' pointer.
749 		 * Use a marker to remember our place in the laundry queue.
750 		 */
751 		TAILQ_INSERT_AFTER(&pq->pq_pl, m, marker, plinks.q);
752 		vm_pagequeue_unlock(pq);
753 		queue_locked = false;
754 
755 		/*
756 		 * Invalid pages can be easily freed.  They cannot be
757 		 * mapped; vm_page_free() asserts this.
758 		 */
759 		if (m->valid == 0)
760 			goto free_page;
761 
762 		/*
763 		 * If the page has been referenced and the object is not dead,
764 		 * reactivate or requeue the page depending on whether the
765 		 * object is mapped.
766 		 */
767 		if ((m->aflags & PGA_REFERENCED) != 0) {
768 			vm_page_aflag_clear(m, PGA_REFERENCED);
769 			act_delta = 1;
770 		} else
771 			act_delta = 0;
772 		if (object->ref_count != 0)
773 			act_delta += pmap_ts_referenced(m);
774 		else {
775 			KASSERT(!pmap_page_is_mapped(m),
776 			    ("page %p is mapped", m));
777 		}
778 		if (act_delta != 0) {
779 			if (object->ref_count != 0) {
780 				VM_CNT_INC(v_reactivated);
781 				vm_page_activate(m);
782 
783 				/*
784 				 * Increase the activation count if the page
785 				 * was referenced while in the laundry queue.
786 				 * This makes it less likely that the page will
787 				 * be returned prematurely to the inactive
788 				 * queue.
789  				 */
790 				m->act_count += act_delta + ACT_ADVANCE;
791 
792 				/*
793 				 * If this was a background laundering, count
794 				 * activated pages towards our target.  The
795 				 * purpose of background laundering is to ensure
796 				 * that pages are eventually cycled through the
797 				 * laundry queue, and an activation is a valid
798 				 * way out.
799 				 */
800 				if (!in_shortfall)
801 					launder--;
802 				goto drop_page;
803 			} else if ((object->flags & OBJ_DEAD) == 0)
804 				goto requeue_page;
805 		}
806 
807 		/*
808 		 * If the page appears to be clean at the machine-independent
809 		 * layer, then remove all of its mappings from the pmap in
810 		 * anticipation of freeing it.  If, however, any of the page's
811 		 * mappings allow write access, then the page may still be
812 		 * modified until the last of those mappings are removed.
813 		 */
814 		if (object->ref_count != 0) {
815 			vm_page_test_dirty(m);
816 			if (m->dirty == 0)
817 				pmap_remove_all(m);
818 		}
819 
820 		/*
821 		 * Clean pages are freed, and dirty pages are paged out unless
822 		 * they belong to a dead object.  Requeueing dirty pages from
823 		 * dead objects is pointless, as they are being paged out and
824 		 * freed by the thread that destroyed the object.
825 		 */
826 		if (m->dirty == 0) {
827 free_page:
828 			vm_page_free(m);
829 			VM_CNT_INC(v_dfree);
830 		} else if ((object->flags & OBJ_DEAD) == 0) {
831 			if (object->type != OBJT_SWAP &&
832 			    object->type != OBJT_DEFAULT)
833 				pageout_ok = true;
834 			else if (disable_swap_pageouts)
835 				pageout_ok = false;
836 			else
837 				pageout_ok = true;
838 			if (!pageout_ok) {
839 requeue_page:
840 				vm_pagequeue_lock(pq);
841 				queue_locked = true;
842 				vm_page_requeue_locked(m);
843 				goto drop_page;
844 			}
845 
846 			/*
847 			 * Form a cluster with adjacent, dirty pages from the
848 			 * same object, and page out that entire cluster.
849 			 *
850 			 * The adjacent, dirty pages must also be in the
851 			 * laundry.  However, their mappings are not checked
852 			 * for new references.  Consequently, a recently
853 			 * referenced page may be paged out.  However, that
854 			 * page will not be prematurely reclaimed.  After page
855 			 * out, the page will be placed in the inactive queue,
856 			 * where any new references will be detected and the
857 			 * page reactivated.
858 			 */
859 			error = vm_pageout_clean(m, &numpagedout);
860 			if (error == 0) {
861 				launder -= numpagedout;
862 				maxscan -= numpagedout - 1;
863 			} else if (error == EDEADLK) {
864 				pageout_lock_miss++;
865 				vnodes_skipped++;
866 			}
867 			goto relock_queue;
868 		}
869 drop_page:
870 		vm_page_unlock(m);
871 		VM_OBJECT_WUNLOCK(object);
872 relock_queue:
873 		if (!queue_locked) {
874 			vm_pagequeue_lock(pq);
875 			queue_locked = true;
876 		}
877 		next = TAILQ_NEXT(marker, plinks.q);
878 		TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
879 	}
880 	vm_pagequeue_unlock(pq);
881 
882 	if (launder > 0 && queue == PQ_UNSWAPPABLE) {
883 		queue = PQ_LAUNDRY;
884 		goto scan;
885 	}
886 
887 	/*
888 	 * Wake up the sync daemon if we skipped a vnode in a writeable object
889 	 * and we didn't launder enough pages.
890 	 */
891 	if (vnodes_skipped > 0 && launder > 0)
892 		(void)speedup_syncer();
893 
894 	return (starting_target - launder);
895 }
896 
897 /*
898  * Compute the integer square root.
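 *
 * This is the standard bit-by-bit method: each loop iteration consumes two
 * bits of the argument and determines one bit of the root.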
899  */
900 static u_int
901 isqrt(u_int num)
902 {
903 	u_int bit, root, tmp;
904 
905 	bit = 1u << ((NBBY * sizeof(u_int)) - 2);
906 	while (bit > num)
907 		bit >>= 2;
908 	root = 0;
909 	while (bit != 0) {
910 		tmp = root + bit;
911 		root >>= 1;
912 		if (num >= tmp) {
913 			num -= tmp;
914 			root += bit;
915 		}
916 		bit >>= 2;
917 	}
918 	return (root);
919 }
920 
921 /*
922  * Perform the work of the laundry thread: periodically wake up and determine
923  * whether any pages need to be laundered.  If so, determine the number of pages
924  * that need to be laundered, and launder them.
925  */
926 static void
927 vm_pageout_laundry_worker(void *arg)
928 {
929 	struct vm_domain *vmd;
930 	struct vm_pagequeue *pq;
931 	uint64_t nclean, ndirty, nfreed;
932 	int domain, last_target, launder, shortfall, shortfall_cycle, target;
933 	bool in_shortfall;
934 
935 	domain = (uintptr_t)arg;
936 	vmd = VM_DOMAIN(domain);
937 	pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
938 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
939 
940 	shortfall = 0;
941 	in_shortfall = false;
942 	shortfall_cycle = 0;
943 	target = 0;
944 	nfreed = 0;
945 
946 	/*
947 	 * Calls to these handlers are serialized by the swap syscall lock.
948 	 */
949 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, vmd,
950 	    EVENTHANDLER_PRI_ANY);
951 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, vmd,
952 	    EVENTHANDLER_PRI_ANY);
953 
954 	/*
955 	 * The pageout laundry worker is never done, so loop forever.
956 	 */
957 	for (;;) {
958 		KASSERT(target >= 0, ("negative target %d", target));
959 		KASSERT(shortfall_cycle >= 0,
960 		    ("negative cycle %d", shortfall_cycle));
961 		launder = 0;
962 
963 		/*
964 		 * First determine whether we need to launder pages to meet a
965 		 * shortage of free pages.
966 		 */
967 		if (shortfall > 0) {
968 			in_shortfall = true;
969 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
970 			target = shortfall;
971 		} else if (!in_shortfall)
972 			goto trybackground;
973 		else if (shortfall_cycle == 0 || vm_laundry_target(vmd) <= 0) {
974 			/*
975 			 * We recently entered shortfall and began laundering
976 			 * pages.  If we have completed that laundering run
977 			 * (and we are no longer in shortfall) or we have met
978 			 * our laundry target through other activity, then we
979 			 * can stop laundering pages.
980 			 */
981 			in_shortfall = false;
982 			target = 0;
983 			goto trybackground;
984 		}
985 		launder = target / shortfall_cycle--;
986 		goto dolaundry;
987 
988 		/*
989 		 * There's no immediate need to launder any pages; see if we
990 		 * meet the conditions to perform background laundering:
991 		 *
992 		 * 1. The ratio of dirty to clean inactive pages exceeds the
993 		 *    background laundering threshold, or
994 		 * 2. we haven't yet reached the target of the current
995 		 *    background laundering run.
996 		 *
997 		 * The background laundering threshold is not a constant.
998 		 * Instead, it is a slowly growing function of the number of
999 		 * clean pages freed by the page daemon since the last
1000 		 * background laundering.  Thus, as the ratio of dirty to
1001 		 * clean inactive pages grows, the amount of memory pressure
1002 		 * required to trigger laundering decreases.  We ensure
1003 		 * that the threshold is non-zero after an inactive queue
1004 		 * scan, even if that scan failed to free a single clean page.
1005 		 */
1006 trybackground:
1007 		nclean = vmd->vmd_free_count +
1008 		    vmd->vmd_pagequeues[PQ_INACTIVE].pq_cnt;
1009 		ndirty = vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt;
1010 		if (target == 0 && ndirty * isqrt(howmany(nfreed + 1,
1011 		    vmd->vmd_free_target - vmd->vmd_free_min)) >= nclean) {
1012 			target = vmd->vmd_background_launder_target;
1013 		}
1014 
1015 		/*
1016 		 * We have a non-zero background laundering target.  If we've
1017 		 * laundered up to our maximum without observing a page daemon
1018 		 * request, just stop.  This is a safety belt that ensures we
1019 		 * don't launder an excessive amount if memory pressure is low
1020 		 * and the ratio of dirty to clean pages is large.  Otherwise,
1021 		 * proceed at the background laundering rate.
1022 		 */
1023 		if (target > 0) {
1024 			if (nfreed > 0) {
1025 				nfreed = 0;
1026 				last_target = target;
1027 			} else if (last_target - target >=
1028 			    vm_background_launder_max * PAGE_SIZE / 1024) {
1029 				target = 0;
1030 			}
1031 			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1032 			launder /= VM_LAUNDER_RATE;
1033 			if (launder > target)
1034 				launder = target;
1035 		}
1036 
1037 dolaundry:
1038 		if (launder > 0) {
1039 			/*
1040 			 * Because of I/O clustering, the number of laundered
1041 			 * pages could exceed "target" by the maximum size of
1042 			 * a cluster minus one.
1043 			 */
1044 			target -= min(vm_pageout_launder(vmd, launder,
1045 			    in_shortfall), target);
1046 			pause("laundp", hz / VM_LAUNDER_RATE);
1047 		}
1048 
1049 		/*
1050 		 * If we're not currently laundering pages and the page daemon
1051 		 * hasn't posted a new request, sleep until the page daemon
1052 		 * kicks us.
1053 		 */
1054 		vm_pagequeue_lock(pq);
1055 		if (target == 0 && vmd->vmd_laundry_request == VM_LAUNDRY_IDLE)
1056 			(void)mtx_sleep(&vmd->vmd_laundry_request,
1057 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1058 
1059 		/*
1060 		 * If the pagedaemon has indicated that it's in shortfall, start
1061 		 * a shortfall laundering unless we're already in the middle of
1062 		 * one.  This may preempt a background laundering.
1063 		 */
1064 		if (vmd->vmd_laundry_request == VM_LAUNDRY_SHORTFALL &&
1065 		    (!in_shortfall || shortfall_cycle == 0)) {
1066 			shortfall = vm_laundry_target(vmd) +
1067 			    vmd->vmd_pageout_deficit;
1068 			target = 0;
1069 		} else
1070 			shortfall = 0;
1071 
1072 		if (target == 0)
1073 			vmd->vmd_laundry_request = VM_LAUNDRY_IDLE;
1074 		nfreed += vmd->vmd_clean_pages_freed;
1075 		vmd->vmd_clean_pages_freed = 0;
1076 		vm_pagequeue_unlock(pq);
1077 	}
1078 }
1079 
1080 /*
1081  *	vm_pageout_scan does the dirty work for the pageout daemon.
1082  *
1083  *	pass == 0: Update active LRU/deactivate pages
1084  *	pass >= 1: Free inactive pages
1085  *
1086  * Returns true if pass was zero or enough pages were freed by the inactive
1087  * queue scan to meet the target.
1088  */
1089 static bool
1090 vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage)
1091 {
1092 	vm_page_t m, marker, next;
1093 	struct vm_pagequeue *pq;
1094 	vm_object_t object;
1095 	long min_scan;
1096 	int act_delta, addl_page_shortage, deficit, inactq_shortage, maxscan;
1097 	int page_shortage, scan_tick, scanned, starting_page_shortage;
1098 	boolean_t queue_locked;
1099 
1100 	/*
1101 	 * If we need to reclaim memory, ask the kernel caches to return
1102 	 * some.  We rate limit to avoid thrashing.
1103 	 */
1104 	if (vmd == VM_DOMAIN(0) && pass > 0 &&
1105 	    (time_uptime - lowmem_uptime) >= lowmem_period) {
1106 		/*
1107 		 * Decrease registered cache sizes.
1108 		 */
1109 		SDT_PROBE0(vm, , , vm__lowmem_scan);
1110 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
1111 		/*
1112 		 * We do this explicitly after the caches have been
1113 		 * drained above.
1114 		 */
1115 		uma_reclaim();
1116 		lowmem_uptime = time_uptime;
1117 	}
1118 
1119 	/*
1120 	 * The addl_page_shortage is the number of temporarily
1121 	 * stuck pages in the inactive queue.  In other words, the
1122 	 * number of pages from the inactive count that should be
1123 	 * discounted in setting the target for the active queue scan.
1124 	 */
1125 	addl_page_shortage = 0;
1126 
1127 	/*
1128 	 * Calculate the number of pages that we want to free.  This number
1129 	 * can be negative if many pages are freed between the wakeup call to
1130 	 * the page daemon and this calculation.
1131 	 */
1132 	if (pass > 0) {
1133 		deficit = atomic_readandclear_int(&vmd->vmd_pageout_deficit);
1134 		page_shortage = shortage + deficit;
1135 	} else
1136 		page_shortage = deficit = 0;
1137 	starting_page_shortage = page_shortage;
1138 
1139 	/*
1140 	 * Start scanning the inactive queue for pages that we can free.  The
1141 	 * scan will stop when we reach the target or we have scanned the
1142 	 * entire queue.  (Note that m->act_count is not used to make
1143 	 * decisions for the inactive queue, only for the active queue.)
1144 	 */
1145 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1146 	marker = &vmd->vmd_markers[PQ_INACTIVE];
1147 	maxscan = pq->pq_cnt;
1148 	vm_pagequeue_lock(pq);
1149 	queue_locked = TRUE;
1150 	for (m = TAILQ_FIRST(&pq->pq_pl);
1151 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
1152 	     m = next) {
1153 		vm_pagequeue_assert_locked(pq);
1154 		KASSERT(queue_locked, ("unlocked inactive queue"));
1155 		KASSERT(vm_page_inactive(m), ("Inactive queue %p", m));
1156 
1157 		VM_CNT_INC(v_pdpages);
1158 		next = TAILQ_NEXT(m, plinks.q);
1159 
1160 		/*
1161 		 * skip marker pages
1162 		 */
1163 		if (m->flags & PG_MARKER)
1164 			continue;
1165 
1166 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
1167 		    ("Fictitious page %p cannot be in inactive queue", m));
1168 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1169 		    ("Unmanaged page %p cannot be in inactive queue", m));
1170 
1171 		/*
1172 		 * The page or object lock acquisitions fail if the
1173 		 * page was removed from the queue or moved to a
1174 		 * different position within the queue.  In either
1175 		 * case, addl_page_shortage should not be incremented.
1176 		 */
1177 		if (!vm_pageout_page_lock(m, &next))
1178 			goto unlock_page;
1179 		else if (m->wire_count != 0) {
1180 			/*
1181 			 * Wired pages may not be freed, and unwiring a queued
1182 			 * page will cause it to be requeued.  Thus, remove them
1183 			 * from the queue now to avoid unnecessary revisits.
1184 			 */
1185 			vm_page_dequeue_locked(m);
1186 			addl_page_shortage++;
1187 			goto unlock_page;
1188 		} else if (m->hold_count != 0) {
1189 			/*
1190 			 * Held pages are essentially stuck in the
1191 			 * queue.  So, they ought to be discounted
1192 			 * from the inactive count.  See the
1193 			 * calculation of inactq_shortage before the
1194 			 * loop over the active queue below.
1195 			 */
1196 			addl_page_shortage++;
1197 			goto unlock_page;
1198 		}
1199 		object = m->object;
1200 		if (!VM_OBJECT_TRYWLOCK(object)) {
1201 			if (!vm_pageout_fallback_object_lock(m, &next))
1202 				goto unlock_object;
1203 			else if (m->wire_count != 0) {
1204 				vm_page_dequeue_locked(m);
1205 				addl_page_shortage++;
1206 				goto unlock_object;
1207 			} else if (m->hold_count != 0) {
1208 				addl_page_shortage++;
1209 				goto unlock_object;
1210 			}
1211 		}
1212 		if (vm_page_busied(m)) {
1213 			/*
1214 			 * Don't mess with busy pages.  Leave them at
1215 			 * the front of the queue.  Most likely, they
1216 			 * are being paged out and will leave the
1217 			 * queue shortly after the scan finishes.  So,
1218 			 * they ought to be discounted from the
1219 			 * inactive count.
1220 			 */
1221 			addl_page_shortage++;
1222 unlock_object:
1223 			VM_OBJECT_WUNLOCK(object);
1224 unlock_page:
1225 			vm_page_unlock(m);
1226 			continue;
1227 		}
1228 		KASSERT(!vm_page_held(m), ("Held page %p", m));
1229 
1230 		/*
1231 		 * Dequeue the inactive page and unlock the inactive page
1232 		 * queue, invalidating the 'next' pointer.  Dequeueing the
1233 		 * page here avoids a later reacquisition (and release) of
1234 		 * the inactive page queue lock when vm_page_activate(),
1235 		 * vm_page_free(), or vm_page_launder() is called.  Use a
1236 		 * marker to remember our place in the inactive queue.
1237 		 */
1238 		TAILQ_INSERT_AFTER(&pq->pq_pl, m, marker, plinks.q);
1239 		vm_page_dequeue_locked(m);
1240 		vm_pagequeue_unlock(pq);
1241 		queue_locked = FALSE;
1242 
1243 		/*
1244 		 * Invalid pages can be easily freed. They cannot be
1245 		 * mapped, vm_page_free() asserts this.
1246 		 */
1247 		if (m->valid == 0)
1248 			goto free_page;
1249 
1250 		/*
1251 		 * If the page has been referenced and the object is not dead,
1252 		 * reactivate or requeue the page depending on whether the
1253 		 * object is mapped.
1254 		 */
1255 		if ((m->aflags & PGA_REFERENCED) != 0) {
1256 			vm_page_aflag_clear(m, PGA_REFERENCED);
1257 			act_delta = 1;
1258 		} else
1259 			act_delta = 0;
1260 		if (object->ref_count != 0) {
1261 			act_delta += pmap_ts_referenced(m);
1262 		} else {
1263 			KASSERT(!pmap_page_is_mapped(m),
1264 			    ("vm_pageout_scan: page %p is mapped", m));
1265 		}
1266 		if (act_delta != 0) {
1267 			if (object->ref_count != 0) {
1268 				VM_CNT_INC(v_reactivated);
1269 				vm_page_activate(m);
1270 
1271 				/*
1272 				 * Increase the activation count if the page
1273 				 * was referenced while in the inactive queue.
1274 				 * This makes it less likely that the page will
1275 				 * be returned prematurely to the inactive
1276 				 * queue.
1277  				 */
1278 				m->act_count += act_delta + ACT_ADVANCE;
1279 				goto drop_page;
1280 			} else if ((object->flags & OBJ_DEAD) == 0) {
1281 				vm_pagequeue_lock(pq);
1282 				queue_locked = TRUE;
1283 				m->queue = PQ_INACTIVE;
1284 				TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
1285 				vm_pagequeue_cnt_inc(pq);
1286 				goto drop_page;
1287 			}
1288 		}
1289 
1290 		/*
1291 		 * If the page appears to be clean at the machine-independent
1292 		 * layer, then remove all of its mappings from the pmap in
1293 		 * anticipation of freeing it.  If, however, any of the page's
1294 		 * mappings allow write access, then the page may still be
1295 		 * modified until the last of those mappings are removed.
1296 		 */
1297 		if (object->ref_count != 0) {
1298 			vm_page_test_dirty(m);
1299 			if (m->dirty == 0)
1300 				pmap_remove_all(m);
1301 		}
1302 
1303 		/*
1304 		 * Clean pages can be freed, but dirty pages must be sent back
1305 		 * to the laundry, unless they belong to a dead object.
1306 		 * Requeueing dirty pages from dead objects is pointless, as
1307 		 * they are being paged out and freed by the thread that
1308 		 * destroyed the object.
1309 		 */
1310 		if (m->dirty == 0) {
1311 free_page:
1312 			vm_page_free(m);
1313 			VM_CNT_INC(v_dfree);
1314 			--page_shortage;
1315 		} else if ((object->flags & OBJ_DEAD) == 0)
1316 			vm_page_launder(m);
1317 drop_page:
1318 		vm_page_unlock(m);
1319 		VM_OBJECT_WUNLOCK(object);
1320 		if (!queue_locked) {
1321 			vm_pagequeue_lock(pq);
1322 			queue_locked = TRUE;
1323 		}
1324 		next = TAILQ_NEXT(marker, plinks.q);
1325 		TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
1326 	}
1327 	vm_pagequeue_unlock(pq);
1328 
1329 	/*
1330 	 * Wake up the laundry thread so that it can perform any needed
1331 	 * laundering.  If we didn't meet our target, we're in shortfall and
1332 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1333 	 * swap devices are configured, the laundry thread has no work to do, so
1334 	 * don't bother waking it up.
1335 	 *
1336 	 * The laundry thread uses the number of inactive queue scans elapsed
1337 	 * since the last laundering to determine whether to launder again, so
1338 	 * keep count.
1339 	 */
1340 	if (starting_page_shortage > 0) {
1341 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1342 		vm_pagequeue_lock(pq);
1343 		if (vmd->vmd_laundry_request == VM_LAUNDRY_IDLE &&
1344 		    (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled))) {
1345 			if (page_shortage > 0) {
1346 				vmd->vmd_laundry_request = VM_LAUNDRY_SHORTFALL;
1347 				VM_CNT_INC(v_pdshortfalls);
1348 			} else if (vmd->vmd_laundry_request !=
1349 			    VM_LAUNDRY_SHORTFALL)
1350 				vmd->vmd_laundry_request =
1351 				    VM_LAUNDRY_BACKGROUND;
1352 			wakeup(&vmd->vmd_laundry_request);
1353 		}
1354 		vmd->vmd_clean_pages_freed +=
1355 		    starting_page_shortage - page_shortage;
1356 		vm_pagequeue_unlock(pq);
1357 	}
1358 
1359 	/*
1360 	 * Wake up the swapout daemon if we didn't free the targeted number of
1361 	 * pages.
1362 	 */
1363 	if (page_shortage > 0)
1364 		vm_swapout_run();
1365 
1366 	/*
1367 	 * If the inactive queue scan fails repeatedly to meet its
1368 	 * target, kill the largest process.
1369 	 */
1370 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1371 
1372 	/*
1373 	 * Compute the number of pages we want to try to move from the
1374 	 * active queue to either the inactive or laundry queue.
1375 	 *
1376 	 * When scanning active pages, we make clean pages count more heavily
1377 	 * towards the page shortage than dirty pages.  This is because dirty
1378 	 * pages must be laundered before they can be reused and thus have less
1379 	 * utility when attempting to quickly alleviate a shortage.  However,
1380 	 * this weighting also causes the scan to deactivate dirty pages more
1381 	 * aggressively, improving the effectiveness of clustering and
1382 	 * ensuring that they can eventually be reused.
1383 	 */
1384 	inactq_shortage = vmd->vmd_inactive_target - (pq->pq_cnt +
1385 	    vmd->vmd_pagequeues[PQ_LAUNDRY].pq_cnt / act_scan_laundry_weight) +
1386 	    vm_paging_target(vmd) + deficit + addl_page_shortage;
1387 	inactq_shortage *= act_scan_laundry_weight;
1388 
1389 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1390 	vm_pagequeue_lock(pq);
1391 	maxscan = pq->pq_cnt;
1392 
1393 	/*
1394 	 * If we're just idle polling, attempt to visit every
1395 	 * active page within 'update_period' seconds.
1396 	 */
1397 	scan_tick = ticks;
1398 	if (vm_pageout_update_period != 0) {
1399 		min_scan = pq->pq_cnt;
1400 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
1401 		min_scan /= hz * vm_pageout_update_period;
1402 	} else
1403 		min_scan = 0;
1404 	if (min_scan > 0 || (inactq_shortage > 0 && maxscan > 0))
1405 		vmd->vmd_last_active_scan = scan_tick;
1406 
1407 	/*
1408 	 * Scan the active queue for pages that can be deactivated.  Update
1409 	 * the per-page activity counter and use it to identify deactivation
1410 	 * candidates.  Held pages may be deactivated.
1411 	 */
1412 	for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
1413 	    min_scan || (inactq_shortage > 0 && scanned < maxscan)); m = next,
1414 	    scanned++) {
1415 		KASSERT(m->queue == PQ_ACTIVE,
1416 		    ("vm_pageout_scan: page %p isn't active", m));
1417 		next = TAILQ_NEXT(m, plinks.q);
1418 		if ((m->flags & PG_MARKER) != 0)
1419 			continue;
1420 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
1421 		    ("Fictitious page %p cannot be in active queue", m));
1422 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1423 		    ("Unmanaged page %p cannot be in active queue", m));
1424 		if (!vm_pageout_page_lock(m, &next)) {
1425 			vm_page_unlock(m);
1426 			continue;
1427 		}
1428 
1429 		/*
1430 		 * The count for page daemon pages is updated after checking
1431 		 * the page for eligibility.
1432 		 */
1433 		VM_CNT_INC(v_pdpages);
1434 
1435 		/*
1436 		 * Wired pages are dequeued lazily.
1437 		 */
1438 		if (m->wire_count != 0) {
1439 			vm_page_dequeue_locked(m);
1440 			vm_page_unlock(m);
1441 			continue;
1442 		}
1443 
1444 		/*
1445 		 * Check to see "how much" the page has been used.
1446 		 */
1447 		if ((m->aflags & PGA_REFERENCED) != 0) {
1448 			vm_page_aflag_clear(m, PGA_REFERENCED);
1449 			act_delta = 1;
1450 		} else
1451 			act_delta = 0;
1452 
1453 		/*
1454 		 * Perform an unsynchronized object ref count check.  While
1455 		 * the page lock ensures that the page is not reallocated to
1456 		 * another object, in particular, one with unmanaged mappings
1457 		 * that cannot support pmap_ts_referenced(), two races are,
1458 		 * nonetheless, possible:
1459 		 * 1) The count was transitioning to zero, but we saw a non-
1460 		 *    zero value.  pmap_ts_referenced() will return zero
1461 		 *    because the page is not mapped.
1462 		 * 2) The count was transitioning to one, but we saw zero.
1463 		 *    This race delays the detection of a new reference.  At
1464 		 *    worst, we will deactivate and reactivate the page.
1465 		 */
1466 		if (m->object->ref_count != 0)
1467 			act_delta += pmap_ts_referenced(m);
1468 
1469 		/*
1470 		 * Advance or decay the act_count based on recent usage.
1471 		 */
1472 		if (act_delta != 0) {
1473 			m->act_count += ACT_ADVANCE + act_delta;
1474 			if (m->act_count > ACT_MAX)
1475 				m->act_count = ACT_MAX;
1476 		} else
1477 			m->act_count -= min(m->act_count, ACT_DECLINE);
1478 
1479 		/*
1480 		 * Move this page to the tail of the active, inactive or laundry
1481 		 * queue depending on usage.
1482 		 */
1483 		if (m->act_count == 0) {
1484 			/* Dequeue to avoid later lock recursion. */
1485 			vm_page_dequeue_locked(m);
1486 
1487 			/*
1488 			 * When not short for inactive pages, let dirty pages go
1489 			 * through the inactive queue before moving to the
1490 			 * laundry queues.  This gives them some extra time to
1491 			 * be reactivated, potentially avoiding an expensive
1492 			 * pageout.  During a page shortage, the inactive queue
1493 			 * is necessarily small, so we may move dirty pages
1494 			 * directly to the laundry queue.
1495 			 */
1496 			if (inactq_shortage <= 0)
1497 				vm_page_deactivate(m);
1498 			else {
1499 				/*
1500 				 * Calling vm_page_test_dirty() here would
1501 				 * require acquisition of the object's write
1502 				 * lock.  However, during a page shortage,
1503 				 * directing dirty pages into the laundry
1504 				 * queue is only an optimization and not a
1505 				 * requirement.  Therefore, we simply rely on
1506 				 * the opportunistic updates to the page's
1507 				 * dirty field by the pmap.
1508 				 */
1509 				if (m->dirty == 0) {
1510 					vm_page_deactivate(m);
1511 					inactq_shortage -=
1512 					    act_scan_laundry_weight;
1513 				} else {
1514 					vm_page_launder(m);
1515 					inactq_shortage--;
1516 				}
1517 			}
1518 		} else
1519 			vm_page_requeue_locked(m);
1520 		vm_page_unlock(m);
1521 	}
1522 	vm_pagequeue_unlock(pq);
1523 	if (pass > 0)
1524 		vm_swapout_run_idle();
1525 	return (page_shortage <= 0);
1526 }
1527 
1528 static int vm_pageout_oom_vote;
1529 
1530 /*
1531  * The pagedaemon threads randomly select one to perform the
1532  * OOM.  Trying to kill processes before all pagedaemons have
1533  * failed to reach the free target is premature.
1534  */
1535 static void
1536 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1537     int starting_page_shortage)
1538 {
1539 	int old_vote;
1540 
1541 	if (starting_page_shortage <= 0 || starting_page_shortage !=
1542 	    page_shortage)
1543 		vmd->vmd_oom_seq = 0;
1544 	else
1545 		vmd->vmd_oom_seq++;
1546 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1547 		if (vmd->vmd_oom) {
1548 			vmd->vmd_oom = FALSE;
1549 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1550 		}
1551 		return;
1552 	}
1553 
1554 	/*
1555 	 * Do not follow the call sequence until the OOM condition is
1556 	 * cleared.
1557 	 */
1558 	vmd->vmd_oom_seq = 0;
1559 
1560 	if (vmd->vmd_oom)
1561 		return;
1562 
1563 	vmd->vmd_oom = TRUE;
1564 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1565 	if (old_vote != vm_ndomains - 1)
1566 		return;
1567 
1568 	/*
1569 	 * The current pagedaemon thread is the last in the quorum to
1570 	 * start OOM.  Initiate the selection and signaling of the
1571 	 * victim.
1572 	 */
1573 	vm_pageout_oom(VM_OOM_MEM);
1574 
1575 	/*
1576 	 * After one round of OOM terror, recall our vote.  On the
1577 	 * next pass, the current pagedaemon will vote again if the low
1578 	 * memory condition is still present, since vmd_oom is now
1579 	 * false.
1580 	 */
1581 	vmd->vmd_oom = FALSE;
1582 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
1583 }
1584 
1585 /*
1586  * The OOM killer is the page daemon's action of last resort when
1587  * memory allocation requests have been stalled for a prolonged period
1588  * of time because it cannot reclaim memory.  This function computes
1589  * the approximate number of physical pages that could be reclaimed if
1590  * the specified address space is destroyed.
1591  *
1592  * Private, anonymous memory owned by the address space is the
1593  * principal resource that we expect to recover after an OOM kill.
1594  * Since the physical pages mapped by the address space's COW entries
1595  * are typically shared pages, they are unlikely to be released and so
1596  * they are not counted.
1597  *
1598  * To get to the point where the page daemon runs the OOM killer, its
1599  * efforts to write-back vnode-backed pages may have stalled.  This
1600  * could be caused by a memory allocation deadlock in the write path
1601  * that might be resolved by an OOM kill.  Therefore, physical pages
1602  * belonging to vnode-backed objects are counted, because they might
1603  * be freed without being written out first if the address space holds
1604  * the last reference to an unlinked vnode.
1605  *
1606  * Similarly, physical pages belonging to OBJT_PHYS objects are
1607  * counted because the address space might hold the last reference to
1608  * the object.
1609  */
1610 static long
1611 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1612 {
1613 	vm_map_t map;
1614 	vm_map_entry_t entry;
1615 	vm_object_t obj;
1616 	long res;
1617 
1618 	map = &vmspace->vm_map;
1619 	KASSERT(!map->system_map, ("system map"));
1620 	sx_assert(&map->lock, SA_LOCKED);
1621 	res = 0;
1622 	for (entry = map->header.next; entry != &map->header;
1623 	    entry = entry->next) {
1624 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1625 			continue;
1626 		obj = entry->object.vm_object;
1627 		if (obj == NULL)
1628 			continue;
1629 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1630 		    obj->ref_count != 1)
1631 			continue;
1632 		switch (obj->type) {
1633 		case OBJT_DEFAULT:
1634 		case OBJT_SWAP:
1635 		case OBJT_PHYS:
1636 		case OBJT_VNODE:
1637 			res += obj->resident_page_count;
1638 			break;
1639 		}
1640 	}
1641 	return (res);
1642 }
1643 
1644 void
1645 vm_pageout_oom(int shortage)
1646 {
1647 	struct proc *p, *bigproc;
1648 	vm_offset_t size, bigsize;
1649 	struct thread *td;
1650 	struct vmspace *vm;
1651 	bool breakout;
1652 
1653 	/*
1654 	 * We keep the process bigproc locked once we find it to keep anyone
1655 	 * from messing with it; however, there is a possibility of
1656 	 * deadlock if process B is bigproc and one of its child processes
1657 	 * attempts to propagate a signal to B while we are waiting for A's
1658 	 * lock while walking this list.  To avoid this, we don't block on
1659 	 * the process lock but just skip a process if it is already locked.
1660 	 */
1661 	bigproc = NULL;
1662 	bigsize = 0;
1663 	sx_slock(&allproc_lock);
1664 	FOREACH_PROC_IN_SYSTEM(p) {
1665 		PROC_LOCK(p);
1666 
1667 		/*
1668 		 * If this is a system, protected or killed process, skip it.
1669 		 */
1670 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1671 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1672 		    p->p_pid == 1 || P_KILLED(p) ||
1673 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
1674 			PROC_UNLOCK(p);
1675 			continue;
1676 		}
1677 		/*
1678 		 * Don't touch a process unless all of its threads are in a
1679 		 * known, stable state.  Check each thread individually.
1680 		 */
1681 		breakout = false;
1682 		FOREACH_THREAD_IN_PROC(p, td) {
1683 			thread_lock(td);
1684 			if (!TD_ON_RUNQ(td) &&
1685 			    !TD_IS_RUNNING(td) &&
1686 			    !TD_IS_SLEEPING(td) &&
1687 			    !TD_IS_SUSPENDED(td) &&
1688 			    !TD_IS_SWAPPED(td)) {
1689 				thread_unlock(td);
1690 				breakout = true;
1691 				break;
1692 			}
1693 			thread_unlock(td);
1694 		}
1695 		if (breakout) {
1696 			PROC_UNLOCK(p);
1697 			continue;
1698 		}
1699 		/*
1700 		 * Estimate how much memory killing this process would free.
1701 		 */
1702 		vm = vmspace_acquire_ref(p);
1703 		if (vm == NULL) {
1704 			PROC_UNLOCK(p);
1705 			continue;
1706 		}
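		/*
		 * Put a hold on the process so that its proc structure
		 * stays valid while the allproc lock is dropped around the
		 * vm map examination below; the scan then resumes safely
		 * from p once the allproc lock is reacquired.
		 */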
1707 		_PHOLD_LITE(p);
1708 		PROC_UNLOCK(p);
1709 		sx_sunlock(&allproc_lock);
1710 		if (!vm_map_trylock_read(&vm->vm_map)) {
1711 			vmspace_free(vm);
1712 			sx_slock(&allproc_lock);
1713 			PRELE(p);
1714 			continue;
1715 		}
1716 		size = vmspace_swap_count(vm);
1717 		if (shortage == VM_OOM_MEM)
1718 			size += vm_pageout_oom_pagecount(vm);
1719 		vm_map_unlock_read(&vm->vm_map);
1720 		vmspace_free(vm);
1721 		sx_slock(&allproc_lock);
1722 
1723 		/*
1724 		 * If this process is bigger than the biggest one,
1725 		 * remember it.
1726 		 */
1727 		if (size > bigsize) {
1728 			if (bigproc != NULL)
1729 				PRELE(bigproc);
1730 			bigproc = p;
1731 			bigsize = size;
1732 		} else {
1733 			PRELE(p);
1734 		}
1735 	}
1736 	sx_sunlock(&allproc_lock);
1737 	if (bigproc != NULL) {
1738 		if (vm_panic_on_oom != 0)
1739 			panic("out of swap space");
1740 		PROC_LOCK(bigproc);
1741 		killproc(bigproc, "out of swap space");
1742 		sched_nice(bigproc, PRIO_MIN);
1743 		_PRELE(bigproc);
1744 		PROC_UNLOCK(bigproc);
1745 	}
1746 }
1747 
1748 static void
1749 vm_pageout_worker(void *arg)
1750 {
1751 	struct vm_domain *vmd;
1752 	int domain, pass, shortage;
1753 	bool target_met;
1754 
1755 	domain = (uintptr_t)arg;
1756 	vmd = VM_DOMAIN(domain);
1757 	pass = 0;
1758 	shortage = 0;
1759 	target_met = true;
1760 
1761 	/*
1762 	 * XXXKIB It could be useful to bind pageout daemon threads to
1763 	 * the cores belonging to the domain, from which vm_page_array
1764 	 * is allocated.
1765 	 */
1766 
1767 	KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
1768 	vmd->vmd_last_active_scan = ticks;
1769 
1770 	/*
1771 	 * The pageout daemon worker is never done, so loop forever.
1772 	 */
1773 	while (TRUE) {
1774 		vm_domain_pageout_lock(vmd);
1775 		/*
1776 		 * We need to clear wanted before we check the limits.  This
1777 		 * prevents races with wakers who will check wanted after they
1778 		 * reach the limit.
1779 		 */
1780 		atomic_store_int(&vmd->vmd_pageout_wanted, 0);
1781 
1782 		/*
1783 		 * Might the page daemon need to run again?
1784 		 */
1785 		if (vm_paging_needed(vmd, vmd->vmd_free_count)) {
1786 			/*
1787 			 * Yes, the scan failed to free enough pages.  If a
1788 			 * previous reclamation scan has already missed the
1789 			 * target, sleep a bit before trying again.
1790 			 */
1791 			vm_domain_pageout_unlock(vmd);
1792 			if (pass > 1)
1793 				pause("pwait", hz / VM_INACT_SCAN_RATE);
1794 		} else {
1795 			/*
1796 			 * No, sleep until the next wakeup or until pages
1797 			 * need to have their reference stats updated.
1798 			 */
1799 			if (mtx_sleep(&vmd->vmd_pageout_wanted,
1800 			    vm_domain_pageout_lockptr(vmd), PDROP | PVM,
1801 			    "psleep", hz / VM_INACT_SCAN_RATE) == 0)
1802 				VM_CNT_INC(v_pdwakeups);
1803 		}
1804 		/* Prevent spurious wakeups by ensuring that wanted is set. */
1805 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
1806 
1807 		/*
1808 		 * Use the controller to calculate how many pages to free in
1809 		 * this interval.
1810 		 */
1811 		shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
1812 		if (shortage && pass == 0)
1813 			pass = 1;
1814 
1815 		target_met = vm_pageout_scan(vmd, pass, shortage);
1816 		/*
1817 		 * If the target was not met, bump the pass so that the
1818 		 * next scan reclaims more aggressively.
1819 		 */
1820 		if (!target_met)
1821 			pass++;
1822 	}
1823 }
1824 
1825 /*
1826  *	vm_pageout_init_domain initializes basic per-domain pageout settings.
1827  */
1828 static void
1829 vm_pageout_init_domain(int domain)
1830 {
1831 	struct vm_domain *vmd;
1832 	struct sysctl_oid *oid;
1833 
1834 	vmd = VM_DOMAIN(domain);
1835 	vmd->vmd_interrupt_free_min = 2;
1836 
1837 	/*
1838 	 * v_free_reserved needs to include enough for the largest
1839 	 * swap pager structures plus enough for any pv_entry structs
1840 	 * when paging.
1841 	 */
1842 	if (vmd->vmd_page_count > 1024)
1843 		vmd->vmd_free_min = 4 + (vmd->vmd_page_count - 1024) / 200;
1844 	else
1845 		vmd->vmd_free_min = 4;
1846 	vmd->vmd_pageout_free_min = (2 * MAXBSIZE) / PAGE_SIZE +
1847 	    vmd->vmd_interrupt_free_min;
1848 	vmd->vmd_free_reserved = vm_pageout_page_count +
1849 	    vmd->vmd_pageout_free_min + (vmd->vmd_page_count / 768);
1850 	vmd->vmd_free_severe = vmd->vmd_free_min / 2;
1851 	vmd->vmd_free_target = 4 * vmd->vmd_free_min + vmd->vmd_free_reserved;
1852 	vmd->vmd_free_min += vmd->vmd_free_reserved;
1853 	vmd->vmd_free_severe += vmd->vmd_free_reserved;
1854 	vmd->vmd_inactive_target = (3 * vmd->vmd_free_target) / 2;
1855 	if (vmd->vmd_inactive_target > vmd->vmd_free_count / 3)
1856 		vmd->vmd_inactive_target = vmd->vmd_free_count / 3;
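
	/*
	 * Illustrative figures, assuming a 1 GiB domain with 4 KiB pages
	 * (262144 pages), MAXBSIZE of 64 KiB and vm_pageout_page_count
	 * left at 32: the formulas above yield roughly 1309 pages of
	 * vmd_free_min, 34 of vmd_pageout_free_min, 407 of
	 * vmd_free_reserved and a vmd_free_target near 5643 pages (about
	 * 22 MiB); once vmd_free_reserved is added in, vmd_free_min and
	 * vmd_free_severe end up around 1716 and 1061 pages, and
	 * vmd_inactive_target at about 8464 pages (33 MiB).
	 */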
1857 
1858 	/*
1859 	 * Set the default wakeup threshold to be 10% below the paging
1860 	 * target.  This keeps the steady state out of shortfall.
1861 	 */
1862 	vmd->vmd_pageout_wakeup_thresh = (vmd->vmd_free_target / 10) * 9;
1863 
1864 	/*
1865 	 * Target amount of memory to move out of the laundry queue during a
1866 	 * background laundering.  This is proportional to the amount of system
1867 	 * memory.
1868 	 */
1869 	vmd->vmd_background_launder_target = (vmd->vmd_free_target -
1870 	    vmd->vmd_free_min) / 10;
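	/* Roughly 392 pages (1.5 MiB) for the illustrative domain above. */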
1871 
1872 	/* Initialize the pageout daemon PID controller and sysctl node. */
1873 	pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
1874 	    vmd->vmd_free_target, PIDCTRL_BOUND,
1875 	    PIDCTRL_KPD, PIDCTRL_KID, PIDCTRL_KDD);
1876 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->vmd_oid), OID_AUTO,
1877 	    "pidctrl", CTLFLAG_RD, NULL, "");
1878 	pidctrl_init_sysctl(&vmd->vmd_pid, SYSCTL_CHILDREN(oid));
1879 }
1880 
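/*
 *	vm_pageout_init initializes each domain's settings and derives the
 *	system-wide paging thresholds by summing the per-domain values.
 */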
1881 static void
1882 vm_pageout_init(void)
1883 {
1884 	u_int freecount;
1885 	int i;
1886 
1887 	/*
1888 	 * Initialize some paging parameters.
1889 	 */
1890 	if (vm_cnt.v_page_count < 2000)
1891 		vm_pageout_page_count = 8;
1892 
1893 	freecount = 0;
1894 	for (i = 0; i < vm_ndomains; i++) {
1895 		struct vm_domain *vmd;
1896 
1897 		vm_pageout_init_domain(i);
1898 		vmd = VM_DOMAIN(i);
1899 		vm_cnt.v_free_reserved += vmd->vmd_free_reserved;
1900 		vm_cnt.v_free_target += vmd->vmd_free_target;
1901 		vm_cnt.v_free_min += vmd->vmd_free_min;
1902 		vm_cnt.v_inactive_target += vmd->vmd_inactive_target;
1903 		vm_cnt.v_pageout_free_min += vmd->vmd_pageout_free_min;
1904 		vm_cnt.v_interrupt_free_min += vmd->vmd_interrupt_free_min;
1905 		vm_cnt.v_free_severe += vmd->vmd_free_severe;
1906 		freecount += vmd->vmd_free_count;
1907 	}
1908 
1909 	/*
1910 	 * Set the interval, in seconds, for the active scan.  We want to
1911 	 * visit each page at least once every ten minutes.  This prevents
1912 	 * worst-case paging behavior with a stale active LRU.
1913 	 */
1914 	if (vm_pageout_update_period == 0)
1915 		vm_pageout_update_period = 600;
1916 
1917 	if (vm_page_max_wired == 0)
1918 		vm_page_max_wired = freecount / 3;
1919 }
1920 
1921 /*
1922  *     vm_pageout is the high level pageout daemon.
1923  */
1924 static void
1925 vm_pageout(void)
1926 {
1927 	int error;
1928 	int i;
1929 
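	/*
	 * The current thread becomes domain 0's pageout worker.  Each
	 * additional domain gets its own pageout kthread, every domain
	 * gets a laundry kthread, and one uma_reclaim helper serves the
	 * whole system.
	 */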
1930 	swap_pager_swap_init();
1931 	snprintf(curthread->td_name, sizeof(curthread->td_name), "dom0");
1932 	error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL,
1933 	    0, 0, "laundry: dom0");
1934 	if (error != 0)
1935 		panic("starting laundry for domain 0, error %d", error);
1936 	for (i = 1; i < vm_ndomains; i++) {
1937 		error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
1938 		    curproc, NULL, 0, 0, "dom%d", i);
1939 		if (error != 0) {
1940 			panic("starting pageout for domain %d, error %d",
1941 			    i, error);
1942 		}
1943 		error = kthread_add(vm_pageout_laundry_worker,
1944 		    (void *)(uintptr_t)i, curproc, NULL, 0, 0,
1945 		    "laundry: dom%d", i);
1946 		if (error != 0)
1947 			panic("starting laundry for domain %d, error %d",
1948 			    i, error);
1949 	}
1950 	error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
1951 	    0, 0, "uma");
1952 	if (error != 0)
1953 		panic("starting uma_reclaim helper, error %d", error);
1954 	vm_pageout_worker((void *)(uintptr_t)0);
1955 }
1956 
1957 /*
1958  * Perform an advisory wakeup of the page daemon.
1959  */
1960 void
1961 pagedaemon_wakeup(int domain)
1962 {
1963 	struct vm_domain *vmd;
1964 
1965 	vmd = VM_DOMAIN(domain);
1966 	vm_domain_pageout_assert_unlocked(vmd);
1967 	if (curproc == pageproc)
1968 		return;
1969 
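	/*
	 * Only the caller that bumps vmd_pageout_wanted from zero takes the
	 * pageout lock and delivers the wakeup; storing 1 under the lock
	 * collapses any extra increments and ensures the page daemon cannot
	 * miss the wakeup between its check and its sleep.
	 */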
1970 	if (atomic_fetchadd_int(&vmd->vmd_pageout_wanted, 1) == 0) {
1971 		vm_domain_pageout_lock(vmd);
1972 		atomic_store_int(&vmd->vmd_pageout_wanted, 1);
1973 		wakeup(&vmd->vmd_pageout_wanted);
1974 		vm_domain_pageout_unlock(vmd);
1975 	}
1976 }
1977