xref: /freebsd/sys/vm/vm_pageout.c (revision 40427cca7a9ae77b095936fb1954417c290cfb17)
1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  * Copyright (c) 2005 Yahoo! Technologies Norway AS
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * The Mach Operating System project at Carnegie-Mellon University.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. All advertising materials mentioning features or use of this software
23  *    must display the following acknowledgement:
24  *	This product includes software developed by the University of
25  *	California, Berkeley and its contributors.
26  * 4. Neither the name of the University nor the names of its contributors
27  *    may be used to endorse or promote products derived from this software
28  *    without specific prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
43  *
44  *
45  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46  * All rights reserved.
47  *
48  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49  *
50  * Permission to use, copy, modify and distribute this software and
51  * its documentation is hereby granted, provided that both the copyright
52  * notice and this permission notice appear in all copies of the
53  * software, derivative works or modified versions, and any portions
54  * thereof, and that both notices appear in supporting documentation.
55  *
56  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
57  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
58  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59  *
60  * Carnegie Mellon requests users of this software to return to
61  *
62  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
63  *  School of Computer Science
64  *  Carnegie Mellon University
65  *  Pittsburgh PA 15213-3890
66  *
67  * any improvements or extensions that they make and grant Carnegie the
68  * rights to redistribute these changes.
69  */
70 
71 /*
72  *	The proverbial page-out daemon.
73  */
74 
75 #include <sys/cdefs.h>
76 __FBSDID("$FreeBSD$");
77 
78 #include "opt_vm.h"
79 
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/eventhandler.h>
84 #include <sys/lock.h>
85 #include <sys/mutex.h>
86 #include <sys/proc.h>
87 #include <sys/kthread.h>
88 #include <sys/ktr.h>
89 #include <sys/mount.h>
90 #include <sys/racct.h>
91 #include <sys/resourcevar.h>
92 #include <sys/sched.h>
93 #include <sys/sdt.h>
94 #include <sys/signalvar.h>
95 #include <sys/smp.h>
96 #include <sys/time.h>
97 #include <sys/vnode.h>
98 #include <sys/vmmeter.h>
99 #include <sys/rwlock.h>
100 #include <sys/sx.h>
101 #include <sys/sysctl.h>
102 
103 #include <vm/vm.h>
104 #include <vm/vm_param.h>
105 #include <vm/vm_object.h>
106 #include <vm/vm_page.h>
107 #include <vm/vm_map.h>
108 #include <vm/vm_pageout.h>
109 #include <vm/vm_pager.h>
110 #include <vm/vm_phys.h>
111 #include <vm/swap_pager.h>
112 #include <vm/vm_extern.h>
113 #include <vm/uma.h>
114 
115 /*
116  * System initialization
117  */
118 
119 /* the kernel process "vm_pageout" */
120 static void vm_pageout(void);
121 static void vm_pageout_init(void);
122 static int vm_pageout_clean(vm_page_t m, int *numpagedout);
123 static int vm_pageout_cluster(vm_page_t m);
124 static bool vm_pageout_scan(struct vm_domain *vmd, int pass);
125 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
126     int starting_page_shortage);
127 
128 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
129     NULL);
130 
131 struct proc *pageproc;
132 
133 static struct kproc_desc page_kp = {
134 	"pagedaemon",
135 	vm_pageout,
136 	&pageproc
137 };
138 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
139     &page_kp);
140 
141 SDT_PROVIDER_DEFINE(vm);
142 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
143 
144 #if !defined(NO_SWAPPING)
145 /* the kernel process "vm_daemon" */
146 static void vm_daemon(void);
147 static struct	proc *vmproc;
148 
149 static struct kproc_desc vm_kp = {
150 	"vmdaemon",
151 	vm_daemon,
152 	&vmproc
153 };
154 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
155 #endif
156 
157 /* Pagedaemon activity rates, in subdivisions of one second. */
158 #define	VM_LAUNDER_RATE		10
159 #define	VM_INACT_SCAN_RATE	2
160 
161 int vm_pageout_deficit;		/* Estimated number of pages in deficit */
162 u_int vm_pageout_wakeup_thresh;
163 static int vm_pageout_oom_seq = 12;
164 bool vm_pageout_wanted;		/* Event on which pageout daemon sleeps */
165 bool vm_pages_needed;		/* Are threads waiting for free pages? */
166 
167 /* Pending request for dirty page laundering. */
168 static enum {
169 	VM_LAUNDRY_IDLE,
170 	VM_LAUNDRY_BACKGROUND,
171 	VM_LAUNDRY_SHORTFALL
172 } vm_laundry_request = VM_LAUNDRY_IDLE;
173 
174 #if !defined(NO_SWAPPING)
175 static int vm_pageout_req_swapout;	/* XXX */
176 static int vm_daemon_needed;
177 static struct mtx vm_daemon_mtx;
178 /* Allow for use by vm_pageout before vm_daemon is initialized. */
179 MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
180 #endif
181 static int vm_pageout_update_period;
182 static int disable_swap_pageouts;
183 static int lowmem_period = 10;
184 static time_t lowmem_uptime;
185 static int swapdev_enabled;
186 
187 #if defined(NO_SWAPPING)
188 static int vm_swap_enabled = 0;
189 static int vm_swap_idle_enabled = 0;
190 #else
191 static int vm_swap_enabled = 1;
192 static int vm_swap_idle_enabled = 0;
193 #endif
194 
195 static int vm_panic_on_oom = 0;
196 
197 SYSCTL_INT(_vm, OID_AUTO, panic_on_oom,
198 	CTLFLAG_RWTUN, &vm_panic_on_oom, 0,
199 	"panic on out of memory instead of killing the largest process");
200 
201 SYSCTL_INT(_vm, OID_AUTO, pageout_wakeup_thresh,
202 	CTLFLAG_RW, &vm_pageout_wakeup_thresh, 0,
203 	"free page threshold for waking up the pageout daemon");
204 
205 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period,
206 	CTLFLAG_RW, &vm_pageout_update_period, 0,
207 	"Maximum active LRU update period");
208 
209 SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RW, &lowmem_period, 0,
210 	"Low memory callback period");
211 
212 #if defined(NO_SWAPPING)
213 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
214 	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
215 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
216 	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
217 #else
218 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
219 	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
220 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
221 	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
222 #endif
223 
224 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
225 	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
226 
227 static int pageout_lock_miss;
228 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
229 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
230 
231 SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
232 	CTLFLAG_RW, &vm_pageout_oom_seq, 0,
233 	"back-to-back calls to the OOM detector needed to start OOM");
234 
235 static int act_scan_laundry_weight = 3;
236 SYSCTL_INT(_vm, OID_AUTO, act_scan_laundry_weight, CTLFLAG_RW,
237     &act_scan_laundry_weight, 0,
238     "weight given to clean vs. dirty pages in active queue scans");
239 
240 static u_int vm_background_launder_target;
241 SYSCTL_UINT(_vm, OID_AUTO, background_launder_target, CTLFLAG_RW,
242     &vm_background_launder_target, 0,
243     "background laundering target, in pages");
244 
245 static u_int vm_background_launder_rate = 4096;
246 SYSCTL_UINT(_vm, OID_AUTO, background_launder_rate, CTLFLAG_RW,
247     &vm_background_launder_rate, 0,
248     "background laundering rate, in kilobytes per second");
249 
250 static u_int vm_background_launder_max = 20 * 1024;
251 SYSCTL_UINT(_vm, OID_AUTO, background_launder_max, CTLFLAG_RW,
252     &vm_background_launder_max, 0, "background laundering cap, in kilobytes");
253 
254 int vm_pageout_page_count = 32;
255 
256 int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
257 SYSCTL_INT(_vm, OID_AUTO, max_wired,
258 	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
259 
260 static u_int isqrt(u_int num);
261 static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
262 static int vm_pageout_launder(struct vm_domain *vmd, int launder,
263     bool in_shortfall);
264 static void vm_pageout_laundry_worker(void *arg);
265 #if !defined(NO_SWAPPING)
266 static void vm_pageout_map_deactivate_pages(vm_map_t, long);
267 static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
268 static void vm_req_vmdaemon(int req);
269 #endif
270 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
271 
272 /*
273  * Initialize a dummy page for marking the caller's place in the specified
274  * paging queue.  In principle, this function only needs to set the flag
275  * PG_MARKER.  Nonetheless, it write busies and initializes the hold count
276  * to one as safety precautions.
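 *
 * A typical caller inserts the marker after the page being examined, drops
 * the queue lock, and later resumes the scan from TAILQ_NEXT(&marker,
 * plinks.q); vm_pageout_fallback_object_lock() below follows this pattern.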
277  */
278 static void
279 vm_pageout_init_marker(vm_page_t marker, u_short queue)
280 {
281 
282 	bzero(marker, sizeof(*marker));
283 	marker->flags = PG_MARKER;
284 	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
285 	marker->queue = queue;
286 	marker->hold_count = 1;
287 }
288 
289 /*
290  * vm_pageout_fallback_object_lock:
291  *
292  * Lock vm object currently associated with `m'. VM_OBJECT_TRYWLOCK is
293  * known to have failed and page queue must be either PQ_ACTIVE or
294  * PQ_INACTIVE.  To avoid lock order violation, unlock the page queue
295  * while locking the vm object.  Use marker page to detect page queue
296  * changes and maintain notion of next page on page queue.  Return
297  * TRUE if no changes were detected, FALSE otherwise.  vm object is
298  * locked on return.
299  *
300  * This function depends on both the lock portion of struct vm_object
301  * and normal struct vm_page being type stable.
302  */
303 static boolean_t
304 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
305 {
306 	struct vm_page marker;
307 	struct vm_pagequeue *pq;
308 	boolean_t unchanged;
309 	u_short queue;
310 	vm_object_t object;
311 
312 	queue = m->queue;
313 	vm_pageout_init_marker(&marker, queue);
314 	pq = vm_page_pagequeue(m);
315 	object = m->object;
316 
317 	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
318 	vm_pagequeue_unlock(pq);
319 	vm_page_unlock(m);
320 	VM_OBJECT_WLOCK(object);
321 	vm_page_lock(m);
322 	vm_pagequeue_lock(pq);
323 
324 	/*
325 	 * The page's object might have changed, and/or the page might
326 	 * have moved from its original position in the queue.  If the
327 	 * page's object has changed, then the caller should abandon
328 	 * processing the page because the wrong object lock was
329 	 * acquired.  Use the marker's plinks.q, not the page's, to
330 	 * determine if the page has been moved.  The state of the
331 	 * page's plinks.q can be indeterminate; whereas, the marker's
332 	 * plinks.q must be valid.
333 	 */
334 	*next = TAILQ_NEXT(&marker, plinks.q);
335 	unchanged = m->object == object &&
336 	    m == TAILQ_PREV(&marker, pglist, plinks.q);
337 	KASSERT(!unchanged || m->queue == queue,
338 	    ("page %p queue %d %d", m, queue, m->queue));
339 	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
340 	return (unchanged);
341 }
342 
343 /*
344  * Lock the page while holding the page queue lock.  Use marker page
345  * to detect page queue changes and maintain notion of next page on
346  * page queue.  Return TRUE if no changes were detected, FALSE
347  * otherwise.  The page is locked on return. The page queue lock might
348  * be dropped and reacquired.
349  *
350  * This function depends on normal struct vm_page being type stable.
351  */
352 static boolean_t
353 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
354 {
355 	struct vm_page marker;
356 	struct vm_pagequeue *pq;
357 	boolean_t unchanged;
358 	u_short queue;
359 
360 	vm_page_lock_assert(m, MA_NOTOWNED);
361 	if (vm_page_trylock(m))
362 		return (TRUE);
363 
364 	queue = m->queue;
365 	vm_pageout_init_marker(&marker, queue);
366 	pq = vm_page_pagequeue(m);
367 
368 	TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
369 	vm_pagequeue_unlock(pq);
370 	vm_page_lock(m);
371 	vm_pagequeue_lock(pq);
372 
373 	/* Page queue might have changed. */
374 	*next = TAILQ_NEXT(&marker, plinks.q);
375 	unchanged = m == TAILQ_PREV(&marker, pglist, plinks.q);
376 	KASSERT(!unchanged || m->queue == queue,
377 	    ("page %p queue %d %d", m, queue, m->queue));
378 	TAILQ_REMOVE(&pq->pq_pl, &marker, plinks.q);
379 	return (unchanged);
380 }
381 
382 /*
383  * Scan for pages at adjacent offsets within the given page's object that are
384  * eligible for laundering, form a cluster of these pages and the given page,
385  * and launder that cluster.
386  */
387 static int
388 vm_pageout_cluster(vm_page_t m)
389 {
390 	vm_object_t object;
391 	vm_page_t mc[2 * vm_pageout_page_count], p, pb, ps;
392 	vm_pindex_t pindex;
393 	int ib, is, page_base, pageout_count;
394 
395 	vm_page_assert_locked(m);
396 	object = m->object;
397 	VM_OBJECT_ASSERT_WLOCKED(object);
398 	pindex = m->pindex;
399 
400 	/*
401 	 * We can't clean the page if it is busy or held.
402 	 */
403 	vm_page_assert_unbusied(m);
404 	KASSERT(m->hold_count == 0, ("page %p is held", m));
405 
406 	pmap_remove_write(m);
407 	vm_page_unlock(m);
408 
409 	mc[vm_pageout_page_count] = pb = ps = m;
410 	pageout_count = 1;
411 	page_base = vm_pageout_page_count;
412 	ib = 1;
413 	is = 1;
414 
415 	/*
416 	 * We can cluster only if the page is dirty, not busy, not held, and
417 	 * in the laundry queue.
418 	 *
419 	 * During heavy mmap/modification loads the pageout
420 	 * daemon can really fragment the underlying file
421 	 * due to flushing pages out of order and not trying to
422 	 * align the clusters (which leaves sporadic out-of-order
423 	 * holes).  To solve this problem we do the reverse scan
424 	 * first and attempt to align our cluster, then do a
425 	 * forward scan if room remains.
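	 *
	 * A worked example with hypothetical values: if vm_pageout_page_count
	 * is 32 and the target page has pindex 70, the reverse scan can take
	 * pages 69 down through 64; 64 sits on a 32-page boundary, so the
	 * scan switches direction and continues forward from pindex 71.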
426 	 */
427 more:
428 	while (ib != 0 && pageout_count < vm_pageout_page_count) {
429 		if (ib > pindex) {
430 			ib = 0;
431 			break;
432 		}
433 		if ((p = vm_page_prev(pb)) == NULL || vm_page_busied(p)) {
434 			ib = 0;
435 			break;
436 		}
437 		vm_page_test_dirty(p);
438 		if (p->dirty == 0) {
439 			ib = 0;
440 			break;
441 		}
442 		vm_page_lock(p);
443 		if (!vm_page_in_laundry(p) ||
444 		    p->hold_count != 0) {	/* may be undergoing I/O */
445 			vm_page_unlock(p);
446 			ib = 0;
447 			break;
448 		}
449 		pmap_remove_write(p);
450 		vm_page_unlock(p);
451 		mc[--page_base] = pb = p;
452 		++pageout_count;
453 		++ib;
454 
455 		/*
456 		 * We are at an alignment boundary.  Stop here, and switch
457 		 * directions.  Do not clear ib.
458 		 */
459 		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
460 			break;
461 	}
462 	while (pageout_count < vm_pageout_page_count &&
463 	    pindex + is < object->size) {
464 		if ((p = vm_page_next(ps)) == NULL || vm_page_busied(p))
465 			break;
466 		vm_page_test_dirty(p);
467 		if (p->dirty == 0)
468 			break;
469 		vm_page_lock(p);
470 		if (!vm_page_in_laundry(p) ||
471 		    p->hold_count != 0) {	/* may be undergoing I/O */
472 			vm_page_unlock(p);
473 			break;
474 		}
475 		pmap_remove_write(p);
476 		vm_page_unlock(p);
477 		mc[page_base + pageout_count] = ps = p;
478 		++pageout_count;
479 		++is;
480 	}
481 
482 	/*
483 	 * If we exhausted our forward scan, continue with the reverse scan
484 	 * when possible, even past an alignment boundary.  This catches
485 	 * boundary conditions.
486 	 */
487 	if (ib != 0 && pageout_count < vm_pageout_page_count)
488 		goto more;
489 
490 	return (vm_pageout_flush(&mc[page_base], pageout_count,
491 	    VM_PAGER_PUT_NOREUSE, 0, NULL, NULL));
492 }
493 
494 /*
495  * vm_pageout_flush() - launder the given pages
496  *
497  *	The given pages are laundered.  Note that we set up for the start of
498  *	I/O (i.e., busy the page), mark it read-only, and bump the object
499  *	reference count all in here rather than in the parent.  If we want
500  *	the parent to do more sophisticated things we may have to change
501  *	the ordering.
502  *
503  *	The returned runlen is the count of pages between mreq and the first
504  *	page after mreq with status VM_PAGER_AGAIN.
505  *	*eio is set to TRUE if the pager returned VM_PAGER_ERROR or
506  *	VM_PAGER_FAIL for any page in that run.
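 *
 *	For example (hypothetical numbers): with count == 8, mreq == 2, and
 *	the pager returning VM_PAGER_AGAIN only for index 5, the returned
 *	runlen is 3, covering pages 2 through 4.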
507  */
508 int
509 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
510     boolean_t *eio)
511 {
512 	vm_object_t object = mc[0]->object;
513 	int pageout_status[count];
514 	int numpagedout = 0;
515 	int i, runlen;
516 
517 	VM_OBJECT_ASSERT_WLOCKED(object);
518 
519 	/*
520 	 * Initiate I/O.  Mark the pages busy and verify that they're valid
521 	 * and read-only.
522 	 *
523 	 * We do not have to fixup the clean/dirty bits here... we can
524 	 * allow the pager to do it after the I/O completes.
525 	 *
526 	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
527 	 * edge case with file fragments.
528 	 */
529 	for (i = 0; i < count; i++) {
530 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
531 		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
532 			mc[i], i, count));
533 		KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
534 		    ("vm_pageout_flush: writeable page %p", mc[i]));
535 		vm_page_sbusy(mc[i]);
536 	}
537 	vm_object_pip_add(object, count);
538 
539 	vm_pager_put_pages(object, mc, count, flags, pageout_status);
540 
541 	runlen = count - mreq;
542 	if (eio != NULL)
543 		*eio = FALSE;
544 	for (i = 0; i < count; i++) {
545 		vm_page_t mt = mc[i];
546 
547 		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
548 		    !pmap_page_is_write_mapped(mt),
549 		    ("vm_pageout_flush: page %p is not write protected", mt));
550 		switch (pageout_status[i]) {
551 		case VM_PAGER_OK:
552 			vm_page_lock(mt);
553 			if (vm_page_in_laundry(mt))
554 				vm_page_deactivate_noreuse(mt);
555 			vm_page_unlock(mt);
556 			/* FALLTHROUGH */
557 		case VM_PAGER_PEND:
558 			numpagedout++;
559 			break;
560 		case VM_PAGER_BAD:
561 			/*
562 			 * The page is outside the object's range.  We pretend
563 			 * that the page out worked and clean the page, so the
564 			 * changes will be lost if the page is reclaimed by
565 			 * the page daemon.
566 			 */
567 			vm_page_undirty(mt);
568 			vm_page_lock(mt);
569 			if (vm_page_in_laundry(mt))
570 				vm_page_deactivate_noreuse(mt);
571 			vm_page_unlock(mt);
572 			break;
573 		case VM_PAGER_ERROR:
574 		case VM_PAGER_FAIL:
575 			/*
576 			 * If the page couldn't be paged out to swap because the
577 			 * pager wasn't able to find space, place the page in
578 			 * the PQ_UNSWAPPABLE holding queue.  This is an
579 			 * optimization that prevents the page daemon from
580 			 * wasting CPU cycles on pages that cannot be reclaimed
581 			 * because no swap device is configured.
582 			 *
583 			 * Otherwise, reactivate the page so that it doesn't
584 			 * clog the laundry and inactive queues.  (We will try
585 			 * paging it out again later.)
586 			 */
587 			vm_page_lock(mt);
588 			if (object->type == OBJT_SWAP &&
589 			    pageout_status[i] == VM_PAGER_FAIL) {
590 				vm_page_unswappable(mt);
591 				numpagedout++;
592 			} else
593 				vm_page_activate(mt);
594 			vm_page_unlock(mt);
595 			if (eio != NULL && i >= mreq && i - mreq < runlen)
596 				*eio = TRUE;
597 			break;
598 		case VM_PAGER_AGAIN:
599 			if (i >= mreq && i - mreq < runlen)
600 				runlen = i - mreq;
601 			break;
602 		}
603 
604 		/*
605 		 * If the operation is still going, leave the page busy to
606 		 * block all other accesses. Also, leave the paging in
607 		 * progress indicator set so that we don't attempt an object
608 		 * collapse.
609 		 */
610 		if (pageout_status[i] != VM_PAGER_PEND) {
611 			vm_object_pip_wakeup(object);
612 			vm_page_sunbusy(mt);
613 		}
614 	}
615 	if (prunlen != NULL)
616 		*prunlen = runlen;
617 	return (numpagedout);
618 }
619 
620 static void
621 vm_pageout_swapon(void *arg __unused, struct swdevt *sp __unused)
622 {
623 
624 	atomic_store_rel_int(&swapdev_enabled, 1);
625 }
626 
627 static void
628 vm_pageout_swapoff(void *arg __unused, struct swdevt *sp __unused)
629 {
630 
631 	if (swap_pager_nswapdev() == 1)
632 		atomic_store_rel_int(&swapdev_enabled, 0);
633 }
634 
635 #if !defined(NO_SWAPPING)
636 /*
637  *	vm_pageout_object_deactivate_pages
638  *
639  *	Deactivate enough pages to satisfy the inactive target
640  *	requirements.
641  *
642  *	The object and map must be locked.
643  */
644 static void
645 vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
646     long desired)
647 {
648 	vm_object_t backing_object, object;
649 	vm_page_t p;
650 	int act_delta, remove_mode;
651 
652 	VM_OBJECT_ASSERT_LOCKED(first_object);
653 	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
654 		return;
655 	for (object = first_object;; object = backing_object) {
656 		if (pmap_resident_count(pmap) <= desired)
657 			goto unlock_return;
658 		VM_OBJECT_ASSERT_LOCKED(object);
659 		if ((object->flags & OBJ_UNMANAGED) != 0 ||
660 		    object->paging_in_progress != 0)
661 			goto unlock_return;
662 
663 		remove_mode = 0;
664 		if (object->shadow_count > 1)
665 			remove_mode = 1;
666 		/*
667 		 * Scan the object's entire memory queue.
668 		 */
669 		TAILQ_FOREACH(p, &object->memq, listq) {
670 			if (pmap_resident_count(pmap) <= desired)
671 				goto unlock_return;
672 			if (vm_page_busied(p))
673 				continue;
674 			VM_CNT_INC(v_pdpages);
675 			vm_page_lock(p);
676 			if (p->wire_count != 0 || p->hold_count != 0 ||
677 			    !pmap_page_exists_quick(pmap, p)) {
678 				vm_page_unlock(p);
679 				continue;
680 			}
681 			act_delta = pmap_ts_referenced(p);
682 			if ((p->aflags & PGA_REFERENCED) != 0) {
683 				if (act_delta == 0)
684 					act_delta = 1;
685 				vm_page_aflag_clear(p, PGA_REFERENCED);
686 			}
687 			if (!vm_page_active(p) && act_delta != 0) {
688 				vm_page_activate(p);
689 				p->act_count += act_delta;
690 			} else if (vm_page_active(p)) {
691 				if (act_delta == 0) {
692 					p->act_count -= min(p->act_count,
693 					    ACT_DECLINE);
694 					if (!remove_mode && p->act_count == 0) {
695 						pmap_remove_all(p);
696 						vm_page_deactivate(p);
697 					} else
698 						vm_page_requeue(p);
699 				} else {
700 					vm_page_activate(p);
701 					if (p->act_count < ACT_MAX -
702 					    ACT_ADVANCE)
703 						p->act_count += ACT_ADVANCE;
704 					vm_page_requeue(p);
705 				}
706 			} else if (vm_page_inactive(p))
707 				pmap_remove_all(p);
708 			vm_page_unlock(p);
709 		}
710 		if ((backing_object = object->backing_object) == NULL)
711 			goto unlock_return;
712 		VM_OBJECT_RLOCK(backing_object);
713 		if (object != first_object)
714 			VM_OBJECT_RUNLOCK(object);
715 	}
716 unlock_return:
717 	if (object != first_object)
718 		VM_OBJECT_RUNLOCK(object);
719 }
720 
721 /*
722  * Deactivate some number of pages in a map.  Try to do it fairly, but
723  * that is really hard to do.
724  */
725 static void
726 vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
729 {
730 	vm_map_entry_t tmpe;
731 	vm_object_t obj, bigobj;
732 	int nothingwired;
733 
734 	if (!vm_map_trylock(map))
735 		return;
736 
737 	bigobj = NULL;
738 	nothingwired = TRUE;
739 
740 	/*
741 	 * first, search out the biggest object, and try to free pages from
742 	 * that.
743 	 */
744 	tmpe = map->header.next;
745 	while (tmpe != &map->header) {
746 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
747 			obj = tmpe->object.vm_object;
748 			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
749 				if (obj->shadow_count <= 1 &&
750 				    (bigobj == NULL ||
751 				     bigobj->resident_page_count < obj->resident_page_count)) {
752 					if (bigobj != NULL)
753 						VM_OBJECT_RUNLOCK(bigobj);
754 					bigobj = obj;
755 				} else
756 					VM_OBJECT_RUNLOCK(obj);
757 			}
758 		}
759 		if (tmpe->wired_count > 0)
760 			nothingwired = FALSE;
761 		tmpe = tmpe->next;
762 	}
763 
764 	if (bigobj != NULL) {
765 		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
766 		VM_OBJECT_RUNLOCK(bigobj);
767 	}
768 	/*
769 	 * Next, hunt around for other pages to deactivate.  We actually
770 	 * do this search sort of wrong -- .text first is not the best idea.
771 	 */
772 	tmpe = map->header.next;
773 	while (tmpe != &map->header) {
774 		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
775 			break;
776 		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
777 			obj = tmpe->object.vm_object;
778 			if (obj != NULL) {
779 				VM_OBJECT_RLOCK(obj);
780 				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
781 				VM_OBJECT_RUNLOCK(obj);
782 			}
783 		}
784 		tmpe = tmpe->next;
785 	}
786 
787 	/*
788 	 * Remove all mappings if a process is swapped out; this will free page
789 	 * table pages.
790 	 */
791 	if (desired == 0 && nothingwired) {
792 		pmap_remove(vm_map_pmap(map), vm_map_min(map),
793 		    vm_map_max(map));
794 	}
795 
796 	vm_map_unlock(map);
797 }
798 #endif		/* !defined(NO_SWAPPING) */
799 
800 /*
801  * Attempt to acquire all of the necessary locks to launder a page and
802  * then call through the clustering layer to PUTPAGES.  Wait a short
803  * time for a vnode lock.
804  *
805  * Requires the page and object lock on entry, releases both before return.
806  * Returns 0 on success and an errno otherwise.
807  */
808 static int
809 vm_pageout_clean(vm_page_t m, int *numpagedout)
810 {
811 	struct vnode *vp;
812 	struct mount *mp;
813 	vm_object_t object;
814 	vm_pindex_t pindex;
815 	int error, lockmode;
816 
817 	vm_page_assert_locked(m);
818 	object = m->object;
819 	VM_OBJECT_ASSERT_WLOCKED(object);
820 	error = 0;
821 	vp = NULL;
822 	mp = NULL;
823 
824 	/*
825 	 * The object is already known NOT to be dead.   It
826 	 * is possible for the vget() to block the whole
827 	 * pageout daemon, but the new low-memory handling
828 	 * code should prevent it.
829 	 *
830 	 * We can't wait forever for the vnode lock, we might
831 	 * deadlock due to a vn_read() getting stuck in
832 	 * vm_wait while holding this vnode.  We skip the
833 	 * vnode if we can't get it in a reasonable amount
834 	 * of time.
835 	 */
836 	if (object->type == OBJT_VNODE) {
837 		vm_page_unlock(m);
838 		vp = object->handle;
839 		if (vp->v_type == VREG &&
840 		    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
841 			mp = NULL;
842 			error = EDEADLK;
843 			goto unlock_all;
844 		}
845 		KASSERT(mp != NULL,
846 		    ("vp %p with NULL v_mount", vp));
847 		vm_object_reference_locked(object);
848 		pindex = m->pindex;
849 		VM_OBJECT_WUNLOCK(object);
850 		lockmode = MNT_SHARED_WRITES(vp->v_mount) ?
851 		    LK_SHARED : LK_EXCLUSIVE;
852 		if (vget(vp, lockmode | LK_TIMELOCK, curthread)) {
853 			vp = NULL;
854 			error = EDEADLK;
855 			goto unlock_mp;
856 		}
857 		VM_OBJECT_WLOCK(object);
858 		vm_page_lock(m);
859 		/*
860 		 * While the object and page were unlocked, the page
861 		 * may have been:
862 		 * (1) moved to a different queue,
863 		 * (2) reallocated to a different object,
864 		 * (3) reallocated to a different offset, or
865 		 * (4) cleaned.
866 		 */
867 		if (!vm_page_in_laundry(m) || m->object != object ||
868 		    m->pindex != pindex || m->dirty == 0) {
869 			vm_page_unlock(m);
870 			error = ENXIO;
871 			goto unlock_all;
872 		}
873 
874 		/*
875 		 * The page may have been busied or held while the object
876 		 * and page locks were released.
877 		 */
878 		if (vm_page_busied(m) || m->hold_count != 0) {
879 			vm_page_unlock(m);
880 			error = EBUSY;
881 			goto unlock_all;
882 		}
883 	}
884 
885 	/*
886 	 * If a page is dirty, then it is either being washed
887 	 * (but not yet cleaned) or it is still in the
888 	 * laundry.  If it is still in the laundry, then we
889 	 * start the cleaning operation.
890 	 */
891 	if ((*numpagedout = vm_pageout_cluster(m)) == 0)
892 		error = EIO;
893 
894 unlock_all:
895 	VM_OBJECT_WUNLOCK(object);
896 
897 unlock_mp:
898 	vm_page_lock_assert(m, MA_NOTOWNED);
899 	if (mp != NULL) {
900 		if (vp != NULL)
901 			vput(vp);
902 		vm_object_deallocate(object);
903 		vn_finished_write(mp);
904 	}
905 
906 	return (error);
907 }
908 
909 /*
910  * Attempt to launder the specified number of pages.
911  *
912  * Returns the number of pages successfully laundered.
913  */
914 static int
915 vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
916 {
917 	struct vm_pagequeue *pq;
918 	vm_object_t object;
919 	vm_page_t m, next;
920 	int act_delta, error, maxscan, numpagedout, starting_target;
921 	int vnodes_skipped;
922 	bool pageout_ok, queue_locked;
923 
924 	starting_target = launder;
925 	vnodes_skipped = 0;
926 
927 	/*
928 	 * Scan the laundry queues for pages eligible to be laundered.  We stop
929  * once the target number of dirty pages has been laundered, or once
930 	 * we've reached the end of the queue.  A single iteration of this loop
931 	 * may cause more than one page to be laundered because of clustering.
932 	 *
933 	 * maxscan ensures that we don't re-examine requeued pages.  Any
934 	 * additional pages written as part of a cluster are subtracted from
935 	 * maxscan since they must be taken from the laundry queue.
936 	 *
937 	 * As an optimization, we avoid laundering from PQ_UNSWAPPABLE when no
938 	 * swap devices are configured.
939 	 */
940 	if (atomic_load_acq_int(&swapdev_enabled))
941 		pq = &vmd->vmd_pagequeues[PQ_UNSWAPPABLE];
942 	else
943 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
944 
945 scan:
946 	vm_pagequeue_lock(pq);
947 	maxscan = pq->pq_cnt;
948 	queue_locked = true;
949 	for (m = TAILQ_FIRST(&pq->pq_pl);
950 	    m != NULL && maxscan-- > 0 && launder > 0;
951 	    m = next) {
952 		vm_pagequeue_assert_locked(pq);
953 		KASSERT(queue_locked, ("unlocked laundry queue"));
954 		KASSERT(vm_page_in_laundry(m),
955 		    ("page %p has an inconsistent queue", m));
956 		next = TAILQ_NEXT(m, plinks.q);
957 		if ((m->flags & PG_MARKER) != 0)
958 			continue;
959 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
960 		    ("PG_FICTITIOUS page %p cannot be in laundry queue", m));
961 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
962 		    ("VPO_UNMANAGED page %p cannot be in laundry queue", m));
963 		if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
964 			vm_page_unlock(m);
965 			continue;
966 		}
967 		object = m->object;
968 		if ((!VM_OBJECT_TRYWLOCK(object) &&
969 		    (!vm_pageout_fallback_object_lock(m, &next) ||
970 		    m->hold_count != 0)) || vm_page_busied(m)) {
971 			VM_OBJECT_WUNLOCK(object);
972 			vm_page_unlock(m);
973 			continue;
974 		}
975 
976 		/*
977 		 * Unlock the laundry queue, invalidating the 'next' pointer.
978 		 * Use a marker to remember our place in the laundry queue.
979 		 */
980 		TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_laundry_marker,
981 		    plinks.q);
982 		vm_pagequeue_unlock(pq);
983 		queue_locked = false;
984 
985 		/*
986 		 * Invalid pages can be easily freed.  They cannot be
987 		 * mapped; vm_page_free() asserts this.
988 		 */
989 		if (m->valid == 0)
990 			goto free_page;
991 
992 		/*
993 		 * If the page has been referenced and the object is not dead,
994 		 * reactivate or requeue the page depending on whether the
995 		 * object is mapped.
996 		 */
997 		if ((m->aflags & PGA_REFERENCED) != 0) {
998 			vm_page_aflag_clear(m, PGA_REFERENCED);
999 			act_delta = 1;
1000 		} else
1001 			act_delta = 0;
1002 		if (object->ref_count != 0)
1003 			act_delta += pmap_ts_referenced(m);
1004 		else {
1005 			KASSERT(!pmap_page_is_mapped(m),
1006 			    ("page %p is mapped", m));
1007 		}
1008 		if (act_delta != 0) {
1009 			if (object->ref_count != 0) {
1010 				VM_CNT_INC(v_reactivated);
1011 				vm_page_activate(m);
1012 
1013 				/*
1014 				 * Increase the activation count if the page
1015 				 * was referenced while in the laundry queue.
1016 				 * This makes it less likely that the page will
1017 				 * be returned prematurely to the inactive
1018 				 * queue.
1019  				 */
1020 				m->act_count += act_delta + ACT_ADVANCE;
1021 
1022 				/*
1023 				 * If this was a background laundering, count
1024 				 * activated pages towards our target.  The
1025 				 * purpose of background laundering is to ensure
1026 				 * that pages are eventually cycled through the
1027 				 * laundry queue, and an activation is a valid
1028 				 * way out.
1029 				 */
1030 				if (!in_shortfall)
1031 					launder--;
1032 				goto drop_page;
1033 			} else if ((object->flags & OBJ_DEAD) == 0)
1034 				goto requeue_page;
1035 		}
1036 
1037 		/*
1038 		 * If the page appears to be clean at the machine-independent
1039 		 * layer, then remove all of its mappings from the pmap in
1040 		 * anticipation of freeing it.  If, however, any of the page's
1041 		 * mappings allow write access, then the page may still be
1042 		 * modified until the last of those mappings are removed.
1043 		 */
1044 		if (object->ref_count != 0) {
1045 			vm_page_test_dirty(m);
1046 			if (m->dirty == 0)
1047 				pmap_remove_all(m);
1048 		}
1049 
1050 		/*
1051 		 * Clean pages are freed, and dirty pages are paged out unless
1052 		 * they belong to a dead object.  Requeueing dirty pages from
1053 		 * dead objects is pointless, as they are being paged out and
1054 		 * freed by the thread that destroyed the object.
1055 		 */
1056 		if (m->dirty == 0) {
1057 free_page:
1058 			vm_page_free(m);
1059 			VM_CNT_INC(v_dfree);
1060 		} else if ((object->flags & OBJ_DEAD) == 0) {
1061 			if (object->type != OBJT_SWAP &&
1062 			    object->type != OBJT_DEFAULT)
1063 				pageout_ok = true;
1064 			else if (disable_swap_pageouts)
1065 				pageout_ok = false;
1066 			else
1067 				pageout_ok = true;
1068 			if (!pageout_ok) {
1069 requeue_page:
1070 				vm_pagequeue_lock(pq);
1071 				queue_locked = true;
1072 				vm_page_requeue_locked(m);
1073 				goto drop_page;
1074 			}
1075 
1076 			/*
1077 			 * Form a cluster with adjacent, dirty pages from the
1078 			 * same object, and page out that entire cluster.
1079 			 *
1080 			 * The adjacent, dirty pages must also be in the
1081 			 * laundry.  However, their mappings are not checked
1082 			 * for new references.  Consequently, a recently
1083 			 * referenced page may be paged out.  However, that
1084 			 * page will not be prematurely reclaimed.  After page
1085 			 * out, the page will be placed in the inactive queue,
1086 			 * where any new references will be detected and the
1087 			 * page reactivated.
1088 			 */
1089 			error = vm_pageout_clean(m, &numpagedout);
1090 			if (error == 0) {
1091 				launder -= numpagedout;
1092 				maxscan -= numpagedout - 1;
1093 			} else if (error == EDEADLK) {
1094 				pageout_lock_miss++;
1095 				vnodes_skipped++;
1096 			}
1097 			goto relock_queue;
1098 		}
1099 drop_page:
1100 		vm_page_unlock(m);
1101 		VM_OBJECT_WUNLOCK(object);
1102 relock_queue:
1103 		if (!queue_locked) {
1104 			vm_pagequeue_lock(pq);
1105 			queue_locked = true;
1106 		}
1107 		next = TAILQ_NEXT(&vmd->vmd_laundry_marker, plinks.q);
1108 		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_laundry_marker, plinks.q);
1109 	}
1110 	vm_pagequeue_unlock(pq);
1111 
1112 	if (launder > 0 && pq == &vmd->vmd_pagequeues[PQ_UNSWAPPABLE]) {
1113 		pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
1114 		goto scan;
1115 	}
1116 
1117 	/*
1118 	 * Wake up the sync daemon if we skipped a vnode in a writeable object
1119 	 * and we didn't launder enough pages.
1120 	 */
1121 	if (vnodes_skipped > 0 && launder > 0)
1122 		(void)speedup_syncer();
1123 
1124 	return (starting_target - launder);
1125 }
1126 
1127 /*
1128  * Compute the integer square root.
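 * This is the classic digit-by-digit method in base 4: each iteration of
 * the second loop consumes two bits of "num".  For example, isqrt(10)
 * returns 3.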
1129  */
1130 static u_int
1131 isqrt(u_int num)
1132 {
1133 	u_int bit, root, tmp;
1134 
1135 	bit = 1u << ((NBBY * sizeof(u_int)) - 2);
1136 	while (bit > num)
1137 		bit >>= 2;
1138 	root = 0;
1139 	while (bit != 0) {
1140 		tmp = root + bit;
1141 		root >>= 1;
1142 		if (num >= tmp) {
1143 			num -= tmp;
1144 			root += bit;
1145 		}
1146 		bit >>= 2;
1147 	}
1148 	return (root);
1149 }
1150 
1151 /*
1152  * Perform the work of the laundry thread: periodically wake up and determine
1153  * whether any pages need to be laundered.  If so, determine the number of pages
1154  * that need to be laundered, and launder them.
1155  */
1156 static void
1157 vm_pageout_laundry_worker(void *arg)
1158 {
1159 	struct vm_domain *domain;
1160 	struct vm_pagequeue *pq;
1161 	uint64_t nclean, ndirty;
1162 	u_int last_launder, wakeups;
1163 	int domidx, last_target, launder, shortfall, shortfall_cycle, target;
1164 	bool in_shortfall;
1165 
1166 	domidx = (uintptr_t)arg;
1167 	domain = &vm_dom[domidx];
1168 	pq = &domain->vmd_pagequeues[PQ_LAUNDRY];
1169 	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
1170 	vm_pageout_init_marker(&domain->vmd_laundry_marker, PQ_LAUNDRY);
1171 
1172 	shortfall = 0;
1173 	in_shortfall = false;
1174 	shortfall_cycle = 0;
1175 	target = 0;
1176 	last_launder = 0;
1177 
1178 	/*
1179 	 * Calls to these handlers are serialized by the swap syscall lock.
1180 	 */
1181 	(void)EVENTHANDLER_REGISTER(swapon, vm_pageout_swapon, domain,
1182 	    EVENTHANDLER_PRI_ANY);
1183 	(void)EVENTHANDLER_REGISTER(swapoff, vm_pageout_swapoff, domain,
1184 	    EVENTHANDLER_PRI_ANY);
1185 
1186 	/*
1187 	 * The pageout laundry worker is never done, so loop forever.
1188 	 */
1189 	for (;;) {
1190 		KASSERT(target >= 0, ("negative target %d", target));
1191 		KASSERT(shortfall_cycle >= 0,
1192 		    ("negative cycle %d", shortfall_cycle));
1193 		launder = 0;
1194 		wakeups = VM_CNT_FETCH(v_pdwakeups);
1195 
1196 		/*
1197 		 * First determine whether we need to launder pages to meet a
1198 		 * shortage of free pages.
1199 		 */
1200 		if (shortfall > 0) {
1201 			in_shortfall = true;
1202 			shortfall_cycle = VM_LAUNDER_RATE / VM_INACT_SCAN_RATE;
1203 			target = shortfall;
1204 		} else if (!in_shortfall)
1205 			goto trybackground;
1206 		else if (shortfall_cycle == 0 || vm_laundry_target() <= 0) {
1207 			/*
1208 			 * We recently entered shortfall and began laundering
1209 			 * pages.  If we have completed that laundering run
1210 			 * (and we are no longer in shortfall) or we have met
1211 			 * our laundry target through other activity, then we
1212 			 * can stop laundering pages.
1213 			 */
1214 			in_shortfall = false;
1215 			target = 0;
1216 			goto trybackground;
1217 		}
1218 		last_launder = wakeups;
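		/*
		 * Spread the remaining target over the rest of the run;
		 * e.g., a 1000 page shortfall divided across the
		 * VM_LAUNDER_RATE / VM_INACT_SCAN_RATE == 5 cycles works
		 * out to 200 pages per cycle when each cycle meets its
		 * quota.
		 */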
1219 		launder = target / shortfall_cycle--;
1220 		goto dolaundry;
1221 
1222 		/*
1223 		 * There's no immediate need to launder any pages; see if we
1224 		 * meet the conditions to perform background laundering:
1225 		 *
1226 		 * 1. The ratio of dirty to clean inactive pages exceeds the
1227 		 *    background laundering threshold and the pagedaemon has
1228 		 *    been woken up to reclaim pages since our last
1229 		 *    laundering, or
1230 		 * 2. we haven't yet reached the target of the current
1231 		 *    background laundering run.
1232 		 *
1233 		 * The background laundering threshold is not a constant.
1234 		 * Instead, it is a slowly growing function of the number of
1235 		 * page daemon wakeups since the last laundering.  Thus, as the
1236 		 * ratio of dirty to clean inactive pages grows, the amount of
1237 		 * memory pressure required to trigger laundering decreases.
1238 		 */
1239 trybackground:
1240 		nclean = vm_cnt.v_inactive_count + vm_cnt.v_free_count;
1241 		ndirty = vm_cnt.v_laundry_count;
1242 		if (target == 0 && wakeups != last_launder &&
1243 		    ndirty * isqrt(wakeups - last_launder) >= nclean) {
1244 			target = vm_background_launder_target;
1245 		}
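		/*
		 * Illustrative figures (not taken from the source): with
		 * nclean == 900 and ndirty == 300, background laundering
		 * kicks in once isqrt(wakeups - last_launder) reaches 3,
		 * i.e., after 9 page daemon wakeups; with ndirty == 100,
		 * it would take 81 wakeups.
		 */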
1246 
1247 		/*
1248 		 * We have a non-zero background laundering target.  If we've
1249 		 * laundered up to our maximum without observing a page daemon
1250 		 * wakeup, just stop.  This is a safety belt that ensures we
1251 		 * don't launder an excessive amount if memory pressure is low
1252 		 * and the ratio of dirty to clean pages is large.  Otherwise,
1253 		 * proceed at the background laundering rate.
1254 		 */
1255 		if (target > 0) {
1256 			if (wakeups != last_launder) {
1257 				last_launder = wakeups;
1258 				last_target = target;
1259 			} else if (last_target - target >=
1260 			    vm_background_launder_max * PAGE_SIZE / 1024) {
1261 				target = 0;
1262 			}
1263 			launder = vm_background_launder_rate * PAGE_SIZE / 1024;
1264 			launder /= VM_LAUNDER_RATE;
1265 			if (launder > target)
1266 				launder = target;
1267 		}
1268 
1269 dolaundry:
1270 		if (launder > 0) {
1271 			/*
1272 			 * Because of I/O clustering, the number of laundered
1273 			 * pages could exceed "target" by the maximum size of
1274 			 * a cluster minus one.
1275 			 */
1276 			target -= min(vm_pageout_launder(domain, launder,
1277 			    in_shortfall), target);
1278 			pause("laundp", hz / VM_LAUNDER_RATE);
1279 		}
1280 
1281 		/*
1282 		 * If we're not currently laundering pages and the page daemon
1283 		 * hasn't posted a new request, sleep until the page daemon
1284 		 * kicks us.
1285 		 */
1286 		vm_pagequeue_lock(pq);
1287 		if (target == 0 && vm_laundry_request == VM_LAUNDRY_IDLE)
1288 			(void)mtx_sleep(&vm_laundry_request,
1289 			    vm_pagequeue_lockptr(pq), PVM, "launds", 0);
1290 
1291 		/*
1292 		 * If the pagedaemon has indicated that it's in shortfall, start
1293 		 * a shortfall laundering unless we're already in the middle of
1294 		 * one.  This may preempt a background laundering.
1295 		 */
1296 		if (vm_laundry_request == VM_LAUNDRY_SHORTFALL &&
1297 		    (!in_shortfall || shortfall_cycle == 0)) {
1298 			shortfall = vm_laundry_target() + vm_pageout_deficit;
1299 			target = 0;
1300 		} else
1301 			shortfall = 0;
1302 
1303 		if (target == 0)
1304 			vm_laundry_request = VM_LAUNDRY_IDLE;
1305 		vm_pagequeue_unlock(pq);
1306 	}
1307 }
1308 
1309 /*
1310  *	vm_pageout_scan does the dirty work for the pageout daemon.
1311  *
1312  *	pass == 0: Update active LRU/deactivate pages
1313  *	pass >= 1: Free inactive pages
1314  *
1315  * Returns true if pass was zero or enough pages were freed by the inactive
1316  * queue scan to meet the target.
1317  */
1318 static bool
1319 vm_pageout_scan(struct vm_domain *vmd, int pass)
1320 {
1321 	vm_page_t m, next;
1322 	struct vm_pagequeue *pq;
1323 	vm_object_t object;
1324 	long min_scan;
1325 	int act_delta, addl_page_shortage, deficit, inactq_shortage, maxscan;
1326 	int page_shortage, scan_tick, scanned, starting_page_shortage;
1327 	boolean_t queue_locked;
1328 
1329 	/*
1330 	 * If we need to reclaim memory, ask kernel caches to return
1331 	 * some.  We rate-limit to avoid thrashing.
1332 	 */
1333 	if (vmd == &vm_dom[0] && pass > 0 &&
1334 	    (time_uptime - lowmem_uptime) >= lowmem_period) {
1335 		/*
1336 		 * Decrease registered cache sizes.
1337 		 */
1338 		SDT_PROBE0(vm, , , vm__lowmem_scan);
1339 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_PAGES);
1340 		/*
1341 		 * We do this explicitly after the caches have been
1342 		 * drained above.
1343 		 */
1344 		uma_reclaim();
1345 		lowmem_uptime = time_uptime;
1346 	}
1347 
1348 	/*
1349 	 * The addl_page_shortage is the number of temporarily
1350 	 * stuck pages in the inactive queue.  In other words, the
1351 	 * number of pages from the inactive count that should be
1352 	 * discounted in setting the target for the active queue scan.
1353 	 */
1354 	addl_page_shortage = 0;
1355 
1356 	/*
1357 	 * Calculate the number of pages that we want to free.  This number
1358 	 * can be negative if many pages are freed between the wakeup call to
1359 	 * the page daemon and this calculation.
1360 	 */
1361 	if (pass > 0) {
1362 		deficit = atomic_readandclear_int(&vm_pageout_deficit);
1363 		page_shortage = vm_paging_target() + deficit;
1364 	} else
1365 		page_shortage = deficit = 0;
1366 	starting_page_shortage = page_shortage;
1367 
1368 	/*
1369 	 * Start scanning the inactive queue for pages that we can free.  The
1370 	 * scan will stop when we reach the target or we have scanned the
1371 	 * entire queue.  (Note that m->act_count is not used to make
1372 	 * decisions for the inactive queue, only for the active queue.)
1373 	 */
1374 	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
1375 	maxscan = pq->pq_cnt;
1376 	vm_pagequeue_lock(pq);
1377 	queue_locked = TRUE;
1378 	for (m = TAILQ_FIRST(&pq->pq_pl);
1379 	     m != NULL && maxscan-- > 0 && page_shortage > 0;
1380 	     m = next) {
1381 		vm_pagequeue_assert_locked(pq);
1382 		KASSERT(queue_locked, ("unlocked inactive queue"));
1383 		KASSERT(vm_page_inactive(m), ("Inactive queue %p", m));
1384 
1385 		VM_CNT_INC(v_pdpages);
1386 		next = TAILQ_NEXT(m, plinks.q);
1387 
1388 		/*
1389 		 * skip marker pages
1390 		 */
1391 		if (m->flags & PG_MARKER)
1392 			continue;
1393 
1394 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
1395 		    ("Fictitious page %p cannot be in inactive queue", m));
1396 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1397 		    ("Unmanaged page %p cannot be in inactive queue", m));
1398 
1399 		/*
1400 		 * The page or object lock acquisitions fail if the
1401 		 * page was removed from the queue or moved to a
1402 		 * different position within the queue.  In either
1403 		 * case, addl_page_shortage should not be incremented.
1404 		 */
1405 		if (!vm_pageout_page_lock(m, &next))
1406 			goto unlock_page;
1407 		else if (m->hold_count != 0) {
1408 			/*
1409 			 * Held pages are essentially stuck in the
1410 			 * queue.  So, they ought to be discounted
1411 			 * from the inactive count.  See the
1412 			 * calculation of inactq_shortage before the
1413 			 * loop over the active queue below.
1414 			 */
1415 			addl_page_shortage++;
1416 			goto unlock_page;
1417 		}
1418 		object = m->object;
1419 		if (!VM_OBJECT_TRYWLOCK(object)) {
1420 			if (!vm_pageout_fallback_object_lock(m, &next))
1421 				goto unlock_object;
1422 			else if (m->hold_count != 0) {
1423 				addl_page_shortage++;
1424 				goto unlock_object;
1425 			}
1426 		}
1427 		if (vm_page_busied(m)) {
1428 			/*
1429 			 * Don't mess with busy pages.  Leave them at
1430 			 * the front of the queue.  Most likely, they
1431 			 * are being paged out and will leave the
1432 			 * queue shortly after the scan finishes.  So,
1433 			 * they ought to be discounted from the
1434 			 * inactive count.
1435 			 */
1436 			addl_page_shortage++;
1437 unlock_object:
1438 			VM_OBJECT_WUNLOCK(object);
1439 unlock_page:
1440 			vm_page_unlock(m);
1441 			continue;
1442 		}
1443 		KASSERT(m->hold_count == 0, ("Held page %p", m));
1444 
1445 		/*
1446 		 * Dequeue the inactive page and unlock the inactive page
1447 		 * queue, invalidating the 'next' pointer.  Dequeueing the
1448 		 * page here avoids a later reacquisition (and release) of
1449 		 * the inactive page queue lock when vm_page_activate(),
1450 		 * vm_page_free(), or vm_page_launder() is called.  Use a
1451 		 * marker to remember our place in the inactive queue.
1452 		 */
1453 		TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
1454 		vm_page_dequeue_locked(m);
1455 		vm_pagequeue_unlock(pq);
1456 		queue_locked = FALSE;
1457 
1458 		/*
1459 		 * Invalid pages can be easily freed.  They cannot be
1460 		 * mapped; vm_page_free() asserts this.
1461 		 */
1462 		if (m->valid == 0)
1463 			goto free_page;
1464 
1465 		/*
1466 		 * If the page has been referenced and the object is not dead,
1467 		 * reactivate or requeue the page depending on whether the
1468 		 * object is mapped.
1469 		 */
1470 		if ((m->aflags & PGA_REFERENCED) != 0) {
1471 			vm_page_aflag_clear(m, PGA_REFERENCED);
1472 			act_delta = 1;
1473 		} else
1474 			act_delta = 0;
1475 		if (object->ref_count != 0) {
1476 			act_delta += pmap_ts_referenced(m);
1477 		} else {
1478 			KASSERT(!pmap_page_is_mapped(m),
1479 			    ("vm_pageout_scan: page %p is mapped", m));
1480 		}
1481 		if (act_delta != 0) {
1482 			if (object->ref_count != 0) {
1483 				VM_CNT_INC(v_reactivated);
1484 				vm_page_activate(m);
1485 
1486 				/*
1487 				 * Increase the activation count if the page
1488 				 * was referenced while in the inactive queue.
1489 				 * This makes it less likely that the page will
1490 				 * be returned prematurely to the inactive
1491 				 * queue.
1492  				 */
1493 				m->act_count += act_delta + ACT_ADVANCE;
1494 				goto drop_page;
1495 			} else if ((object->flags & OBJ_DEAD) == 0) {
1496 				vm_pagequeue_lock(pq);
1497 				queue_locked = TRUE;
1498 				m->queue = PQ_INACTIVE;
1499 				TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
1500 				vm_pagequeue_cnt_inc(pq);
1501 				goto drop_page;
1502 			}
1503 		}
1504 
1505 		/*
1506 		 * If the page appears to be clean at the machine-independent
1507 		 * layer, then remove all of its mappings from the pmap in
1508 		 * anticipation of freeing it.  If, however, any of the page's
1509 		 * mappings allow write access, then the page may still be
1510 		 * modified until the last of those mappings are removed.
1511 		 */
1512 		if (object->ref_count != 0) {
1513 			vm_page_test_dirty(m);
1514 			if (m->dirty == 0)
1515 				pmap_remove_all(m);
1516 		}
1517 
1518 		/*
1519 		 * Clean pages can be freed, but dirty pages must be sent back
1520 		 * to the laundry, unless they belong to a dead object.
1521 		 * Requeueing dirty pages from dead objects is pointless, as
1522 		 * they are being paged out and freed by the thread that
1523 		 * destroyed the object.
1524 		 */
1525 		if (m->dirty == 0) {
1526 free_page:
1527 			vm_page_free(m);
1528 			VM_CNT_INC(v_dfree);
1529 			--page_shortage;
1530 		} else if ((object->flags & OBJ_DEAD) == 0)
1531 			vm_page_launder(m);
1532 drop_page:
1533 		vm_page_unlock(m);
1534 		VM_OBJECT_WUNLOCK(object);
1535 		if (!queue_locked) {
1536 			vm_pagequeue_lock(pq);
1537 			queue_locked = TRUE;
1538 		}
1539 		next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
1540 		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
1541 	}
1542 	vm_pagequeue_unlock(pq);
1543 
1544 	/*
1545 	 * Wake up the laundry thread so that it can perform any needed
1546 	 * laundering.  If we didn't meet our target, we're in shortfall and
1547 	 * need to launder more aggressively.  If PQ_LAUNDRY is empty and no
1548 	 * swap devices are configured, the laundry thread has no work to do, so
1549 	 * don't bother waking it up.
1550 	 */
1551 	if (vm_laundry_request == VM_LAUNDRY_IDLE &&
1552 	    starting_page_shortage > 0) {
1553 		pq = &vm_dom[0].vmd_pagequeues[PQ_LAUNDRY];
1554 		vm_pagequeue_lock(pq);
1555 		if (pq->pq_cnt > 0 || atomic_load_acq_int(&swapdev_enabled)) {
1556 			if (page_shortage > 0) {
1557 				vm_laundry_request = VM_LAUNDRY_SHORTFALL;
1558 				VM_CNT_INC(v_pdshortfalls);
1559 			} else if (vm_laundry_request != VM_LAUNDRY_SHORTFALL)
1560 				vm_laundry_request = VM_LAUNDRY_BACKGROUND;
1561 			wakeup(&vm_laundry_request);
1562 		}
1563 		vm_pagequeue_unlock(pq);
1564 	}
1565 
1566 #if !defined(NO_SWAPPING)
1567 	/*
1568 	 * Wake up the swapout daemon if we didn't free the targeted number of
1569 	 * pages.
1570 	 */
1571 	if (vm_swap_enabled && page_shortage > 0)
1572 		vm_req_vmdaemon(VM_SWAP_NORMAL);
1573 #endif
1574 
1575 	/*
1576 	 * If the inactive queue scan fails repeatedly to meet its
1577 	 * target, kill the largest process.
1578 	 */
1579 	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1580 
1581 	/*
1582 	 * Compute the number of pages we want to try to move from the
1583 	 * active queue to either the inactive or laundry queue.
1584 	 *
1585 	 * When scanning active pages, we make clean pages count more heavily
1586 	 * towards the page shortage than dirty pages.  This is because dirty
1587 	 * pages must be laundered before they can be reused and thus have less
1588 	 * utility when attempting to quickly alleviate a shortage.  However,
1589 	 * this weighting also causes the scan to deactivate dirty pages
1590 	 * more aggressively, improving the effectiveness of clustering and
1591 	 * ensuring that they can eventually be reused.
1592 	 */
1593 	inactq_shortage = vm_cnt.v_inactive_target - (vm_cnt.v_inactive_count +
1594 	    vm_cnt.v_laundry_count / act_scan_laundry_weight) +
1595 	    vm_paging_target() + deficit + addl_page_shortage;
1596 	page_shortage *= act_scan_laundry_weight;
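	/*
	 * With the default act_scan_laundry_weight of 3, deactivating one
	 * clean page in the scan below counts as much toward the shortage
	 * as laundering three dirty pages.
	 */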
1597 
1598 	pq = &vmd->vmd_pagequeues[PQ_ACTIVE];
1599 	vm_pagequeue_lock(pq);
1600 	maxscan = pq->pq_cnt;
1601 
1602 	/*
1603 	 * If we're just idle polling, attempt to visit every
1604 	 * active page within 'update_period' seconds.
1605 	 */
1606 	scan_tick = ticks;
1607 	if (vm_pageout_update_period != 0) {
1608 		min_scan = pq->pq_cnt;
1609 		min_scan *= scan_tick - vmd->vmd_last_active_scan;
1610 		min_scan /= hz * vm_pageout_update_period;
1611 	} else
1612 		min_scan = 0;
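	/*
	 * As an example, with the default vm_pageout_update_period of 600
	 * seconds and one second since the last scan, min_scan works out
	 * to pq_cnt / 600: a full pass over the active queue takes about
	 * ten minutes.
	 */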
1613 	if (min_scan > 0 || (inactq_shortage > 0 && maxscan > 0))
1614 		vmd->vmd_last_active_scan = scan_tick;
1615 
1616 	/*
1617 	 * Scan the active queue for pages that can be deactivated.  Update
1618 	 * the per-page activity counter and use it to identify deactivation
1619 	 * candidates.  Held pages may be deactivated.
1620 	 */
1621 	for (m = TAILQ_FIRST(&pq->pq_pl), scanned = 0; m != NULL && (scanned <
1622 	    min_scan || (inactq_shortage > 0 && scanned < maxscan)); m = next,
1623 	    scanned++) {
1624 		KASSERT(m->queue == PQ_ACTIVE,
1625 		    ("vm_pageout_scan: page %p isn't active", m));
1626 		next = TAILQ_NEXT(m, plinks.q);
1627 		if ((m->flags & PG_MARKER) != 0)
1628 			continue;
1629 		KASSERT((m->flags & PG_FICTITIOUS) == 0,
1630 		    ("Fictitious page %p cannot be in active queue", m));
1631 		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1632 		    ("Unmanaged page %p cannot be in active queue", m));
1633 		if (!vm_pageout_page_lock(m, &next)) {
1634 			vm_page_unlock(m);
1635 			continue;
1636 		}
1637 
1638 		/*
1639 		 * The count for page daemon pages is updated after checking
1640 		 * the page for eligibility.
1641 		 */
1642 		VM_CNT_INC(v_pdpages);
1643 
1644 		/*
1645 		 * Check to see "how much" the page has been used.
1646 		 */
1647 		if ((m->aflags & PGA_REFERENCED) != 0) {
1648 			vm_page_aflag_clear(m, PGA_REFERENCED);
1649 			act_delta = 1;
1650 		} else
1651 			act_delta = 0;
1652 
1653 		/*
1654 		 * Perform an unsynchronized object ref count check.  While
1655 		 * the page lock ensures that the page is not reallocated to
1656 		 * another object, in particular, one with unmanaged mappings
1657 		 * that cannot support pmap_ts_referenced(), two races are,
1658 		 * nonetheless, possible:
1659 		 * 1) The count was transitioning to zero, but we saw a non-
1660 		 *    zero value.  pmap_ts_referenced() will return zero
1661 		 *    because the page is not mapped.
1662 		 * 2) The count was transitioning to one, but we saw zero.
1663 		 *    This race delays the detection of a new reference.  At
1664 		 *    worst, we will deactivate and reactivate the page.
1665 		 */
1666 		if (m->object->ref_count != 0)
1667 			act_delta += pmap_ts_referenced(m);
1668 
1669 		/*
1670 		 * Advance or decay the act_count based on recent usage.
1671 		 */
1672 		if (act_delta != 0) {
1673 			m->act_count += ACT_ADVANCE + act_delta;
1674 			if (m->act_count > ACT_MAX)
1675 				m->act_count = ACT_MAX;
1676 		} else
1677 			m->act_count -= min(m->act_count, ACT_DECLINE);
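		/*
		 * Illustrative decay behavior, assuming the conventional
		 * values ACT_ADVANCE == 3, ACT_DECLINE == 1, and
		 * ACT_MAX == 64: a page referenced on every scan saturates
		 * at act_count == 64, after which it must go unreferenced
		 * for 64 consecutive scans before act_count reaches 0 and
		 * it becomes a deactivation candidate below.
		 */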
1678 
1679 		/*
1680 		 * Move this page to the tail of the active, inactive or laundry
1681 		 * queue depending on usage.
1682 		 */
1683 		if (m->act_count == 0) {
1684 			/* Dequeue to avoid later lock recursion. */
1685 			vm_page_dequeue_locked(m);
1686 
1687 			/*
1688 			 * When not short of inactive pages, let dirty pages go
1689 			 * through the inactive queue before moving to the
1690 			 * laundry queue.  This gives them some extra time to
1691 			 * be reactivated, potentially avoiding an expensive
1692 			 * pageout.  During a page shortage, the inactive queue
1693 			 * is necessarily small, so we may move dirty pages
1694 			 * directly to the laundry queue.
1695 			 */
1696 			if (inactq_shortage <= 0)
1697 				vm_page_deactivate(m);
1698 			else {
1699 				/*
1700 				 * Calling vm_page_test_dirty() here would
1701 				 * require acquisition of the object's write
1702 				 * lock.  However, during a page shortage,
1703 				 * directing dirty pages into the laundry
1704 				 * queue is only an optimization and not a
1705 				 * requirement.  Therefore, we simply rely on
1706 				 * the opportunistic updates to the page's
1707 				 * dirty field by the pmap.
1708 				 */
1709 				if (m->dirty == 0) {
1710 					vm_page_deactivate(m);
1711 					inactq_shortage -=
1712 					    act_scan_laundry_weight;
1713 				} else {
1714 					vm_page_launder(m);
1715 					inactq_shortage--;
1716 				}
1717 			}
1718 		} else
1719 			vm_page_requeue_locked(m);
1720 		vm_page_unlock(m);
1721 	}
1722 	vm_pagequeue_unlock(pq);
1723 #if !defined(NO_SWAPPING)
1724 	/*
1725 	 * Idle process swapout -- run once per second when we are reclaiming
1726 	 * pages.
1727 	 */
1728 	if (vm_swap_idle_enabled && pass > 0) {
1729 		static long lsec;
1730 		if (time_second != lsec) {
1731 			vm_req_vmdaemon(VM_SWAP_IDLE);
1732 			lsec = time_second;
1733 		}
1734 	}
1735 #endif
1736 	return (page_shortage <= 0);
1737 }
1738 
1739 static int vm_pageout_oom_vote;
1740 
1741 /*
1742  * Each pagedaemon thread votes, and the last to complete the quorum
1743  * performs the OOM kill.  Trying to kill processes before all
1744  * pagedaemons have failed to reach the free target is premature.
1745  */
1746 static void
1747 vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1748     int starting_page_shortage)
1749 {
1750 	int old_vote;
1751 
1752 	if (starting_page_shortage <= 0 || starting_page_shortage !=
1753 	    page_shortage)
1754 		vmd->vmd_oom_seq = 0;
1755 	else
1756 		vmd->vmd_oom_seq++;
1757 	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1758 		if (vmd->vmd_oom) {
1759 			vmd->vmd_oom = FALSE;
1760 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1761 		}
1762 		return;
1763 	}
1764 
1765 	/*
1766 	 * Reset the sequence so that another full run of failed passes
1767 	 * is required before this domain votes again.
1768 	 */
1769 	vmd->vmd_oom_seq = 0;
1770 
1771 	if (vmd->vmd_oom)
1772 		return;
1773 
1774 	vmd->vmd_oom = TRUE;
1775 	old_vote = atomic_fetchadd_int(&vm_pageout_oom_vote, 1);
1776 	if (old_vote != vm_ndomains - 1)
1777 		return;
1778 
1779 	/*
1780 	 * The current pagedaemon thread is the last in the quorum to
1781 	 * start OOM.  Initiate the selection and signaling of the
1782 	 * victim.
1783 	 */
1784 	vm_pageout_oom(VM_OOM_MEM);
1785 
1786 	/*
1787 	 * After one round of OOM terror, recall our vote.  On the
1788 	 * next pass, the current pagedaemon will vote again if the
1789 	 * low memory condition persists, because vmd_oom is once
1790 	 * again false.
1791 	 */
1792 	vmd->vmd_oom = FALSE;
1793 	atomic_subtract_int(&vm_pageout_oom_vote, 1);
1794 }
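/*
 * Illustrative sketch (not kernel code): the voting scheme in
 * vm_pageout_mightbe_oom() is a quorum over an atomic counter, where the
 * caller that casts the final vote performs the OOM kill.  A minimal
 * userspace analogue, using the hypothetical names oom_vote, ndomains,
 * oom_vote_cast(), and oom_vote_recall(), might look as follows.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int oom_vote;		/* analogue of vm_pageout_oom_vote */
static const int ndomains = 4;		/* assumed number of domains */

/* Cast a vote; returns true for the caller that completes the quorum. */
static bool
oom_vote_cast(bool *voted)
{
	if (*voted)
		return (false);
	*voted = true;
	/* Like atomic_fetchadd_int(), fetch_add returns the old value. */
	return (atomic_fetch_add(&oom_vote, 1) == ndomains - 1);
}

/* Recall a vote after the kill, so the next pass can vote afresh. */
static void
oom_vote_recall(bool *voted)
{
	if (*voted) {
		*voted = false;
		atomic_fetch_sub(&oom_vote, 1);
	}
}
#endif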
1795 
1796 /*
1797  * The OOM killer is the page daemon's action of last resort when
1798  * memory allocation requests have been stalled for a prolonged period
1799  * of time because it cannot reclaim memory.  This function computes
1800  * the approximate number of physical pages that could be reclaimed if
1801  * the specified address space is destroyed.
1802  *
1803  * Private, anonymous memory owned by the address space is the
1804  * principal resource that we expect to recover after an OOM kill.
1805  * Since the physical pages mapped by the address space's COW entries
1806  * are typically shared pages, they are unlikely to be released and so
1807  * they are not counted.
1808  *
1809  * To get to the point where the page daemon runs the OOM killer, its
1810  * efforts to write-back vnode-backed pages may have stalled.  This
1811  * could be caused by a memory allocation deadlock in the write path
1812  * that might be resolved by an OOM kill.  Therefore, physical pages
1813  * belonging to vnode-backed objects are counted, because they might
1814  * be freed without being written out first if the address space holds
1815  * the last reference to an unlinked vnode.
1816  *
1817  * Similarly, physical pages belonging to OBJT_PHYS objects are
1818  * counted because the address space might hold the last reference to
1819  * the object.
1820  */
1821 static long
1822 vm_pageout_oom_pagecount(struct vmspace *vmspace)
1823 {
1824 	vm_map_t map;
1825 	vm_map_entry_t entry;
1826 	vm_object_t obj;
1827 	long res;
1828 
1829 	map = &vmspace->vm_map;
1830 	KASSERT(!map->system_map, ("system map"));
1831 	sx_assert(&map->lock, SA_LOCKED);
1832 	res = 0;
1833 	for (entry = map->header.next; entry != &map->header;
1834 	    entry = entry->next) {
1835 		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1836 			continue;
1837 		obj = entry->object.vm_object;
1838 		if (obj == NULL)
1839 			continue;
1840 		if ((entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
1841 		    obj->ref_count != 1)
1842 			continue;
1843 		switch (obj->type) {
1844 		case OBJT_DEFAULT:
1845 		case OBJT_SWAP:
1846 		case OBJT_PHYS:
1847 		case OBJT_VNODE:
1848 			res += obj->resident_page_count;
1849 			break;
1850 		}
1851 	}
1852 	return (res);
1853 }
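/*
 * Illustrative example (hypothetical process): an address space with
 * 25000 resident pages of private anonymous (OBJT_DEFAULT/OBJT_SWAP)
 * memory, plus 5000 resident pages mapped copy-on-write from a shared
 * library whose object has ref_count > 1, is scored as 25000 pages by
 * vm_pageout_oom_pagecount(); the shared COW pages are skipped because
 * killing the process is unlikely to free them.
 */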
1854 
1855 void
1856 vm_pageout_oom(int shortage)
1857 {
1858 	struct proc *p, *bigproc;
1859 	vm_offset_t size, bigsize;
1860 	struct thread *td;
1861 	struct vmspace *vm;
1862 	bool breakout;
1863 
1864 	/*
1865 	 * We keep the process bigproc locked once we find it to keep anyone
1866 	 * from messing with it; however, there is a possibility of
1867 	 * deadlock if process B is bigproc and one of its child processes
1868 	 * attempts to propagate a signal to B while we are waiting for the
1869 	 * lock of some other process A during the walk.  To avoid this, we
1870 	 * don't block on the process lock but just skip any locked process.
1871 	 */
1872 	bigproc = NULL;
1873 	bigsize = 0;
1874 	sx_slock(&allproc_lock);
1875 	FOREACH_PROC_IN_SYSTEM(p) {
1876 		PROC_LOCK(p);
1877 
1878 		/*
1879 		 * If this is a system, protected or killed process, skip it.
1880 		 */
1881 		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
1882 		    P_PROTECTED | P_SYSTEM | P_WEXIT)) != 0 ||
1883 		    p->p_pid == 1 || P_KILLED(p) ||
1884 		    (p->p_pid < 48 && swap_pager_avail != 0)) {
1885 			PROC_UNLOCK(p);
1886 			continue;
1887 		}
1888 		/*
1889 		 * If the process is in a non-runnable state, don't
1890 		 * touch it.  Check all the threads individually.
1891 		 */
1892 		breakout = false;
1893 		FOREACH_THREAD_IN_PROC(p, td) {
1894 			thread_lock(td);
1895 			if (!TD_ON_RUNQ(td) &&
1896 			    !TD_IS_RUNNING(td) &&
1897 			    !TD_IS_SLEEPING(td) &&
1898 			    !TD_IS_SUSPENDED(td) &&
1899 			    !TD_IS_SWAPPED(td)) {
1900 				thread_unlock(td);
1901 				breakout = true;
1902 				break;
1903 			}
1904 			thread_unlock(td);
1905 		}
1906 		if (breakout) {
1907 			PROC_UNLOCK(p);
1908 			continue;
1909 		}
1910 		/*
1911 		 * get the process size
1912 		 */
1913 		vm = vmspace_acquire_ref(p);
1914 		if (vm == NULL) {
1915 			PROC_UNLOCK(p);
1916 			continue;
1917 		}
1918 		_PHOLD_LITE(p);
1919 		PROC_UNLOCK(p);
1920 		sx_sunlock(&allproc_lock);
1921 		if (!vm_map_trylock_read(&vm->vm_map)) {
1922 			vmspace_free(vm);
1923 			sx_slock(&allproc_lock);
1924 			PRELE(p);
1925 			continue;
1926 		}
1927 		size = vmspace_swap_count(vm);
1928 		if (shortage == VM_OOM_MEM)
1929 			size += vm_pageout_oom_pagecount(vm);
1930 		vm_map_unlock_read(&vm->vm_map);
1931 		vmspace_free(vm);
1932 		sx_slock(&allproc_lock);
1933 
1934 		/*
1935 		 * If this process is bigger than the biggest one,
1936 		 * remember it.
1937 		 */
1938 		if (size > bigsize) {
1939 			if (bigproc != NULL)
1940 				PRELE(bigproc);
1941 			bigproc = p;
1942 			bigsize = size;
1943 		} else {
1944 			PRELE(p);
1945 		}
1946 	}
1947 	sx_sunlock(&allproc_lock);
1948 	if (bigproc != NULL) {
1949 		if (vm_panic_on_oom != 0)
1950 			panic("out of swap space");
1951 		PROC_LOCK(bigproc);
1952 		killproc(bigproc, "out of swap space");
1953 		sched_nice(bigproc, PRIO_MIN);
1954 		_PRELE(bigproc);
1955 		PROC_UNLOCK(bigproc);
1956 		wakeup(&vm_cnt.v_free_count);
1957 	}
1958 }
1959 
1960 static void
1961 vm_pageout_worker(void *arg)
1962 {
1963 	struct vm_domain *domain;
1964 	int domidx, pass;
1965 	bool target_met;
1966 
1967 	domidx = (uintptr_t)arg;
1968 	domain = &vm_dom[domidx];
1969 	pass = 0;
1970 	target_met = true;
1971 
1972 	/*
1973 	 * XXXKIB It could be useful to bind pageout daemon threads to
1974 	 * the cores belonging to the domain, from which vm_page_array
1975 	 * is allocated.
1976 	 */
1977 
1978 	KASSERT(domain->vmd_segs != 0, ("domain without segments"));
1979 	domain->vmd_last_active_scan = ticks;
1980 	vm_pageout_init_marker(&domain->vmd_marker, PQ_INACTIVE);
1981 	vm_pageout_init_marker(&domain->vmd_inacthead, PQ_INACTIVE);
1982 	TAILQ_INSERT_HEAD(&domain->vmd_pagequeues[PQ_INACTIVE].pq_pl,
1983 	    &domain->vmd_inacthead, plinks.q);
1984 
1985 	/*
1986 	 * The pageout daemon worker is never done, so loop forever.
1987 	 */
1988 	while (TRUE) {
1989 		mtx_lock(&vm_page_queue_free_mtx);
1990 
1991 		/*
1992 		 * Generally, after a level >= 1 scan, if there are enough
1993 		 * free pages to wakeup the waiters, then they are already
1994 		 * awake.  A call to vm_page_free() during the scan awakened
1995 		 * them.  However, in the following case, this wakeup serves
1996 		 * to bound the amount of time that a thread might wait.
1997 		 * Suppose a thread's call to vm_page_alloc() fails, but
1998 		 * before that thread calls VM_WAIT, enough pages are freed by
1999 		 * other threads to alleviate the free page shortage.  The
2000 		 * thread will, nonetheless, wait until another page is freed
2001 		 * or this wakeup is performed.
2002 		 */
2003 		if (vm_pages_needed && !vm_page_count_min()) {
2004 			vm_pages_needed = false;
2005 			wakeup(&vm_cnt.v_free_count);
2006 		}
2007 
2008 		/*
2009 		 * Do not clear vm_pageout_wanted until we reach our free page
2010 		 * target.  Otherwise, we may be awakened over and over again,
2011 		 * wasting CPU time.
2012 		 */
2013 		if (vm_pageout_wanted && target_met)
2014 			vm_pageout_wanted = false;
2015 
2016 		/*
2017 		 * Might the page daemon receive a wakeup call?
2018 		 */
2019 		if (vm_pageout_wanted) {
2020 			/*
2021 			 * No.  Either vm_pageout_wanted was set by another
2022 			 * thread during the previous scan, which must have
2023 			 * been a level 0 scan, or vm_pageout_wanted was
2024 			 * already set and the scan failed to free enough
2025 			 * pages.  If we haven't yet performed a level >= 1
2026 			 * (page reclamation) scan, then increase the level
2027 			 * and scan again now.  Otherwise, sleep a bit and
2028 			 * try again later.
2029 			 */
2030 			mtx_unlock(&vm_page_queue_free_mtx);
2031 			if (pass >= 1)
2032 				pause("psleep", hz / VM_INACT_SCAN_RATE);
2033 			pass++;
2034 		} else {
2035 			/*
2036 			 * Yes.  Sleep until pages need to be reclaimed or
2037 			 * have their reference stats updated.
2038 			 */
2039 			if (mtx_sleep(&vm_pageout_wanted,
2040 			    &vm_page_queue_free_mtx, PDROP | PVM, "psleep",
2041 			    hz) == 0) {
2042 				VM_CNT_INC(v_pdwakeups);
2043 				pass = 1;
2044 			} else
2045 				pass = 0;
2046 		}
2047 
2048 		target_met = vm_pageout_scan(domain, pass);
2049 	}
2050 }
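/*
 * Sketch of the pass levels driven by the loop above: a pass of 0
 * follows a timed wakeup with no page shortage, so vm_pageout_scan()
 * only updates reference statistics; a pass of 1 follows an explicit
 * pagedaemon wakeup and performs reclamation; and while
 * vm_pageout_wanted remains set after a failed scan, the pass is
 * increased again after a short pause.
 */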
2051 
2052 /*
2053  *	vm_pageout_init initializes basic pageout daemon settings.
2054  */
2055 static void
2056 vm_pageout_init(void)
2057 {
2058 	/*
2059 	 * Initialize some paging parameters.
2060 	 */
2061 	vm_cnt.v_interrupt_free_min = 2;
2062 	if (vm_cnt.v_page_count < 2000)
2063 		vm_pageout_page_count = 8;
2064 
2065 	/*
2066 	 * v_free_reserved needs to include enough for the largest
2067 	 * swap pager structures plus enough for any pv_entry structs
2068 	 * when paging.
2069 	 */
2070 	if (vm_cnt.v_page_count > 1024)
2071 		vm_cnt.v_free_min = 4 + (vm_cnt.v_page_count - 1024) / 200;
2072 	else
2073 		vm_cnt.v_free_min = 4;
2074 	vm_cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
2075 	    vm_cnt.v_interrupt_free_min;
2076 	vm_cnt.v_free_reserved = vm_pageout_page_count +
2077 	    vm_cnt.v_pageout_free_min + (vm_cnt.v_page_count / 768);
2078 	vm_cnt.v_free_severe = vm_cnt.v_free_min / 2;
2079 	vm_cnt.v_free_target = 4 * vm_cnt.v_free_min + vm_cnt.v_free_reserved;
2080 	vm_cnt.v_free_min += vm_cnt.v_free_reserved;
2081 	vm_cnt.v_free_severe += vm_cnt.v_free_reserved;
2082 	vm_cnt.v_inactive_target = (3 * vm_cnt.v_free_target) / 2;
2083 	if (vm_cnt.v_inactive_target > vm_cnt.v_free_count / 3)
2084 		vm_cnt.v_inactive_target = vm_cnt.v_free_count / 3;
2085 
2086 	/*
2087 	 * Set the default wakeup threshold to be 10% above the minimum
2088 	 * page limit.  This keeps the steady state out of shortfall.
2089 	 */
2090 	vm_pageout_wakeup_thresh = (vm_cnt.v_free_min / 10) * 11;
2091 
2092 	/*
2093 	 * Set interval in seconds for active scan.  We want to visit each
2094 	 * page at least once every ten minutes.  This is to prevent worst
2095 	 * case paging behaviors with stale active LRU.
2096 	 */
2097 	if (vm_pageout_update_period == 0)
2098 		vm_pageout_update_period = 600;
2099 
2100 	/* XXX does not really belong here */
2101 	if (vm_page_max_wired == 0)
2102 		vm_page_max_wired = vm_cnt.v_free_count / 3;
2103 
2104 	/*
2105 	 * Target amount of memory to move out of the laundry queue during a
2106 	 * background laundering.  This is proportional to the amount of system
2107 	 * memory.
2108 	 */
2109 	vm_background_launder_target = (vm_cnt.v_free_target -
2110 	    vm_cnt.v_free_min) / 10;
2111 }
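/*
 * Worked example of the sizing above (assumed configuration, not from
 * the source): with v_page_count == 1048576 (4GB of 4KB pages),
 * MAXBSIZE == 64KB, and vm_pageout_page_count == 32:
 *
 *	v_free_min	= 4 + (1048576 - 1024) / 200	= 5241
 *	v_pageout_free_min = (2 * 65536) / 4096 + 2	= 34
 *	v_free_reserved	= 32 + 34 + 1048576 / 768	= 1431
 *	v_free_severe	= 5241 / 2 + 1431		= 4051
 *	v_free_target	= 4 * 5241 + 1431		= 22395
 *	v_free_min	+= v_free_reserved		= 6672
 *	vm_pageout_wakeup_thresh = (6672 / 10) * 11	= 7337
 *	vm_background_launder_target = (22395 - 6672) / 10 = 1572
 *
 * i.e., a free target of roughly 2% of memory, with the wakeup
 * threshold 10% above the ~0.6% minimum.
 */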
2112 
2113 /*
2114  *	vm_pageout is the high-level pageout daemon.
2115  */
2116 static void
2117 vm_pageout(void)
2118 {
2119 	int error;
2120 #ifdef VM_NUMA_ALLOC
2121 	int i;
2122 #endif
2123 
2124 	swap_pager_swap_init();
2125 	error = kthread_add(vm_pageout_laundry_worker, NULL, curproc, NULL,
2126 	    0, 0, "laundry: dom0");
2127 	if (error != 0)
2128 		panic("starting laundry for domain 0, error %d", error);
2129 #ifdef VM_NUMA_ALLOC
2130 	for (i = 1; i < vm_ndomains; i++) {
2131 		error = kthread_add(vm_pageout_worker, (void *)(uintptr_t)i,
2132 		    curproc, NULL, 0, 0, "dom%d", i);
2133 		if (error != 0) {
2134 			panic("starting pageout for domain %d, error %d\n",
2135 			    i, error);
2136 		}
2137 	}
2138 #endif
2139 	error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
2140 	    0, 0, "uma");
2141 	if (error != 0)
2142 		panic("starting uma_reclaim helper, error %d\n", error);
2143 	vm_pageout_worker((void *)(uintptr_t)0);
2144 }
2145 
2146 /*
2147  * Unless the free page queue lock is held by the caller, this function
2148  * should be regarded as advisory.  Specifically, the caller should
2149  * not msleep() on &vm_cnt.v_free_count following this function unless
2150  * the free page queue lock is held until the msleep() is performed.
2151  */
2152 void
2153 pagedaemon_wakeup(void)
2154 {
2155 
2156 	if (!vm_pageout_wanted && curthread->td_proc != pageproc) {
2157 		vm_pageout_wanted = true;
2158 		wakeup(&vm_pageout_wanted);
2159 	}
2160 }
2161 
2162 #if !defined(NO_SWAPPING)
2163 static void
2164 vm_req_vmdaemon(int req)
2165 {
2166 	static int lastrun = 0;
2167 
2168 	mtx_lock(&vm_daemon_mtx);
2169 	vm_pageout_req_swapout |= req;
2170 	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
2171 		wakeup(&vm_daemon_needed);
2172 		lastrun = ticks;
2173 	}
2174 	mtx_unlock(&vm_daemon_mtx);
2175 }
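/*
 * Illustrative sketch: the test in vm_req_vmdaemon() is a once-per-second
 * rate limit keyed on the ticks counter, where the "ticks < lastrun"
 * clause resets the limiter if the signed tick counter has wrapped.  A
 * generic form of the idiom, using the hypothetical helper should_fire(),
 * might read:
 */
#if 0
#include <stdbool.h>

/* Fire at most once per "period" ticks, tolerating counter wrap. */
static bool
should_fire(int now, int period, int *lastrun)
{
	if (now > *lastrun + period || now < *lastrun) {
		*lastrun = now;
		return (true);
	}
	return (false);
}
#endif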
2176 
2177 static void
2178 vm_daemon(void)
2179 {
2180 	struct rlimit rsslim;
2181 	struct proc *p;
2182 	struct thread *td;
2183 	struct vmspace *vm;
2184 	int breakout, swapout_flags, tryagain, attempts;
2185 #ifdef RACCT
2186 	uint64_t rsize, ravailable;
2187 #endif
2188 
2189 	while (TRUE) {
2190 		mtx_lock(&vm_daemon_mtx);
2191 		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
2192 #ifdef RACCT
2193 		    racct_enable ? hz : 0
2194 #else
2195 		    0
2196 #endif
2197 		);
2198 		swapout_flags = vm_pageout_req_swapout;
2199 		vm_pageout_req_swapout = 0;
2200 		mtx_unlock(&vm_daemon_mtx);
2201 		if (swapout_flags)
2202 			swapout_procs(swapout_flags);
2203 
2204 		/*
2205 		 * Scan the processes, deactivating the pages of any
2206 		 * process that exceeds its RSS rlimit or is swapped out.
2207 		 */
2208 		tryagain = 0;
2209 		attempts = 0;
2210 again:
2211 		attempts++;
2212 		sx_slock(&allproc_lock);
2213 		FOREACH_PROC_IN_SYSTEM(p) {
2214 			vm_pindex_t limit, size;
2215 
2216 			/*
2217 			 * If this is a system process, or the process is
2218 			 * exiting or execing, skip it.
2219 			 */
2220 			PROC_LOCK(p);
2221 			if (p->p_state != PRS_NORMAL ||
2222 			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
2223 				PROC_UNLOCK(p);
2224 				continue;
2225 			}
2226 			/*
2227 			 * If the process is in a non-runnable state,
2228 			 * don't touch it.
2229 			 */
2230 			breakout = 0;
2231 			FOREACH_THREAD_IN_PROC(p, td) {
2232 				thread_lock(td);
2233 				if (!TD_ON_RUNQ(td) &&
2234 				    !TD_IS_RUNNING(td) &&
2235 				    !TD_IS_SLEEPING(td) &&
2236 				    !TD_IS_SUSPENDED(td)) {
2237 					thread_unlock(td);
2238 					breakout = 1;
2239 					break;
2240 				}
2241 				thread_unlock(td);
2242 			}
2243 			if (breakout) {
2244 				PROC_UNLOCK(p);
2245 				continue;
2246 			}
2247 			/*
2248 			 * get a limit
2249 			 */
2250 			lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
2251 			limit = OFF_TO_IDX(
2252 			    qmin(rsslim.rlim_cur, rsslim.rlim_max));
2253 
2254 			/*
2255 			 * Let processes that are swapped out really be
2256 			 * swapped out: set the limit to nothing, which
2257 			 * will force a swap-out.
2258 			 */
2259 			if ((p->p_flag & P_INMEM) == 0)
2260 				limit = 0;	/* XXX */
2261 			vm = vmspace_acquire_ref(p);
2262 			_PHOLD_LITE(p);
2263 			PROC_UNLOCK(p);
2264 			if (vm == NULL) {
2265 				PRELE(p);
2266 				continue;
2267 			}
2268 			sx_sunlock(&allproc_lock);
2269 
2270 			size = vmspace_resident_count(vm);
2271 			if (size >= limit) {
2272 				vm_pageout_map_deactivate_pages(
2273 				    &vm->vm_map, limit);
2274 				size = vmspace_resident_count(vm);
2275 			}
2276 #ifdef RACCT
2277 			if (racct_enable) {
2278 				rsize = IDX_TO_OFF(size);
2279 				PROC_LOCK(p);
2280 				if (p->p_state == PRS_NORMAL)
2281 					racct_set(p, RACCT_RSS, rsize);
2282 				ravailable = racct_get_available(p, RACCT_RSS);
2283 				PROC_UNLOCK(p);
2284 				if (rsize > ravailable) {
2285 					/*
2286 					 * Don't be overly aggressive; this
2287 					 * might be an innocent process,
2288 					 * and the limit could've been exceeded
2289 					 * by some memory hog.  Don't try
2290 					 * to deactivate more than 1/4 of the
2291 					 * process's resident set size.
2292 					 */
2293 					if (attempts <= 8) {
2294 						if (ravailable < rsize -
2295 						    (rsize / 4)) {
2296 							ravailable = rsize -
2297 							    (rsize / 4);
2298 						}
2299 					}
2300 					vm_pageout_map_deactivate_pages(
2301 					    &vm->vm_map,
2302 					    OFF_TO_IDX(ravailable));
2303 					/* Update RSS usage after paging out. */
2304 					size = vmspace_resident_count(vm);
2305 					rsize = IDX_TO_OFF(size);
2306 					PROC_LOCK(p);
2307 					if (p->p_state == PRS_NORMAL)
2308 						racct_set(p, RACCT_RSS, rsize);
2309 					PROC_UNLOCK(p);
2310 					if (rsize > ravailable)
2311 						tryagain = 1;
2312 				}
2313 			}
2314 #endif
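			/*
			 * Illustrative clamp arithmetic (hypothetical
			 * numbers): with rsize == 400MB resident and
			 * ravailable == 100MB, the check above raises
			 * ravailable to rsize - rsize / 4 == 300MB, so a
			 * single attempt deactivates at most 1/4 of the
			 * resident set; successive passes (the loop retries
			 * while attempts <= 10) walk the process down
			 * toward its RACCT limit.
			 */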
2315 			vmspace_free(vm);
2316 			sx_slock(&allproc_lock);
2317 			PRELE(p);
2318 		}
2319 		sx_sunlock(&allproc_lock);
2320 		if (tryagain != 0 && attempts <= 10)
2321 			goto again;
2322 	}
2323 }
2324 #endif			/* !defined(NO_SWAPPING) */
2325