/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);

static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;

SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, CTLFLAG_RW,
    &vm_swap_enabled, 0,
    "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, CTLFLAG_RW,
    &vm_swap_idle_enabled, 0,
    "Allow swapout on idle criteria");

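/*
 * Both knobs are ordinary read-write sysctls; for example, from userland
 * (a hypothetical session, not part of the original source):
 *
 *	# sysctl vm.swap_enabled=0
 *	# sysctl vm.swap_idle_enabled=1
 */
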
/*
 * swap_idle_threshold1 is the guaranteed time, in seconds, that a
 * process stays swapped in before it may be swapped out.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0,
    "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time, in seconds, that a process can be
 * idle before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0,
    "Time before a process will be swapped out");

static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);

static int swapped_cnt;
static int swap_inprogress;	/* Pending swap-ins done outside swapper. */
static int last_swapin;

static void swapclear(struct proc *);
static int swapout(struct proc *);
static void vm_swapout_map_deactivate_pages(vm_map_t, long);
static void vm_swapout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void swapout_procs(int action);
static void vm_req_vmdaemon(int req);
static void vm_thread_swapout(struct thread *td);

/*
 *	vm_swapout_object_deactivate_pages
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements.
 *
 *	The object and map must be locked.
 */
static void
vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p;
	int act_delta, remove_mode;

	VM_OBJECT_ASSERT_LOCKED(first_object);
	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_ASSERT_LOCKED(object);
		if ((object->flags & OBJ_UNMANAGED) != 0 ||
		    object->paging_in_progress != 0)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(p, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if (should_yield())
				goto unlock_return;
			if (vm_page_busied(p))
				continue;
			VM_CNT_INC(v_pdpages);
			vm_page_lock(p);
			if (vm_page_wired(p) ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock(p);
				continue;
			}
			act_delta = pmap_ts_referenced(p);
			if ((p->aflags & PGA_REFERENCED) != 0) {
				if (act_delta == 0)
					act_delta = 1;
				vm_page_aflag_clear(p, PGA_REFERENCED);
			}
			if (!vm_page_active(p) && act_delta != 0) {
				vm_page_activate(p);
				p->act_count += act_delta;
			} else if (vm_page_active(p)) {
				if (act_delta == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode && p->act_count == 0) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else
						vm_page_requeue(p);
				} else {
					vm_page_activate(p);
					if (p->act_count < ACT_MAX -
					    ACT_ADVANCE)
						p->act_count += ACT_ADVANCE;
					vm_page_requeue(p);
				}
			} else if (vm_page_inactive(p))
				pmap_remove_all(p);
			vm_page_unlock(p);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_RLOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_RUNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_RUNLOCK(object);
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock_read(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * First, search out the biggest object and try to free pages from
	 * it.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count <
				     obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_RUNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_RUNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_swapout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_RUNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search somewhat wrong -- deactivating .text first is not
	 * the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_RLOCK(obj);
				vm_swapout_object_deactivate_pages(map->pmap,
				    obj, desired);
				VM_OBJECT_RUNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}

	vm_map_unlock_read(map);
}

/*
 * Swap out requests
 */
#define VM_SWAP_NORMAL 1
#define VM_SWAP_IDLE 2

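/*
 * Request whole-process swapout from the vm_daemon.  Called by the page
 * daemon while it is reclaiming pages, when whole-process swapout is
 * enabled.
 */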
void
vm_swapout_run(void)
{

	if (vm_swap_enabled)
		vm_req_vmdaemon(VM_SWAP_NORMAL);
}

/*
 * Idle process swapout -- run once per second when pagedaemons are
 * reclaiming pages.
 */
void
vm_swapout_run_idle(void)
{
	static long lsec;

	if (!vm_swap_idle_enabled || time_second == lsec)
		return;
	vm_req_vmdaemon(VM_SWAP_IDLE);
	lsec = time_second;
}

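/*
 * Record the swapout request for the vm_daemon and wake it, rate-limiting
 * wakeups to at most one per second (one hz worth of ticks).
 */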
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

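/*
 * The vm_daemon main loop: service queued swapout requests, then enforce
 * per-process RSS limits (and, with RACCT, racct(9) RSS limits) by
 * deactivating pages of processes that exceed them.
 */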
static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
	uint64_t rsize, ravailable;
#endif

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
#ifdef RACCT
		    racct_enable ? hz : 0
#else
		    0
#endif
		);
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags != 0) {
			/*
			 * Drain the per-CPU page queue batches as a deadlock
			 * avoidance measure.
			 */
			if ((swapout_flags & VM_SWAP_NORMAL) != 0)
				vm_page_drain_pqbatch();
			swapout_procs(swapout_flags);
		}

		/*
		 * Scan the processes and deactivate pages of any process
		 * that exceeds its rlimits or that is swapped out.
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If any thread of the process is in an unexpected,
			 * non-running type of state, don't touch the
			 * process.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing, which will
			 * force a swap-out.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			_PHOLD_LITE(p);
			PROC_UNLOCK(p);
			if (vm == NULL) {
				PRELE(p);
				continue;
			}
			sx_sunlock(&allproc_lock);

			size = vmspace_resident_count(vm);
			if (size >= limit) {
				vm_swapout_map_deactivate_pages(
				    &vm->vm_map, limit);
				size = vmspace_resident_count(vm);
			}
#ifdef RACCT
			if (racct_enable) {
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				if (p->p_state == PRS_NORMAL)
					racct_set(p, RACCT_RSS, rsize);
				ravailable = racct_get_available(p, RACCT_RSS);
				PROC_UNLOCK(p);
				if (rsize > ravailable) {
					/*
					 * Don't be overly aggressive; this
					 * might be an innocent process,
					 * and the limit could've been exceeded
					 * by some memory hog.  Don't try
					 * to deactivate more than 1/4th
					 * of the process's resident set size.
					 */
					if (attempts <= 8) {
						if (ravailable < rsize -
						    (rsize / 4)) {
							ravailable = rsize -
							    (rsize / 4);
						}
					}
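					/*
					 * A worked example (not from the
					 * original source): with rsize of
					 * 400MB and ravailable of 100MB,
					 * the clamp above raises ravailable
					 * to 300MB, so one attempt
					 * deactivates at most 100MB, i.e.,
					 * 1/4th of the resident set.
					 */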
					vm_swapout_map_deactivate_pages(
					    &vm->vm_map,
					    OFF_TO_IDX(ravailable));
					/* Update RSS usage after paging out. */
					size = vmspace_resident_count(vm);
					rsize = IDX_TO_OFF(size);
					PROC_LOCK(p);
					if (p->p_state == PRS_NORMAL)
						racct_set(p, RACCT_RSS, rsize);
					PROC_UNLOCK(p);
					if (rsize > ravailable)
						tryagain = 1;
				}
			}
#endif
			vmspace_free(vm);
			sx_slock(&allproc_lock);
			PRELE(p);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10) {
			maybe_yield();
			goto again;
		}
	}
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, PQ_LAUNDRY);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td, int oom_alloc)
{
	vm_object_t ksobj;
	vm_page_t ma[KSTACK_MAX_PAGES];
	int a, count, i, j, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_WLOCK(ksobj);
	(void)vm_page_grab_pages(ksobj, 0, oom_alloc | VM_ALLOC_WIRED, ma,
	    pages);
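	/*
	 * Page in the invalid pages in pager-sized clusters.  For instance
	 * (an illustrative case, not from the original source), if only
	 * page 2 of a four-page stack is still valid, the first pass reads
	 * pages 0-1 (bounded by what the pager has), and the loop then
	 * skips the valid page and continues at page 3.
	 */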
	for (i = 0; i < pages;) {
		vm_page_assert_xbusied(ma[i]);
		if (ma[i]->valid == VM_PAGE_BITS_ALL) {
			vm_page_xunbusy(ma[i]);
			i++;
			continue;
		}
		vm_object_pip_add(ksobj, 1);
		for (j = i + 1; j < pages; j++)
			if (ma[j]->valid == VM_PAGE_BITS_ALL)
				break;
		rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
		KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
		count = min(a + 1, j - i);
		rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
		KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
		    __func__, td->td_proc->p_pid));
		vm_object_pip_wakeup(ksobj);
		for (j = i; j < i + count; j++)
			vm_page_xunbusy(ma[j]);
		i += count;
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

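/*
 * Bring a swapped-out process back into memory: swap in the kernel stacks
 * of all of its threads and mark it P_INMEM again.
 */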
void
faultin(struct proc *p)
{
	struct thread *td;
	int oom_alloc;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}

	if ((p->p_flag & P_INMEM) == 0) {
		oom_alloc = (p->p_flag & P_WKILLED) != 0 ? VM_ALLOC_SYSTEM :
		    VM_ALLOC_NORMAL;

		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);
		sx_xlock(&allproc_lock);
		MPASS(swapped_cnt > 0);
		swapped_cnt--;
		if (curthread != &thread0)
			swap_inprogress++;
		sx_xunlock(&allproc_lock);

		/*
		 * We hold no lock here because the list of threads
		 * cannot change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td, oom_alloc);

		if (curthread != &thread0) {
			sx_xlock(&allproc_lock);
			MPASS(swap_inprogress > 0);
			swap_inprogress--;
			last_swapin = ticks;
			sx_xunlock(&allproc_lock);
		}
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		/* Allow other threads to swap p out now. */
		wakeup(&p->p_flag);
		--p->p_lock;
	}
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */

static struct proc *
swapper_selector(bool wkilled_only)
{
	struct proc *p, *res;
	struct thread *td;
	int ppri, pri, slptime, swtime;

	sx_assert(&allproc_lock, SA_SLOCKED);
	if (swapped_cnt == 0)
		return (NULL);
	res = NULL;
	ppri = INT_MIN;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW || (p->p_flag & (P_SWAPPINGOUT |
		    P_SWAPPINGIN | P_INMEM)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p->p_state == PRS_NORMAL && (p->p_flag & P_WKILLED) != 0) {
			/*
			 * A swapped-out process might have mapped a
			 * large portion of the system's pages as
			 * anonymous memory.  There is no way to release
			 * that memory other than to kill the process,
			 * for which we need to swap it in.
			 */
			return (p);
		}
		if (wkilled_only) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.  For example, a process
				 * swapped out for 30 seconds whose
				 * thread has slept for 10 seconds at
				 * nice 0 scores a pri of 40.
				 */
				if (pri > ppri) {
					res = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}

	if (res != NULL)
		PROC_LOCK(res);
	return (res);
}

#define	SWAPIN_INTERVAL	(MAXSLP * hz / 2)

/*
 * Limit the swapper to swapping in at most one non-WKILLED process per
 * MAXSLP/2 seconds (10 seconds with the default MAXSLP of 20), and only
 * when there is:
 * - at least one domain that is not suffering from a shortage of free memory;
 * - no parallel swap-in;
 * - no other swap-in in the current SWAPIN_INTERVAL.
 */
static bool
swapper_wkilled_only(void)
{

	return (vm_page_count_min_set(&all_domains) || swap_inprogress > 0 ||
	    (u_int)(ticks - last_swapin) < SWAPIN_INTERVAL);
}

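/*
 * The swapper's main loop, run in thread0: repeatedly pick the most
 * deserving swapped-out process and fault it back in, sleeping on proc0
 * when there is no candidate.
 */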
void
swapper(void)
{
	struct proc *p;

	for (;;) {
		sx_slock(&allproc_lock);
		p = swapper_selector(swapper_wkilled_only());
		sx_sunlock(&allproc_lock);

		if (p == NULL) {
			tsleep(&proc0, PVM, "swapin", SWAPIN_INTERVAL);
		} else {
			PROC_LOCK_ASSERT(p, MA_OWNED);

			/*
			 * Another process may be bringing or may have
			 * already brought this process in while we
			 * traverse all threads.  Or, this process may
			 * have exited or even be swapped out again.
			 */
			if (p->p_state == PRS_NORMAL && (p->p_flag & (P_INMEM |
			    P_SWAPPINGOUT | P_SWAPPINGIN)) == 0) {
				faultin(p);
			}
			PROC_UNLOCK(p);
		}
	}
}

/*
 * Swap out eligible processes.  A process is eligible when it is not
 * realtime and all of its threads have been sleeping or stopped for at
 * least "swap_idle_threshold1" seconds (and, when only idle swapout was
 * requested, for at least "swap_idle_threshold2" seconds as well).
 */
static void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int slptime;
	bool didswap, doswap;

	MPASS((action & (VM_SWAP_NORMAL | VM_SWAP_IDLE)) != 0);

	didswap = false;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Filter out not yet fully constructed processes.  Do
		 * not swap out held processes.  Avoid processes which
		 * are system, exiting, execing, traced, already swapped
		 * out or are in the process of being swapped in or out.
		 */
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || p->p_lock != 0 || (p->p_flag &
		    (P_SYSTEM | P_WEXIT | P_INEXEC | P_STOPPED_SINGLE |
		    P_TRACED | P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) !=
		    P_INMEM) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * Further consideration of this process for swap out
		 * requires iterating over its threads.  We release
		 * allproc_lock here so that process creation and
		 * destruction are not blocked while we iterate.
		 *
		 * To later reacquire allproc_lock and resume
		 * iteration over the allproc list, we will first have
		 * to release the lock on the process.  We place a
		 * hold on the process so that it remains in the
		 * allproc list while it is unlocked.
		 */
		_PHOLD_LITE(p);
		sx_sunlock(&allproc_lock);

		/*
		 * Do not swap out a realtime process.
		 * Guarantee swap_idle_threshold1 time in memory.
		 * If the system is under memory stress, or if we are
		 * swapping out idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		doswap = true;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			slptime = (ticks - td->td_slptick) / hz;
			if (PRI_IS_REALTIME(td->td_pri_class) ||
			    slptime < swap_idle_threshold1 ||
			    !thread_safetoswapout(td) ||
			    ((action & VM_SWAP_NORMAL) == 0 &&
			    slptime < swap_idle_threshold2))
				doswap = false;
			thread_unlock(td);
			if (!doswap)
				break;
		}
		if (doswap && swapout(p) == 0) {
			didswap = true;
			PROC_UNLOCK(p);
			/*
			 * Count only the process that was actually
			 * swapped out; "didswap" stays set for the
			 * remainder of the scan.
			 */
			sx_xlock(&allproc_lock);
			swapped_cnt++;
			sx_downgrade(&allproc_lock);
		} else {
			PROC_UNLOCK(p);
			sx_slock(&allproc_lock);
		}
		PRELE(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * If we swapped something out and another process needed memory,
	 * then wake up the swapper sleeping in the proc0 loop.
	 */
	if (didswap)
		wakeup(&proc0);
}

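/*
 * Mark a process and all of its threads resident (P_INMEM) again after a
 * swap-in, making any runnable threads eligible for scheduling.
 */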
static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN | P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

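/*
 * Swap out process p: clear P_INMEM, mark every thread TDI_SWAPPED, and
 * unwire the threads' kernel stacks.  Returns 0 on success and EBUSY if
 * some thread turned out not to be safe to swap out.
 */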
static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) ==
	    P_INMEM, ("swapout: lost a swapout race?"));

	/*
	 * Remember the resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
963