xref: /freebsd/sys/vm/vm_swapout.c (revision 734e82fe33aa764367791a7d603b383996c6b40b)
/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);

static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;

SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, CTLFLAG_RW,
    &vm_swap_enabled, 0,
    "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, CTLFLAG_RW,
    &vm_swap_idle_enabled, 0,
    "Allow swapout on idle criteria");
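
/*
 * Example (sketch, not part of the original source): both knobs are
 * ordinary read-write sysctls and can be toggled at runtime:
 *
 *	sysctl vm.swap_enabled=0	# disable whole-process swapouts
 *	sysctl vm.swap_idle_enabled=1	# also swap out long-idle processes
 */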

/*
 * swap_idle_threshold1 is the guaranteed time, in seconds, that a
 * process stays swapped in before it becomes a candidate for swapout.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0,
    "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time, in seconds, that a process can be
 * idle before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0,
    "Time before a process will be swapped out");
static int vm_daemon_timeout = 0;
SYSCTL_INT(_vm, OID_AUTO, vmdaemon_timeout, CTLFLAG_RW,
    &vm_daemon_timeout, 0,
    "Time between vmdaemon runs");

static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);

static int swapped_cnt;
static int swap_inprogress;	/* Pending swap-ins done outside swapper. */
static int last_swapin;

static void swapclear(struct proc *);
static int swapout(struct proc *);
static void vm_swapout_map_deactivate_pages(vm_map_t, long);
static void vm_swapout_object_deactivate(pmap_t, vm_object_t, long);
static void swapout_procs(int action);
static void vm_req_vmdaemon(int req);
static void vm_thread_swapout(struct thread *td);

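/*
 * Try to deactivate or unmap a single page on behalf of
 * vm_swapout_object_deactivate().  The page is left alone if it is
 * wired, cannot be busied, or was referenced recently; "unmap" allows
 * all of its mappings to be removed.
 */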
static void
vm_swapout_object_deactivate_page(pmap_t pmap, vm_page_t m, bool unmap)
{

	/*
	 * Ignore unreclaimable wired pages.  Repeat the check after busying
	 * since a busy holder may wire the page.
	 */
	if (vm_page_wired(m) || !vm_page_tryxbusy(m))
		return;

	if (vm_page_wired(m) || !pmap_page_exists_quick(pmap, m)) {
		vm_page_xunbusy(m);
		return;
	}
	if (!pmap_is_referenced(m)) {
		if (!vm_page_active(m))
			(void)vm_page_try_remove_all(m);
		else if (unmap && vm_page_try_remove_all(m))
			vm_page_deactivate(m);
	}
	vm_page_xunbusy(m);
}

/*
 *	vm_swapout_object_deactivate
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements.
 *
 *	The object and map must be locked.
 */
static void
vm_swapout_object_deactivate(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t m;
	bool unmap;

	VM_OBJECT_ASSERT_LOCKED(first_object);
	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_ASSERT_LOCKED(object);
		if ((object->flags & OBJ_UNMANAGED) != 0 ||
		    blockcount_read(&object->paging_in_progress) > 0)
			goto unlock_return;

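		/*
		 * Unmap pages outright only while at most one other object
		 * shadows this one; otherwise just deactivate them.
		 */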
		unmap = true;
		if (object->shadow_count > 1)
			unmap = false;

		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(m, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if (should_yield())
				goto unlock_return;
			vm_swapout_object_deactivate_page(pmap, m, unmap);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_RLOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_RUNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_RUNLOCK(object);
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, though
 * that is really hard to do.
 */
static void
vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock_read(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * First, search out the biggest object and try to free pages
	 * from it.
	 */
	VM_MAP_ENTRY_FOREACH(tmpe, map) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count <
				     obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_RUNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_RUNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
	}

	if (bigobj != NULL) {
		vm_swapout_object_deactivate(map->pmap, bigobj, desired);
		VM_OBJECT_RUNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  This search
	 * is done in a somewhat arbitrary order; scanning .text first is
	 * not the best idea.
	 */
	VM_MAP_ENTRY_FOREACH(tmpe, map) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_RLOCK(obj);
				vm_swapout_object_deactivate(map->pmap, obj,
				    desired);
				VM_OBJECT_RUNLOCK(obj);
			}
		}
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}

	vm_map_unlock_read(map);
}

/*
 * Swap out requests
 */
#define VM_SWAP_NORMAL 1
#define VM_SWAP_IDLE 2

void
vm_swapout_run(void)
{

	if (vm_swap_enabled)
		vm_req_vmdaemon(VM_SWAP_NORMAL);
}

/*
 * Idle process swapout -- run once per second when pagedaemons are
 * reclaiming pages.
 */
void
vm_swapout_run_idle(void)
{
	static long lsec;

	if (!vm_swap_idle_enabled || time_second == lsec)
		return;
	vm_req_vmdaemon(VM_SWAP_IDLE);
	lsec = time_second;
}

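/*
 * Record a swapout request and poke the vm_daemon.  Wakeups are
 * rate-limited to one per second, except when the tick counter wraps.
 */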
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

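/*
 * The vm_daemon kernel process: performs requested whole-process
 * swapouts and enforces per-process RSS limits (including RACCT RSS
 * accounting, when enabled) by deactivating pages.
 */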
static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
	uint64_t rsize, ravailable;

	if (racct_enable && vm_daemon_timeout == 0)
		vm_daemon_timeout = hz;
#endif

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
		    vm_daemon_timeout);
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags != 0) {
			/*
			 * Drain the per-CPU page queue batches as a deadlock
			 * avoidance measure.
			 */
			if ((swapout_flags & VM_SWAP_NORMAL) != 0)
				vm_page_pqbatch_drain();
			swapout_procs(swapout_flags);
		}

		/*
		 * Scan the processes: deactivate pages of any process that
		 * exceeds its RSS rlimit or that is swapped out.
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * Skip system processes and processes that are
			 * being created, execing, or exiting.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If any thread is in an abnormal, non-runnable
			 * state, don't touch the process.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * Fetch the process's RSS limit.
			 */
			lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing, which
			 * will force a pageout of the resident set.
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			_PHOLD_LITE(p);
			PROC_UNLOCK(p);
			if (vm == NULL) {
				PRELE(p);
				continue;
			}
			sx_sunlock(&allproc_lock);

			size = vmspace_resident_count(vm);
			if (size >= limit) {
				vm_swapout_map_deactivate_pages(
				    &vm->vm_map, limit);
				size = vmspace_resident_count(vm);
			}
#ifdef RACCT
			if (racct_enable) {
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				if (p->p_state == PRS_NORMAL)
					racct_set(p, RACCT_RSS, rsize);
				ravailable = racct_get_available(p, RACCT_RSS);
				PROC_UNLOCK(p);
				if (rsize > ravailable) {
					/*
					 * Don't be overly aggressive; this
					 * might be an innocent process,
					 * and the limit could've been exceeded
					 * by some memory hog.  Don't try
					 * to deactivate more than 1/4th
					 * of process' resident set size.
					 */
					if (attempts <= 8) {
						if (ravailable < rsize -
						    (rsize / 4)) {
							ravailable = rsize -
							    (rsize / 4);
						}
					}
					vm_swapout_map_deactivate_pages(
					    &vm->vm_map,
					    OFF_TO_IDX(ravailable));
					/* Update RSS usage after paging out. */
					size = vmspace_resident_count(vm);
					rsize = IDX_TO_OFF(size);
					PROC_LOCK(p);
					if (p->p_state == PRS_NORMAL)
						racct_set(p, RACCT_RSS, rsize);
					PROC_UNLOCK(p);
					if (rsize > ravailable)
						tryagain = 1;
				}
			}
#endif
			vmspace_free(vm);
			sx_slock(&allproc_lock);
			PRELE(p);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10) {
			maybe_yield();
			goto again;
		}
	}
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_page_t m;
	vm_offset_t kaddr;
	vm_pindex_t pindex;
	int i, pages;

	cpu_thread_swapout(td);
	kaddr = td->td_kstack;
	pages = td->td_kstack_pages;
	pindex = atop(kaddr - VM_MIN_KERNEL_ADDRESS);
	pmap_qremove(kaddr, pages);
	VM_OBJECT_WLOCK(kstack_object);
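	/*
	 * Dirty each stack page so that its contents will be written to
	 * swap, and release its wiring into the laundry queue.
	 */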
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(kstack_object, pindex + i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_xunbusy_unchecked(m);
		vm_page_unwire(m, PQ_LAUNDRY);
	}
	VM_OBJECT_WUNLOCK(kstack_object);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td, int oom_alloc)
{
	vm_page_t ma[KSTACK_MAX_PAGES];
	vm_offset_t kaddr;
	int a, count, i, j, pages, rv __diagused;

	kaddr = td->td_kstack;
	pages = td->td_kstack_pages;
	vm_thread_stack_back(td->td_domain.dr_policy, kaddr, ma, pages,
	    oom_alloc);
	for (i = 0; i < pages;) {
		vm_page_assert_xbusied(ma[i]);
		if (vm_page_all_valid(ma[i])) {
			i++;
			continue;
		}
		vm_object_pip_add(kstack_object, 1);
		for (j = i + 1; j < pages; j++)
			if (vm_page_all_valid(ma[j]))
				break;
		VM_OBJECT_WLOCK(kstack_object);
		rv = vm_pager_has_page(kstack_object, ma[i]->pindex, NULL, &a);
		VM_OBJECT_WUNLOCK(kstack_object);
		KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
		count = min(a + 1, j - i);
		rv = vm_pager_get_pages(kstack_object, ma + i, count, NULL,
		    NULL);
		KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
		    __func__, td->td_proc->p_pid));
		vm_object_pip_wakeup(kstack_object);
		i += count;
	}
	pmap_qenter(kaddr, ma, pages);
	cpu_thread_swapin(td);
}

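/*
 * Bring a swapped-out process back into memory: swap in each thread's
 * kernel stack and mark the process resident again.
 */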
void
faultin(struct proc *p)
{
	struct thread *td;
	int oom_alloc;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}

	if ((p->p_flag & P_INMEM) == 0) {
		oom_alloc = (p->p_flag & P_WKILLED) != 0 ? VM_ALLOC_SYSTEM :
		    VM_ALLOC_NORMAL;

		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);
		sx_xlock(&allproc_lock);
		MPASS(swapped_cnt > 0);
		swapped_cnt--;
		if (curthread != &thread0)
			swap_inprogress++;
		sx_xunlock(&allproc_lock);

		/*
		 * We hold no lock here because the list of threads
		 * cannot change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td, oom_alloc);

		if (curthread != &thread0) {
			sx_xlock(&allproc_lock);
			MPASS(swap_inprogress > 0);
			swap_inprogress--;
			last_swapin = ticks;
			sx_xunlock(&allproc_lock);
		}
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		/* Allow other threads to swap p out now. */
		wakeup(&p->p_flag);
		--p->p_lock;
	}
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */

static struct proc *
swapper_selector(bool wkilled_only)
{
	struct proc *p, *res;
	struct thread *td;
	int ppri, pri, slptime, swtime;

	sx_assert(&allproc_lock, SA_SLOCKED);
	if (swapped_cnt == 0)
		return (NULL);
	res = NULL;
	ppri = INT_MIN;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW || (p->p_flag & (P_SWAPPINGOUT |
		    P_SWAPPINGIN | P_INMEM)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p->p_state == PRS_NORMAL && (p->p_flag & P_WKILLED) != 0) {
			/*
			 * A swapped-out process might have mapped a
			 * large portion of the system's pages as
			 * anonymous memory.  There is no way to release
			 * the memory other than to kill the process,
			 * for which we need to swap it in.
			 */
			return (p);
		}
		if (wkilled_only) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread has a higher priority
				 * than the previous selection, select
				 * this process instead.
				 */
				if (pri > ppri) {
					res = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}

	if (res != NULL)
		PROC_LOCK(res);
	return (res);
}

#define	SWAPIN_INTERVAL	(MAXSLP * hz / 2)

/*
 * Limit the swapper to swapping in at most one non-WKILLED process per
 * MAXSLP/2 interval, assuming that there is:
 * - at least one domain that is not suffering from a shortage of free memory;
 * - no parallel swap-ins;
 * - no other swap-ins in the current SWAPIN_INTERVAL.
 */
static bool
swapper_wkilled_only(void)
{

	return (vm_page_count_min_set(&all_domains) || swap_inprogress > 0 ||
	    (u_int)(ticks - last_swapin) < SWAPIN_INTERVAL);
}

void
swapper(void)
{
	struct proc *p;

	for (;;) {
		sx_slock(&allproc_lock);
		p = swapper_selector(swapper_wkilled_only());
		sx_sunlock(&allproc_lock);

		if (p == NULL) {
			tsleep(&proc0, PVM, "swapin", SWAPIN_INTERVAL);
		} else {
			PROC_LOCK_ASSERT(p, MA_OWNED);

			/*
			 * Another process may be bringing, or may have
			 * already brought, this process in while we
			 * traverse all threads.  Or, this process may
			 * have exited or may already be swapped out
			 * again.
			 */
			if (p->p_state == PRS_NORMAL && (p->p_flag & (P_INMEM |
			    P_SWAPPINGOUT | P_SWAPPINGIN)) == 0) {
				faultin(p);
			}
			PROC_UNLOCK(p);
		}
	}
}

/*
 * Swap out eligible processes.  A process is eligible only if all of
 * its threads are safe to swap out and have been sleeping for at least
 * "swap_idle_threshold1" seconds (or, when only idle swapout was
 * requested, "swap_idle_threshold2" seconds), and it is not a realtime
 * process.
 */
static void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int slptime;
	bool didswap, doswap;

	MPASS((action & (VM_SWAP_NORMAL | VM_SWAP_IDLE)) != 0);

	didswap = false;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Filter out not yet fully constructed processes.  Do
		 * not swap out held processes.  Avoid processes which
		 * are system, exiting, execing, traced, already swapped
		 * out or are in the process of being swapped in or out.
		 */
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || p->p_lock != 0 || (p->p_flag &
		    (P_SYSTEM | P_WEXIT | P_INEXEC | P_STOPPED_SINGLE |
		    P_TRACED | P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) !=
		    P_INMEM) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * Further consideration of this process for swap out
		 * requires iterating over its threads.  We release
		 * allproc_lock here so that process creation and
		 * destruction are not blocked while we iterate.
		 *
		 * To later reacquire allproc_lock and resume
		 * iteration over the allproc list, we will first have
		 * to release the lock on the process.  We place a
		 * hold on the process so that it remains in the
		 * allproc list while it is unlocked.
		 */
		_PHOLD_LITE(p);
		sx_sunlock(&allproc_lock);

		/*
		 * Do not swap out a realtime process.
		 * Guarantee swap_idle_threshold1 time in memory.
		 * If the system is under memory stress, or if we are
		 * swapping out idle processes sleeping for at least
		 * swap_idle_threshold2 seconds, swap the process out.
		 */
		doswap = true;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			slptime = (ticks - td->td_slptick) / hz;
			if (PRI_IS_REALTIME(td->td_pri_class) ||
			    slptime < swap_idle_threshold1 ||
			    !thread_safetoswapout(td) ||
			    ((action & VM_SWAP_NORMAL) == 0 &&
			    slptime < swap_idle_threshold2))
				doswap = false;
			thread_unlock(td);
			if (!doswap)
				break;
		}
		/* Count this process only if it was actually swapped out. */
		doswap = doswap && swapout(p) == 0;
		if (doswap)
			didswap = true;

		PROC_UNLOCK(p);
		if (doswap) {
			sx_xlock(&allproc_lock);
			swapped_cnt++;
			sx_downgrade(&allproc_lock);
		} else
			sx_slock(&allproc_lock);
		PRELE(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the swapper.
	 */
	if (didswap)
		wakeup(&proc0);
}

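/*
 * Mark a process and its threads resident again after swap-in, and
 * make any runnable threads eligible to run.
 */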
static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td)) {
			if (setrunnable(td, 0)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		} else
			thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN | P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

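/*
 * Swap out process p: mark all of its threads as swapped out and page
 * out their kernel stacks.  Returns 0 on success and EBUSY if any
 * thread turns out to be unsafe to swap.
 */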
static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) ==
	    P_INMEM, ("swapout: lost a swapout race?"));

	/*
	 * Remember the resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}