/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

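/*
 * Illustrative usage sketch (not part of the original file; the buffer
 * names are hypothetical): a caller verifies a kernel-space buffer
 * before touching it.
 *
 *	if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * Per the warning above, success only means the kernel map entries
 * permit the access, not that every page in the range is resident.
 */
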
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

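/*
 * Illustrative sketch (not part of the original file; names are
 * hypothetical): useracc() answers "could this access ever succeed",
 * so it is paired with a faulting copy routine that does the real work,
 * as the warning above requires.
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uaddr, kbuf, len);
 *
 * copyin() can still fail after useracc() succeeds, e.g. if the mapping
 * changes in between, which is why the two are used together.
 */
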
int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

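/*
 * Illustrative sketch (not part of the original file; names are
 * hypothetical): the wire/unwire pair brackets an access to user memory
 * that must not fault, as the sysctl code - the only present caller,
 * per the comment above - does:
 *
 *	error = vslock(uaddr, len);
 *	if (error == 0) {
 *		error = copyout(kbuf, uaddr, len);
 *		vsunlock(uaddr, len);
 *	}
 *
 * vsunlock() is only reached after a successful vslock(), which is why
 * it can rely on vslock()'s parameter sanity checks.
 */
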
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance of a process and
 * the creation performance of a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

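/*
 * Resulting kernel virtual address layout (a sketch derived from the
 * code above, not part of the original file):
 *
 *	base					unmapped guard page(s),
 *	  ...					KSTACK_GUARD_PAGES long
 *	ks = base + KSTACK_GUARD_PAGES * PAGE_SIZE
 *	  ...					wired, mapped stack pages
 *	ks + pages * PAGE_SIZE			end of stack
 *
 * A stack overrun therefore faults on the unmapped guard pages instead
 * of silently corrupting the adjacent allocation.
 */
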
/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes shared memory amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

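/*
 * Illustrative summary of the flag combinations handled above (a
 * sketch, not part of the original file):
 *
 *	RFPROC | RFMEM	new process shares p1's vmspace (vfork-like)
 *	RFPROC		new process gets a vmspace_fork() COW copy
 *	(no RFPROC)	no new process; without RFMEM a shared vmspace
 *			is un-shared via vmspace_unshare()
 *
 * In every case cpu_fork() finishes the machine-dependent setup.
 */
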
/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to the amount of (reasonably) available
	 * memory.  Since this is a soft limit, it comes into effect only
	 * when the system is out of memory - this helps to favor smaller
	 * processes, and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB (512 pages at 4KB/page) */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= p->p_nice * 8;
				}

				/*
				 * If this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

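/*
 * Worked example of the selection heuristic above (illustrative
 * numbers, not part of the original file): a process with p_swtime 100
 * whose most eligible ksegrp has kg_slptime 20 scores pri = 120.
 * Without a PS_SWAPINREQ pending, a nice value of 20 lowers that by
 * 20 * 8 = 160 to -40, so heavily niced processes are passed over in
 * favor of other swapped-out candidates.
 */
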
#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

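/*
 * With the defaults above, an eligible process is guaranteed at least
 * swap_idle_threshold1 (2) units of kg_slptime in memory, and under
 * VM_SWAP_IDLE becomes a swapout candidate only past
 * swap_idle_threshold2 (10); both comparisons are made against
 * kg_slptime in swapout_procs() below.
 */
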
/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swap out a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
			("swapout_procs: a process has no address space"));
		atomic_add_int(&vm->vm_refcnt, 1);
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * Only aiod changes the vmspace; however, it will be
		 * skipped because of the P_SYSTEM check above.
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * Do not swap out a realtime process.
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
		("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
			("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */