/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));
        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}
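
/*
 * A minimal usage sketch (not from any particular caller): validate a
 * kernel buffer before touching it, e.g.
 *
 *	if (!kernacc(ptr, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * where "ptr" and "len" are placeholders for the caller's buffer.  As
 * the warning above notes, success only means the map entries permit
 * the access, not that the underlying pages are resident.
 */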

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should
 * be used in conjunction with this call.
 */
int
useracc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}
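
/*
 * For illustration only: an ioctl handler might pre-screen a user
 * pointer with something like
 *
 *	if (!useracc(uaddr, len, VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * "uaddr" and "len" being the handler's own arguments.  Per the
 * warning above, this is advisory; copyin()/copyout() remain the
 * definitive check because they handle the actual faults.
 */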

/*
 * MPSAFE
 */
int
vslock(td, addr, size)
        struct thread *td;
        vm_offset_t addr;
        vm_size_t size;
{
        vm_offset_t start, end;
        struct proc *proc = td->td_proc;
        int error, npages;

        start = trunc_page(addr);
        end = round_page(addr + size);

        /* disable wrap around */
        if (end <= start)
                return (EINVAL);

        npages = atop(end - start);

        if (npages > vm_page_max_wired)
                return (ENOMEM);

        PROC_LOCK(proc);
        if (npages + pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map)) >
            atop(lim_cur(proc, RLIMIT_MEMLOCK))) {
                PROC_UNLOCK(proc);
                return (ENOMEM);
        }
        PROC_UNLOCK(proc);

#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif

        error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

        /* EINVAL is probably a better error to return than ENOMEM */
        return (error == KERN_SUCCESS ? 0 : EINVAL);
}

/*
 * MPSAFE
 */
int
vsunlock(td, addr, size)
        struct thread *td;
        vm_offset_t addr;
        vm_size_t size;
{
        vm_offset_t start, end;
        int error;

        start = trunc_page(addr);
        end = round_page(addr + size);

        /* disable wrap around */
        if (end <= start)
                return (EINVAL);

        error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
        return (error == KERN_SUCCESS ? 0 : EINVAL);
}
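
/*
 * These two are meant to bracket transient access to pageable user
 * memory, in the style of the sysctl code (per the XXX comment above,
 * currently the only user of vslock()).  A sketch, with "uaddr", "len"
 * and "kbuf" standing in for the caller's arguments:
 *
 *	error = vslock(td, uaddr, len);
 *	if (error == 0) {
 *		error = copyout(kbuf, (void *)uaddr, len);
 *		(void)vsunlock(td, uaddr, len);
 *	}
 */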

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
        vm_page_t ma[UAREA_PAGES];
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;
        u_int i;

        /*
         * Get a kernel virtual address for the U area for this process.
         */
        up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
        if (up == 0)
                panic("vm_proc_new: upage allocation failed");
        p->p_uarea = (struct user *)up;

        /*
         * Allocate object and page(s) for the U area.
         */
        upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
        p->p_upages_obj = upobj;
        VM_OBJECT_LOCK(upobj);
        for (i = 0; i < UAREA_PAGES; i++) {
                m = vm_page_grab(upobj, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;

                vm_page_lock_queues();
                vm_page_wakeup(m);
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(upobj);

        /*
         * Enter the pages into the kernel address space.
         */
        pmap_qenter(up, ma, UAREA_PAGES);
}
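
/*
 * Note that vm_page_grab() with VM_ALLOC_RETRY returns a busy page,
 * which is why each page is woken up above once it has been wired and
 * marked fully valid.  The VM_ALLOC_WIRED wiring is what keeps the U
 * area resident until vm_proc_swapout() or vm_proc_dispose() unwires it.
 */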

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_dispose: incorrect number of pages in upobj");
        vm_page_lock_queues();
        while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
                vm_page_busy(m);
                vm_page_unwire(m, 0);
                vm_page_free(m);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
        kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
        vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapout: incorrect number of pages in upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
        vm_page_t ma[UAREA_PAGES];
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;
        int rv;
        int i;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        for (i = 0; i < UAREA_PAGES; i++) {
                m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(upobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_proc_swapin: cannot get upage");
                }
                ma[i] = m;
        }
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapin: lost pages from upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_wire(m);
                vm_page_wakeup(m);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
        struct proc *p;
        vm_object_t object;
        vm_page_t m;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                object = p->p_upages_obj;
                if (object != NULL) {
                        VM_OBJECT_LOCK(object);
                        if (swap_pager_isswapped(object, devidx)) {
                                VM_OBJECT_UNLOCK(object);
                                sx_sunlock(&allproc_lock);
                                faultin(p);
                                PROC_UNLOCK(p);
                                VM_OBJECT_LOCK(object);
                                vm_page_lock_queues();
                                TAILQ_FOREACH(m, &object->memq, listq)
                                        vm_page_dirty(m);
                                vm_page_unlock_queues();
                                swap_pager_freespace(object, 0,
                                    object->un_pager.swp.swp_bcount);
                                VM_OBJECT_UNLOCK(object);
                                goto retry;
                        }
                        VM_OBJECT_UNLOCK(object);
                }
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i;

        /* Bounds check */
        if (pages <= 1)
                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;
        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
        td->td_kstack_obj = ksobj;
        /*
         * Get a kernel virtual address for this thread's kstack.
         */
        ks = kmem_alloc_nofault(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
        if (ks == 0)
                panic("vm_thread_new: kstack allocation failed");
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wakeup(m);
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
}
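
/*
 * The resulting layout, assuming a single guard page, is roughly:
 *
 *	ks - PAGE_SIZE:	guard page, deliberately left unmapped
 *	ks:		base of "pages" wired, mapped kstack pages
 *
 * so a kernel stack overflow faults on the unmapped guard region
 * instead of silently corrupting adjacent kernel memory.
 */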

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m;
        int i, pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock_queues();
                vm_page_busy(m);
                vm_page_unwire(m, 0);
                vm_page_free(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_lock_queues();
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(ksobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        m = vm_page_lookup(ksobj, i);
                        m->valid = VM_PAGE_BITS_ALL;
                }
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wire(m);
                vm_page_wakeup(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}
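
/*
 * The vm_page_lookup() after the pager call above is not redundant:
 * vm_pager_get_pages() may replace the page originally grabbed, so the
 * page pointer must be refreshed before it is marked valid and wired.
 */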

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

        td->td_altkstack = td->td_kstack;
        td->td_altkstack_obj = td->td_kstack_obj;
        td->td_altkstack_pages = td->td_kstack_pages;

        vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

        vm_thread_dispose(td);

        td->td_kstack = td->td_altkstack;
        td->td_kstack_obj = td->td_altkstack_obj;
        td->td_kstack_pages = td->td_altkstack_pages;
        td->td_altkstack = 0;
        td->td_altkstack_obj = NULL;
        td->td_altkstack_pages = 0;
}
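
/*
 * A caller that temporarily needs a different-sized stack can bracket
 * the work with, e.g.,
 *
 *	vm_thread_new_altkstack(td, 4);
 *	...
 *	vm_thread_dispose_altkstack(td);
 *
 * where 4 is an arbitrary page count chosen for illustration;
 * vm_thread_new() clamps the request to at most KSTACK_MAX_PAGES.
 */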

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
        struct thread *td;
        struct proc *p2;
        struct thread *td2;
        int flags;
{
        struct proc *p1 = td->td_proc;
        struct user *up;

        GIANT_REQUIRED;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared; essentially
                 * this changes memory shared amongst threads into
                 * memory that is COWed locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                vmspace_unshare(p1);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return;
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                p1->p_vmspace->vm_refcnt++;
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vmspace_fork(p1->p_vmspace);

                pmap_pinit2(vmspace_pmap(p2->p_vmspace));

                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /* XXXKSE this is unsatisfactory but should be adequate */
        up = p2->p_uarea;
        MPASS(p2->p_sigacts != NULL);

        /*
         * p_stats currently points at fields in the user struct
         * but not at &u, instead at p_addr. Copy parts of
         * p_stats; zero the rest of p_stats (statistics).
         */
        p2->p_stats = &up->u_stats;
        bzero(&up->u_stats.pstat_startzero,
            (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
                (caddr_t) &up->u_stats.pstat_startzero));
        bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
            ((caddr_t) &up->u_stats.pstat_endcopy -
                (caddr_t) &up->u_stats.pstat_startcopy));

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
}
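
/*
 * The flags mirror rfork(2): for example, RFPROC | RFMEM yields a
 * child sharing the parent's vmspace (only the reference count is
 * bumped above), while plain RFPROC yields a copy-on-write duplicate
 * via vmspace_fork().
 */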

/*
 * Called after the process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
        struct proc *p;
{

        GIANT_REQUIRED;
        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
        void *udata;
{
        struct proc *p = udata;
        struct plimit *limp;
        int rss_limit;

        /*
         * Set up the initial limits on process VM. Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        limp = p->p_limit;
        limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* limit the limit to no less than 2MB */
        rss_limit = max(cnt.v_free_count, 512);
        limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
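
/*
 * As a concrete example of the floor above: with 4KB pages, ptoa(512)
 * is 2MB, which is where the "no less than 2MB" figure comes from; on
 * a machine with ample free memory the soft RSS limit tracks
 * cnt.v_free_count instead.
 */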

void
faultin(p)
        struct proc *p;
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_sflag & PS_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        GIANT_REQUIRED;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_sflag & PS_SWAPPINGIN)
                msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
        else if ((p->p_sflag & PS_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                mtx_lock_spin(&sched_lock);
                p->p_sflag |= PS_SWAPPINGIN;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);

                vm_proc_swapin(p);
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);

                PROC_LOCK(p);
                mtx_lock_spin(&sched_lock);
                p->p_sflag &= ~PS_SWAPPINGIN;
                p->p_sflag |= PS_INMEM;
                FOREACH_THREAD_IN_PROC(p, td) {
                        TD_CLR_SWAPPED(td);
                        if (TD_CAN_RUN(td))
                                setrunnable(td);
                }
                mtx_unlock_spin(&sched_lock);

                wakeup(&p->p_sflag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}
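
/*
 * The PS_SWAPPINGIN flag plus the wakeup on &p->p_sflag form a simple
 * handshake: the first caller to find the process swapped out does the
 * swap-in, while any concurrent caller sleeps in the msleep() at the
 * top until the worker clears the flag.  Bumping p_lock keeps the
 * swapout code away for the duration.
 */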

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - the process with the highest-priority thread counts.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
        void *dummy;
{
        struct proc *p;
        struct thread *td;
        int pri;
        struct proc *pp;
        int ppri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
        /* GIANT_REQUIRED */

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct ksegrp *kg;
                if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                        continue;
                }
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                kg = td->td_ksegrp;
                                pri = p->p_swtime + kg->kg_slptime;
                                if ((p->p_sflag & PS_SWAPINREQ) == 0) {
                                        pri -= kg->kg_nice * 8;
                                }

                                /*
                                 * if this ksegrp is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                }
                mtx_unlock_spin(&sched_lock);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                goto loop;
        }

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPINREQ;
        mtx_unlock_spin(&sched_lock);

        /*
         * We would like to bring someone in. (only if there is space).
         * [What checks the space? ]
         */
        faultin(p);
        PROC_UNLOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_swtime = 0;
        mtx_unlock_spin(&sched_lock);
        goto loop;
}
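
/*
 * Informally, the selection key computed by the loop above is
 *
 *	pri = p_swtime + kg_slptime - 8 * kg_nice
 *
 * (the nice term is dropped once PS_SWAPINREQ is set), so the process
 * that has been swapped out and asleep the longest wins, biased
 * against positively niced processes.
 */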

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
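
/*
 * Both thresholds are expressed in seconds of sleep time and can be
 * tuned at run time, e.g.
 *
 *	sysctl vm.swap_idle_threshold2=30
 *
 * would extend the idle grace period before swapout to 30 seconds.
 */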

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
        int action;
{
        struct proc *p;
        struct thread *td;
        struct ksegrp *kg;
        int didswap = 0;

        GIANT_REQUIRED;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;

                /*
                 * Watch out for a process in creation.  It may have no
                 * address space or lock yet.
                 */
                mtx_lock_spin(&sched_lock);
                if (p->p_state == PRS_NEW) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }
                mtx_unlock_spin(&sched_lock);

                /*
                 * An aio daemon switches its address space while running.
                 * Perform a quick check whether a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;

                /*
                 * Do not swapout a process that is waiting for VM data
                 * structures as there is a possible deadlock.  Test this
                 * first as this may block.
                 *
                 * Lock the map until swapout finishes, or a thread of
                 * this process may attempt to alter the map.
                 */
                PROC_LOCK(p);
                vm = p->p_vmspace;
                KASSERT(vm != NULL,
                        ("swapout_procs: a process has no address space"));
                ++vm->vm_refcnt;
                PROC_UNLOCK(p);
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag &
                    (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
                        goto nextproc2;
                }
                /*
                 * only aiod changes vmspace, however it will be
                 * skipped because of the if statement above checking
                 * for P_SYSTEM
                 */
                if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) !=
                    PS_INMEM)
                        goto nextproc2;

                switch (p->p_state) {
                default:
                        /*
                         * Don't swap out processes in any sort
                         * of 'special' state.
                         */
                        break;

                case PRS_NORMAL:
                        mtx_lock_spin(&sched_lock);
                        /*
                         * do not swapout a realtime process
                         * Check all the thread groups..
                         */
                        FOREACH_KSEGRP_IN_PROC(p, kg) {
                                if (PRI_IS_REALTIME(kg->kg_pri_class))
                                        goto nextproc;

                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (kg->kg_slptime < swap_idle_threshold1)
                                        goto nextproc;

                                /*
                                 * Do not swapout a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                FOREACH_THREAD_IN_GROUP(kg, td) {
                                        if ((td->td_priority) < PSOCK ||
                                            !thread_safetoswapout(td))
                                                goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (kg->kg_slptime < swap_idle_threshold2)))
                                        goto nextproc;

                                if (minslptime > kg->kg_slptime)
                                        minslptime = kg->kg_slptime;
                        }

                        /*
                         * If the process has been asleep for a while and had
                         * most of its pages taken away already, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                                ((action & VM_SWAP_IDLE) &&
                                 (minslptime > swap_idle_threshold2))) {
                                swapout(p);
                                didswap++;
                                mtx_unlock_spin(&sched_lock);
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
nextproc:
                        mtx_unlock_spin(&sched_lock);
                }
nextproc2:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}

static void
swapout(p)
        struct proc *p;
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) ==
            PS_INMEM, ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
        /*
         * Make sure that all threads are safe to be swapped out.
         *
         * Alternatively, we could swap out only safe threads.
         */
        FOREACH_THREAD_IN_PROC(p, td) {
                KASSERT(thread_safetoswapout(td),
                        ("swapout: there is a thread not safe for swapout"));
        }
#endif /* INVARIANTS */

        ++p->p_stats->p_ru.ru_nswap;
        /*
         * remember the process resident count
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        p->p_sflag &= ~PS_INMEM;
        p->p_sflag |= PS_SWAPPINGOUT;
        PROC_UNLOCK(p);
        FOREACH_THREAD_IN_PROC(p, td)
                TD_SET_SWAPPED(td);
        mtx_unlock_spin(&sched_lock);

        vm_proc_swapout(p);
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPPINGOUT;
        p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */