/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most
 * cases just checking the vm_map_entry is sufficient within the kernel's
 * address space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

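/*
 * Example (illustrative sketch, not a call site in this file): a
 * debugger-style routine probing a kernel address before touching it.
 * The names `ptr', `size', and `buf' are hypothetical.
 *
 *	if (!kernacc(ptr, size, VM_PROT_READ))
 *		return (EFAULT);
 *	bcopy(ptr, buf, size);
 *
 * Per the warning above, a TRUE result only reflects the map entries'
 * protection; it does not guarantee the pages themselves are resident.
 */
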
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions
 * should be used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

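/*
 * Example (sketch): a driver ioctl handler validating a user buffer
 * before copying it in.  `uap' and `kbuf' are hypothetical names.
 *
 *	if (!useracc(uap->buf, uap->len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uap->buf, kbuf, uap->len);
 *
 * As the warning above notes, the check is advisory; copyin() can
 * still fail, so its return value must be honored regardless.
 */
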
/*
 * MPSAFE
 */
void
vslock(addr, len)
	void *addr;
	u_int len;
{

	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	void *addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
}

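/*
 * Example (sketch): the usual bracket pattern around an operation that
 * must not fault on the user buffer.  `udata' and `ulen' are
 * hypothetical, as is the helper do_transfer().
 *
 *	vslock(udata, ulen);
 *	error = do_transfer(udata, ulen);
 *	vsunlock(udata, ulen);
 *
 * vslock() wires the pages via vm_map_wire(), so every vslock() must
 * be paired with a vsunlock() over the same range to avoid leaking
 * wired memory.
 */
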
/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	/*
	 * Allocate object and page(s) for the U area.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(upobj);

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

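/*
 * The allocation idiom above -- vm_page_grab() with VM_ALLOC_WIRED,
 * marking each page fully valid, then pmap_qenter() -- is the standard
 * way to back a kernel virtual range with wired pages from a pageable
 * object.  A condensed sketch of the same idiom for a single page,
 * with the object locked (`obj', `idx', and `kva' are hypothetical):
 *
 *	m = vm_page_grab(obj, idx,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
 *	m->valid = VM_PAGE_BITS_ALL;
 *	vm_page_wakeup(m);		(clear the busy state from grab)
 *	pmap_qenter(kva, &m, 1);
 *
 * Backing the U area with an object rather than raw kmem is what lets
 * vm_proc_swapout() below page it out under memory pressure.
 */
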
/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		object = p->p_upages_obj;
		if (object != NULL) {
			VM_OBJECT_LOCK(object);
			if (swap_pager_isswapped(object, devidx)) {
				VM_OBJECT_UNLOCK(object);
				sx_sunlock(&allproc_lock);
				faultin(p);
				PROC_UNLOCK(p);
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				TAILQ_FOREACH(m, &object->memq, listq)
					vm_page_dirty(m);
				vm_page_unlock_queues();
				swap_pager_freespace(object, 0,
				    object->un_pager.swp.swp_bcount);
				VM_OBJECT_UNLOCK(object);
				goto retry;
			}
			VM_OBJECT_UNLOCK(object);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif

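/*
 * Note the restart pattern in vm_proc_swapin_all() above: because
 * faultin() may sleep, the allproc lock is dropped before calling it,
 * after which the process list may have changed, so the scan starts
 * over from the top.  In sketch form, this is the usual "drop lock,
 * do blocking work, retry" loop over the process list (needs_work()
 * and do_blocking_work() are hypothetical):
 *
 *	retry:
 *		sx_slock(&allproc_lock);
 *		FOREACH_PROC_IN_SYSTEM(p) {
 *			if (needs_work(p)) {
 *				sx_sunlock(&allproc_lock);
 *				do_blocking_work(p);
 *				goto retry;
 *			}
 *		}
 *		sx_sunlock(&allproc_lock);
 */
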
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance of a process and
 * the creation performance of a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	   (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

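/*
 * Example (sketch): the kstack lifecycle as a thread creation path
 * might drive it.  `td' is a freshly allocated thread; passing 0
 * pages selects the KSTACK_PAGES default via the bounds check above.
 *
 *	vm_thread_new(td, 0);		(allocate default-size kstack)
 *	...thread runs...
 *	vm_thread_dispose(td);		(free the stack at teardown)
 *
 * The guard pages reserved below the stack are left unmapped by the
 * pmap_qremove() above, so a kernel stack overflow faults immediately
 * instead of silently corrupting adjacent kernel memory.
 */
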
/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

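/*
 * The fill loop above shows the standard pager idiom: vm_page_grab()
 * returns a busied page, and any page that is not fully valid is
 * populated from the backing (swap) pager before being wired and
 * mapped.  In sketch form, for one page of a locked object:
 *
 *	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	if (m->valid != VM_PAGE_BITS_ALL) {
 *		rv = vm_pager_get_pages(obj, &m, 1, 0);
 *		if (rv != VM_PAGER_OK)
 *			...handle the error...
 *	}
 *
 * vm_pager_get_pages() may replace the page, which is why the code
 * above re-looks the page up after a successful pager read.
 */
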
/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

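/*
 * Example (sketch): a path temporarily switching a thread to a
 * different-sized stack and later restoring the original one.  `td'
 * and `pages' are hypothetical.
 *
 *	vm_thread_new_altkstack(td, pages);	(stash kstack, make new)
 *	...run on the alternate stack...
 *	vm_thread_dispose_altkstack(td);	(free it, restore old)
 *
 * The pair must be strictly nested; only one alternate stack can be
 * outstanding per thread, since the td_altkstack fields hold a single
 * saved stack.
 */
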
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes shared memory amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;
	MPASS(p2->p_sigacts != NULL);

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr. Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

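/*
 * Example (illustrative sketch; real callers pass the full rfork(2)
 * flag set from fork1()): how the flags steer the address space
 * handling above.
 *
 *	vm_forkproc(td, p2, td2, RFPROC);		(child gets COW copy)
 *	vm_forkproc(td, p2, td2, RFPROC | RFMEM);	(child shares vmspace)
 *	vm_forkproc(td, NULL, NULL, 0);		(no new process: unshare)
 *
 * With RFMEM the child simply gains a reference on the parent's
 * vmspace; without it, vmspace_fork() builds a copy-on-write clone.
 */
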
/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

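/*
 * Example (sketch): the pattern callers use to bring a process back
 * in.  The process lock must be held and is retained across the call.
 *
 *	PROC_LOCK(p);
 *	if ((p->p_sflag & PS_INMEM) == 0)
 *		faultin(p);
 *	PROC_UNLOCK(p);
 *
 * faultin() may sleep, both while waiting out a concurrent swap-in
 * and while paging the U area and kernel stacks back in, so it must
 * not be called with any spin locks held.
 */
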
/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
			("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

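/*
 * Example (sketch): the pageout daemon drives this routine with
 * different policies depending on memory pressure (see vm_pageout.c
 * for the real call sites):
 *
 *	swapout_procs(VM_SWAP_NORMAL);		(memory is short)
 *	swapout_procs(VM_SWAP_IDLE);		(only long-idle procs)
 *
 * With VM_SWAP_NORMAL any eligible process may be swapped; with
 * VM_SWAP_IDLE only those whose threads have slept at least
 * swap_idle_threshold2 seconds are taken.
 */
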
static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
		("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
			("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */