xref: /freebsd/sys/vm/vm_glue.c (revision 15a7ad60fbdab76596e824f1c30c4c4a53fa6e55)
1df8bae1dSRodney W. Grimes /*
2df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  *
5df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
6df8bae1dSRodney W. Grimes  * The Mach Operating System project at Carnegie-Mellon University.
7df8bae1dSRodney W. Grimes  *
8df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
9df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
10df8bae1dSRodney W. Grimes  * are met:
11df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
12df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
13df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
15df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
16df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
175929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
18df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
19df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
20df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
21df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
22df8bae1dSRodney W. Grimes  *    without specific prior written permission.
23df8bae1dSRodney W. Grimes  *
24df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
35df8bae1dSRodney W. Grimes  *
363c4dd356SDavid Greenman  *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  *
39df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40df8bae1dSRodney W. Grimes  * All rights reserved.
41df8bae1dSRodney W. Grimes  *
42df8bae1dSRodney W. Grimes  * Permission to use, copy, modify and distribute this software and
43df8bae1dSRodney W. Grimes  * its documentation is hereby granted, provided that both the copyright
44df8bae1dSRodney W. Grimes  * notice and this permission notice appear in all copies of the
45df8bae1dSRodney W. Grimes  * software, derivative works or modified versions, and any portions
46df8bae1dSRodney W. Grimes  * thereof, and that both notices appear in supporting documentation.
47df8bae1dSRodney W. Grimes  *
48df8bae1dSRodney W. Grimes  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49df8bae1dSRodney W. Grimes  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50df8bae1dSRodney W. Grimes  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51df8bae1dSRodney W. Grimes  *
52df8bae1dSRodney W. Grimes  * Carnegie Mellon requests users of this software to return to
53df8bae1dSRodney W. Grimes  *
54df8bae1dSRodney W. Grimes  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55df8bae1dSRodney W. Grimes  *  School of Computer Science
56df8bae1dSRodney W. Grimes  *  Carnegie Mellon University
57df8bae1dSRodney W. Grimes  *  Pittsburgh PA 15213-3890
58df8bae1dSRodney W. Grimes  *
59df8bae1dSRodney W. Grimes  * any improvements or extensions that they make and grant Carnegie the
60df8bae1dSRodney W. Grimes  * rights to redistribute these changes.
61df8bae1dSRodney W. Grimes  */
62df8bae1dSRodney W. Grimes 
63874651b1SDavid E. O'Brien #include <sys/cdefs.h>
64874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
65874651b1SDavid E. O'Brien 
66faa5f8d8SAndrzej Bialecki #include "opt_vm.h"
6715a7ad60SPeter Wemm #include "opt_kstack_pages.h"
6815a7ad60SPeter Wemm #include "opt_kstack_max_pages.h"
69e9822d92SJoerg Wunsch 
70df8bae1dSRodney W. Grimes #include <sys/param.h>
71df8bae1dSRodney W. Grimes #include <sys/systm.h>
72104a9b7eSAlexander Kabaev #include <sys/limits.h>
73fb919e4dSMark Murray #include <sys/lock.h>
74fb919e4dSMark Murray #include <sys/mutex.h>
75df8bae1dSRodney W. Grimes #include <sys/proc.h>
76df8bae1dSRodney W. Grimes #include <sys/resourcevar.h>
773aa12267SBruce Evans #include <sys/shm.h>
78efeaf95aSDavid Greenman #include <sys/vmmeter.h>
791005a129SJohn Baldwin #include <sys/sx.h>
80ceb0cf87SJohn Dyson #include <sys/sysctl.h>
81df8bae1dSRodney W. Grimes 
8226f9a767SRodney W. Grimes #include <sys/kernel.h>
830384fff8SJason Evans #include <sys/ktr.h>
84a2a1c95cSPeter Wemm #include <sys/unistd.h>
8526f9a767SRodney W. Grimes 
86df8bae1dSRodney W. Grimes #include <vm/vm.h>
87efeaf95aSDavid Greenman #include <vm/vm_param.h>
88efeaf95aSDavid Greenman #include <vm/pmap.h>
89efeaf95aSDavid Greenman #include <vm/vm_map.h>
90df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
9126f9a767SRodney W. Grimes #include <vm/vm_pageout.h>
92a136efe9SPeter Wemm #include <vm/vm_object.h>
93df8bae1dSRodney W. Grimes #include <vm/vm_kern.h>
94efeaf95aSDavid Greenman #include <vm/vm_extern.h>
95a136efe9SPeter Wemm #include <vm/vm_pager.h>
9692da00bbSMatthew Dillon #include <vm/swap_pager.h>
97efeaf95aSDavid Greenman 
98efeaf95aSDavid Greenman #include <sys/user.h>
99df8bae1dSRodney W. Grimes 
100ea754954SJohn Baldwin extern int maxslp;
101ea754954SJohn Baldwin 
1022b14f991SJulian Elischer /*
1032b14f991SJulian Elischer  * System initialization
1042b14f991SJulian Elischer  *
1052b14f991SJulian Elischer  * Note: proc0 from proc.h
1062b14f991SJulian Elischer  */
10711caded3SAlfred Perlstein static void vm_init_limits(void *);
1084590fd3aSDavid Greenman SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)
1092b14f991SJulian Elischer 
1102b14f991SJulian Elischer /*
1112b14f991SJulian Elischer  * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
1122b14f991SJulian Elischer  *
1132b14f991SJulian Elischer  * Note: run scheduling should be divorced from the vm system.
1142b14f991SJulian Elischer  */
11511caded3SAlfred Perlstein static void scheduler(void *);
1162b14f991SJulian Elischer SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)
1172b14f991SJulian Elischer 
118e50f5c2eSBruce Evans #ifndef NO_SWAPPING
11911caded3SAlfred Perlstein static void swapout(struct proc *);
120a136efe9SPeter Wemm static void vm_proc_swapin(struct proc *p);
121a136efe9SPeter Wemm static void vm_proc_swapout(struct proc *p);
122e50f5c2eSBruce Evans #endif
123f708ef1bSPoul-Henning Kamp 
12443a90f3aSAlan Cox /*
12543a90f3aSAlan Cox  * MPSAFE
1262d5c7e45SMatthew Dillon  *
1272d5c7e45SMatthew Dillon  * WARNING!  This code calls vm_map_check_protection() which only checks
1282d5c7e45SMatthew Dillon  * the associated vm_map_entry range.  It does not determine whether the
1292d5c7e45SMatthew Dillon  * contents of the memory are actually readable or writable.  In most cases
1302d5c7e45SMatthew Dillon  * just checking the vm_map_entry is sufficient within the kernel's address
1312d5c7e45SMatthew Dillon  * space.
13243a90f3aSAlan Cox  */
133df8bae1dSRodney W. Grimes int
134df8bae1dSRodney W. Grimes kernacc(addr, len, rw)
135c3dfdfd1SAlfred Perlstein 	void *addr;
136df8bae1dSRodney W. Grimes 	int len, rw;
137df8bae1dSRodney W. Grimes {
138df8bae1dSRodney W. Grimes 	boolean_t rv;
139df8bae1dSRodney W. Grimes 	vm_offset_t saddr, eaddr;
14002c58685SPoul-Henning Kamp 	vm_prot_t prot;
141df8bae1dSRodney W. Grimes 
142e50f5c2eSBruce Evans 	KASSERT((rw & ~VM_PROT_ALL) == 0,
14302c58685SPoul-Henning Kamp 	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
14402c58685SPoul-Henning Kamp 	prot = rw;
1456cde7a16SDavid Greenman 	saddr = trunc_page((vm_offset_t)addr);
1466cde7a16SDavid Greenman 	eaddr = round_page((vm_offset_t)addr + len);
147df8bae1dSRodney W. Grimes 	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
148df8bae1dSRodney W. Grimes 	return (rv == TRUE);
149df8bae1dSRodney W. Grimes }
150df8bae1dSRodney W. Grimes 
15143a90f3aSAlan Cox /*
15243a90f3aSAlan Cox  * MPSAFE
1532d5c7e45SMatthew Dillon  *
1542d5c7e45SMatthew Dillon  * WARNING!  This code calls vm_map_check_protection() which only checks
1552d5c7e45SMatthew Dillon  * the associated vm_map_entry range.  It does not determine whether the
1562d5c7e45SMatthew Dillon  * contents of the memory are actually readable or writable.  vmapbuf(),
1572d5c7e45SMatthew Dillon  * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should
1582d5c7e45SMatthew Dillon  * be used in conjunction with this call.
15943a90f3aSAlan Cox  */
160df8bae1dSRodney W. Grimes int
161df8bae1dSRodney W. Grimes useracc(addr, len, rw)
162c3dfdfd1SAlfred Perlstein 	void *addr;
163df8bae1dSRodney W. Grimes 	int len, rw;
164df8bae1dSRodney W. Grimes {
165df8bae1dSRodney W. Grimes 	boolean_t rv;
16602c58685SPoul-Henning Kamp 	vm_prot_t prot;
16705ba50f5SJake Burkholder 	vm_map_t map;
168df8bae1dSRodney W. Grimes 
169e50f5c2eSBruce Evans 	KASSERT((rw & ~VM_PROT_ALL) == 0,
17002c58685SPoul-Henning Kamp 	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
17102c58685SPoul-Henning Kamp 	prot = rw;
17205ba50f5SJake Burkholder 	map = &curproc->p_vmspace->vm_map;
17305ba50f5SJake Burkholder 	if ((vm_offset_t)addr + len > vm_map_max(map) ||
17405ba50f5SJake Burkholder 	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
17526f9a767SRodney W. Grimes 		return (FALSE);
17626f9a767SRodney W. Grimes 	}
17705ba50f5SJake Burkholder 	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
17805ba50f5SJake Burkholder 	    round_page((vm_offset_t)addr + len), prot);
179df8bae1dSRodney W. Grimes 	return (rv == TRUE);
180df8bae1dSRodney W. Grimes }
181df8bae1dSRodney W. Grimes 
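/*
 * Illustrative sketch (hypothetical caller, compiled out): pair the
 * map-level useracc() check with a real access, as the warning above
 * recommends.  copyin() performs the actual access and catches faults
 * that useracc() cannot predict.
 */
#if 0
static int
example_fetch_int(void *uaddr, int *value)
{

	if (!useracc(uaddr, sizeof(int), VM_PROT_READ))
		return (EFAULT);
	return (copyin(uaddr, value, sizeof(int)));
}
#endif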
18243a90f3aSAlan Cox /*
18343a90f3aSAlan Cox  * MPSAFE
18443a90f3aSAlan Cox  */
185df8bae1dSRodney W. Grimes void
186df8bae1dSRodney W. Grimes vslock(addr, len)
187c3dfdfd1SAlfred Perlstein 	void *addr;
188df8bae1dSRodney W. Grimes 	u_int len;
189df8bae1dSRodney W. Grimes {
19043a90f3aSAlan Cox 
1911d7cf06cSAlan Cox 	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
1926cde7a16SDavid Greenman 	    round_page((vm_offset_t)addr + len), FALSE);
193df8bae1dSRodney W. Grimes }
194df8bae1dSRodney W. Grimes 
195319490fbSAlan Cox /*
196319490fbSAlan Cox  * MPSAFE
197319490fbSAlan Cox  */
198df8bae1dSRodney W. Grimes void
1997de47255SPoul-Henning Kamp vsunlock(addr, len)
200c3dfdfd1SAlfred Perlstein 	void *addr;
201df8bae1dSRodney W. Grimes 	u_int len;
202df8bae1dSRodney W. Grimes {
203319490fbSAlan Cox 
2041d7cf06cSAlan Cox 	vm_map_unwire(&curproc->p_vmspace->vm_map,
20523955314SAlfred Perlstein 	    trunc_page((vm_offset_t)addr),
2061d7cf06cSAlan Cox 	    round_page((vm_offset_t)addr + len), FALSE);
207df8bae1dSRodney W. Grimes }
208df8bae1dSRodney W. Grimes 
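/*
 * Typical pairing, as a hypothetical sketch: wire a user buffer for
 * the duration of an operation that must not fault, then release the
 * wiring.
 *
 *	vslock(uaddr, len);
 *	error = perform_io_on_user_buffer(uaddr, len);	(hypothetical)
 *	vsunlock(uaddr, len);
 *
 * Wiring is not an access check; useracc() or the copyin()/copyout()
 * family still governs whether the access itself is legal.
 */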
209df8bae1dSRodney W. Grimes /*
210a136efe9SPeter Wemm  * Create the U area for a new process.
211a136efe9SPeter Wemm  * This routine directly affects fork performance for a process.
212a136efe9SPeter Wemm  */
213a136efe9SPeter Wemm void
214a136efe9SPeter Wemm vm_proc_new(struct proc *p)
215a136efe9SPeter Wemm {
216a136efe9SPeter Wemm 	vm_page_t ma[UAREA_PAGES];
217a136efe9SPeter Wemm 	vm_object_t upobj;
218a136efe9SPeter Wemm 	vm_offset_t up;
219a136efe9SPeter Wemm 	vm_page_t m;
220a136efe9SPeter Wemm 	u_int i;
221a136efe9SPeter Wemm 
222a136efe9SPeter Wemm 	/*
223a136efe9SPeter Wemm 	 * Allocate object for the upage.
224a136efe9SPeter Wemm 	 */
225a136efe9SPeter Wemm 	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
226a136efe9SPeter Wemm 	p->p_upages_obj = upobj;
227a136efe9SPeter Wemm 
228a136efe9SPeter Wemm 	/*
229a136efe9SPeter Wemm 	 * Get a kernel virtual address for the U area for this process.
230a136efe9SPeter Wemm 	 */
231a136efe9SPeter Wemm 	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
232a136efe9SPeter Wemm 	if (up == 0)
233a136efe9SPeter Wemm 		panic("vm_proc_new: upage allocation failed");
234a136efe9SPeter Wemm 	p->p_uarea = (struct user *)up;
235a136efe9SPeter Wemm 
236a136efe9SPeter Wemm 	for (i = 0; i < UAREA_PAGES; i++) {
237a136efe9SPeter Wemm 		/*
238a136efe9SPeter Wemm 		 * Get a uarea page.
239a136efe9SPeter Wemm 		 */
24014f8ceaaSAlan Cox 		m = vm_page_grab(upobj, i,
24114f8ceaaSAlan Cox 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
242a136efe9SPeter Wemm 		ma[i] = m;
243a136efe9SPeter Wemm 
244dc907f66SAlan Cox 		vm_page_lock_queues();
245a136efe9SPeter Wemm 		vm_page_wakeup(m);
246a136efe9SPeter Wemm 		vm_page_flag_clear(m, PG_ZERO);
247a136efe9SPeter Wemm 		m->valid = VM_PAGE_BITS_ALL;
248dc907f66SAlan Cox 		vm_page_unlock_queues();
249a136efe9SPeter Wemm 	}
250a136efe9SPeter Wemm 
251a136efe9SPeter Wemm 	/*
252a136efe9SPeter Wemm 	 * Enter the pages into the kernel address space.
253a136efe9SPeter Wemm 	 */
254a136efe9SPeter Wemm 	pmap_qenter(up, ma, UAREA_PAGES);
255a136efe9SPeter Wemm }
256a136efe9SPeter Wemm 
257a136efe9SPeter Wemm /*
258a136efe9SPeter Wemm  * Dispose the U area for a process that has exited.
259a136efe9SPeter Wemm  * This routine directly impacts the exit performance of a process.
260a136efe9SPeter Wemm  * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
261a136efe9SPeter Wemm  */
262a136efe9SPeter Wemm void
263a136efe9SPeter Wemm vm_proc_dispose(struct proc *p)
264a136efe9SPeter Wemm {
265a136efe9SPeter Wemm 	vm_object_t upobj;
266a136efe9SPeter Wemm 	vm_offset_t up;
267a136efe9SPeter Wemm 	vm_page_t m;
268a136efe9SPeter Wemm 
269a136efe9SPeter Wemm 	upobj = p->p_upages_obj;
2706a07e90dSAlan Cox 	VM_OBJECT_LOCK(upobj);
271f59685a4SPeter Wemm 	if (upobj->resident_page_count != UAREA_PAGES)
272f59685a4SPeter Wemm 		panic("vm_proc_dispose: incorrect number of pages in upobj");
2732d09a6adSAlan Cox 	vm_page_lock_queues();
274f59685a4SPeter Wemm 	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
275a136efe9SPeter Wemm 		vm_page_busy(m);
276a136efe9SPeter Wemm 		vm_page_unwire(m, 0);
277a136efe9SPeter Wemm 		vm_page_free(m);
278a136efe9SPeter Wemm 	}
2792d09a6adSAlan Cox 	vm_page_unlock_queues();
2806a07e90dSAlan Cox 	VM_OBJECT_UNLOCK(upobj);
281f59685a4SPeter Wemm 	up = (vm_offset_t)p->p_uarea;
282a136efe9SPeter Wemm 	pmap_qremove(up, UAREA_PAGES);
283a136efe9SPeter Wemm 	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
284a136efe9SPeter Wemm 	vm_object_deallocate(upobj);
285a136efe9SPeter Wemm }
286a136efe9SPeter Wemm 
287a136efe9SPeter Wemm #ifndef NO_SWAPPING
288a136efe9SPeter Wemm /*
289a136efe9SPeter Wemm  * Allow the U area for a process to be prejudicially paged out.
290a136efe9SPeter Wemm  */
29137c84183SPoul-Henning Kamp static void
292a136efe9SPeter Wemm vm_proc_swapout(struct proc *p)
293a136efe9SPeter Wemm {
294a136efe9SPeter Wemm 	vm_object_t upobj;
295a136efe9SPeter Wemm 	vm_offset_t up;
296a136efe9SPeter Wemm 	vm_page_t m;
297a136efe9SPeter Wemm 
298a136efe9SPeter Wemm 	upobj = p->p_upages_obj;
2996a07e90dSAlan Cox 	VM_OBJECT_LOCK(upobj);
300f59685a4SPeter Wemm 	if (upobj->resident_page_count != UAREA_PAGES)
301f59685a4SPeter Wemm 		panic("vm_proc_swapout: incorrect number of pages in upobj");
3022d09a6adSAlan Cox 	vm_page_lock_queues();
303f59685a4SPeter Wemm 	TAILQ_FOREACH(m, &upobj->memq, listq) {
304a136efe9SPeter Wemm 		vm_page_dirty(m);
305a136efe9SPeter Wemm 		vm_page_unwire(m, 0);
306a136efe9SPeter Wemm 	}
3072d09a6adSAlan Cox 	vm_page_unlock_queues();
3086a07e90dSAlan Cox 	VM_OBJECT_UNLOCK(upobj);
309f59685a4SPeter Wemm 	up = (vm_offset_t)p->p_uarea;
310a136efe9SPeter Wemm 	pmap_qremove(up, UAREA_PAGES);
311a136efe9SPeter Wemm }
312a136efe9SPeter Wemm 
313a136efe9SPeter Wemm /*
314a136efe9SPeter Wemm  * Bring the U area for a specified process back in.
315a136efe9SPeter Wemm  */
31637c84183SPoul-Henning Kamp static void
317a136efe9SPeter Wemm vm_proc_swapin(struct proc *p)
318a136efe9SPeter Wemm {
319a136efe9SPeter Wemm 	vm_page_t ma[UAREA_PAGES];
320a136efe9SPeter Wemm 	vm_object_t upobj;
321a136efe9SPeter Wemm 	vm_offset_t up;
322a136efe9SPeter Wemm 	vm_page_t m;
323a136efe9SPeter Wemm 	int rv;
324a136efe9SPeter Wemm 	int i;
325a136efe9SPeter Wemm 
326a136efe9SPeter Wemm 	upobj = p->p_upages_obj;
3278630c117SAlan Cox 	VM_OBJECT_LOCK(upobj);
328a136efe9SPeter Wemm 	for (i = 0; i < UAREA_PAGES; i++) {
329a136efe9SPeter Wemm 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
330a136efe9SPeter Wemm 		if (m->valid != VM_PAGE_BITS_ALL) {
331a136efe9SPeter Wemm 			rv = vm_pager_get_pages(upobj, &m, 1, 0);
332a136efe9SPeter Wemm 			if (rv != VM_PAGER_OK)
333a136efe9SPeter Wemm 				panic("vm_proc_swapin: cannot get upage");
334a136efe9SPeter Wemm 		}
335a136efe9SPeter Wemm 		ma[i] = m;
336a7e9138eSPeter Wemm 	}
337a7e9138eSPeter Wemm 	if (upobj->resident_page_count != UAREA_PAGES)
338a7e9138eSPeter Wemm 		panic("vm_proc_swapin: lost pages from upobj");
339e16cfdbeSAlan Cox 	vm_page_lock_queues();
340a7e9138eSPeter Wemm 	TAILQ_FOREACH(m, &upobj->memq, listq) {
341a7e9138eSPeter Wemm 		m->valid = VM_PAGE_BITS_ALL;
342a136efe9SPeter Wemm 		vm_page_wire(m);
343a136efe9SPeter Wemm 		vm_page_wakeup(m);
344a136efe9SPeter Wemm 	}
345e16cfdbeSAlan Cox 	vm_page_unlock_queues();
3466a07e90dSAlan Cox 	VM_OBJECT_UNLOCK(upobj);
347f59685a4SPeter Wemm 	up = (vm_offset_t)p->p_uarea;
348a136efe9SPeter Wemm 	pmap_qenter(up, ma, UAREA_PAGES);
349a136efe9SPeter Wemm }
35092da00bbSMatthew Dillon 
35192da00bbSMatthew Dillon /*
35292da00bbSMatthew Dillon  * Swap in the UAREAs of all processes swapped out to the given device.
35392da00bbSMatthew Dillon  * The pages in the UAREA are marked dirty and their swap metadata is freed.
35492da00bbSMatthew Dillon  */
35592da00bbSMatthew Dillon void
35692da00bbSMatthew Dillon vm_proc_swapin_all(int devidx)
35792da00bbSMatthew Dillon {
35892da00bbSMatthew Dillon 	struct proc *p;
35992da00bbSMatthew Dillon 	vm_object_t object;
36092da00bbSMatthew Dillon 	vm_page_t m;
36192da00bbSMatthew Dillon 
36292da00bbSMatthew Dillon retry:
36392da00bbSMatthew Dillon 	sx_slock(&allproc_lock);
36492da00bbSMatthew Dillon 	FOREACH_PROC_IN_SYSTEM(p) {
36592da00bbSMatthew Dillon 		PROC_LOCK(p);
36692da00bbSMatthew Dillon 		object = p->p_upages_obj;
36717cd3642SAlan Cox 		if (object != NULL) {
36817cd3642SAlan Cox 			VM_OBJECT_LOCK(object);
36917cd3642SAlan Cox 			if (swap_pager_isswapped(object, devidx)) {
37017cd3642SAlan Cox 				VM_OBJECT_UNLOCK(object);
37192da00bbSMatthew Dillon 				sx_sunlock(&allproc_lock);
37292da00bbSMatthew Dillon 				faultin(p);
37392da00bbSMatthew Dillon 				PROC_UNLOCK(p);
3746a07e90dSAlan Cox 				VM_OBJECT_LOCK(object);
37592da00bbSMatthew Dillon 				vm_page_lock_queues();
37692da00bbSMatthew Dillon 				TAILQ_FOREACH(m, &object->memq, listq)
37792da00bbSMatthew Dillon 					vm_page_dirty(m);
37892da00bbSMatthew Dillon 				vm_page_unlock_queues();
37992da00bbSMatthew Dillon 				swap_pager_freespace(object, 0,
38092da00bbSMatthew Dillon 				    object->un_pager.swp.swp_bcount);
3816a07e90dSAlan Cox 				VM_OBJECT_UNLOCK(object);
38292da00bbSMatthew Dillon 				goto retry;
38392da00bbSMatthew Dillon 			}
38417cd3642SAlan Cox 			VM_OBJECT_UNLOCK(object);
38517cd3642SAlan Cox 		}
38692da00bbSMatthew Dillon 		PROC_UNLOCK(p);
38792da00bbSMatthew Dillon 	}
38892da00bbSMatthew Dillon 	sx_sunlock(&allproc_lock);
38992da00bbSMatthew Dillon }
390a136efe9SPeter Wemm #endif
391a136efe9SPeter Wemm 
39249a2507bSAlan Cox #ifndef KSTACK_MAX_PAGES
39349a2507bSAlan Cox #define KSTACK_MAX_PAGES 32
39449a2507bSAlan Cox #endif
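/*
 * KSTACK_PAGES supplies the default stack size and may be set by the
 * KSTACK_PAGES kernel option (see opt_kstack_pages.h above); the cap
 * here may likewise be overridden via the KSTACK_MAX_PAGES option
 * (opt_kstack_max_pages.h).
 */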
39549a2507bSAlan Cox 
39649a2507bSAlan Cox /*
39749a2507bSAlan Cox  * Create the kernel stack (including pcb for i386) for a new thread.
39849a2507bSAlan Cox  * This routine directly affects fork performance for a process and
39949a2507bSAlan Cox  * creation performance for a thread.
40049a2507bSAlan Cox  */
40149a2507bSAlan Cox void
40249a2507bSAlan Cox vm_thread_new(struct thread *td, int pages)
40349a2507bSAlan Cox {
40449a2507bSAlan Cox 	vm_object_t ksobj;
40549a2507bSAlan Cox 	vm_offset_t ks;
40649a2507bSAlan Cox 	vm_page_t m, ma[KSTACK_MAX_PAGES];
40749a2507bSAlan Cox 	int i;
40849a2507bSAlan Cox 
40949a2507bSAlan Cox 	/* Bounds check */
41049a2507bSAlan Cox 	if (pages <= 1)
41149a2507bSAlan Cox 		pages = KSTACK_PAGES;
41249a2507bSAlan Cox 	else if (pages > KSTACK_MAX_PAGES)
41349a2507bSAlan Cox 		pages = KSTACK_MAX_PAGES;
41449a2507bSAlan Cox 	/*
41549a2507bSAlan Cox 	 * Allocate an object for the kstack.
41649a2507bSAlan Cox 	 */
41749a2507bSAlan Cox 	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
41849a2507bSAlan Cox 	td->td_kstack_obj = ksobj;
41949a2507bSAlan Cox 	/*
42049a2507bSAlan Cox 	 * Get a kernel virtual address for this thread's kstack.
42149a2507bSAlan Cox 	 */
42249a2507bSAlan Cox 	ks = kmem_alloc_nofault(kernel_map,
42349a2507bSAlan Cox 	   (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
42449a2507bSAlan Cox 	if (ks == 0)
42549a2507bSAlan Cox 		panic("vm_thread_new: kstack allocation failed");
42649a2507bSAlan Cox 	if (KSTACK_GUARD_PAGES != 0) {
42749a2507bSAlan Cox 		pmap_qremove(ks, KSTACK_GUARD_PAGES);
42849a2507bSAlan Cox 		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
42949a2507bSAlan Cox 	}
43049a2507bSAlan Cox 	td->td_kstack = ks;
43149a2507bSAlan Cox 	/*
43249a2507bSAlan Cox 	 * Knowing the number of pages allocated is useful when you
43349a2507bSAlan Cox 	 * want to deallocate them.
43449a2507bSAlan Cox 	 */
43549a2507bSAlan Cox 	td->td_kstack_pages = pages;
43649a2507bSAlan Cox 	/*
43749a2507bSAlan Cox 	 * For the length of the stack, link in a real page of ram for each
43849a2507bSAlan Cox 	 * page of stack.
43949a2507bSAlan Cox 	 */
44049a2507bSAlan Cox 	VM_OBJECT_LOCK(ksobj);
44149a2507bSAlan Cox 	for (i = 0; i < pages; i++) {
44249a2507bSAlan Cox 		/*
44349a2507bSAlan Cox 		 * Get a kernel stack page.
44449a2507bSAlan Cox 		 */
44549a2507bSAlan Cox 		m = vm_page_grab(ksobj, i,
44649a2507bSAlan Cox 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
44749a2507bSAlan Cox 		ma[i] = m;
44849a2507bSAlan Cox 		vm_page_lock_queues();
44949a2507bSAlan Cox 		vm_page_wakeup(m);
45049a2507bSAlan Cox 		m->valid = VM_PAGE_BITS_ALL;
45149a2507bSAlan Cox 		vm_page_unlock_queues();
45249a2507bSAlan Cox 	}
45349a2507bSAlan Cox 	VM_OBJECT_UNLOCK(ksobj);
45449a2507bSAlan Cox 	pmap_qenter(ks, ma, pages);
45549a2507bSAlan Cox }
45649a2507bSAlan Cox 
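/*
 * Resulting kernel virtual layout for a kstack, as an illustrative
 * sketch:
 *
 *	ks - KSTACK_GUARD_PAGES * PAGE_SIZE	guard page(s), unmapped
 *	ks (td_kstack)				bottom of the stack
 *	ks + pages * PAGE_SIZE			top of the stack
 *
 * An overflowing kernel stack faults on the unmapped guard page
 * instead of silently corrupting adjacent kernel memory.
 */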
45749a2507bSAlan Cox /*
45849a2507bSAlan Cox  * Dispose of a thread's kernel stack.
45949a2507bSAlan Cox  */
46049a2507bSAlan Cox void
46149a2507bSAlan Cox vm_thread_dispose(struct thread *td)
46249a2507bSAlan Cox {
46349a2507bSAlan Cox 	vm_object_t ksobj;
46449a2507bSAlan Cox 	vm_offset_t ks;
46549a2507bSAlan Cox 	vm_page_t m;
46649a2507bSAlan Cox 	int i, pages;
46749a2507bSAlan Cox 
46849a2507bSAlan Cox 	pages = td->td_kstack_pages;
46949a2507bSAlan Cox 	ksobj = td->td_kstack_obj;
47049a2507bSAlan Cox 	ks = td->td_kstack;
47149a2507bSAlan Cox 	pmap_qremove(ks, pages);
47249a2507bSAlan Cox 	VM_OBJECT_LOCK(ksobj);
47349a2507bSAlan Cox 	for (i = 0; i < pages; i++) {
47449a2507bSAlan Cox 		m = vm_page_lookup(ksobj, i);
47549a2507bSAlan Cox 		if (m == NULL)
47649a2507bSAlan Cox 			panic("vm_thread_dispose: kstack already missing?");
47749a2507bSAlan Cox 		vm_page_lock_queues();
47849a2507bSAlan Cox 		vm_page_busy(m);
47949a2507bSAlan Cox 		vm_page_unwire(m, 0);
48049a2507bSAlan Cox 		vm_page_free(m);
48149a2507bSAlan Cox 		vm_page_unlock_queues();
48249a2507bSAlan Cox 	}
48349a2507bSAlan Cox 	VM_OBJECT_UNLOCK(ksobj);
48449a2507bSAlan Cox 	vm_object_deallocate(ksobj);
48549a2507bSAlan Cox 	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
48649a2507bSAlan Cox 	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
48749a2507bSAlan Cox }
48849a2507bSAlan Cox 
48949a2507bSAlan Cox /*
49049a2507bSAlan Cox  * Allow a thread's kernel stack to be paged out.
49149a2507bSAlan Cox  */
49249a2507bSAlan Cox void
49349a2507bSAlan Cox vm_thread_swapout(struct thread *td)
49449a2507bSAlan Cox {
49549a2507bSAlan Cox 	vm_object_t ksobj;
49649a2507bSAlan Cox 	vm_page_t m;
49749a2507bSAlan Cox 	int i, pages;
49849a2507bSAlan Cox 
499a04a7f22SAlan Cox #ifdef	__alpha__
50049a2507bSAlan Cox 	/*
50149a2507bSAlan Cox 	 * Make sure we aren't fpcurthread.
50249a2507bSAlan Cox 	 */
50349a2507bSAlan Cox 	alpha_fpstate_save(td, 1);
50449a2507bSAlan Cox #endif
50549a2507bSAlan Cox 	pages = td->td_kstack_pages;
50649a2507bSAlan Cox 	ksobj = td->td_kstack_obj;
50749a2507bSAlan Cox 	pmap_qremove(td->td_kstack, pages);
50849a2507bSAlan Cox 	VM_OBJECT_LOCK(ksobj);
50949a2507bSAlan Cox 	for (i = 0; i < pages; i++) {
51049a2507bSAlan Cox 		m = vm_page_lookup(ksobj, i);
51149a2507bSAlan Cox 		if (m == NULL)
51249a2507bSAlan Cox 			panic("vm_thread_swapout: kstack already missing?");
51349a2507bSAlan Cox 		vm_page_lock_queues();
51449a2507bSAlan Cox 		vm_page_dirty(m);
51549a2507bSAlan Cox 		vm_page_unwire(m, 0);
51649a2507bSAlan Cox 		vm_page_unlock_queues();
51749a2507bSAlan Cox 	}
51849a2507bSAlan Cox 	VM_OBJECT_UNLOCK(ksobj);
51949a2507bSAlan Cox }
52049a2507bSAlan Cox 
52149a2507bSAlan Cox /*
52249a2507bSAlan Cox  * Bring the kernel stack for a specified thread back in.
52349a2507bSAlan Cox  */
52449a2507bSAlan Cox void
52549a2507bSAlan Cox vm_thread_swapin(struct thread *td)
52649a2507bSAlan Cox {
52749a2507bSAlan Cox 	vm_object_t ksobj;
52849a2507bSAlan Cox 	vm_page_t m, ma[KSTACK_MAX_PAGES];
52949a2507bSAlan Cox 	int i, pages, rv;
53049a2507bSAlan Cox 
53149a2507bSAlan Cox 	pages = td->td_kstack_pages;
53249a2507bSAlan Cox 	ksobj = td->td_kstack_obj;
53349a2507bSAlan Cox 	VM_OBJECT_LOCK(ksobj);
53449a2507bSAlan Cox 	for (i = 0; i < pages; i++) {
53549a2507bSAlan Cox 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
53649a2507bSAlan Cox 		if (m->valid != VM_PAGE_BITS_ALL) {
53749a2507bSAlan Cox 			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
53849a2507bSAlan Cox 			if (rv != VM_PAGER_OK)
53949a2507bSAlan Cox 				panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
54049a2507bSAlan Cox 			m = vm_page_lookup(ksobj, i);
54149a2507bSAlan Cox 			m->valid = VM_PAGE_BITS_ALL;
54249a2507bSAlan Cox 		}
54349a2507bSAlan Cox 		ma[i] = m;
54449a2507bSAlan Cox 		vm_page_lock_queues();
54549a2507bSAlan Cox 		vm_page_wire(m);
54649a2507bSAlan Cox 		vm_page_wakeup(m);
54749a2507bSAlan Cox 		vm_page_unlock_queues();
54849a2507bSAlan Cox 	}
54949a2507bSAlan Cox 	VM_OBJECT_UNLOCK(ksobj);
55049a2507bSAlan Cox 	pmap_qenter(td->td_kstack, ma, pages);
551a04a7f22SAlan Cox #ifdef	__alpha__
55249a2507bSAlan Cox 	/*
55349a2507bSAlan Cox 	 * The pcb may be at a different physical address now so cache the
55449a2507bSAlan Cox 	 * new address.
55549a2507bSAlan Cox 	 */
55649a2507bSAlan Cox 	td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb);
55749a2507bSAlan Cox #endif
55849a2507bSAlan Cox }
55949a2507bSAlan Cox 
560a136efe9SPeter Wemm /*
56189f4fca2SAlan Cox  * Set up a variable-sized alternate kstack.
56289f4fca2SAlan Cox  */
56389f4fca2SAlan Cox void
56489f4fca2SAlan Cox vm_thread_new_altkstack(struct thread *td, int pages)
56589f4fca2SAlan Cox {
56689f4fca2SAlan Cox 
56789f4fca2SAlan Cox 	td->td_altkstack = td->td_kstack;
56889f4fca2SAlan Cox 	td->td_altkstack_obj = td->td_kstack_obj;
56989f4fca2SAlan Cox 	td->td_altkstack_pages = td->td_kstack_pages;
57089f4fca2SAlan Cox 
57149a2507bSAlan Cox 	vm_thread_new(td, pages);
57289f4fca2SAlan Cox }
57389f4fca2SAlan Cox 
57489f4fca2SAlan Cox /*
57589f4fca2SAlan Cox  * Restore the original kstack.
57689f4fca2SAlan Cox  */
57789f4fca2SAlan Cox void
57889f4fca2SAlan Cox vm_thread_dispose_altkstack(struct thread *td)
57989f4fca2SAlan Cox {
58089f4fca2SAlan Cox 
58149a2507bSAlan Cox 	vm_thread_dispose(td);
58289f4fca2SAlan Cox 
58389f4fca2SAlan Cox 	td->td_kstack = td->td_altkstack;
58489f4fca2SAlan Cox 	td->td_kstack_obj = td->td_altkstack_obj;
58589f4fca2SAlan Cox 	td->td_kstack_pages = td->td_altkstack_pages;
58689f4fca2SAlan Cox 	td->td_altkstack = 0;
58789f4fca2SAlan Cox 	td->td_altkstack_obj = NULL;
58889f4fca2SAlan Cox 	td->td_altkstack_pages = 0;
58989f4fca2SAlan Cox }
59089f4fca2SAlan Cox 
59189f4fca2SAlan Cox /*
592df8bae1dSRodney W. Grimes  * Implement fork's actions on an address space.
593df8bae1dSRodney W. Grimes  * Here we arrange for the address space to be copied or referenced,
594df8bae1dSRodney W. Grimes  * allocate a user struct (pcb and kernel stack), then call the
595df8bae1dSRodney W. Grimes  * machine-dependent layer to fill those in and make the new process
596a2a1c95cSPeter Wemm  * ready to run.  The new process is set up so that it returns directly
597a2a1c95cSPeter Wemm  * to user mode to avoid stack copying and relocation problems.
598df8bae1dSRodney W. Grimes  */
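/*
 * Flag handling below, in sketch form:
 *
 *	RFPROC clear		no new process; if RFMEM is also clear
 *				and the vmspace is shared, un-share it
 *				via vmspace_unshare()
 *	RFMEM set		share p1's vmspace (vm_refcnt++)
 *	RFMEM clear		copy-on-write copy via vmspace_fork()
 */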
599a2a1c95cSPeter Wemm void
600079b7badSJulian Elischer vm_forkproc(td, p2, td2, flags)
601b40ce416SJulian Elischer 	struct thread *td;
602b40ce416SJulian Elischer 	struct proc *p2;
603079b7badSJulian Elischer 	struct thread *td2;
604a2a1c95cSPeter Wemm 	int flags;
605df8bae1dSRodney W. Grimes {
606b40ce416SJulian Elischer 	struct proc *p1 = td->td_proc;
60754d92145SMatthew Dillon 	struct user *up;
608df8bae1dSRodney W. Grimes 
6090cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
6100cddd8f0SMatthew Dillon 
61191c28bfdSLuoqi Chen 	if ((flags & RFPROC) == 0) {
61291c28bfdSLuoqi Chen 		/*
61391c28bfdSLuoqi Chen 		 * Divorce the memory, if it is shared, essentially
61491c28bfdSLuoqi Chen 		 * Divorce the memory if it is shared; essentially,
61591c28bfdSLuoqi Chen 		 * this changes memory shared amongst threads into
61691c28bfdSLuoqi Chen 		 * COW locally.
61791c28bfdSLuoqi Chen 		if ((flags & RFMEM) == 0) {
61891c28bfdSLuoqi Chen 			if (p1->p_vmspace->vm_refcnt > 1) {
61991c28bfdSLuoqi Chen 				vmspace_unshare(p1);
62091c28bfdSLuoqi Chen 			}
62191c28bfdSLuoqi Chen 		}
622079b7badSJulian Elischer 		cpu_fork(td, p2, td2, flags);
62391c28bfdSLuoqi Chen 		return;
62491c28bfdSLuoqi Chen 	}
62591c28bfdSLuoqi Chen 
6265856e12eSJohn Dyson 	if (flags & RFMEM) {
6275856e12eSJohn Dyson 		p2->p_vmspace = p1->p_vmspace;
6285856e12eSJohn Dyson 		p1->p_vmspace->vm_refcnt++;
6295856e12eSJohn Dyson 	}
6305856e12eSJohn Dyson 
63190ecac61SMatthew Dillon 	while (vm_page_count_severe()) {
63226f9a767SRodney W. Grimes 		VM_WAIT;
6330d94caffSDavid Greenman 	}
63426f9a767SRodney W. Grimes 
6355856e12eSJohn Dyson 	if ((flags & RFMEM) == 0) {
636df8bae1dSRodney W. Grimes 		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
637df8bae1dSRodney W. Grimes 
638d4da2dbaSAlan Cox 		pmap_pinit2(vmspace_pmap(p2->p_vmspace));
639d4da2dbaSAlan Cox 
640df8bae1dSRodney W. Grimes 		if (p1->p_vmspace->vm_shm)
641dabee6feSPeter Wemm 			shmfork(p1, p2);
642a2a1c95cSPeter Wemm 	}
643df8bae1dSRodney W. Grimes 
644b40ce416SJulian Elischer 	/* XXXKSE this is unsatisfactory but should be adequate */
645b40ce416SJulian Elischer 	up = p2->p_uarea;
64690af4afaSJohn Baldwin 	MPASS(p2->p_sigacts != NULL);
647df8bae1dSRodney W. Grimes 
64839fb8e6bSJulian Elischer 	/*
64939fb8e6bSJulian Elischer 	 * p_stats currently points at fields in the user struct,
65039fb8e6bSJulian Elischer 	 * reached via p_addr rather than &u. Copy parts of
65139fb8e6bSJulian Elischer 	 * p_stats; zero the rest of p_stats (statistics).
65239fb8e6bSJulian Elischer 	 */
65339fb8e6bSJulian Elischer 	p2->p_stats = &up->u_stats;
654df8bae1dSRodney W. Grimes 	bzero(&up->u_stats.pstat_startzero,
655df8bae1dSRodney W. Grimes 	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
656df8bae1dSRodney W. Grimes 		(caddr_t) &up->u_stats.pstat_startzero));
657df8bae1dSRodney W. Grimes 	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
658df8bae1dSRodney W. Grimes 	    ((caddr_t) &up->u_stats.pstat_endcopy -
659df8bae1dSRodney W. Grimes 		(caddr_t) &up->u_stats.pstat_startcopy));
660df8bae1dSRodney W. Grimes 
661df8bae1dSRodney W. Grimes 	/*
662a2a1c95cSPeter Wemm 	 * cpu_fork will copy and update the pcb, set up the kernel stack,
663a2a1c95cSPeter Wemm 	 * and make the child ready to run.
664df8bae1dSRodney W. Grimes 	 */
665079b7badSJulian Elischer 	cpu_fork(td, p2, td2, flags);
666df8bae1dSRodney W. Grimes }
667df8bae1dSRodney W. Grimes 
668df8bae1dSRodney W. Grimes /*
669eb30c1c0SPeter Wemm  * Called after a process has been wait(2)'ed upon and is being reaped.
670eb30c1c0SPeter Wemm  * The idea is to reclaim resources that we could not reclaim while
671eb30c1c0SPeter Wemm  * the process was still executing.
672eb30c1c0SPeter Wemm  */
673eb30c1c0SPeter Wemm void
674eb30c1c0SPeter Wemm vm_waitproc(p)
675eb30c1c0SPeter Wemm 	struct proc *p;
676eb30c1c0SPeter Wemm {
677eb30c1c0SPeter Wemm 
678eb30c1c0SPeter Wemm 	GIANT_REQUIRED;
679582ec34cSAlfred Perlstein 	vmspace_exitfree(p);		/* and clean-out the vmspace */
680eb30c1c0SPeter Wemm }
681eb30c1c0SPeter Wemm 
682eb30c1c0SPeter Wemm /*
683df8bae1dSRodney W. Grimes  * Set default limits for VM system.
684df8bae1dSRodney W. Grimes  * Called for proc 0, and then inherited by all others.
6852b14f991SJulian Elischer  *
6862b14f991SJulian Elischer  * XXX should probably act directly on proc0.
687df8bae1dSRodney W. Grimes  */
6882b14f991SJulian Elischer static void
6892b14f991SJulian Elischer vm_init_limits(udata)
6904590fd3aSDavid Greenman 	void *udata;
691df8bae1dSRodney W. Grimes {
69254d92145SMatthew Dillon 	struct proc *p = udata;
693bbc0ec52SDavid Greenman 	int rss_limit;
694df8bae1dSRodney W. Grimes 
695df8bae1dSRodney W. Grimes 	/*
6960d94caffSDavid Greenman 	 * Set up the initial limits on process VM. Set the maximum resident
6970d94caffSDavid Greenman 	 * set size to be half of (reasonably) available memory.  Since this
6980d94caffSDavid Greenman 	 * is a soft limit, it comes into effect only when the system is out
6990d94caffSDavid Greenman 	 * of memory - half of main memory helps to favor smaller processes,
700bbc0ec52SDavid Greenman 	 * and reduces thrashing of the object cache.
701df8bae1dSRodney W. Grimes 	 */
702cbc89bfbSPaul Saab 	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
703cbc89bfbSPaul Saab 	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
704cbc89bfbSPaul Saab 	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
705cbc89bfbSPaul Saab 	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
706dd0bd066SDavid Greenman 	/* limit the limit to no less than 2MB */
707f2daac0cSDavid Greenman 	rss_limit = max(cnt.v_free_count, 512);
708bbc0ec52SDavid Greenman 	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
70926f9a767SRodney W. Grimes 	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
710df8bae1dSRodney W. Grimes }
711df8bae1dSRodney W. Grimes 
71226f9a767SRodney W. Grimes void
71326f9a767SRodney W. Grimes faultin(p)
71426f9a767SRodney W. Grimes 	struct proc *p;
71526f9a767SRodney W. Grimes {
71611edc1e0SJohn Baldwin #ifdef NO_SWAPPING
71711edc1e0SJohn Baldwin 
71811edc1e0SJohn Baldwin 	PROC_LOCK_ASSERT(p, MA_OWNED);
71911edc1e0SJohn Baldwin 	if ((p->p_sflag & PS_INMEM) == 0)
72011edc1e0SJohn Baldwin 		panic("faultin: proc swapped out with NO_SWAPPING!");
72111edc1e0SJohn Baldwin #else /* !NO_SWAPPING */
722664f718bSJohn Baldwin 	struct thread *td;
72326f9a767SRodney W. Grimes 
724a136efe9SPeter Wemm 	GIANT_REQUIRED;
725c96d52a9SJohn Baldwin 	PROC_LOCK_ASSERT(p, MA_OWNED);
7261d7b9ed2SJulian Elischer 	/*
7271d7b9ed2SJulian Elischer 	 * If another process is swapping in this process,
7281d7b9ed2SJulian Elischer 	 * just wait until it finishes.
7291d7b9ed2SJulian Elischer 	 */
730664f718bSJohn Baldwin 	if (p->p_sflag & PS_SWAPPINGIN)
7311d7b9ed2SJulian Elischer 		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
732664f718bSJohn Baldwin 	else if ((p->p_sflag & PS_INMEM) == 0) {
733664f718bSJohn Baldwin 		/*
734664f718bSJohn Baldwin 		 * Don't let another thread swap process p out while we are
735664f718bSJohn Baldwin 		 * busy swapping it in.
736664f718bSJohn Baldwin 		 */
737664f718bSJohn Baldwin 		++p->p_lock;
7381d7b9ed2SJulian Elischer 		mtx_lock_spin(&sched_lock);
7391d7b9ed2SJulian Elischer 		p->p_sflag |= PS_SWAPPINGIN;
7409ed346baSBosko Milekic 		mtx_unlock_spin(&sched_lock);
74145ece682SJohn Baldwin 		PROC_UNLOCK(p);
74226f9a767SRodney W. Grimes 
743a136efe9SPeter Wemm 		vm_proc_swapin(p);
744664f718bSJohn Baldwin 		FOREACH_THREAD_IN_PROC(p, td)
74549a2507bSAlan Cox 			vm_thread_swapin(td);
74626f9a767SRodney W. Grimes 
74745ece682SJohn Baldwin 		PROC_LOCK(p);
7489ed346baSBosko Milekic 		mtx_lock_spin(&sched_lock);
7499eb881f8SSeigo Tanimura 		p->p_sflag &= ~PS_SWAPPINGIN;
7509eb881f8SSeigo Tanimura 		p->p_sflag |= PS_INMEM;
751664f718bSJohn Baldwin 		FOREACH_THREAD_IN_PROC(p, td) {
752664f718bSJohn Baldwin 			TD_CLR_SWAPPED(td);
75371fad9fdSJulian Elischer 			if (TD_CAN_RUN(td))
75471fad9fdSJulian Elischer 				setrunnable(td);
755664f718bSJohn Baldwin 		}
756664f718bSJohn Baldwin 		mtx_unlock_spin(&sched_lock);
75726f9a767SRodney W. Grimes 
7581d7b9ed2SJulian Elischer 		wakeup(&p->p_sflag);
75926f9a767SRodney W. Grimes 
760664f718bSJohn Baldwin 		/* Allow other threads to swap p out now. */
76126f9a767SRodney W. Grimes 		--p->p_lock;
76226f9a767SRodney W. Grimes 	}
76311edc1e0SJohn Baldwin #endif /* NO_SWAPPING */
76426f9a767SRodney W. Grimes }
76526f9a767SRodney W. Grimes 
766df8bae1dSRodney W. Grimes /*
76726f9a767SRodney W. Grimes  * This swapin algorithm attempts to swap in processes only if there
76826f9a767SRodney W. Grimes  * is enough space for them.  Of course, if a process waits for a long
76926f9a767SRodney W. Grimes  * time, it will be swapped in anyway.
7700384fff8SJason Evans  *
771e602ba25SJulian Elischer  *  XXXKSE - the process with the highest-priority thread counts.
772b40ce416SJulian Elischer  *
7730384fff8SJason Evans  * Giant is still held at this point, to be released in tsleep.
774df8bae1dSRodney W. Grimes  */
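/*
 * Selection sketch: each swapped-out but otherwise runnable thread is
 * ranked by
 *
 *	pri = p_swtime + kg_slptime	(- 8 * kg_nice unless
 *					 PS_SWAPINREQ is set)
 *
 * and the process owning the highest-ranked thread is faulted in
 * first, so processes swapped out or asleep the longest win.
 */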
7752b14f991SJulian Elischer /* ARGSUSED*/
7762b14f991SJulian Elischer static void
777d841aaa7SBruce Evans scheduler(dummy)
778d841aaa7SBruce Evans 	void *dummy;
779df8bae1dSRodney W. Grimes {
78054d92145SMatthew Dillon 	struct proc *p;
781e602ba25SJulian Elischer 	struct thread *td;
78254d92145SMatthew Dillon 	int pri;
783df8bae1dSRodney W. Grimes 	struct proc *pp;
784df8bae1dSRodney W. Grimes 	int ppri;
785df8bae1dSRodney W. Grimes 
786c96d52a9SJohn Baldwin 	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
7870cddd8f0SMatthew Dillon 	/* GIANT_REQUIRED */
7880384fff8SJason Evans 
789df8bae1dSRodney W. Grimes loop:
79090ecac61SMatthew Dillon 	if (vm_page_count_min()) {
7910d94caffSDavid Greenman 		VM_WAIT;
79290ecac61SMatthew Dillon 		goto loop;
7930d94caffSDavid Greenman 	}
79426f9a767SRodney W. Grimes 
795df8bae1dSRodney W. Grimes 	pp = NULL;
796df8bae1dSRodney W. Grimes 	ppri = INT_MIN;
7971005a129SJohn Baldwin 	sx_slock(&allproc_lock);
798b40ce416SJulian Elischer 	FOREACH_PROC_IN_SYSTEM(p) {
799b40ce416SJulian Elischer 		struct ksegrp *kg;
800664f718bSJohn Baldwin 		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
801e602ba25SJulian Elischer 			continue;
802e602ba25SJulian Elischer 		}
8039ed346baSBosko Milekic 		mtx_lock_spin(&sched_lock);
804e602ba25SJulian Elischer 		FOREACH_THREAD_IN_PROC(p, td) {
8051d7b9ed2SJulian Elischer 			/*
80671fad9fdSJulian Elischer 			 * An otherwise runnable thread of a process
80771fad9fdSJulian Elischer 			 * swapped out has only the TDI_SWAPPED bit set.
80871fad9fdSJulian Elischer 			 *
8091d7b9ed2SJulian Elischer 			 */
81071fad9fdSJulian Elischer 			if (td->td_inhibitors == TDI_SWAPPED) {
811e602ba25SJulian Elischer 				kg = td->td_ksegrp;
812b40ce416SJulian Elischer 				pri = p->p_swtime + kg->kg_slptime;
8135074aecdSJohn Baldwin 				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
814b40ce416SJulian Elischer 					pri -= kg->kg_nice * 8;
815a669a6e9SJohn Dyson 				}
81695461b45SJohn Dyson 
81726f9a767SRodney W. Grimes 				/*
818b40ce416SJulian Elischer 				 * if this ksegrp is higher priority
819b40ce416SJulian Elischer 				 * and there is enough space, then select
820b40ce416SJulian Elischer 				 * this process instead of the previous
821b40ce416SJulian Elischer 				 * selection.
82226f9a767SRodney W. Grimes 				 */
8230d94caffSDavid Greenman 				if (pri > ppri) {
824df8bae1dSRodney W. Grimes 					pp = p;
825df8bae1dSRodney W. Grimes 					ppri = pri;
826df8bae1dSRodney W. Grimes 				}
827df8bae1dSRodney W. Grimes 			}
828b40ce416SJulian Elischer 		}
8299ed346baSBosko Milekic 		mtx_unlock_spin(&sched_lock);
830df8bae1dSRodney W. Grimes 	}
8311005a129SJohn Baldwin 	sx_sunlock(&allproc_lock);
83226f9a767SRodney W. Grimes 
833df8bae1dSRodney W. Grimes 	/*
834a669a6e9SJohn Dyson 	 * Nothing to do, back to sleep.
835df8bae1dSRodney W. Grimes 	 */
836df8bae1dSRodney W. Grimes 	if ((p = pp) == NULL) {
837ea754954SJohn Baldwin 		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
838df8bae1dSRodney W. Grimes 		goto loop;
839df8bae1dSRodney W. Grimes 	}
8401d7b9ed2SJulian Elischer 	PROC_LOCK(p);
8411d7b9ed2SJulian Elischer 
8421d7b9ed2SJulian Elischer 	/*
8431d7b9ed2SJulian Elischer 	 * Another process may be bringing or may have already
8441d7b9ed2SJulian Elischer 	 * brought this process in while we traverse all threads.
8451d7b9ed2SJulian Elischer 	 * Or, this process may even be being swapped out again.
8461d7b9ed2SJulian Elischer 	 */
847664f718bSJohn Baldwin 	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
8481d7b9ed2SJulian Elischer 		PROC_UNLOCK(p);
8491d7b9ed2SJulian Elischer 		goto loop;
8501d7b9ed2SJulian Elischer 	}
8511d7b9ed2SJulian Elischer 
852664f718bSJohn Baldwin 	mtx_lock_spin(&sched_lock);
8531d7b9ed2SJulian Elischer 	p->p_sflag &= ~PS_SWAPINREQ;
854664f718bSJohn Baldwin 	mtx_unlock_spin(&sched_lock);
855a669a6e9SJohn Dyson 
856df8bae1dSRodney W. Grimes 	/*
85726f9a767SRodney W. Grimes 	 * We would like to bring someone in (only if there is space).
858e602ba25SJulian Elischer 	 * [What checks the space?]
859df8bae1dSRodney W. Grimes 	 */
86026f9a767SRodney W. Grimes 	faultin(p);
86145ece682SJohn Baldwin 	PROC_UNLOCK(p);
862664f718bSJohn Baldwin 	mtx_lock_spin(&sched_lock);
863df8bae1dSRodney W. Grimes 	p->p_swtime = 0;
8649ed346baSBosko Milekic 	mtx_unlock_spin(&sched_lock);
865df8bae1dSRodney W. Grimes 	goto loop;
866df8bae1dSRodney W. Grimes }
867df8bae1dSRodney W. Grimes 
8685afce282SDavid Greenman #ifndef NO_SWAPPING
8695afce282SDavid Greenman 
870ceb0cf87SJohn Dyson /*
871ceb0cf87SJohn Dyson  * Swap_idle_threshold1 is the guaranteed swapped-in time for a process
872ceb0cf87SJohn Dyson  */
873303b270bSEivind Eklund static int swap_idle_threshold1 = 2;
8742a3eeaa2STom Rhodes SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
8759faaf3b3STom Rhodes     &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
876ceb0cf87SJohn Dyson 
877ceb0cf87SJohn Dyson /*
878ceb0cf87SJohn Dyson  * Swap_idle_threshold2 is the time that a process can be idle before
879ceb0cf87SJohn Dyson  * it will be swapped out, if idle swapping is enabled.
880ceb0cf87SJohn Dyson  */
881303b270bSEivind Eklund static int swap_idle_threshold2 = 10;
8822a3eeaa2STom Rhodes SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
8839faaf3b3STom Rhodes     &swap_idle_threshold2, 0, "Time before a process will be swapped out");
884ceb0cf87SJohn Dyson 
885df8bae1dSRodney W. Grimes /*
886df8bae1dSRodney W. Grimes  * Swapout is driven by the pageout daemon.  Very simple, we find eligible
887df8bae1dSRodney W. Grimes  * procs and unwire their u-areas.  We try to always "swap" at least one
888df8bae1dSRodney W. Grimes  * process in case we need the room for a swapin.
889df8bae1dSRodney W. Grimes  * If any procs have been sleeping/stopped for at least maxslp seconds,
890df8bae1dSRodney W. Grimes  * they are swapped.  Else, we swap the longest-sleeping or stopped process,
891df8bae1dSRodney W. Grimes  * if any, otherwise the longest-resident process.
892df8bae1dSRodney W. Grimes  */
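/*
 * Hypothetical caller sketch: the VM daemon invokes this as, e.g.,
 *
 *	swapout_procs(VM_SWAP_NORMAL);
 *	swapout_procs(VM_SWAP_NORMAL | VM_SWAP_IDLE);
 *
 * where VM_SWAP_IDLE additionally makes processes idle longer than
 * swap_idle_threshold2 eligible, per the tests below.
 */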
893df8bae1dSRodney W. Grimes void
8943a2dc656SJohn Dyson swapout_procs(action)
8953a2dc656SJohn Dyson 	int action;
896df8bae1dSRodney W. Grimes {
89754d92145SMatthew Dillon 	struct proc *p;
898e602ba25SJulian Elischer 	struct thread *td;
899b40ce416SJulian Elischer 	struct ksegrp *kg;
900df8bae1dSRodney W. Grimes 	int didswap = 0;
901df8bae1dSRodney W. Grimes 
9020cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
9030cddd8f0SMatthew Dillon 
9040d94caffSDavid Greenman retry:
9053a2189d4SJohn Baldwin 	sx_slock(&allproc_lock);
906e602ba25SJulian Elischer 	FOREACH_PROC_IN_SYSTEM(p) {
907b18bfc3dSJohn Dyson 		struct vmspace *vm;
908b40ce416SJulian Elischer 		int minslptime = 100000;
909b18bfc3dSJohn Dyson 
9109eb881f8SSeigo Tanimura 		/*
911b1f99ebeSSeigo Tanimura 		 * Watch out for a process in
912b1f99ebeSSeigo Tanimura 		 * creation.  It may have no
9131c865ac7SJohn Baldwin 		 * address space or lock yet.
9141c865ac7SJohn Baldwin 		 */
9151c865ac7SJohn Baldwin 		mtx_lock_spin(&sched_lock);
9161c865ac7SJohn Baldwin 		if (p->p_state == PRS_NEW) {
9171c865ac7SJohn Baldwin 			mtx_unlock_spin(&sched_lock);
9181c865ac7SJohn Baldwin 			continue;
9191c865ac7SJohn Baldwin 		}
9201c865ac7SJohn Baldwin 		mtx_unlock_spin(&sched_lock);
9211c865ac7SJohn Baldwin 
9221c865ac7SJohn Baldwin 		/*
923b1f99ebeSSeigo Tanimura 		 * An aio daemon switches its
924b1f99ebeSSeigo Tanimura 		 * address space while running.
925b1f99ebeSSeigo Tanimura 		 * Perform a quick check whether
926b1f99ebeSSeigo Tanimura 		 * a process has P_SYSTEM.
9279eb881f8SSeigo Tanimura 		 */
9288f887403SJohn Baldwin 		if ((p->p_flag & P_SYSTEM) != 0)
929b1f99ebeSSeigo Tanimura 			continue;
9301c865ac7SJohn Baldwin 
9311c865ac7SJohn Baldwin 		/*
9321c865ac7SJohn Baldwin 		 * Do not swap out a process that
9331c865ac7SJohn Baldwin 		 * is waiting for VM data
9341c865ac7SJohn Baldwin 		 * structures as there is a possible
9351c865ac7SJohn Baldwin 		 * deadlock.  Test this first as
9361c865ac7SJohn Baldwin 		 * this may block.
9371c865ac7SJohn Baldwin 		 *
9381c865ac7SJohn Baldwin 		 * Lock the map until swapout
9391c865ac7SJohn Baldwin 		 * finishes, or a thread of this
9401c865ac7SJohn Baldwin 		 * process may attempt to alter
9411c865ac7SJohn Baldwin 		 * the map.
9421c865ac7SJohn Baldwin 		 */
9438f887403SJohn Baldwin 		PROC_LOCK(p);
9449eb881f8SSeigo Tanimura 		vm = p->p_vmspace;
945b1f99ebeSSeigo Tanimura 		KASSERT(vm != NULL,
946b1f99ebeSSeigo Tanimura 			("swapout_procs: a process has no address space"));
9479eb881f8SSeigo Tanimura 		++vm->vm_refcnt;
948b1f99ebeSSeigo Tanimura 		PROC_UNLOCK(p);
9499eb881f8SSeigo Tanimura 		if (!vm_map_trylock(&vm->vm_map))
9509eb881f8SSeigo Tanimura 			goto nextproc1;
9519eb881f8SSeigo Tanimura 
9525074aecdSJohn Baldwin 		PROC_LOCK(p);
95369b40456SJohn Baldwin 		if (p->p_lock != 0 ||
9541279572aSDavid Xu 		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
9551279572aSDavid Xu 		    ) != 0) {
9569eb881f8SSeigo Tanimura 			goto nextproc2;
9575074aecdSJohn Baldwin 		}
95823955314SAlfred Perlstein 		/*
95923955314SAlfred Perlstein 		 * Only aiod changes the vmspace; however, it will be
96023955314SAlfred Perlstein 		 * skipped because the if statement above checks
96123955314SAlfred Perlstein 		 * for P_SYSTEM.
96223955314SAlfred Perlstein 		 */
963664f718bSJohn Baldwin 		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
964664f718bSJohn Baldwin 			goto nextproc2;
96569b40456SJohn Baldwin 
966e602ba25SJulian Elischer 		switch (p->p_state) {
9670d94caffSDavid Greenman 		default:
968e602ba25SJulian Elischer 			/* Don't swap out processes in any sort
969e602ba25SJulian Elischer 			 * of 'special' state. */
9708f887403SJohn Baldwin 			break;
971df8bae1dSRodney W. Grimes 
972e602ba25SJulian Elischer 		case PRS_NORMAL:
9738f887403SJohn Baldwin 			mtx_lock_spin(&sched_lock);
97426f9a767SRodney W. Grimes 			/*
975bfbfac11SDavid Greenman 			 * Do not swap out a realtime process.
976b40ce416SJulian Elischer 			 * Check all the thread groups.
977bfbfac11SDavid Greenman 			 */
978b40ce416SJulian Elischer 			FOREACH_KSEGRP_IN_PROC(p, kg) {
9799eb881f8SSeigo Tanimura 				if (PRI_IS_REALTIME(kg->kg_pri_class))
980b40ce416SJulian Elischer 					goto nextproc;
981bfbfac11SDavid Greenman 
982bfbfac11SDavid Greenman 				/*
9839eb881f8SSeigo Tanimura 				 * Guarantee swap_idle_threshold1
984ceb0cf87SJohn Dyson 				 * time in memory.
9850d94caffSDavid Greenman 				 */
9869eb881f8SSeigo Tanimura 				if (kg->kg_slptime < swap_idle_threshold1)
987b40ce416SJulian Elischer 					goto nextproc;
9889eb881f8SSeigo Tanimura 
9891d7b9ed2SJulian Elischer 				/*
9909eb881f8SSeigo Tanimura 				 * Do not swap out a process if it is
9919eb881f8SSeigo Tanimura 				 * waiting on a critical event of some
9929eb881f8SSeigo Tanimura 				 * kind or there is a thread whose
9939eb881f8SSeigo Tanimura 				 * pageable memory may be accessed.
9941d7b9ed2SJulian Elischer 				 *
9951d7b9ed2SJulian Elischer 				 * This could be refined to support
9961d7b9ed2SJulian Elischer 				 * swapping out a thread.
9971d7b9ed2SJulian Elischer 				 */
9989eb881f8SSeigo Tanimura 				FOREACH_THREAD_IN_GROUP(kg, td) {
9991d7b9ed2SJulian Elischer 					if ((td->td_priority) < PSOCK ||
10009eb881f8SSeigo Tanimura 					    !thread_safetoswapout(td))
1001e602ba25SJulian Elischer 						goto nextproc;
1002e602ba25SJulian Elischer 				}
1003ceb0cf87SJohn Dyson 				/*
1004b40ce416SJulian Elischer 				 * If the system is under memory stress,
1005b40ce416SJulian Elischer 				 * or if we are swapping
1006b40ce416SJulian Elischer 				 * idle processes >= swap_idle_threshold2,
1007b40ce416SJulian Elischer 				 * then swap the process out.
1008ceb0cf87SJohn Dyson 				 */
1009ceb0cf87SJohn Dyson 				if (((action & VM_SWAP_NORMAL) == 0) &&
1010ceb0cf87SJohn Dyson 				    (((action & VM_SWAP_IDLE) == 0) ||
10119eb881f8SSeigo Tanimura 				    (kg->kg_slptime < swap_idle_threshold2)))
1012b40ce416SJulian Elischer 					goto nextproc;
10139eb881f8SSeigo Tanimura 
1014b40ce416SJulian Elischer 				if (minslptime > kg->kg_slptime)
1015b40ce416SJulian Elischer 					minslptime = kg->kg_slptime;
1016b40ce416SJulian Elischer 			}
10170d94caffSDavid Greenman 
101811b224dcSDavid Greenman 			/*
10190d94caffSDavid Greenman 			 * If the process has been asleep for a while and had
10200d94caffSDavid Greenman 			 * most of its pages taken away already, swap it out.
102111b224dcSDavid Greenman 			 */
1022ceb0cf87SJohn Dyson 			if ((action & VM_SWAP_NORMAL) ||
1023ceb0cf87SJohn Dyson 				((action & VM_SWAP_IDLE) &&
1024b40ce416SJulian Elischer 				 (minslptime > swap_idle_threshold2))) {
1025df8bae1dSRodney W. Grimes 				swapout(p);
1026df8bae1dSRodney W. Grimes 				didswap++;
10279eb881f8SSeigo Tanimura 				mtx_unlock_spin(&sched_lock);
1028664f718bSJohn Baldwin 				PROC_UNLOCK(p);
10299eb881f8SSeigo Tanimura 				vm_map_unlock(&vm->vm_map);
10309eb881f8SSeigo Tanimura 				vmspace_free(vm);
10319eb881f8SSeigo Tanimura 				sx_sunlock(&allproc_lock);
10320d94caffSDavid Greenman 				goto retry;
1033c96d52a9SJohn Baldwin 			}
1034b40ce416SJulian Elischer nextproc:
10359eb881f8SSeigo Tanimura 			mtx_unlock_spin(&sched_lock);
10368f887403SJohn Baldwin 		}
10379eb881f8SSeigo Tanimura nextproc2:
10389eb881f8SSeigo Tanimura 		PROC_UNLOCK(p);
10399eb881f8SSeigo Tanimura 		vm_map_unlock(&vm->vm_map);
10409eb881f8SSeigo Tanimura nextproc1:
10419eb881f8SSeigo Tanimura 		vmspace_free(vm);
104230171114SPeter Wemm 		continue;
1043ceb0cf87SJohn Dyson 	}
10441005a129SJohn Baldwin 	sx_sunlock(&allproc_lock);
104526f9a767SRodney W. Grimes 	/*
104626f9a767SRodney W. Grimes 	 * If we swapped something out, and another process needed memory,
104726f9a767SRodney W. Grimes 	 * then wakeup the sched process.
104826f9a767SRodney W. Grimes 	 */
10490d94caffSDavid Greenman 	if (didswap)
105024a1cce3SDavid Greenman 		wakeup(&proc0);
1051df8bae1dSRodney W. Grimes }
1052df8bae1dSRodney W. Grimes 
1053f708ef1bSPoul-Henning Kamp static void
1054df8bae1dSRodney W. Grimes swapout(p)
105554d92145SMatthew Dillon 	struct proc *p;
1056df8bae1dSRodney W. Grimes {
1057b40ce416SJulian Elischer 	struct thread *td;
1058df8bae1dSRodney W. Grimes 
1059ea754954SJohn Baldwin 	PROC_LOCK_ASSERT(p, MA_OWNED);
10609eb881f8SSeigo Tanimura 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
1061d3a34985SJohn Dyson #if defined(SWAP_DEBUG)
1062d3a34985SJohn Dyson 	printf("swapping out %d\n", p->p_pid);
1063d3a34985SJohn Dyson #endif
10641d7b9ed2SJulian Elischer 
10651d7b9ed2SJulian Elischer 	/*
10669eb881f8SSeigo Tanimura 	 * The states of this process and its threads may have changed
10679eb881f8SSeigo Tanimura 	 * by now.  Assuming that there is only one pageout daemon thread,
10689eb881f8SSeigo Tanimura 	 * this process should still be in memory.
10699eb881f8SSeigo Tanimura 	 */
1070664f718bSJohn Baldwin 	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
10719eb881f8SSeigo Tanimura 		("swapout: lost a swapout race?"));
10729eb881f8SSeigo Tanimura 
10739eb881f8SSeigo Tanimura #if defined(INVARIANTS)
10749eb881f8SSeigo Tanimura 	/*
10751d7b9ed2SJulian Elischer 	 * Make sure that all threads are safe to be swapped out.
10761d7b9ed2SJulian Elischer 	 *
10771d7b9ed2SJulian Elischer 	 * Alternatively, we could swap out only safe threads.
10781d7b9ed2SJulian Elischer 	 */
10791d7b9ed2SJulian Elischer 	FOREACH_THREAD_IN_PROC(p, td) {
10809eb881f8SSeigo Tanimura 		KASSERT(thread_safetoswapout(td),
10819eb881f8SSeigo Tanimura 			("swapout: there is a thread not safe for swapout"));
10821d7b9ed2SJulian Elischer 	}
10839eb881f8SSeigo Tanimura #endif /* INVARIANTS */
10841d7b9ed2SJulian Elischer 
108526f9a767SRodney W. Grimes 	++p->p_stats->p_ru.ru_nswap;
1086df8bae1dSRodney W. Grimes 	/*
108726f9a767SRodney W. Grimes 	 * remember the process resident count
1088df8bae1dSRodney W. Grimes 	 */
1089b1028ad1SLuoqi Chen 	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
1090df8bae1dSRodney W. Grimes 
10919eb881f8SSeigo Tanimura 	p->p_sflag &= ~PS_INMEM;
1092664f718bSJohn Baldwin 	p->p_sflag |= PS_SWAPPINGOUT;
1093664f718bSJohn Baldwin 	PROC_UNLOCK(p);
1094664f718bSJohn Baldwin 	FOREACH_THREAD_IN_PROC(p, td)
1095664f718bSJohn Baldwin 		TD_SET_SWAPPED(td);
10969ed346baSBosko Milekic 	mtx_unlock_spin(&sched_lock);
109726f9a767SRodney W. Grimes 
1098a136efe9SPeter Wemm 	vm_proc_swapout(p);
1099664f718bSJohn Baldwin 	FOREACH_THREAD_IN_PROC(p, td)
110049a2507bSAlan Cox 		vm_thread_swapout(td);
1101664f718bSJohn Baldwin 
1102664f718bSJohn Baldwin 	PROC_LOCK(p);
11039ed346baSBosko Milekic 	mtx_lock_spin(&sched_lock);
1104664f718bSJohn Baldwin 	p->p_sflag &= ~PS_SWAPPINGOUT;
1105df8bae1dSRodney W. Grimes 	p->p_swtime = 0;
1106df8bae1dSRodney W. Grimes }
11075afce282SDavid Greenman #endif /* !NO_SWAPPING */
1108