/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

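/*
 * kernacc() and useracc() report whether the kernel map (resp. the
 * current process's map) currently grants the protection given by the
 * VM_PROT_* mask ``rw'' over every page spanned by [addr, addr + len).
 * The answer is advisory only: the map may change after the check.
 */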
/*
 * MPSAFE
 */
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    prot);
	return (rv == TRUE);
}

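/*
 * vslock() wires and vsunlock() unwires the pages backing
 * [addr, addr + len) in the current process's map, so that the memory
 * cannot be paged out while, e.g., a driver or sysctl handler works on
 * it.  A typical caller brackets its access (a sketch; ``uaddr'' and
 * ``ulen'' are hypothetical):
 *
 *	vslock(uaddr, ulen);
 *	... safely read/write the user buffer ...
 *	vsunlock(uaddr, ulen);
 */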
/*
 * MPSAFE
 */
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	caddr_t addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

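/*
 * The U area (struct user) holds per-process data such as the statistics
 * (u_stats) and signal actions (u_sigacts) referenced in vm_forkproc()
 * below.  It is backed by a dedicated VM object (p_upages_obj) and mapped
 * into kernel virtual memory (p_uarea) so that, when swapping is enabled,
 * it can be paged out together with the process.
 */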
/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

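/*
 * When swapping is configured, a process's U area can itself be paged
 * out while the process is swapped: vm_proc_swapout() dirties and
 * unwires the U area pages and unmaps them from the kernel, and
 * vm_proc_swapin() pages them back in and rewires them.
 */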
#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}
#endif

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
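/*
 * A sketch of the cases handled below, in terms of the rfork(2) flags
 * from <sys/unistd.h>:
 *
 *	RFPROC clear:	no new process is created; the address space is
 *			unshared (made COW) only if RFMEM is also clear.
 *	RFPROC|RFMEM:	the child shares p1's vmspace (vfork-style).
 *	RFPROC only:	the child gets a copy-on-write copy of p1's
 *			vmspace via vmspace_fork().
 */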
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this turns memory shared amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 *
	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't need
	 * to share sigacts, so we use the up->u_sigacts.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n",
			    p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{
	struct thread *td;

	GIANT_REQUIRED;
	cpu_wait(p);
	/* XXXKSE by here there should not be any threads left! */
	FOREACH_THREAD_IN_PROC(p, td) {
		panic("vm_waitproc: Survivor thread!");
	}
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* Floor the limit at 512 pages (2MB with 4K pages). */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

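/*
 * faultin() makes ``p'' resident again.  The caller holds both the proc
 * lock and sched_lock; both are still held on return.  If another process
 * is already swapping ``p'' in, we simply wait for it to finish.
 */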
void
faultin(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
#ifdef NO_SWAPPING
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else
	if ((p->p_sflag & PS_INMEM) == 0) {
		struct thread *td;

		++p->p_lock;
		/*
		 * If another process is swapping in this process,
		 * just wait until it finishes.
		 */
		if (p->p_sflag & PS_SWAPPINGIN) {
			mtx_unlock_spin(&sched_lock);
			msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
			mtx_lock_spin(&sched_lock);
			--p->p_lock;
			return;
		}

		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			pmap_swapin_thread(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td)
			if (td->td_state == TDS_SWAPPED)
				setrunqueue(td);

		wakeup(&p->p_sflag);

		/* Undo the ++p->p_lock hold taken above. */
		--p->p_lock;
	}
#endif
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - the process with the highest-priority thread counts.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
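/*
 * Selection criterion, as implemented below: for every swapped-out
 * process that has a runnable (TDS_SWAPPED) thread, compute
 *
 *	pri = p_swtime + kg_slptime  (- 8 * kg_nice unless PS_SWAPINREQ)
 *
 * and swap in the process with the largest such value.
 */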
/* ARGSUSED */
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPING | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * A runnable thread of a swapped-out process is
			 * in TDS_SWAPPED.
			 */
			if (td->td_state == TDS_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * If this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		goto loop;
	}

	p->p_sflag &= ~PS_SWAPINREQ;

	/*
	 * We would like to bring someone in.  (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	GIANT_REQUIRED;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Do not swapout a process that is waiting for VM
		 * data structures, as there is a possible deadlock.
		 * Test this first, as it may block.
		 *
		 * Lock the map until swapout finishes, or a thread of
		 * this process may attempt to alter the map.
		 */
		vm = p->p_vmspace;
		++vm->vm_refcnt;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SNGL|P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
			goto nextproc2;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			goto nextproc;

		case PRS_NORMAL:
			/*
			 * Do not swapout a realtime process.
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;

				/*
				 * swapout() unlocks the proc lock.  This is
				 * ugly, but avoids a superfluous unlock/lock
				 * pair.
				 */
				mtx_unlock_spin(&sched_lock);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		mtx_unlock_spin(&sched_lock);
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

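/*
 * swapout() marks ``p'' as swapped out: runnable threads are moved from
 * the run queues to TDS_SWAPPED, PS_INMEM is cleared, and the U area and
 * per-thread kernel stacks are released for pageout.  Called with the
 * proc lock and sched_lock held; the proc lock is dropped here, while
 * sched_lock is held again on return.
 */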
static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) == PS_INMEM,
		("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
			("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		if (td->td_state == TDS_RUNQ) {	/* XXXKSE */
			remrunqueue(td);	/* XXXKSE */
			td->td_state = TDS_SWAPPED;
		}
	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPING;
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_swapout_thread(td);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */
849