xref: /freebsd/sys/vm/vm_glue.c (revision a3e8fd0b7f663db7eafff527d5c3ca3bcfa8a537)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  *
62  * $FreeBSD$
63  */
64 
65 #include "opt_vm.h"
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/lock.h>
70 #include <sys/mutex.h>
71 #include <sys/proc.h>
72 #include <sys/resourcevar.h>
73 #include <sys/shm.h>
74 #include <sys/vmmeter.h>
75 #include <sys/sx.h>
76 #include <sys/sysctl.h>
77 
78 #include <sys/kernel.h>
79 #include <sys/ktr.h>
80 #include <sys/unistd.h>
81 
82 #include <machine/limits.h>
83 
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/pmap.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_pager.h>
94 
95 #include <sys/user.h>
96 
97 extern int maxslp;
98 
99 /*
100  * System initialization
101  *
102  * Note: proc0 from proc.h
103  */
104 static void vm_init_limits(void *);
105 SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)
106 
107 /*
108  * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
109  *
110  * Note: run scheduling should be divorced from the vm system.
111  */
112 static void scheduler(void *);
113 SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)
114 
115 #ifndef NO_SWAPPING
116 static void swapout(struct proc *);
117 static void vm_proc_swapin(struct proc *p);
118 static void vm_proc_swapout(struct proc *p);
119 #endif
120 
121 /*
122  * MPSAFE
123  */
124 int
125 kernacc(addr, len, rw)
126 	caddr_t addr;
127 	int len, rw;
128 {
129 	boolean_t rv;
130 	vm_offset_t saddr, eaddr;
131 	vm_prot_t prot;
132 
133 	KASSERT((rw & ~VM_PROT_ALL) == 0,
134 	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
135 	prot = rw;
136 	saddr = trunc_page((vm_offset_t)addr);
137 	eaddr = round_page((vm_offset_t)addr + len);
138 	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
139 	return (rv == TRUE);
140 }
141 
142 /*
143  * MPSAFE
144  */
145 int
146 useracc(addr, len, rw)
147 	caddr_t addr;
148 	int len, rw;
149 {
150 	boolean_t rv;
151 	vm_prot_t prot;
152 	vm_map_t map;
153 
154 	KASSERT((rw & ~VM_PROT_ALL) == 0,
155 	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
156 	prot = rw;
157 	map = &curproc->p_vmspace->vm_map;
158 	if ((vm_offset_t)addr + len > vm_map_max(map) ||
159 	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
160 		return (FALSE);
161 	}
162 	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
163 	    round_page((vm_offset_t)addr + len), prot);
164 	return (rv == TRUE);
165 }
166 
167 /*
168  * MPSAFE
169  */
170 void
171 vslock(addr, len)
172 	caddr_t addr;
173 	u_int len;
174 {
175 
176 	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
177 	    round_page((vm_offset_t)addr + len), FALSE);
178 }
179 
180 /*
181  * MPSAFE
182  */
183 void
184 vsunlock(addr, len)
185 	caddr_t addr;
186 	u_int len;
187 {
188 
189 	vm_map_unwire(&curproc->p_vmspace->vm_map,
190 	    trunc_page((vm_offset_t)addr),
191 	    round_page((vm_offset_t)addr + len), FALSE);
192 }
193 
194 /*
195  * Create the U area for a new process.
196  * This routine directly affects the fork perf for a process.
197  */
198 void
199 vm_proc_new(struct proc *p)
200 {
201 	vm_page_t ma[UAREA_PAGES];
202 	vm_object_t upobj;
203 	vm_offset_t up;
204 	vm_page_t m;
205 	u_int i;
206 
207 	/*
208 	 * Allocate object for the upage.
209 	 */
210 	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
211 	p->p_upages_obj = upobj;
212 
213 	/*
214 	 * Get a kernel virtual address for the U area for this process.
215 	 */
216 	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
217 	if (up == 0)
218 		panic("vm_proc_new: upage allocation failed");
219 	p->p_uarea = (struct user *)up;
220 
221 	for (i = 0; i < UAREA_PAGES; i++) {
222 		/*
223 		 * Get a uarea page.
224 		 */
225 		m = vm_page_grab(upobj, i,
226 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
227 		ma[i] = m;
228 
229 		vm_page_wakeup(m);
230 		vm_page_flag_clear(m, PG_ZERO);
231 		m->valid = VM_PAGE_BITS_ALL;
232 	}
233 
234 	/*
235 	 * Enter the pages into the kernel address space.
236 	 */
237 	pmap_qenter(up, ma, UAREA_PAGES);
238 }
239 
240 /*
241  * Dispose the U area for a process that has exited.
242  * This routine directly impacts the exit perf of a process.
243  * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
244  */
245 void
246 vm_proc_dispose(struct proc *p)
247 {
248 	vm_object_t upobj;
249 	vm_offset_t up;
250 	vm_page_t m;
251 
252 	upobj = p->p_upages_obj;
253 	if (upobj->resident_page_count != UAREA_PAGES)
254 		panic("vm_proc_dispose: incorrect number of pages in upobj");
255 	vm_page_lock_queues();
256 	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
257 		vm_page_busy(m);
258 		vm_page_unwire(m, 0);
259 		vm_page_free(m);
260 	}
261 	vm_page_unlock_queues();
262 	up = (vm_offset_t)p->p_uarea;
263 	pmap_qremove(up, UAREA_PAGES);
264 	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
265 	vm_object_deallocate(upobj);
266 }
267 
268 #ifndef NO_SWAPPING
269 /*
270  * Allow the U area for a process to be prejudicially paged out.
271  */
272 static void
273 vm_proc_swapout(struct proc *p)
274 {
275 	vm_object_t upobj;
276 	vm_offset_t up;
277 	vm_page_t m;
278 
279 	upobj = p->p_upages_obj;
280 	if (upobj->resident_page_count != UAREA_PAGES)
281 		panic("vm_proc_dispose: incorrect number of pages in upobj");
282 	vm_page_lock_queues();
283 	TAILQ_FOREACH(m, &upobj->memq, listq) {
284 		vm_page_dirty(m);
285 		vm_page_unwire(m, 0);
286 	}
287 	vm_page_unlock_queues();
288 	up = (vm_offset_t)p->p_uarea;
289 	pmap_qremove(up, UAREA_PAGES);
290 }
291 
292 /*
293  * Bring the U area for a specified process back in.
294  */
295 static void
296 vm_proc_swapin(struct proc *p)
297 {
298 	vm_page_t ma[UAREA_PAGES];
299 	vm_object_t upobj;
300 	vm_offset_t up;
301 	vm_page_t m;
302 	int rv;
303 	int i;
304 
305 	upobj = p->p_upages_obj;
306 	for (i = 0; i < UAREA_PAGES; i++) {
307 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
308 		if (m->valid != VM_PAGE_BITS_ALL) {
309 			rv = vm_pager_get_pages(upobj, &m, 1, 0);
310 			if (rv != VM_PAGER_OK)
311 				panic("vm_proc_swapin: cannot get upage");
312 		}
313 		ma[i] = m;
314 	}
315 	if (upobj->resident_page_count != UAREA_PAGES)
316 		panic("vm_proc_swapin: lost pages from upobj");
317 	vm_page_lock_queues();
318 	TAILQ_FOREACH(m, &upobj->memq, listq) {
319 		m->valid = VM_PAGE_BITS_ALL;
320 		vm_page_wire(m);
321 		vm_page_wakeup(m);
322 	}
323 	vm_page_unlock_queues();
324 	up = (vm_offset_t)p->p_uarea;
325 	pmap_qenter(up, ma, UAREA_PAGES);
326 }
327 #endif
328 
329 /*
330  * Implement fork's actions on an address space.
331  * Here we arrange for the address space to be copied or referenced,
332  * allocate a user struct (pcb and kernel stack), then call the
333  * machine-dependent layer to fill those in and make the new process
334  * ready to run.  The new process is set up so that it returns directly
335  * to user mode to avoid stack copying and relocation problems.
336  */
337 void
338 vm_forkproc(td, p2, td2, flags)
339 	struct thread *td;
340 	struct proc *p2;
341 	struct thread *td2;
342 	int flags;
343 {
344 	struct proc *p1 = td->td_proc;
345 	struct user *up;
346 
347 	GIANT_REQUIRED;
348 
349 	if ((flags & RFPROC) == 0) {
350 		/*
351 		 * Divorce the memory, if it is shared, essentially
352 		 * this changes shared memory amongst threads, into
353 		 * COW locally.
354 		 */
355 		if ((flags & RFMEM) == 0) {
356 			if (p1->p_vmspace->vm_refcnt > 1) {
357 				vmspace_unshare(p1);
358 			}
359 		}
360 		cpu_fork(td, p2, td2, flags);
361 		return;
362 	}
363 
364 	if (flags & RFMEM) {
365 		p2->p_vmspace = p1->p_vmspace;
366 		p1->p_vmspace->vm_refcnt++;
367 	}
368 
369 	while (vm_page_count_severe()) {
370 		VM_WAIT;
371 	}
372 
373 	if ((flags & RFMEM) == 0) {
374 		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
375 
376 		pmap_pinit2(vmspace_pmap(p2->p_vmspace));
377 
378 		if (p1->p_vmspace->vm_shm)
379 			shmfork(p1, p2);
380 	}
381 
382 	/* XXXKSE this is unsatisfactory but should be adequate */
383 	up = p2->p_uarea;
384 
385 	/*
386 	 * p_stats currently points at fields in the user struct
387 	 * but not at &u, instead at p_addr. Copy parts of
388 	 * p_stats; zero the rest of p_stats (statistics).
389 	 *
390 	 * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we dont' need
391 	 * to share sigacts, so we use the up->u_sigacts.
392 	 */
393 	p2->p_stats = &up->u_stats;
394 	if (p2->p_sigacts == NULL) {
395 		if (p2->p_procsig->ps_refcnt != 1)
396 			printf ("PID:%d NULL sigacts with refcnt not 1!\n",p2->p_pid);
397 		p2->p_sigacts = &up->u_sigacts;
398 		up->u_sigacts = *p1->p_sigacts;
399 	}
400 
401 	bzero(&up->u_stats.pstat_startzero,
402 	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
403 		(caddr_t) &up->u_stats.pstat_startzero));
404 	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
405 	    ((caddr_t) &up->u_stats.pstat_endcopy -
406 		(caddr_t) &up->u_stats.pstat_startcopy));
407 
408 
409 	/*
410 	 * cpu_fork will copy and update the pcb, set up the kernel stack,
411 	 * and make the child ready to run.
412 	 */
413 	cpu_fork(td, p2, td2, flags);
414 }
415 
416 /*
417  * Called after process has been wait(2)'ed apon and is being reaped.
418  * The idea is to reclaim resources that we could not reclaim while
419  * the process was still executing.
420  */
421 void
422 vm_waitproc(p)
423 	struct proc *p;
424 {
425 
426 	GIANT_REQUIRED;
427 	cpu_wait(p);
428 	vmspace_exitfree(p);		/* and clean-out the vmspace */
429 }
430 
431 /*
432  * Set default limits for VM system.
433  * Called for proc 0, and then inherited by all others.
434  *
435  * XXX should probably act directly on proc0.
436  */
437 static void
438 vm_init_limits(udata)
439 	void *udata;
440 {
441 	struct proc *p = udata;
442 	int rss_limit;
443 
444 	/*
445 	 * Set up the initial limits on process VM. Set the maximum resident
446 	 * set size to be half of (reasonably) available memory.  Since this
447 	 * is a soft limit, it comes into effect only when the system is out
448 	 * of memory - half of main memory helps to favor smaller processes,
449 	 * and reduces thrashing of the object cache.
450 	 */
451 	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
452 	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
453 	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
454 	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
455 	/* limit the limit to no less than 2MB */
456 	rss_limit = max(cnt.v_free_count, 512);
457 	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
458 	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
459 }
460 
461 void
462 faultin(p)
463 	struct proc *p;
464 {
465 
466 	GIANT_REQUIRED;
467 	PROC_LOCK_ASSERT(p, MA_OWNED);
468 	mtx_assert(&sched_lock, MA_OWNED);
469 #ifdef NO_SWAPPING
470 	if ((p->p_sflag & PS_INMEM) == 0)
471 		panic("faultin: proc swapped out with NO_SWAPPING!");
472 #else
473 	if ((p->p_sflag & PS_INMEM) == 0) {
474 		struct thread *td;
475 
476 		++p->p_lock;
477 		/*
478 		 * If another process is swapping in this process,
479 		 * just wait until it finishes.
480 		 */
481 		if (p->p_sflag & PS_SWAPPINGIN) {
482 			mtx_unlock_spin(&sched_lock);
483 			msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
484 			mtx_lock_spin(&sched_lock);
485 			--p->p_lock;
486 			return;
487 		}
488 
489 		p->p_sflag |= PS_SWAPPINGIN;
490 		mtx_unlock_spin(&sched_lock);
491 		PROC_UNLOCK(p);
492 
493 		vm_proc_swapin(p);
494 		FOREACH_THREAD_IN_PROC (p, td) {
495 			pmap_swapin_thread(td);
496 			TD_CLR_SWAPPED(td);
497 		}
498 
499 		PROC_LOCK(p);
500 		mtx_lock_spin(&sched_lock);
501 		p->p_sflag &= ~PS_SWAPPINGIN;
502 		p->p_sflag |= PS_INMEM;
503 		FOREACH_THREAD_IN_PROC (p, td)
504 			if (TD_CAN_RUN(td))
505 				setrunnable(td);
506 
507 		wakeup(&p->p_sflag);
508 
509 		/* undo the effect of setting SLOCK above */
510 		--p->p_lock;
511 	}
512 #endif
513 }
514 
515 /*
516  * This swapin algorithm attempts to swap-in processes only if there
517  * is enough space for them.  Of course, if a process waits for a long
518  * time, it will be swapped in anyway.
519  *
520  *  XXXKSE - process with the thread with highest priority counts..
521  *
522  * Giant is still held at this point, to be released in tsleep.
523  */
524 /* ARGSUSED*/
525 static void
526 scheduler(dummy)
527 	void *dummy;
528 {
529 	struct proc *p;
530 	struct thread *td;
531 	int pri;
532 	struct proc *pp;
533 	int ppri;
534 
535 	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
536 	/* GIANT_REQUIRED */
537 
538 loop:
539 	if (vm_page_count_min()) {
540 		VM_WAIT;
541 		goto loop;
542 	}
543 
544 	pp = NULL;
545 	ppri = INT_MIN;
546 	sx_slock(&allproc_lock);
547 	FOREACH_PROC_IN_SYSTEM(p) {
548 		struct ksegrp *kg;
549 		if (p->p_sflag & (PS_INMEM | PS_SWAPPING | PS_SWAPPINGIN)) {
550 			continue;
551 		}
552 		mtx_lock_spin(&sched_lock);
553 		FOREACH_THREAD_IN_PROC(p, td) {
554 			/*
555 			 * An otherwise runnable thread of a process
556 			 * swapped out has only the TDI_SWAPPED bit set.
557 			 *
558 			 */
559 			if (td->td_inhibitors == TDI_SWAPPED) {
560 				kg = td->td_ksegrp;
561 				pri = p->p_swtime + kg->kg_slptime;
562 				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
563 					pri -= kg->kg_nice * 8;
564 				}
565 
566 				/*
567 				 * if this ksegrp is higher priority
568 				 * and there is enough space, then select
569 				 * this process instead of the previous
570 				 * selection.
571 				 */
572 				if (pri > ppri) {
573 					pp = p;
574 					ppri = pri;
575 				}
576 			}
577 		}
578 		mtx_unlock_spin(&sched_lock);
579 	}
580 	sx_sunlock(&allproc_lock);
581 
582 	/*
583 	 * Nothing to do, back to sleep.
584 	 */
585 	if ((p = pp) == NULL) {
586 		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
587 		goto loop;
588 	}
589 	PROC_LOCK(p);
590 	mtx_lock_spin(&sched_lock);
591 
592 	/*
593 	 * Another process may be bringing or may have already
594 	 * brought this process in while we traverse all threads.
595 	 * Or, this process may even be being swapped out again.
596 	 */
597 	if (p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) {
598 		mtx_unlock_spin(&sched_lock);
599 		PROC_UNLOCK(p);
600 		goto loop;
601 	}
602 
603 	p->p_sflag &= ~PS_SWAPINREQ;
604 
605 	/*
606 	 * We would like to bring someone in. (only if there is space).
607 	 * [What checks the space? ]
608 	 */
609 	faultin(p);
610 	PROC_UNLOCK(p);
611 	p->p_swtime = 0;
612 	mtx_unlock_spin(&sched_lock);
613 	goto loop;
614 }
615 
616 #ifndef NO_SWAPPING
617 
618 /*
619  * Swap_idle_threshold1 is the guaranteed swapped in time for a process
620  */
621 static int swap_idle_threshold1 = 2;
622 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
623 	CTLFLAG_RW, &swap_idle_threshold1, 0, "");
624 
625 /*
626  * Swap_idle_threshold2 is the time that a process can be idle before
627  * it will be swapped out, if idle swapping is enabled.
628  */
629 static int swap_idle_threshold2 = 10;
630 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
631 	CTLFLAG_RW, &swap_idle_threshold2, 0, "");
632 
633 /*
634  * Swapout is driven by the pageout daemon.  Very simple, we find eligible
635  * procs and unwire their u-areas.  We try to always "swap" at least one
636  * process in case we need the room for a swapin.
637  * If any procs have been sleeping/stopped for at least maxslp seconds,
638  * they are swapped.  Else, we swap the longest-sleeping or stopped process,
639  * if any, otherwise the longest-resident process.
640  */
641 void
642 swapout_procs(action)
643 int action;
644 {
645 	struct proc *p;
646 	struct thread *td;
647 	struct ksegrp *kg;
648 	struct proc *outp, *outp2;
649 	int outpri, outpri2;
650 	int didswap = 0;
651 
652 	GIANT_REQUIRED;
653 
654 	outp = outp2 = NULL;
655 	outpri = outpri2 = INT_MIN;
656 retry:
657 	sx_slock(&allproc_lock);
658 	FOREACH_PROC_IN_SYSTEM(p) {
659 		struct vmspace *vm;
660 		int minslptime = 100000;
661 
662 		/*
663 		 * Watch out for a process in
664 		 * creation.  It may have no
665 		 * address space or lock yet.
666 		 */
667 		mtx_lock_spin(&sched_lock);
668 		if (p->p_state == PRS_NEW) {
669 			mtx_unlock_spin(&sched_lock);
670 			continue;
671 		}
672 		mtx_unlock_spin(&sched_lock);
673 
674 		/*
675 		 * An aio daemon switches its
676 		 * address space while running.
677 		 * Perform a quick check whether
678 		 * a process has P_SYSTEM.
679 		 */
680 		PROC_LOCK(p);
681 		if ((p->p_flag & P_SYSTEM) != 0) {
682 			PROC_UNLOCK(p);
683 			continue;
684 		}
685 
686 		/*
687 		 * Do not swapout a process that
688 		 * is waiting for VM data
689 		 * structures as there is a possible
690 		 * deadlock.  Test this first as
691 		 * this may block.
692 		 *
693 		 * Lock the map until swapout
694 		 * finishes, or a thread of this
695 		 * process may attempt to alter
696 		 * the map.
697 		 */
698 		vm = p->p_vmspace;
699 		KASSERT(vm != NULL,
700 			("swapout_procs: a process has no address space"));
701 		++vm->vm_refcnt;
702 		PROC_UNLOCK(p);
703 		if (!vm_map_trylock(&vm->vm_map))
704 			goto nextproc1;
705 
706 		PROC_LOCK(p);
707 		if (p->p_lock != 0 ||
708 		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
709 		    ) != 0) {
710 			goto nextproc2;
711 		}
712 		/*
713 		 * only aiod changes vmspace, however it will be
714 		 * skipped because of the if statement above checking
715 		 * for P_SYSTEM
716 		 */
717 		mtx_lock_spin(&sched_lock);
718 		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) != PS_INMEM)
719 			goto nextproc;
720 
721 		switch (p->p_state) {
722 		default:
723 			/* Don't swap out processes in any sort
724 			 * of 'special' state. */
725 			goto nextproc;
726 
727 		case PRS_NORMAL:
728 			/*
729 			 * do not swapout a realtime process
730 			 * Check all the thread groups..
731 			 */
732 			FOREACH_KSEGRP_IN_PROC(p, kg) {
733 				if (PRI_IS_REALTIME(kg->kg_pri_class))
734 					goto nextproc;
735 
736 				/*
737 				 * Guarantee swap_idle_threshold1
738 				 * time in memory.
739 				 */
740 				if (kg->kg_slptime < swap_idle_threshold1)
741 					goto nextproc;
742 
743 				/*
744 				 * Do not swapout a process if it is
745 				 * waiting on a critical event of some
746 				 * kind or there is a thread whose
747 				 * pageable memory may be accessed.
748 				 *
749 				 * This could be refined to support
750 				 * swapping out a thread.
751 				 */
752 				FOREACH_THREAD_IN_GROUP(kg, td) {
753 					if ((td->td_priority) < PSOCK ||
754 					    !thread_safetoswapout(td))
755 						goto nextproc;
756 				}
757 				/*
758 				 * If the system is under memory stress,
759 				 * or if we are swapping
760 				 * idle processes >= swap_idle_threshold2,
761 				 * then swap the process out.
762 				 */
763 				if (((action & VM_SWAP_NORMAL) == 0) &&
764 				    (((action & VM_SWAP_IDLE) == 0) ||
765 				    (kg->kg_slptime < swap_idle_threshold2)))
766 					goto nextproc;
767 
768 				if (minslptime > kg->kg_slptime)
769 					minslptime = kg->kg_slptime;
770 			}
771 
772 			/*
773 			 * If the process has been asleep for awhile and had
774 			 * most of its pages taken away already, swap it out.
775 			 */
776 			if ((action & VM_SWAP_NORMAL) ||
777 				((action & VM_SWAP_IDLE) &&
778 				 (minslptime > swap_idle_threshold2))) {
779 				swapout(p);
780 				didswap++;
781 
782 				/*
783 				 * swapout() unlocks a proc lock. This is
784 				 * ugly, but avoids superfluous lock.
785 				 */
786 				mtx_unlock_spin(&sched_lock);
787 				vm_map_unlock(&vm->vm_map);
788 				vmspace_free(vm);
789 				sx_sunlock(&allproc_lock);
790 				goto retry;
791 			}
792 		}
793 nextproc:
794 		mtx_unlock_spin(&sched_lock);
795 nextproc2:
796 		PROC_UNLOCK(p);
797 		vm_map_unlock(&vm->vm_map);
798 nextproc1:
799 		vmspace_free(vm);
800 		continue;
801 	}
802 	sx_sunlock(&allproc_lock);
803 	/*
804 	 * If we swapped something out, and another process needed memory,
805 	 * then wakeup the sched process.
806 	 */
807 	if (didswap)
808 		wakeup(&proc0);
809 }
810 
811 static void
812 swapout(p)
813 	struct proc *p;
814 {
815 	struct thread *td;
816 
817 	PROC_LOCK_ASSERT(p, MA_OWNED);
818 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
819 #if defined(SWAP_DEBUG)
820 	printf("swapping out %d\n", p->p_pid);
821 #endif
822 
823 	/*
824 	 * The states of this process and its threads may have changed
825 	 * by now.  Assuming that there is only one pageout daemon thread,
826 	 * this process should still be in memory.
827 	 */
828 	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPING|PS_SWAPPINGIN)) == PS_INMEM,
829 		("swapout: lost a swapout race?"));
830 
831 #if defined(INVARIANTS)
832 	/*
833 	 * Make sure that all threads are safe to be swapped out.
834 	 *
835 	 * Alternatively, we could swap out only safe threads.
836 	 */
837 	FOREACH_THREAD_IN_PROC(p, td) {
838 		KASSERT(thread_safetoswapout(td),
839 			("swapout: there is a thread not safe for swapout"));
840 	}
841 #endif /* INVARIANTS */
842 
843 	++p->p_stats->p_ru.ru_nswap;
844 	/*
845 	 * remember the process resident count
846 	 */
847 	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
848 
849 	PROC_UNLOCK(p);
850 	p->p_sflag &= ~PS_INMEM;
851 	p->p_sflag |= PS_SWAPPING;
852 	mtx_unlock_spin(&sched_lock);
853 
854 	vm_proc_swapout(p);
855 	FOREACH_THREAD_IN_PROC(p, td) {
856 		pmap_swapout_thread(td);
857 		TD_SET_SWAPPED(td);
858 	}
859 	mtx_lock_spin(&sched_lock);
860 	p->p_sflag &= ~PS_SWAPPING;
861 	p->p_swtime = 0;
862 }
863 #endif /* !NO_SWAPPING */
864