/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

/*
 * System initialization
 *
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

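/*
 * Illustrative sketch (hypothetical, hence "#if 0"): a typical kernacc()
 * consumer verifies that a kernel buffer lies within mapped kernel
 * address space with the requested protection before using it.  The
 * helper and its caller are not part of this file.
 */
#if 0
static int
example_check_kernel_buf(void *buf, int len)
{

	if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
		return (EFAULT);	/* unmapped or insufficient protection */
	return (0);
}
#endif
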
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

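/*
 * Illustrative sketch (hypothetical, disabled): pre-flighting a user
 * buffer before a copyout().  Per the warning above, useracc() only
 * inspects map entries, so the copyout() itself can still fail and its
 * error must be handled anyway.
 */
#if 0
static int
example_copy_to_user(void *ubuf, const void *kbuf, int len)
{

	if (!useracc(ubuf, len, VM_PROT_WRITE))
		return (EFAULT);
	return (copyout(kbuf, ubuf, len));
}
#endif
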
int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

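/*
 * Illustrative sketch (hypothetical, disabled): the sysctl code is the
 * main consumer of vslock()/vsunlock().  The pattern is to wire the
 * user buffer, perform the transfer, and then unwire it, so the pages
 * cannot be paged out mid-transfer.
 */
#if 0
static int
example_wired_copyout(void *ubuf, const void *kbuf, size_t len)
{
	int error;

	error = vslock(ubuf, len);		/* wire the user pages */
	if (error != 0)
		return (error);
	error = copyout(kbuf, ubuf, len);
	vsunlock(ubuf, len);			/* unwire them again */
	return (error);
}
#endif
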
/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
	vm_page_wakeup(m);
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

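/*
 * Illustrative sketch (hypothetical, disabled): an image activator
 * reading a small run of bytes from an executable's backing VM object
 * through the map/unmap pair above.  The copy must not cross a page
 * boundary, since only a single page is mapped.
 */
#if 0
static int
example_read_image_bytes(vm_object_t object, vm_ooffset_t offset, void *dst,
    size_t len)
{
	struct sf_buf *sf;

	sf = vm_imgact_map_page(object, offset);
	if (sf == NULL)
		return (EIO);
	bcopy((void *)(sf_buf_kva(sf) + (offset & PAGE_MASK)), dst, len);
	vm_imgact_unmap_page(sf);
	return (0);
}
#endif
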
void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of cached kernel stacks");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks currently allocated");

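/*
 * Note on the cache representation: a cached stack's own (still mapped)
 * memory holds its struct kstack_cache_entry, so the entry pointer
 * doubles as the stack's base address.  vm_thread_dispose() stores the
 * entry at the stack base; vm_thread_new() pops it and reuses the
 * address as td_kstack.
 */
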
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance of a process and
 * the creation performance of a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == KSTACK_PAGES) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = KSTACK_PAGES;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	ks = kmem_alloc_nofault_space(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
#else
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

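/*
 * Illustrative sketch (hypothetical, disabled): callers normally pass 0
 * for "pages" to get the default KSTACK_PAGES-sized stack, which is the
 * only size served from the cache fast path above.
 */
#if 0
static int
example_alloc_kstack(struct thread *td)
{

	if (!vm_thread_new(td, 0))	/* 0 selects the default KSTACK_PAGES */
		return (ENOMEM);
	return (0);
}
#endif
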
static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_stack_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	/*
	 * Back out the guard-page offset applied in vm_thread_new()
	 * before freeing the mapping.
	 */
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    KSTACK_PAGES);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifndef NO_SWAPPING
/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, 0);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t ma[KSTACK_MAX_PAGES];
	int i, j, k, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++)
		ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
		    VM_ALLOC_WIRED);
	for (i = 0; i < pages; i++) {
		if (ma[i]->valid != VM_PAGE_BITS_ALL) {
			KASSERT(ma[i]->oflags & VPO_BUSY,
			    ("lost busy 1"));
			vm_object_pip_add(ksobj, 1);
			/*
			 * Cluster the pagein: extend the run from page i
			 * up to (but not including) the next page that is
			 * already fully valid.
			 */
			for (j = i + 1; j < pages; j++) {
				KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
				    (ma[j]->oflags & VPO_BUSY),
				    ("lost busy 2"));
				if (ma[j]->valid == VM_PAGE_BITS_ALL)
					break;
			}
			rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack "
				    "for proc: %d", td->td_proc->p_pid);
			vm_object_pip_wakeup(ksobj);
			/* The pager may have replaced the pages; look them up. */
			for (k = i; k < j; k++)
				ma[k] = vm_page_lookup(ksobj, k);
			vm_page_wakeup(ma[i]);
		} else if (ma[i]->oflags & VPO_BUSY)
			vm_page_wakeup(ma[i]);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}
#endif /* !NO_SWAPPING */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared.  Essentially,
		 * this changes shared memory amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

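/*
 * Summary of the RFPROC/RFMEM cases handled above (informational):
 *
 *	RFPROC | RFMEM	child shares the parent's vmspace
 *	RFPROC only	child gets the copy-on-write vmspace "vm2"
 *	RFMEM only	no new process; the shared vmspace stays shared
 *	neither		no new process; a shared vmspace is unshared
 *			via vmspace_unshare() (rfork(2)-style)
 */
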
/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * can not change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW ||
		    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread has a higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", MAXSLP * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}

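/*
 * Worked example of the swap-in priority computed above (informational):
 * a process swapped out for 20 seconds, with a thread asleep for 10
 * seconds and a nice value of -5, scores pri = 20 + 10 - (-5 * 8) = 70,
 * so it is chosen ahead of an equally idle nice-0 process scoring 30.
 */
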
void
kick_proc0(void)
{

	wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW)
			continue;
		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		/*
		 * Do not swap out a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|
		    P_WEXIT)) != 0) {
			goto nextproc;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the P_SYSTEM check above.
		 */
		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			break;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all the thread groups.
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swap out a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				if (swapout(p) == 0)
					didswap++;
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

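/*
 * Tuning note (informational): the thresholds above are exported as the
 * vm.swap_idle_threshold{1,2} sysctls.  For example, an administrator
 * who wants idle processes left in memory for 30 seconds could set:
 *
 *	sysctl vm.swap_idle_threshold2=30
 */
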
static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
	    ("swapout: lost a swapout race?"));

	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */