xref: /freebsd/sys/vm/vm_glue.c (revision 46902503bc85c7b5fcdfb06e027ad756e083b3a4)
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

/*
 * System initialization
 *
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
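
/*
 * Illustrative sketch only (not part of this file; "buf" and "len" are
 * hypothetical): a caller validating a kernel-space range before
 * touching it.  As the warning above notes, this checks only the map
 * entries, not the current readability of the underlying pages.
 *
 *	if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 */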

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should
 * be used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
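
/*
 * Illustrative sketch (hypothetical names): a syscall-level check of a
 * user buffer before a copyout().  Per the warning above, useracc()
 * alone is advisory; copyout() still performs the authoritative fault
 * handling.
 *
 *	if (!useracc(uaddr, len, VM_PROT_WRITE))
 *		return (EFAULT);
 *	error = copyout(kbuf, uaddr, len);
 */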

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
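
/*
 * Typical pairing, as in the sysctl old-data path (a sketch with
 * hypothetical names): wire the user buffer for the duration of a copy
 * that must not block on faults, then unwire it.
 *
 *	error = vslock(uaddr, len);
 *	if (error != 0)
 *		return (error);
 *	... fill the buffer ...
 *	vsunlock(uaddr, len);
 */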

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_WLOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
	    VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_busy(m);
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (rv != VM_PAGER_OK) {
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
			m = NULL;
			goto out;
		}
		vm_page_wakeup(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
out:
	VM_OBJECT_WUNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}
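
/*
 * Sketch of the intended calling pattern for an image activator
 * (hypothetical names): map a page of the executable, copy from it,
 * then drop the mapping.  sf_buf_kva() yields the KVA of the
 * CPU-private mapping.
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((char *)sf_buf_kva(sf) + pageoff, dst, n);
 *	vm_imgact_unmap_page(sf);
 */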

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);

SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "");

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects fork performance for a process and
 * creation performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	if (pages == KSTACK_PAGES) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = KSTACK_PAGES;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
#if defined(__mips__)
	/*
	 * We need to align the kstack's mapped address to fit within
	 * a single TLB entry.
	 */
	ks = kmem_alloc_nofault_space(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
#else
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}
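
/*
 * Resulting kstack KVA layout (a sketch; sizes depend on KSTACK_PAGES
 * and KSTACK_GUARD_PAGES):
 *
 *	allocation base                      td->td_kstack
 *	|<- guard page(s), unmapped ->|<- pages * PAGE_SIZE, wired ->|
 *
 * A stack that grows down past td->td_kstack runs into the unmapped
 * guard region and faults immediately instead of silently corrupting
 * adjacent kernel memory.
 */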

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}
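
/*
 * Note the storage trick above: a cached stack's own (still mapped)
 * memory holds its struct kstack_cache_entry, so the cache needs no
 * separate allocations.  Conceptually:
 *
 *	struct kstack_cache_entry {
 *		vm_object_t ksobj;
 *		struct kstack_cache_entry *next_ks_entry;
 *	};
 *
 * (A sketch; the authoritative definition lives in
 * <sys/_kstack_cache.h>.)
 */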

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    KSTACK_PAGES);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifndef NO_SWAPPING
/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_lock(m);
		vm_page_unwire(m, 0);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t ma[KSTACK_MAX_PAGES];
	int i, j, k, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_WLOCK(ksobj);
	for (i = 0; i < pages; i++)
		ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
		    VM_ALLOC_WIRED);
	for (i = 0; i < pages; i++) {
		if (ma[i]->valid != VM_PAGE_BITS_ALL) {
			KASSERT(ma[i]->oflags & VPO_BUSY,
			    ("lost busy 1"));
			vm_object_pip_add(ksobj, 1);
			for (j = i + 1; j < pages; j++) {
				KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
				    (ma[j]->oflags & VPO_BUSY),
				    ("lost busy 2"));
				if (ma[j]->valid == VM_PAGE_BITS_ALL)
					break;
			}
			rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack "
				    "for proc: %d", td->td_proc->p_pid);
			vm_object_pip_wakeup(ksobj);
			for (k = i; k < j; k++)
				ma[k] = vm_page_lookup(ksobj, k);
			vm_page_wakeup(ma[i]);
		} else if (ma[i]->oflags & VPO_BUSY)
			vm_page_wakeup(ma[i]);
	}
	VM_OBJECT_WUNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}
#endif /* !NO_SWAPPING */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes shared memory amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}
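
/*
 * Flag combinations (illustrative): fork(2) passes RFFDG | RFPROC, so
 * the child gets a copy-on-write copy of vm2; rfork(2) with
 * RFPROC | RFMEM shares the parent's vmspace outright; and an rfork(2)
 * without RFPROC on a shared vmspace takes the vmspace_unshare() path
 * above, converting the sharing into local COW.
 */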

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * cannot change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}
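
/*
 * Callers normally reach faultin() through _PHOLD(), which bumps
 * p_lock and faults the process in when P_INMEM is clear.  A sketch of
 * the usual pattern (see sys/proc.h for the authoritative macros):
 *
 *	PROC_LOCK(p);
 *	_PHOLD(p);		-- may call faultin(p)
 *	... operate on the now-resident process ...
 *	_PRELE(p);
 *	PROC_UNLOCK(p);
 */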

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW ||
		    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", MAXSLP * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}
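
/*
 * Worked example of the selection metric above (numbers illustrative):
 * a process swapped out for 20 seconds whose thread has slept for 10
 * seconds scores pri = 20 + 10 = 30; with p_nice = -5 and no
 * TDF_SWAPINREQ pending, pri = 30 - (-5 * 8) = 70.  The swapped-out
 * process with the highest score is faulted in first.
 */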

void
kick_proc0(void)
{

	wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW)
			continue;
		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		/*
		 * Do not swap out a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort
			 * of 'special' state.
			 */
			break;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all the thread groups.
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swap out a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				if (swapout(p) == 0)
					didswap++;
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}
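
/*
 * Usage note (a sketch of the expected callers, not a definition):
 * the VM daemon invokes swapout_procs(VM_SWAP_NORMAL) under memory
 * pressure and, when idle swapping is enabled via the
 * vm.swap_idle_enabled sysctl, swapout_procs(VM_SWAP_IDLE) to push
 * long-idle processes out proactively.
 */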

static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
		("swapout: lost a swapout race?"));

	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */