xref: /freebsd/sys/vm/vm_glue.c (revision 884a2a699669ec61e2366e3e358342dbc94be24a)
1 /*-
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Permission to use, copy, modify and distribute this software and
39  * its documentation is hereby granted, provided that both the copyright
40  * notice and this permission notice appear in all copies of the
41  * software, derivative works or modified versions, and any portions
42  * thereof, and that both notices appear in supporting documentation.
43  *
44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47  *
48  * Carnegie Mellon requests users of this software to return to
49  *
50  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
51  *  School of Computer Science
52  *  Carnegie Mellon University
53  *  Pittsburgh PA 15213-3890
54  *
55  * any improvements or extensions that they make and grant Carnegie the
56  * rights to redistribute these changes.
57  */
58 
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61 
62 #include "opt_vm.h"
63 #include "opt_kstack_pages.h"
64 #include "opt_kstack_max_pages.h"
65 
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/limits.h>
69 #include <sys/lock.h>
70 #include <sys/mutex.h>
71 #include <sys/proc.h>
72 #include <sys/racct.h>
73 #include <sys/resourcevar.h>
74 #include <sys/sched.h>
75 #include <sys/sf_buf.h>
76 #include <sys/shm.h>
77 #include <sys/vmmeter.h>
78 #include <sys/sx.h>
79 #include <sys/sysctl.h>
80 
81 #include <sys/eventhandler.h>
82 #include <sys/kernel.h>
83 #include <sys/ktr.h>
84 #include <sys/unistd.h>
85 
86 #include <vm/vm.h>
87 #include <vm/vm_param.h>
88 #include <vm/pmap.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_extern.h>
95 #include <vm/vm_pager.h>
96 #include <vm/swap_pager.h>
97 
98 /*
99  * System initialization
100  *
101  * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
102  *
103  * Note: run scheduling should be divorced from the vm system.
104  */
105 static void scheduler(void *);
106 SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);
107 
108 #ifndef NO_SWAPPING
109 static int swapout(struct proc *);
110 static void swapclear(struct proc *);
111 static void vm_thread_swapin(struct thread *td);
112 static void vm_thread_swapout(struct thread *td);
113 #endif
114 
115 /*
116  * MPSAFE
117  *
118  * WARNING!  This code calls vm_map_check_protection() which only checks
119  * the associated vm_map_entry range.  It does not determine whether the
120  * contents of the memory are actually readable or writable.  In most cases
121  * just checking the vm_map_entry is sufficient within the kernel's address
122  * space.
123  */
124 int
125 kernacc(addr, len, rw)
126 	void *addr;
127 	int len, rw;
128 {
129 	boolean_t rv;
130 	vm_offset_t saddr, eaddr;
131 	vm_prot_t prot;
132 
133 	KASSERT((rw & ~VM_PROT_ALL) == 0,
134 	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
135 
136 	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
137 	    (vm_offset_t)addr + len < (vm_offset_t)addr)
138 		return (FALSE);
139 
140 	prot = rw;
141 	saddr = trunc_page((vm_offset_t)addr);
142 	eaddr = round_page((vm_offset_t)addr + len);
143 	vm_map_lock_read(kernel_map);
144 	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
145 	vm_map_unlock_read(kernel_map);
146 	return (rv == TRUE);
147 }
148 
149 /*
150  * MPSAFE
151  *
152  * WARNING!  This code calls vm_map_check_protection() which only checks
153  * the associated vm_map_entry range.  It does not determine whether the
154  * contents of the memory are actually readable or writable.  vmapbuf(),
155  * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should be
156  * used in conjunction with this call.
157  */
158 int
159 useracc(addr, len, rw)
160 	void *addr;
161 	int len, rw;
162 {
163 	boolean_t rv;
164 	vm_prot_t prot;
165 	vm_map_t map;
166 
167 	KASSERT((rw & ~VM_PROT_ALL) == 0,
168 	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
169 	prot = rw;
170 	map = &curproc->p_vmspace->vm_map;
171 	if ((vm_offset_t)addr + len > vm_map_max(map) ||
172 	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
173 		return (FALSE);
174 	}
175 	vm_map_lock_read(map);
176 	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
177 	    round_page((vm_offset_t)addr + len), prot);
178 	vm_map_unlock_read(map);
179 	return (rv == TRUE);
180 }
181 
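/*
 * Wire the pages backing the range [addr, addr + len) in the current
 * process's address space, charging them against the process's
 * RLIMIT_MEMLOCK limit and racct memory-lock accounting.
 */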
182 int
183 vslock(void *addr, size_t len)
184 {
185 	vm_offset_t end, last, start;
186 	unsigned long nsize;
187 	vm_size_t npages;
188 	int error;
189 
190 	last = (vm_offset_t)addr + len;
191 	start = trunc_page((vm_offset_t)addr);
192 	end = round_page(last);
193 	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
194 		return (EINVAL);
195 	npages = atop(end - start);
196 	if (npages > vm_page_max_wired)
197 		return (ENOMEM);
198 	PROC_LOCK(curproc);
199 	nsize = ptoa(npages +
200 	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map)));
201 	if (nsize > lim_cur(curproc, RLIMIT_MEMLOCK)) {
202 		PROC_UNLOCK(curproc);
203 		return (ENOMEM);
204 	}
205 	if (racct_set(curproc, RACCT_MEMLOCK, nsize)) {
206 		PROC_UNLOCK(curproc);
207 		return (ENOMEM);
208 	}
209 	PROC_UNLOCK(curproc);
210 #if 0
211 	/*
212 	 * XXX - not yet
213 	 *
214 	 * The limit for transient usage of wired pages should be
215 	 * larger than for "permanent" wired pages (mlock()).
216 	 *
217 	 * Also, the sysctl code, which is the only present user
218 	 * of vslock(), does a hard loop on EAGAIN.
219 	 */
220 	if (npages + cnt.v_wire_count > vm_page_max_wired)
221 		return (EAGAIN);
222 #endif
223 	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
224 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
225 	if (error != KERN_SUCCESS) {
226 		PROC_LOCK(curproc);
227 		racct_set(curproc, RACCT_MEMLOCK,
228 		    ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
229 		PROC_UNLOCK(curproc);
230 	}
231 	/*
232 	 * Return EFAULT on error to match copy{in,out}() behaviour
233 	 * rather than returning ENOMEM like mlock() would.
234 	 */
235 	return (error == KERN_SUCCESS ? 0 : EFAULT);
236 }
237 
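/*
 * Unwire the range wired by a previous vslock() call on the same
 * address and length, and update the racct memory-lock accounting.
 */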
238 void
239 vsunlock(void *addr, size_t len)
240 {
241 
242 	/* Rely on the parameter sanity checks performed by vslock(). */
243 	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
244 	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
245 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
246 
247 	PROC_LOCK(curproc);
248 	racct_set(curproc, RACCT_MEMLOCK,
249 	    ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
250 	PROC_UNLOCK(curproc);
251 }
252 
253 /*
254  * Pin the page contained within the given object at the given offset.  If the
255  * page is not resident, allocate and load it using the given object's pager.
256  * Return the pinned page if successful; otherwise, return NULL.
257  */
258 static vm_page_t
259 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
260 {
261 	vm_page_t m, ma[1];
262 	vm_pindex_t pindex;
263 	int rv;
264 
265 	VM_OBJECT_LOCK(object);
266 	pindex = OFF_TO_IDX(offset);
267 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
268 	if (m->valid != VM_PAGE_BITS_ALL) {
269 		ma[0] = m;
270 		rv = vm_pager_get_pages(object, ma, 1, 0);
271 		m = vm_page_lookup(object, pindex);
272 		if (m == NULL)
273 			goto out;
274 		if (rv != VM_PAGER_OK) {
275 			vm_page_lock(m);
276 			vm_page_free(m);
277 			vm_page_unlock(m);
278 			m = NULL;
279 			goto out;
280 		}
281 	}
282 	vm_page_lock(m);
283 	vm_page_hold(m);
284 	vm_page_unlock(m);
285 	vm_page_wakeup(m);
286 out:
287 	VM_OBJECT_UNLOCK(object);
288 	return (m);
289 }
290 
291 /*
292  * Return a CPU private mapping to the page at the given offset within the
293  * given object.  The page is pinned before it is mapped.
294  */
295 struct sf_buf *
296 vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
297 {
298 	vm_page_t m;
299 
300 	m = vm_imgact_hold_page(object, offset);
301 	if (m == NULL)
302 		return (NULL);
303 	sched_pin();
304 	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
305 }
306 
307 /*
308  * Destroy the given CPU private mapping and unpin the page that it mapped.
309  */
310 void
311 vm_imgact_unmap_page(struct sf_buf *sf)
312 {
313 	vm_page_t m;
314 
315 	m = sf_buf_page(sf);
316 	sf_buf_free(sf);
317 	sched_unpin();
318 	vm_page_lock(m);
319 	vm_page_unhold(m);
320 	vm_page_unlock(m);
321 }
322 
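/*
 * Synchronize the instruction cache for the given range of the map's
 * address space.
 */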
323 void
324 vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
325 {
326 
327 	pmap_sync_icache(map->pmap, va, sz);
328 }
329 
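/*
 * Cache of recently freed kernel stacks.  Each entry lives in the
 * memory of the cached stack itself and records the VM object that
 * backs the stack.
 */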
330 struct kstack_cache_entry {
331 	vm_object_t ksobj;
332 	struct kstack_cache_entry *next_ks_entry;
333 };
334 
335 static struct kstack_cache_entry *kstack_cache;
336 static int kstack_cache_size = 128;
337 static int kstacks;
338 static struct mtx kstack_cache_mtx;
339 SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
340     "Size of the kernel stack cache");
341 SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
342     "Number of kernel stacks allocated");
343 
344 #ifndef KSTACK_MAX_PAGES
345 #define KSTACK_MAX_PAGES 32
346 #endif
347 
348 /*
349  * Create the kernel stack (including pcb for i386) for a new thread.
350  * This routine directly affects the fork performance of a process and
351  * the creation performance of a thread.
352  */
353 int
354 vm_thread_new(struct thread *td, int pages)
355 {
356 	vm_object_t ksobj;
357 	vm_offset_t ks;
358 	vm_page_t m, ma[KSTACK_MAX_PAGES];
359 	struct kstack_cache_entry *ks_ce;
360 	int i;
361 
362 	/* Bounds check */
363 	if (pages <= 1)
364 		pages = KSTACK_PAGES;
365 	else if (pages > KSTACK_MAX_PAGES)
366 		pages = KSTACK_MAX_PAGES;
367 
368 	if (pages == KSTACK_PAGES) {
369 		mtx_lock(&kstack_cache_mtx);
370 		if (kstack_cache != NULL) {
371 			ks_ce = kstack_cache;
372 			kstack_cache = ks_ce->next_ks_entry;
373 			mtx_unlock(&kstack_cache_mtx);
374 
375 			td->td_kstack_obj = ks_ce->ksobj;
376 			td->td_kstack = (vm_offset_t)ks_ce;
377 			td->td_kstack_pages = KSTACK_PAGES;
378 			return (1);
379 		}
380 		mtx_unlock(&kstack_cache_mtx);
381 	}
382 
383 	/*
384 	 * Allocate an object for the kstack.
385 	 */
386 	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
387 
388 	/*
389 	 * Get a kernel virtual address for this thread's kstack.
390 	 */
391 #if defined(__mips__)
392 	/*
393 	 * We need to align the kstack's mapped address to fit within
394 	 * a single TLB entry.
395 	 */
396 	ks = kmem_alloc_nofault_space(kernel_map,
397 	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
398 #else
399 	ks = kmem_alloc_nofault(kernel_map,
400 	   (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
401 #endif
402 	if (ks == 0) {
403 		printf("vm_thread_new: kstack allocation failed\n");
404 		vm_object_deallocate(ksobj);
405 		return (0);
406 	}
407 
408 	atomic_add_int(&kstacks, 1);
409 	if (KSTACK_GUARD_PAGES != 0) {
410 		pmap_qremove(ks, KSTACK_GUARD_PAGES);
411 		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
412 	}
413 	td->td_kstack_obj = ksobj;
414 	td->td_kstack = ks;
415 	/*
416 	 * Knowing the number of pages allocated is useful when you
417 	 * want to deallocate them.
418 	 */
419 	td->td_kstack_pages = pages;
420 	/*
421 	 * For the length of the stack, link in a real page of RAM for each
422 	 * page of stack.
423 	 */
424 	VM_OBJECT_LOCK(ksobj);
425 	for (i = 0; i < pages; i++) {
426 		/*
427 		 * Get a kernel stack page.
428 		 */
429 		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
430 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
431 		ma[i] = m;
432 		m->valid = VM_PAGE_BITS_ALL;
433 	}
434 	VM_OBJECT_UNLOCK(ksobj);
435 	pmap_qenter(ks, ma, pages);
436 	return (1);
437 }
438 
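/*
 * Unmap and free the physical pages backing a kernel stack, then
 * release the backing VM object and the kernel virtual address range,
 * including the guard pages.
 */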
439 static void
440 vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
441 {
442 	vm_page_t m;
443 	int i;
444 
445 	atomic_add_int(&kstacks, -1);
446 	pmap_qremove(ks, pages);
447 	VM_OBJECT_LOCK(ksobj);
448 	for (i = 0; i < pages; i++) {
449 		m = vm_page_lookup(ksobj, i);
450 		if (m == NULL)
451 			panic("vm_thread_dispose: kstack already missing?");
452 		vm_page_lock(m);
453 		vm_page_unwire(m, 0);
454 		vm_page_free(m);
455 		vm_page_unlock(m);
456 	}
457 	VM_OBJECT_UNLOCK(ksobj);
458 	vm_object_deallocate(ksobj);
459 	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
460 	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
461 }
462 
463 /*
464  * Dispose of a thread's kernel stack.
465  */
466 void
467 vm_thread_dispose(struct thread *td)
468 {
469 	vm_object_t ksobj;
470 	vm_offset_t ks;
471 	struct kstack_cache_entry *ks_ce;
472 	int pages;
473 
474 	pages = td->td_kstack_pages;
475 	ksobj = td->td_kstack_obj;
476 	ks = td->td_kstack;
477 	td->td_kstack = 0;
478 	td->td_kstack_pages = 0;
479 	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
480 		ks_ce = (struct kstack_cache_entry *)ks;
481 		ks_ce->ksobj = ksobj;
482 		mtx_lock(&kstack_cache_mtx);
483 		ks_ce->next_ks_entry = kstack_cache;
484 		kstack_cache = ks_ce;
485 		mtx_unlock(&kstack_cache_mtx);
486 		return;
487 	}
488 	vm_thread_stack_dispose(ksobj, ks, pages);
489 }
490 
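/*
 * vm_lowmem event handler: discard all cached kernel stacks so that
 * their pages are returned to the system.
 */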
491 static void
492 vm_thread_stack_lowmem(void *nulll)
493 {
494 	struct kstack_cache_entry *ks_ce, *ks_ce1;
495 
496 	mtx_lock(&kstack_cache_mtx);
497 	ks_ce = kstack_cache;
498 	kstack_cache = NULL;
499 	mtx_unlock(&kstack_cache_mtx);
500 
501 	while (ks_ce != NULL) {
502 		ks_ce1 = ks_ce;
503 		ks_ce = ks_ce->next_ks_entry;
504 
505 		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
506 		    KSTACK_PAGES);
507 	}
508 }
509 
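/*
 * Register the vm_lowmem event handler that drains the kernel stack
 * cache.
 */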
510 static void
511 kstack_cache_init(void *nulll)
512 {
513 
514 	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
515 	    EVENTHANDLER_PRI_ANY);
516 }
517 
518 MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
519 SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
520 
521 #ifndef NO_SWAPPING
522 /*
523  * Allow a thread's kernel stack to be paged out.
524  */
525 static void
526 vm_thread_swapout(struct thread *td)
527 {
528 	vm_object_t ksobj;
529 	vm_page_t m;
530 	int i, pages;
531 
532 	cpu_thread_swapout(td);
533 	pages = td->td_kstack_pages;
534 	ksobj = td->td_kstack_obj;
535 	pmap_qremove(td->td_kstack, pages);
536 	VM_OBJECT_LOCK(ksobj);
537 	for (i = 0; i < pages; i++) {
538 		m = vm_page_lookup(ksobj, i);
539 		if (m == NULL)
540 			panic("vm_thread_swapout: kstack already missing?");
541 		vm_page_dirty(m);
542 		vm_page_lock(m);
543 		vm_page_unwire(m, 0);
544 		vm_page_unlock(m);
545 	}
546 	VM_OBJECT_UNLOCK(ksobj);
547 }
548 
549 /*
550  * Bring the kernel stack for a specified thread back in.
551  */
552 static void
553 vm_thread_swapin(struct thread *td)
554 {
555 	vm_object_t ksobj;
556 	vm_page_t ma[KSTACK_MAX_PAGES];
557 	int i, j, k, pages, rv;
558 
559 	pages = td->td_kstack_pages;
560 	ksobj = td->td_kstack_obj;
561 	VM_OBJECT_LOCK(ksobj);
562 	for (i = 0; i < pages; i++)
563 		ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
564 		    VM_ALLOC_WIRED);
565 	for (i = 0; i < pages; i++) {
566 		if (ma[i]->valid != VM_PAGE_BITS_ALL) {
567 			KASSERT(ma[i]->oflags & VPO_BUSY,
568 			    ("lost busy 1"));
569 			vm_object_pip_add(ksobj, 1);
570 			for (j = i + 1; j < pages; j++) {
571 				KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
572 				    (ma[j]->oflags & VPO_BUSY),
573 				    ("lost busy 2"));
574 				if (ma[j]->valid == VM_PAGE_BITS_ALL)
575 					break;
576 			}
577 			rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
578 			if (rv != VM_PAGER_OK)
579 	panic("vm_thread_swapin: cannot get kstack for proc: %d",
580 				    td->td_proc->p_pid);
581 			vm_object_pip_wakeup(ksobj);
582 			for (k = i; k < j; k++)
583 				ma[k] = vm_page_lookup(ksobj, k);
584 			vm_page_wakeup(ma[i]);
585 		} else if (ma[i]->oflags & VPO_BUSY)
586 			vm_page_wakeup(ma[i]);
587 	}
588 	VM_OBJECT_UNLOCK(ksobj);
589 	pmap_qenter(td->td_kstack, ma, pages);
590 	cpu_thread_swapin(td);
591 }
592 #endif /* !NO_SWAPPING */
593 
594 /*
595  * Implement fork's actions on an address space.
596  * Here we arrange for the address space to be copied or referenced,
597  * allocate a user struct (pcb and kernel stack), then call the
598  * machine-dependent layer to fill those in and make the new process
599  * ready to run.  The new process is set up so that it returns directly
600  * to user mode to avoid stack copying and relocation problems.
601  */
602 int
603 vm_forkproc(td, p2, td2, vm2, flags)
604 	struct thread *td;
605 	struct proc *p2;
606 	struct thread *td2;
607 	struct vmspace *vm2;
608 	int flags;
609 {
610 	struct proc *p1 = td->td_proc;
611 	int error;
612 
613 	if ((flags & RFPROC) == 0) {
614 		/*
615 		 * Divorce the memory, if it is shared; essentially
616 		 * this turns memory shared amongst threads into
617 		 * local copy-on-write memory.
618 		 */
619 		if ((flags & RFMEM) == 0) {
620 			if (p1->p_vmspace->vm_refcnt > 1) {
621 				error = vmspace_unshare(p1);
622 				if (error)
623 					return (error);
624 			}
625 		}
626 		cpu_fork(td, p2, td2, flags);
627 		return (0);
628 	}
629 
630 	if (flags & RFMEM) {
631 		p2->p_vmspace = p1->p_vmspace;
632 		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
633 	}
634 
635 	while (vm_page_count_severe()) {
636 		VM_WAIT;
637 	}
638 
639 	if ((flags & RFMEM) == 0) {
640 		p2->p_vmspace = vm2;
641 		if (p1->p_vmspace->vm_shm)
642 			shmfork(p1, p2);
643 	}
644 
645 	/*
646 	 * cpu_fork will copy and update the pcb, set up the kernel stack,
647 	 * and make the child ready to run.
648 	 */
649 	cpu_fork(td, p2, td2, flags);
650 	return (0);
651 }
652 
653 /*
654  * Called after a process has been wait(2)'ed upon and is being reaped.
655  * The idea is to reclaim resources that we could not reclaim while
656  * the process was still executing.
657  */
658 void
659 vm_waitproc(p)
660 	struct proc *p;
661 {
662 
663 	vmspace_exitfree(p);		/* and clean-out the vmspace */
664 }
665 
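/*
 * Bring a swapped-out process back into memory, swapping in the kernel
 * stacks of all of its threads.  Called with the process lock held.
 */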
666 void
667 faultin(p)
668 	struct proc *p;
669 {
670 #ifdef NO_SWAPPING
671 
672 	PROC_LOCK_ASSERT(p, MA_OWNED);
673 	if ((p->p_flag & P_INMEM) == 0)
674 		panic("faultin: proc swapped out with NO_SWAPPING!");
675 #else /* !NO_SWAPPING */
676 	struct thread *td;
677 
678 	PROC_LOCK_ASSERT(p, MA_OWNED);
679 	/*
680 	 * If another process is swapping in this process,
681 	 * just wait until it finishes.
682 	 */
683 	if (p->p_flag & P_SWAPPINGIN) {
684 		while (p->p_flag & P_SWAPPINGIN)
685 			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
686 		return;
687 	}
688 	if ((p->p_flag & P_INMEM) == 0) {
689 		/*
690 		 * Don't let another thread swap process p out while we are
691 		 * busy swapping it in.
692 		 */
693 		++p->p_lock;
694 		p->p_flag |= P_SWAPPINGIN;
695 		PROC_UNLOCK(p);
696 
697 		/*
698 		 * We hold no lock here because the list of threads
699 		 * cannot change while all threads in the process are
700 		 * swapped out.
701 		 */
702 		FOREACH_THREAD_IN_PROC(p, td)
703 			vm_thread_swapin(td);
704 		PROC_LOCK(p);
705 		swapclear(p);
706 		p->p_swtick = ticks;
707 
708 		wakeup(&p->p_flag);
709 
710 		/* Allow other threads to swap p out now. */
711 		--p->p_lock;
712 	}
713 #endif /* NO_SWAPPING */
714 }
715 
716 /*
717  * This swapin algorithm attempts to swap in processes only if there
718  * is enough space for them.  Of course, if a process waits for a long
719  * time, it will be swapped in anyway.
720  *
721  * Giant is held on entry.
722  */
723 /* ARGSUSED*/
724 static void
725 scheduler(dummy)
726 	void *dummy;
727 {
728 	struct proc *p;
729 	struct thread *td;
730 	struct proc *pp;
731 	int slptime;
732 	int swtime;
733 	int ppri;
734 	int pri;
735 
736 	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
737 	mtx_unlock(&Giant);
738 
739 loop:
740 	if (vm_page_count_min()) {
741 		VM_WAIT;
742 		goto loop;
743 	}
744 
745 	pp = NULL;
746 	ppri = INT_MIN;
747 	sx_slock(&allproc_lock);
748 	FOREACH_PROC_IN_SYSTEM(p) {
749 		PROC_LOCK(p);
750 		if (p->p_state == PRS_NEW ||
751 		    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
752 			PROC_UNLOCK(p);
753 			continue;
754 		}
755 		swtime = (ticks - p->p_swtick) / hz;
756 		FOREACH_THREAD_IN_PROC(p, td) {
757 			/*
758 			 * An otherwise runnable thread of a swapped-out
759 			 * process has only the TDI_SWAPPED bit set.
761 			 */
762 			thread_lock(td);
763 			if (td->td_inhibitors == TDI_SWAPPED) {
764 				slptime = (ticks - td->td_slptick) / hz;
765 				pri = swtime + slptime;
766 				if ((td->td_flags & TDF_SWAPINREQ) == 0)
767 					pri -= p->p_nice * 8;
768 				/*
769 				 * If this thread is higher priority
770 				 * and there is enough space, then select
771 				 * this process instead of the previous
772 				 * selection.
773 				 */
774 				if (pri > ppri) {
775 					pp = p;
776 					ppri = pri;
777 				}
778 			}
779 			thread_unlock(td);
780 		}
781 		PROC_UNLOCK(p);
782 	}
783 	sx_sunlock(&allproc_lock);
784 
785 	/*
786 	 * Nothing to do, back to sleep.
787 	 */
788 	if ((p = pp) == NULL) {
789 		tsleep(&proc0, PVM, "sched", MAXSLP * hz / 2);
790 		goto loop;
791 	}
792 	PROC_LOCK(p);
793 
794 	/*
795 	 * Another process may be bringing or may have already
796 	 * brought this process in while we traverse all threads.
797 	 * Or, this process may even be being swapped out again.
798 	 */
799 	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
800 		PROC_UNLOCK(p);
801 		goto loop;
802 	}
803 
804 	/*
805 	 * We would like to bring someone in (only if there is space).
806 	 * [What checks the space?]
807 	 */
808 	faultin(p);
809 	PROC_UNLOCK(p);
810 	goto loop;
811 }
812 
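/*
 * Wake up the swapin scheduler, which sleeps on &proc0 in the loop
 * above.
 */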
813 void
814 kick_proc0(void)
815 {
816 
817 	wakeup(&proc0);
818 }
819 
820 #ifndef NO_SWAPPING
821 
822 /*
823  * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
824  */
825 static int swap_idle_threshold1 = 2;
826 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
827     &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
828 
829 /*
830  * swap_idle_threshold2 is the time that a process can be idle before
831  * it will be swapped out, if idle swapping is enabled.
832  */
833 static int swap_idle_threshold2 = 10;
834 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
835     &swap_idle_threshold2, 0, "Time before a process will be swapped out");
836 
837 /*
838  * First, if any processes have been sleeping or stopped for at least
839  * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
840  * no such processes exist, then the longest-sleeping or stopped
841  * process is swapped out.  Finally, and only as a last resort, if
842  * there are no sleeping or stopped processes, the longest-resident
843  * process is swapped out.
844  */
845 void
846 swapout_procs(action)
847 	int action;
848 {
849 	struct proc *p;
850 	struct thread *td;
851 	int didswap = 0;
852 
853 retry:
854 	sx_slock(&allproc_lock);
855 	FOREACH_PROC_IN_SYSTEM(p) {
856 		struct vmspace *vm;
857 		int minslptime = 100000;
858 		int slptime;
859 
860 		/*
861 		 * Watch out for a process in
862 		 * creation.  It may have no
863 		 * address space or lock yet.
864 		 */
865 		if (p->p_state == PRS_NEW)
866 			continue;
867 		/*
868 		 * An aio daemon switches its
869 		 * address space while running.
870 		 * Perform a quick check whether
871 		 * a process has P_SYSTEM.
872 		 */
873 		if ((p->p_flag & P_SYSTEM) != 0)
874 			continue;
875 		/*
876 		 * Do not swap out a process that
877 		 * is waiting for VM data
878 		 * structures as there is a possible
879 		 * deadlock.  Test this first as
880 		 * this may block.
881 		 *
882 		 * Lock the map until swapout
883 		 * finishes, or a thread of this
884 		 * process may attempt to alter
885 		 * the map.
886 		 */
887 		vm = vmspace_acquire_ref(p);
888 		if (vm == NULL)
889 			continue;
890 		if (!vm_map_trylock(&vm->vm_map))
891 			goto nextproc1;
892 
893 		PROC_LOCK(p);
894 		if (p->p_lock != 0 ||
895 		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
896 		    ) != 0) {
897 			goto nextproc;
898 		}
899 		/*
900 		 * Only aiod changes the vmspace; however, it will be
901 		 * skipped because of the P_SYSTEM check in the
902 		 * if statement above.
903 		 */
904 		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
905 			goto nextproc;
906 
907 		switch (p->p_state) {
908 		default:
909 			/* Don't swap out processes in any sort
910 			 * of 'special' state. */
911 			break;
912 
913 		case PRS_NORMAL:
914 			/*
915 			 * Do not swap out a realtime process.
916 			 * Check all the thread groups.
917 			 */
918 			FOREACH_THREAD_IN_PROC(p, td) {
919 				thread_lock(td);
920 				if (PRI_IS_REALTIME(td->td_pri_class)) {
921 					thread_unlock(td);
922 					goto nextproc;
923 				}
924 				slptime = (ticks - td->td_slptick) / hz;
925 				/*
926 				 * Guarantee swap_idle_threshold1
927 				 * time in memory.
928 				 */
929 				if (slptime < swap_idle_threshold1) {
930 					thread_unlock(td);
931 					goto nextproc;
932 				}
933 
934 				/*
935 				 * Do not swap out a process if it is
936 				 * waiting on a critical event of some
937 				 * kind or there is a thread whose
938 				 * pageable memory may be accessed.
939 				 *
940 				 * This could be refined to support
941 				 * swapping out a thread.
942 				 */
943 				if (!thread_safetoswapout(td)) {
944 					thread_unlock(td);
945 					goto nextproc;
946 				}
947 				/*
948 				 * If the system is under memory stress,
949 				 * or if we are swapping out processes idle
950 				 * for at least swap_idle_threshold2 seconds,
951 				 * then swap the process out.
952 				 */
953 				if (((action & VM_SWAP_NORMAL) == 0) &&
954 				    (((action & VM_SWAP_IDLE) == 0) ||
955 				    (slptime < swap_idle_threshold2))) {
956 					thread_unlock(td);
957 					goto nextproc;
958 				}
959 
960 				if (minslptime > slptime)
961 					minslptime = slptime;
962 				thread_unlock(td);
963 			}
964 
965 			/*
966 			 * If the pageout daemon didn't free enough pages,
967 			 * or if this process is idle and the system is
968 			 * configured to swap proactively, swap it out.
969 			 */
970 			if ((action & VM_SWAP_NORMAL) ||
971 				((action & VM_SWAP_IDLE) &&
972 				 (minslptime > swap_idle_threshold2))) {
973 				if (swapout(p) == 0)
974 					didswap++;
975 				PROC_UNLOCK(p);
976 				vm_map_unlock(&vm->vm_map);
977 				vmspace_free(vm);
978 				sx_sunlock(&allproc_lock);
979 				goto retry;
980 			}
981 		}
982 nextproc:
983 		PROC_UNLOCK(p);
984 		vm_map_unlock(&vm->vm_map);
985 nextproc1:
986 		vmspace_free(vm);
987 		continue;
988 	}
989 	sx_sunlock(&allproc_lock);
990 	/*
991 	 * If we swapped something out, and another process needed memory,
992 	 * then wake up the scheduler process.
993 	 */
994 	if (didswap)
995 		wakeup(&proc0);
996 }
997 
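/*
 * Mark a process and all of its threads as resident in memory again,
 * clearing the swapped state and making runnable any threads that can
 * run.
 */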
998 static void
999 swapclear(p)
1000 	struct proc *p;
1001 {
1002 	struct thread *td;
1003 
1004 	PROC_LOCK_ASSERT(p, MA_OWNED);
1005 
1006 	FOREACH_THREAD_IN_PROC(p, td) {
1007 		thread_lock(td);
1008 		td->td_flags |= TDF_INMEM;
1009 		td->td_flags &= ~TDF_SWAPINREQ;
1010 		TD_CLR_SWAPPED(td);
1011 		if (TD_CAN_RUN(td))
1012 			if (setrunnable(td)) {
1013 #ifdef INVARIANTS
1014 				/*
1015 				 * XXX: We just cleared TDI_SWAPPED
1016 				 * above and set TDF_INMEM, so this
1017 				 * should never happen.
1018 				 */
1019 				panic("not waking up swapper");
1020 #endif
1021 			}
1022 		thread_unlock(td);
1023 	}
1024 	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
1025 	p->p_flag |= P_INMEM;
1026 }
1027 
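/*
 * Swap out a single process: mark it and its threads as swapped out
 * and unwire each thread's kernel stack pages so that they may be
 * paged out.  Returns 0 on success or EBUSY if a thread was not safe
 * to swap out.
 */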
1028 static int
1029 swapout(p)
1030 	struct proc *p;
1031 {
1032 	struct thread *td;
1033 
1034 	PROC_LOCK_ASSERT(p, MA_OWNED);
1035 #if defined(SWAP_DEBUG)
1036 	printf("swapping out %d\n", p->p_pid);
1037 #endif
1038 
1039 	/*
1040 	 * The states of this process and its threads may have changed
1041 	 * by now.  Assuming that there is only one pageout daemon thread,
1042 	 * this process should still be in memory.
1043 	 */
1044 	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
1045 		("swapout: lost a swapout race?"));
1046 
1047 	/*
1048 	 * Remember the process' resident count.
1049 	 */
1050 	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
1051 	/*
1052 	 * Check and mark all threads before we proceed.
1053 	 */
1054 	p->p_flag &= ~P_INMEM;
1055 	p->p_flag |= P_SWAPPINGOUT;
1056 	FOREACH_THREAD_IN_PROC(p, td) {
1057 		thread_lock(td);
1058 		if (!thread_safetoswapout(td)) {
1059 			thread_unlock(td);
1060 			swapclear(p);
1061 			return (EBUSY);
1062 		}
1063 		td->td_flags &= ~TDF_INMEM;
1064 		TD_SET_SWAPPED(td);
1065 		thread_unlock(td);
1066 	}
1067 	td = FIRST_THREAD_IN_PROC(p);
1068 	++td->td_ru.ru_nswap;
1069 	PROC_UNLOCK(p);
1070 
1071 	/*
1072 	 * This list is stable because all threads are now prevented from
1073 	 * running.  The list is only modified in the context of a running
1074 	 * thread in this process.
1075 	 */
1076 	FOREACH_THREAD_IN_PROC(p, td)
1077 		vm_thread_swapout(td);
1078 
1079 	PROC_LOCK(p);
1080 	p->p_flag &= ~P_SWAPPINGOUT;
1081 	p->p_swtick = ticks;
1082 	return (0);
1083 }
1084 #endif /* !NO_SWAPPING */
1085