xref: /freebsd/sys/vm/vm_glue.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
1 /*
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
37  *
38  *
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Permission to use, copy, modify and distribute this software and
43  * its documentation is hereby granted, provided that both the copyright
44  * notice and this permission notice appear in all copies of the
45  * software, derivative works or modified versions, and any portions
46  * thereof, and that both notices appear in supporting documentation.
47  *
48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51  *
52  * Carnegie Mellon requests users of this software to return to
53  *
54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55  *  School of Computer Science
56  *  Carnegie Mellon University
57  *  Pittsburgh PA 15213-3890
58  *
59  * any improvements or extensions that they make and grant Carnegie the
60  * rights to redistribute these changes.
61  */
62 
63 #include <sys/cdefs.h>
64 __FBSDID("$FreeBSD$");
65 
66 #include "opt_vm.h"
67 #include "opt_kstack_pages.h"
68 #include "opt_kstack_max_pages.h"
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/limits.h>
73 #include <sys/lock.h>
74 #include <sys/mutex.h>
75 #include <sys/proc.h>
76 #include <sys/resourcevar.h>
77 #include <sys/shm.h>
78 #include <sys/vmmeter.h>
79 #include <sys/sx.h>
80 #include <sys/sysctl.h>
81 
82 #include <sys/kernel.h>
83 #include <sys/ktr.h>
84 #include <sys/unistd.h>
85 
86 #include <vm/vm.h>
87 #include <vm/vm_param.h>
88 #include <vm/pmap.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_extern.h>
95 #include <vm/vm_pager.h>
96 #include <vm/swap_pager.h>
97 
98 #include <sys/user.h>
99 
100 extern int maxslp;
101 
102 /*
103  * System initialization
104  *
105  * Note: proc0 from proc.h
106  */
107 static void vm_init_limits(void *);
108 SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)
109 
110 /*
111  * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
112  *
113  * Note: run scheduling should be divorced from the vm system.
114  */
115 static void scheduler(void *);
116 SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)
117 
118 #ifndef NO_SWAPPING
119 static void swapout(struct proc *);
120 static void vm_proc_swapin(struct proc *p);
121 static void vm_proc_swapout(struct proc *p);
122 #endif
123 
124 /*
125  * MPSAFE
126  *
127  * WARNING!  This code calls vm_map_check_protection() which only checks
128  * the associated vm_map_entry range.  It does not determine whether the
129  * contents of the memory are actually readable or writable.  In most cases
130  * just checking the vm_map_entry is sufficient within the kernel's address
131  * space.
132  */
133 int
134 kernacc(addr, len, rw)
135 	void *addr;
136 	int len, rw;
137 {
138 	boolean_t rv;
139 	vm_offset_t saddr, eaddr;
140 	vm_prot_t prot;
141 
142 	KASSERT((rw & ~VM_PROT_ALL) == 0,
143 	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
144 	prot = rw;
145 	saddr = trunc_page((vm_offset_t)addr);
146 	eaddr = round_page((vm_offset_t)addr + len);
147 	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
148 	return (rv == TRUE);
149 }
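
/*
 * A minimal usage sketch (not part of the original file; ptr and size are
 * placeholder names): callers typically treat a FALSE return as EFAULT
 * before touching the range:
 *
 *	if (!kernacc(ptr, size, VM_PROT_READ))
 *		return (EFAULT);
 */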
150 
151 /*
152  * MPSAFE
153  *
154  * WARNING!  This code calls vm_map_check_protection() which only checks
155  * the associated vm_map_entry range.  It does not determine whether the
156  * contents of the memory are actually readable or writable.  vmapbuf(),
157  * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should be
158  * used in conjunction with this call.
159  */
160 int
161 useracc(addr, len, rw)
162 	void *addr;
163 	int len, rw;
164 {
165 	boolean_t rv;
166 	vm_prot_t prot;
167 	vm_map_t map;
168 
169 	KASSERT((rw & ~VM_PROT_ALL) == 0,
170 	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
171 	prot = rw;
172 	map = &curproc->p_vmspace->vm_map;
173 	if ((vm_offset_t)addr + len > vm_map_max(map) ||
174 	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
175 		return (FALSE);
176 	}
177 	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
178 	    round_page((vm_offset_t)addr + len), prot);
179 	return (rv == TRUE);
180 }
181 
182 /*
183  * MPSAFE
184  */
185 void
186 vslock(addr, len)
187 	void *addr;
188 	u_int len;
189 {
190 
191 	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
192 	    round_page((vm_offset_t)addr + len),
193 	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
194 }
195 
196 /*
197  * MPSAFE
198  */
199 void
200 vsunlock(addr, len)
201 	void *addr;
202 	u_int len;
203 {
204 
205 	vm_map_unwire(&curproc->p_vmspace->vm_map,
206 	    trunc_page((vm_offset_t)addr),
207 	    round_page((vm_offset_t)addr + len),
208 	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
209 }
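
/*
 * A minimal usage sketch (an assumption, not taken from this file): vslock()
 * and vsunlock() are typically wrapped around a copy to or from a user
 * buffer so the pages stay resident for the duration of the copy:
 *
 *	vslock(uaddr, len);
 *	error = copyin(uaddr, kbuf, len);
 *	vsunlock(uaddr, len);
 *
 * uaddr, len, and kbuf are placeholder names.
 */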
210 
211 /*
212  * Create the U area for a new process.
213  * This routine directly affects the fork perf for a process.
214  */
215 void
216 vm_proc_new(struct proc *p)
217 {
218 	vm_page_t ma[UAREA_PAGES];
219 	vm_object_t upobj;
220 	vm_offset_t up;
221 	vm_page_t m;
222 	u_int i;
223 
224 	/*
225 	 * Get a kernel virtual address for the U area for this process.
226 	 */
227 	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
228 	if (up == 0)
229 		panic("vm_proc_new: upage allocation failed");
230 	p->p_uarea = (struct user *)up;
231 
232 	/*
233 	 * Allocate object and page(s) for the U area.
234 	 */
235 	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
236 	p->p_upages_obj = upobj;
237 	VM_OBJECT_LOCK(upobj);
238 	for (i = 0; i < UAREA_PAGES; i++) {
239 		m = vm_page_grab(upobj, i,
240 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
241 		ma[i] = m;
242 
243 		vm_page_lock_queues();
244 		vm_page_wakeup(m);
245 		m->valid = VM_PAGE_BITS_ALL;
246 		vm_page_unlock_queues();
247 	}
248 	VM_OBJECT_UNLOCK(upobj);
249 
250 	/*
251 	 * Enter the pages into the kernel address space.
252 	 */
253 	pmap_qenter(up, ma, UAREA_PAGES);
254 }
255 
256 /*
257  * Dispose the U area for a process that has exited.
258  * This routine directly impacts the exit perf of a process.
259  * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
260  */
261 void
262 vm_proc_dispose(struct proc *p)
263 {
264 	vm_object_t upobj;
265 	vm_offset_t up;
266 	vm_page_t m;
267 
268 	upobj = p->p_upages_obj;
269 	VM_OBJECT_LOCK(upobj);
270 	if (upobj->resident_page_count != UAREA_PAGES)
271 		panic("vm_proc_dispose: incorrect number of pages in upobj");
272 	vm_page_lock_queues();
273 	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
274 		vm_page_busy(m);
275 		vm_page_unwire(m, 0);
276 		vm_page_free(m);
277 	}
278 	vm_page_unlock_queues();
279 	VM_OBJECT_UNLOCK(upobj);
280 	up = (vm_offset_t)p->p_uarea;
281 	pmap_qremove(up, UAREA_PAGES);
282 	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
283 	vm_object_deallocate(upobj);
284 }
285 
286 #ifndef NO_SWAPPING
287 /*
288  * Allow the U area for a process to be prejudicially paged out.
289  */
290 static void
291 vm_proc_swapout(struct proc *p)
292 {
293 	vm_object_t upobj;
294 	vm_offset_t up;
295 	vm_page_t m;
296 
297 	upobj = p->p_upages_obj;
298 	VM_OBJECT_LOCK(upobj);
299 	if (upobj->resident_page_count != UAREA_PAGES)
300 		panic("vm_proc_swapout: incorrect number of pages in upobj");
301 	vm_page_lock_queues();
302 	TAILQ_FOREACH(m, &upobj->memq, listq) {
303 		vm_page_dirty(m);
304 		vm_page_unwire(m, 0);
305 	}
306 	vm_page_unlock_queues();
307 	VM_OBJECT_UNLOCK(upobj);
308 	up = (vm_offset_t)p->p_uarea;
309 	pmap_qremove(up, UAREA_PAGES);
310 }
311 
312 /*
313  * Bring the U area for a specified process back in.
314  */
315 static void
316 vm_proc_swapin(struct proc *p)
317 {
318 	vm_page_t ma[UAREA_PAGES];
319 	vm_object_t upobj;
320 	vm_offset_t up;
321 	vm_page_t m;
322 	int rv;
323 	int i;
324 
325 	upobj = p->p_upages_obj;
326 	VM_OBJECT_LOCK(upobj);
327 	for (i = 0; i < UAREA_PAGES; i++) {
328 		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
329 		if (m->valid != VM_PAGE_BITS_ALL) {
330 			rv = vm_pager_get_pages(upobj, &m, 1, 0);
331 			if (rv != VM_PAGER_OK)
332 				panic("vm_proc_swapin: cannot get upage");
333 		}
334 		ma[i] = m;
335 	}
336 	if (upobj->resident_page_count != UAREA_PAGES)
337 		panic("vm_proc_swapin: lost pages from upobj");
338 	vm_page_lock_queues();
339 	TAILQ_FOREACH(m, &upobj->memq, listq) {
340 		m->valid = VM_PAGE_BITS_ALL;
341 		vm_page_wire(m);
342 		vm_page_wakeup(m);
343 	}
344 	vm_page_unlock_queues();
345 	VM_OBJECT_UNLOCK(upobj);
346 	up = (vm_offset_t)p->p_uarea;
347 	pmap_qenter(up, ma, UAREA_PAGES);
348 }
349 
350 /*
351  * Swap in the UAREAs of all processes swapped out to the given device.
352  * The pages in the UAREA are marked dirty and their swap metadata is freed.
353  */
354 void
355 vm_proc_swapin_all(struct swdevt *devidx)
356 {
357 	struct proc *p;
358 	vm_object_t object;
359 	vm_page_t m;
360 
361 retry:
362 	sx_slock(&allproc_lock);
363 	FOREACH_PROC_IN_SYSTEM(p) {
364 		PROC_LOCK(p);
365 		object = p->p_upages_obj;
366 		if (object != NULL) {
367 			VM_OBJECT_LOCK(object);
368 			if (swap_pager_isswapped(object, devidx)) {
369 				VM_OBJECT_UNLOCK(object);
370 				sx_sunlock(&allproc_lock);
371 				faultin(p);
372 				PROC_UNLOCK(p);
373 				VM_OBJECT_LOCK(object);
374 				vm_page_lock_queues();
375 				TAILQ_FOREACH(m, &object->memq, listq)
376 					vm_page_dirty(m);
377 				vm_page_unlock_queues();
378 				swap_pager_freespace(object, 0,
379 				    object->un_pager.swp.swp_bcount);
380 				VM_OBJECT_UNLOCK(object);
381 				goto retry;
382 			}
383 			VM_OBJECT_UNLOCK(object);
384 		}
385 		PROC_UNLOCK(p);
386 	}
387 	sx_sunlock(&allproc_lock);
388 }
389 #endif
390 
391 #ifndef KSTACK_MAX_PAGES
392 #define KSTACK_MAX_PAGES 32
393 #endif
394 
395 /*
396  * Create the kernel stack (including pcb for i386) for a new thread.
397  * This routine directly affects the fork perf for a process and
398  * the creation perf for a thread.
399  */
400 void
401 vm_thread_new(struct thread *td, int pages)
402 {
403 	vm_object_t ksobj;
404 	vm_offset_t ks;
405 	vm_page_t m, ma[KSTACK_MAX_PAGES];
406 	int i;
407 
408 	/* Bounds check */
409 	if (pages <= 1)
410 		pages = KSTACK_PAGES;
411 	else if (pages > KSTACK_MAX_PAGES)
412 		pages = KSTACK_MAX_PAGES;
413 	/*
414 	 * Allocate an object for the kstack.
415 	 */
416 	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
417 	td->td_kstack_obj = ksobj;
418 	/*
419 	 * Get a kernel virtual address for this thread's kstack.
420 	 */
421 	ks = kmem_alloc_nofault(kernel_map,
422 	   (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
423 	if (ks == 0)
424 		panic("vm_thread_new: kstack allocation failed");
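	/*
	 * Leave the guard pages below the stack unmapped so that a kernel
	 * stack overflow faults immediately instead of silently overwriting
	 * adjacent kernel memory.
	 */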
425 	if (KSTACK_GUARD_PAGES != 0) {
426 		pmap_qremove(ks, KSTACK_GUARD_PAGES);
427 		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
428 	}
429 	td->td_kstack = ks;
430 	/*
431 	 * Knowing the number of pages allocated is useful when you
432 	 * want to deallocate them.
433 	 */
434 	td->td_kstack_pages = pages;
435 	/*
436 	 * For the length of the stack, link in a real page of ram for each
437 	 * page of stack.
438 	 */
439 	VM_OBJECT_LOCK(ksobj);
440 	for (i = 0; i < pages; i++) {
441 		/*
442 		 * Get a kernel stack page.
443 		 */
444 		m = vm_page_grab(ksobj, i,
445 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
446 		ma[i] = m;
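		/*
		 * The page is wired and about to be mapped with pmap_qenter(),
		 * so mark it fully valid and clear the busy state left by
		 * vm_page_grab().
		 */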
447 		vm_page_lock_queues();
448 		vm_page_wakeup(m);
449 		m->valid = VM_PAGE_BITS_ALL;
450 		vm_page_unlock_queues();
451 	}
452 	VM_OBJECT_UNLOCK(ksobj);
453 	pmap_qenter(ks, ma, pages);
454 }
455 
456 /*
457  * Dispose of a thread's kernel stack.
458  */
459 void
460 vm_thread_dispose(struct thread *td)
461 {
462 	vm_object_t ksobj;
463 	vm_offset_t ks;
464 	vm_page_t m;
465 	int i, pages;
466 
467 	pages = td->td_kstack_pages;
468 	ksobj = td->td_kstack_obj;
469 	ks = td->td_kstack;
470 	pmap_qremove(ks, pages);
471 	VM_OBJECT_LOCK(ksobj);
472 	for (i = 0; i < pages; i++) {
473 		m = vm_page_lookup(ksobj, i);
474 		if (m == NULL)
475 			panic("vm_thread_dispose: kstack already missing?");
476 		vm_page_lock_queues();
477 		vm_page_busy(m);
478 		vm_page_unwire(m, 0);
479 		vm_page_free(m);
480 		vm_page_unlock_queues();
481 	}
482 	VM_OBJECT_UNLOCK(ksobj);
483 	vm_object_deallocate(ksobj);
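	/*
	 * The KVA range was allocated including the guard pages, so back up
	 * over them before freeing it.
	 */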
484 	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
485 	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
486 }
487 
488 /*
489  * Allow a thread's kernel stack to be paged out.
490  */
491 void
492 vm_thread_swapout(struct thread *td)
493 {
494 	vm_object_t ksobj;
495 	vm_page_t m;
496 	int i, pages;
497 
498 	cpu_thread_swapout(td);
499 	pages = td->td_kstack_pages;
500 	ksobj = td->td_kstack_obj;
501 	pmap_qremove(td->td_kstack, pages);
502 	VM_OBJECT_LOCK(ksobj);
503 	for (i = 0; i < pages; i++) {
504 		m = vm_page_lookup(ksobj, i);
505 		if (m == NULL)
506 			panic("vm_thread_swapout: kstack already missing?");
507 		vm_page_lock_queues();
508 		vm_page_dirty(m);
509 		vm_page_unwire(m, 0);
510 		vm_page_unlock_queues();
511 	}
512 	VM_OBJECT_UNLOCK(ksobj);
513 }
514 
515 /*
516  * Bring the kernel stack for a specified thread back in.
517  */
518 void
519 vm_thread_swapin(struct thread *td)
520 {
521 	vm_object_t ksobj;
522 	vm_page_t m, ma[KSTACK_MAX_PAGES];
523 	int i, pages, rv;
524 
525 	pages = td->td_kstack_pages;
526 	ksobj = td->td_kstack_obj;
527 	VM_OBJECT_LOCK(ksobj);
528 	for (i = 0; i < pages; i++) {
529 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
530 		if (m->valid != VM_PAGE_BITS_ALL) {
531 			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
532 			if (rv != VM_PAGER_OK)
533 				panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
534 			m = vm_page_lookup(ksobj, i);
535 			m->valid = VM_PAGE_BITS_ALL;
536 		}
537 		ma[i] = m;
538 		vm_page_lock_queues();
539 		vm_page_wire(m);
540 		vm_page_wakeup(m);
541 		vm_page_unlock_queues();
542 	}
543 	VM_OBJECT_UNLOCK(ksobj);
544 	pmap_qenter(td->td_kstack, ma, pages);
545 	cpu_thread_swapin(td);
546 }
547 
548 /*
549  * Set up a variable-sized alternate kstack.
550  */
551 void
552 vm_thread_new_altkstack(struct thread *td, int pages)
553 {
554 
555 	td->td_altkstack = td->td_kstack;
556 	td->td_altkstack_obj = td->td_kstack_obj;
557 	td->td_altkstack_pages = td->td_kstack_pages;
558 
559 	vm_thread_new(td, pages);
560 }
561 
562 /*
563  * Restore the original kstack.
564  */
565 void
566 vm_thread_dispose_altkstack(struct thread *td)
567 {
568 
569 	vm_thread_dispose(td);
570 
571 	td->td_kstack = td->td_altkstack;
572 	td->td_kstack_obj = td->td_altkstack_obj;
573 	td->td_kstack_pages = td->td_altkstack_pages;
574 	td->td_altkstack = 0;
575 	td->td_altkstack_obj = NULL;
576 	td->td_altkstack_pages = 0;
577 }
578 
579 /*
580  * Implement fork's actions on an address space.
581  * Here we arrange for the address space to be copied or referenced,
582  * allocate a user struct (pcb and kernel stack), then call the
583  * machine-dependent layer to fill those in and make the new process
584  * ready to run.  The new process is set up so that it returns directly
585  * to user mode to avoid stack copying and relocation problems.
586  */
587 void
588 vm_forkproc(td, p2, td2, flags)
589 	struct thread *td;
590 	struct proc *p2;
591 	struct thread *td2;
592 	int flags;
593 {
594 	struct proc *p1 = td->td_proc;
595 	struct user *up;
596 
597 	GIANT_REQUIRED;
598 
599 	if ((flags & RFPROC) == 0) {
600 		/*
601 		 * Divorce the memory, if it is shared; essentially,
602 		 * this changes shared memory amongst threads into
603 		 * COW locally.
604 		 */
605 		if ((flags & RFMEM) == 0) {
606 			if (p1->p_vmspace->vm_refcnt > 1) {
607 				vmspace_unshare(p1);
608 			}
609 		}
610 		cpu_fork(td, p2, td2, flags);
611 		return;
612 	}
613 
614 	if (flags & RFMEM) {
615 		p2->p_vmspace = p1->p_vmspace;
616 		p1->p_vmspace->vm_refcnt++;
617 	}
618 
619 	while (vm_page_count_severe()) {
620 		VM_WAIT;
621 	}
622 
623 	if ((flags & RFMEM) == 0) {
624 		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
625 
626 		pmap_pinit2(vmspace_pmap(p2->p_vmspace));
627 
628 		if (p1->p_vmspace->vm_shm)
629 			shmfork(p1, p2);
630 	}
631 
632 	/* XXXKSE this is unsatisfactory but should be adequate */
633 	up = p2->p_uarea;
634 	MPASS(p2->p_sigacts != NULL);
635 
636 	/*
637 	 * p_stats currently points at fields in the user struct
638 	 * but not at &u, instead at p_addr. Copy parts of
639 	 * p_stats; zero the rest of p_stats (statistics).
640 	 */
641 	p2->p_stats = &up->u_stats;
642 	bzero(&up->u_stats.pstat_startzero,
643 	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
644 		(caddr_t) &up->u_stats.pstat_startzero));
645 	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
646 	    ((caddr_t) &up->u_stats.pstat_endcopy -
647 		(caddr_t) &up->u_stats.pstat_startcopy));
648 
649 	/*
650 	 * cpu_fork will copy and update the pcb, set up the kernel stack,
651 	 * and make the child ready to run.
652 	 */
653 	cpu_fork(td, p2, td2, flags);
654 }
655 
656 /*
657  * Called after a process has been wait(2)'ed upon and is being reaped.
658  * The idea is to reclaim resources that we could not reclaim while
659  * the process was still executing.
660  */
661 void
662 vm_waitproc(p)
663 	struct proc *p;
664 {
665 
666 	GIANT_REQUIRED;
667 	vmspace_exitfree(p);		/* and clean-out the vmspace */
668 }
669 
670 /*
671  * Set default limits for VM system.
672  * Called for proc 0, and then inherited by all others.
673  *
674  * XXX should probably act directly on proc0.
675  */
676 static void
677 vm_init_limits(udata)
678 	void *udata;
679 {
680 	struct proc *p = udata;
681 	int rss_limit;
682 
683 	/*
684 	 * Set up the initial limits on process VM. Set the maximum resident
685 	 * set size to be half of (reasonably) available memory.  Since this
686 	 * is a soft limit, it comes into effect only when the system is out
687 	 * of memory - half of main memory helps to favor smaller processes,
688 	 * and reduces thrashing of the object cache.
689 	 */
690 	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
691 	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
692 	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
693 	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
694 	/* limit the limit to no less than 2MB */
695 	rss_limit = max(cnt.v_free_count, 512);
696 	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
697 	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
698 }
699 
700 void
701 faultin(p)
702 	struct proc *p;
703 {
704 #ifdef NO_SWAPPING
705 
706 	PROC_LOCK_ASSERT(p, MA_OWNED);
707 	if ((p->p_sflag & PS_INMEM) == 0)
708 		panic("faultin: proc swapped out with NO_SWAPPING!");
709 #else /* !NO_SWAPPING */
710 	struct thread *td;
711 
712 	GIANT_REQUIRED;
713 	PROC_LOCK_ASSERT(p, MA_OWNED);
714 	/*
715 	 * If another process is swapping in this process,
716 	 * just wait until it finishes.
717 	 */
718 	if (p->p_sflag & PS_SWAPPINGIN)
719 		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
720 	else if ((p->p_sflag & PS_INMEM) == 0) {
721 		/*
722 		 * Don't let another thread swap process p out while we are
723 		 * busy swapping it in.
724 		 */
725 		++p->p_lock;
726 		mtx_lock_spin(&sched_lock);
727 		p->p_sflag |= PS_SWAPPINGIN;
728 		mtx_unlock_spin(&sched_lock);
729 		PROC_UNLOCK(p);
730 
731 		vm_proc_swapin(p);
732 		FOREACH_THREAD_IN_PROC(p, td)
733 			vm_thread_swapin(td);
734 
735 		PROC_LOCK(p);
736 		mtx_lock_spin(&sched_lock);
737 		p->p_sflag &= ~PS_SWAPPINGIN;
738 		p->p_sflag |= PS_INMEM;
739 		FOREACH_THREAD_IN_PROC(p, td) {
740 			TD_CLR_SWAPPED(td);
741 			if (TD_CAN_RUN(td))
742 				setrunnable(td);
743 		}
744 		mtx_unlock_spin(&sched_lock);
745 
746 		wakeup(&p->p_sflag);
747 
748 		/* Allow other threads to swap p out now. */
749 		--p->p_lock;
750 	}
751 #endif /* NO_SWAPPING */
752 }
753 
754 /*
755  * This swapin algorithm attempts to swap in processes only if there
756  * is enough space for them.  Of course, if a process waits for a long
757  * time, it will be swapped in anyway.
758  *
759  *  XXXKSE - process with the thread with highest priority counts..
760  *
761  * Giant is still held at this point, to be released in tsleep.
762  */
763 /* ARGSUSED*/
764 static void
765 scheduler(dummy)
766 	void *dummy;
767 {
768 	struct proc *p;
769 	struct thread *td;
770 	int pri;
771 	struct proc *pp;
772 	int ppri;
773 
774 	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
775 	/* GIANT_REQUIRED */
776 
777 loop:
778 	if (vm_page_count_min()) {
779 		VM_WAIT;
780 		goto loop;
781 	}
782 
783 	pp = NULL;
784 	ppri = INT_MIN;
785 	sx_slock(&allproc_lock);
786 	FOREACH_PROC_IN_SYSTEM(p) {
787 		struct ksegrp *kg;
788 		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
789 			continue;
790 		}
791 		mtx_lock_spin(&sched_lock);
792 		FOREACH_THREAD_IN_PROC(p, td) {
793 			/*
794 			 * An otherwise runnable thread of a process
795 			 * swapped out has only the TDI_SWAPPED bit set.
796 			 *
797 			 */
798 			if (td->td_inhibitors == TDI_SWAPPED) {
799 				kg = td->td_ksegrp;
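				/*
				 * Rank candidates by time spent swapped out
				 * plus time asleep; a higher nice value lowers
				 * the rank unless a swapin was explicitly
				 * requested (PS_SWAPINREQ).
				 */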
800 				pri = p->p_swtime + kg->kg_slptime;
801 				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
802 					pri -= kg->kg_nice * 8;
803 				}
804 
805 				/*
806 				 * if this ksegrp is higher priority
807 				 * and there is enough space, then select
808 				 * this process instead of the previous
809 				 * selection.
810 				 */
811 				if (pri > ppri) {
812 					pp = p;
813 					ppri = pri;
814 				}
815 			}
816 		}
817 		mtx_unlock_spin(&sched_lock);
818 	}
819 	sx_sunlock(&allproc_lock);
820 
821 	/*
822 	 * Nothing to do, back to sleep.
823 	 */
824 	if ((p = pp) == NULL) {
825 		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
826 		goto loop;
827 	}
828 	PROC_LOCK(p);
829 
830 	/*
831 	 * Another process may be bringing or may have already
832 	 * brought this process in while we traverse all threads.
833 	 * Or, this process may even be being swapped out again.
834 	 */
835 	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
836 		PROC_UNLOCK(p);
837 		goto loop;
838 	}
839 
840 	mtx_lock_spin(&sched_lock);
841 	p->p_sflag &= ~PS_SWAPINREQ;
842 	mtx_unlock_spin(&sched_lock);
843 
844 	/*
845 	 * We would like to bring someone in (only if there is space).
846 	 * [What checks the space?]
847 	 */
848 	faultin(p);
849 	PROC_UNLOCK(p);
850 	mtx_lock_spin(&sched_lock);
851 	p->p_swtime = 0;
852 	mtx_unlock_spin(&sched_lock);
853 	goto loop;
854 }
855 
856 #ifndef NO_SWAPPING
857 
858 /*
859  * Swap_idle_threshold1 is the guaranteed swapped in time for a process
860  */
861 static int swap_idle_threshold1 = 2;
862 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
863     &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
864 
865 /*
866  * swap_idle_threshold2 is the time that a process can be idle before
867  * it will be swapped out, if idle swapping is enabled.
868  */
869 static int swap_idle_threshold2 = 10;
870 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
871     &swap_idle_threshold2, 0, "Time before a process will be swapped out");
872 
873 /*
874  * Swapout is driven by the pageout daemon.  Very simply, we find eligible
875  * procs and unwire their u-areas.  We try to always "swap" at least one
876  * process in case we need the room for a swapin.
877  * If any procs have been sleeping/stopped for at least maxslp seconds,
878  * they are swapped.  Else, we swap the longest-sleeping or stopped process,
879  * if any, otherwise the longest-resident process.
880  */
881 void
882 swapout_procs(action)
883 int action;
884 {
885 	struct proc *p;
886 	struct thread *td;
887 	struct ksegrp *kg;
888 	int didswap = 0;
889 
890 	GIANT_REQUIRED;
891 
892 retry:
893 	sx_slock(&allproc_lock);
894 	FOREACH_PROC_IN_SYSTEM(p) {
895 		struct vmspace *vm;
896 		int minslptime = 100000;
897 
898 		/*
899 		 * Watch out for a process in
900 		 * creation.  It may have no
901 		 * address space or lock yet.
902 		 */
903 		mtx_lock_spin(&sched_lock);
904 		if (p->p_state == PRS_NEW) {
905 			mtx_unlock_spin(&sched_lock);
906 			continue;
907 		}
908 		mtx_unlock_spin(&sched_lock);
909 
910 		/*
911 		 * An aio daemon switches its
912 		 * address space while running.
913 		 * Perform a quick check whether
914 		 * a process has P_SYSTEM.
915 		 */
916 		if ((p->p_flag & P_SYSTEM) != 0)
917 			continue;
918 
919 		/*
920 		 * Do not swap out a process that
921 		 * is waiting for VM data
922 		 * structures as there is a possible
923 		 * deadlock.  Test this first as
924 		 * this may block.
925 		 *
926 		 * Lock the map until swapout
927 		 * finishes, or a thread of this
928 		 * process may attempt to alter
929 		 * the map.
930 		 */
931 		PROC_LOCK(p);
932 		vm = p->p_vmspace;
933 		KASSERT(vm != NULL,
934 			("swapout_procs: a process has no address space"));
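		/*
		 * Hold a reference on the vmspace so it cannot be freed
		 * while the proc lock is dropped below.
		 */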
935 		++vm->vm_refcnt;
936 		PROC_UNLOCK(p);
937 		if (!vm_map_trylock(&vm->vm_map))
938 			goto nextproc1;
939 
940 		PROC_LOCK(p);
941 		if (p->p_lock != 0 ||
942 		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
943 		    ) != 0) {
944 			goto nextproc2;
945 		}
946 		/*
947 		 * Only aiod changes the vmspace; however, it will be
948 		 * skipped because of the if statement above checking
949 		 * for P_SYSTEM.
950 		 */
951 		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
952 			goto nextproc2;
953 
954 		switch (p->p_state) {
955 		default:
956 			/* Don't swap out processes in any sort
957 			 * of 'special' state. */
958 			break;
959 
960 		case PRS_NORMAL:
961 			mtx_lock_spin(&sched_lock);
962 			/*
963 			 * Do not swap out a realtime process.
964 			 * Check all the thread groups.
965 			 */
966 			FOREACH_KSEGRP_IN_PROC(p, kg) {
967 				if (PRI_IS_REALTIME(kg->kg_pri_class))
968 					goto nextproc;
969 
970 				/*
971 				 * Guarantee swap_idle_threshold1
972 				 * time in memory.
973 				 */
974 				if (kg->kg_slptime < swap_idle_threshold1)
975 					goto nextproc;
976 
977 				/*
978 				 * Do not swap out a process if it is
979 				 * waiting on a critical event of some
980 				 * kind or there is a thread whose
981 				 * pageable memory may be accessed.
982 				 *
983 				 * This could be refined to support
984 				 * swapping out a thread.
985 				 */
986 				FOREACH_THREAD_IN_GROUP(kg, td) {
987 					if ((td->td_priority) < PSOCK ||
988 					    !thread_safetoswapout(td))
989 						goto nextproc;
990 				}
991 				/*
992 				 * If the system is under memory stress,
993 				 * or if we are swapping
994 				 * idle processes >= swap_idle_threshold2,
995 				 * then swap the process out.
996 				 */
997 				if (((action & VM_SWAP_NORMAL) == 0) &&
998 				    (((action & VM_SWAP_IDLE) == 0) ||
999 				    (kg->kg_slptime < swap_idle_threshold2)))
1000 					goto nextproc;
1001 
1002 				if (minslptime > kg->kg_slptime)
1003 					minslptime = kg->kg_slptime;
1004 			}
1005 
1006 			/*
1007 			 * If the process has been asleep for a while and had
1008 			 * most of its pages taken away already, swap it out.
1009 			 */
1010 			if ((action & VM_SWAP_NORMAL) ||
1011 				((action & VM_SWAP_IDLE) &&
1012 				 (minslptime > swap_idle_threshold2))) {
1013 				swapout(p);
1014 				didswap++;
1015 				mtx_unlock_spin(&sched_lock);
1016 				PROC_UNLOCK(p);
1017 				vm_map_unlock(&vm->vm_map);
1018 				vmspace_free(vm);
1019 				sx_sunlock(&allproc_lock);
1020 				goto retry;
1021 			}
1022 nextproc:
1023 			mtx_unlock_spin(&sched_lock);
1024 		}
1025 nextproc2:
1026 		PROC_UNLOCK(p);
1027 		vm_map_unlock(&vm->vm_map);
1028 nextproc1:
1029 		vmspace_free(vm);
1030 		continue;
1031 	}
1032 	sx_sunlock(&allproc_lock);
1033 	/*
1034 	 * If we swapped something out, and another process needed memory,
1035 	 * then wake up the sched process.
1036 	 */
1037 	if (didswap)
1038 		wakeup(&proc0);
1039 }
1040 
1041 static void
1042 swapout(p)
1043 	struct proc *p;
1044 {
1045 	struct thread *td;
1046 
1047 	PROC_LOCK_ASSERT(p, MA_OWNED);
1048 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
1049 #if defined(SWAP_DEBUG)
1050 	printf("swapping out %d\n", p->p_pid);
1051 #endif
1052 
1053 	/*
1054 	 * The states of this process and its threads may have changed
1055 	 * by now.  Assuming that there is only one pageout daemon thread,
1056 	 * this process should still be in memory.
1057 	 */
1058 	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
1059 		("swapout: lost a swapout race?"));
1060 
1061 #if defined(INVARIANTS)
1062 	/*
1063 	 * Make sure that all threads are safe to be swapped out.
1064 	 *
1065 	 * Alternatively, we could swap out only safe threads.
1066 	 */
1067 	FOREACH_THREAD_IN_PROC(p, td) {
1068 		KASSERT(thread_safetoswapout(td),
1069 			("swapout: there is a thread not safe for swapout"));
1070 	}
1071 #endif /* INVARIANTS */
1072 
1073 	++p->p_stats->p_ru.ru_nswap;
1074 	/*
1075 	 * remember the process resident count
1076 	 */
1077 	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
1078 
1079 	p->p_sflag &= ~PS_INMEM;
1080 	p->p_sflag |= PS_SWAPPINGOUT;
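	/*
	 * Drop the proc and sched locks while the u-area and the kernel
	 * stacks are unwired; both locks are reacquired before returning.
	 */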
1081 	PROC_UNLOCK(p);
1082 	FOREACH_THREAD_IN_PROC(p, td)
1083 		TD_SET_SWAPPED(td);
1084 	mtx_unlock_spin(&sched_lock);
1085 
1086 	vm_proc_swapout(p);
1087 	FOREACH_THREAD_IN_PROC(p, td)
1088 		vm_thread_swapout(td);
1089 
1090 	PROC_LOCK(p);
1091 	mtx_lock_spin(&sched_lock);
1092 	p->p_sflag &= ~PS_SWAPPINGOUT;
1093 	p->p_swtime = 0;
1094 }
1095 #endif /* !NO_SWAPPING */
1096