xref: /freebsd/sys/vm/vm_glue.c (revision dcc3a33188bceb5b6e819efdb9c5f72d059084b6)
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0);

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
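
/*
 * Hypothetical usage sketch (not taken from the tree): a caller might
 * use useracc() as a cheap pre-check before a copyin()-style transfer.
 * Per the warning above, the check covers only the map entries, so the
 * copy itself can still fault:
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uaddr, kbuf, len);
 */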

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
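
/*
 * A sketch of the intended vslock()/vsunlock() pattern (per the comment
 * in vslock() above, the sysctl code is the only present user; the
 * names below are illustrative only):
 *
 *	error = vslock(uaddr, len);
 *	if (error != 0)
 *		return (error);
 *	error = copyout(kbuf, uaddr, len);
 *	vsunlock(uaddr, len);
 */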

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_LOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (rv != VM_PAGER_OK) {
			vm_page_lock_queues();
			vm_page_free(m);
			vm_page_unlock_queues();
			m = NULL;
			goto out;
		}
	}
	vm_page_lock_queues();
	vm_page_hold(m);
	vm_page_unlock_queues();
	vm_page_wakeup(m);
out:
	VM_OBJECT_UNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock_queues();
	vm_page_unhold(m);
	vm_page_unlock_queues();
}
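
/*
 * Illustrative pairing in an image activator (not from this file): map
 * a page of the executable, copy out of it, then drop both the mapping
 * and the hold:
 *
 *	sf = vm_imgact_map_page(object, offset);
 *	if (sf == NULL)
 *		return (EIO);
 *	bcopy((void *)sf_buf_kva(sf), dst, len);
 *	vm_imgact_unmap_page(sf);
 */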
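
/*
 * Kernel stacks of the default size are cached on free instead of being
 * released: the cache entry is stored in the first words of the stack's
 * own KVA, so no separate allocation is required.
 */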
struct kstack_cache_entry {
	vm_object_t ksobj;
	struct kstack_cache_entry *next_ks_entry;
};

static struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of cached kernel stacks");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks allocated");

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of fork for a process
 * and of thread creation.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	struct kstack_cache_entry *ks_ce;
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

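	/*
	 * Try to reuse a cached stack of the default size; the ksobj
	 * pointer was stashed at the base of the stack's KVA when the
	 * stack was cached by vm_thread_dispose().
	 */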
	if (pages == KSTACK_PAGES) {
		mtx_lock(&kstack_cache_mtx);
		if (kstack_cache != NULL) {
			ks_ce = kstack_cache;
			kstack_cache = ks_ce->next_ks_entry;
			mtx_unlock(&kstack_cache_mtx);

			td->td_kstack_obj = ks_ce->ksobj;
			td->td_kstack = (vm_offset_t)ks_ce;
			td->td_kstack_pages = KSTACK_PAGES;
			return (1);
		}
		mtx_unlock(&kstack_cache_mtx);
	}

	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	atomic_add_int(&kstacks, 1);
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
	vm_page_t m;
	int i;

	atomic_add_int(&kstacks, -1);
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_stack_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	struct kstack_cache_entry *ks_ce;
	int pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
		ks_ce = (struct kstack_cache_entry *)ks;
		ks_ce->ksobj = ksobj;
		mtx_lock(&kstack_cache_mtx);
		ks_ce->next_ks_entry = kstack_cache;
		kstack_cache = ks_ce;
		mtx_unlock(&kstack_cache_mtx);
		return;
	}
	vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
	struct kstack_cache_entry *ks_ce, *ks_ce1;

	mtx_lock(&kstack_cache_mtx);
	ks_ce = kstack_cache;
	kstack_cache = NULL;
	mtx_unlock(&kstack_cache_mtx);

	while (ks_ce != NULL) {
		ks_ce1 = ks_ce;
		ks_ce = ks_ce->next_ks_entry;

		vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
		    KSTACK_PAGES);
	}
}

static void
kstack_cache_init(void *nulll)
{

	EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
	    EVENTHANDLER_PRI_ANY);
}

MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();
		vm_page_wakeup(m);
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared: essentially this
		 * changes memory shared amongst threads into memory that
		 * is COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after a process has been wait(2)ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}
624 
625 /*
626  * Set default limits for VM system.
627  * Called for proc 0, and then inherited by all others.
628  *
629  * XXX should probably act directly on proc0.
630  */
631 static void
632 vm_init_limits(udata)
633 	void *udata;
634 {
635 	struct proc *p = udata;
636 	struct plimit *limp;
637 	int rss_limit;
638 
639 	/*
640 	 * Set up the initial limits on process VM. Set the maximum resident
641 	 * set size to be half of (reasonably) available memory.  Since this
642 	 * is a soft limit, it comes into effect only when the system is out
643 	 * of memory - half of main memory helps to favor smaller processes,
644 	 * and reduces thrashing of the object cache.
645 	 */
646 	limp = p->p_limit;
647 	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
648 	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
649 	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
650 	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
651 	/* limit the limit to no less than 2MB */
652 	rss_limit = max(cnt.v_free_count, 512);
653 	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
654 	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
655 }

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * cannot change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * If this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 * [What checks the space?]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	goto loop;
}
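
/*
 * Wake up the swapin scheduler above, which sleeps on &proc0.
 */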
void
kick_proc0(void)
{

	wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed time (in seconds) a process is
 * kept swapped in before it becomes a candidate for swapout.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time (in seconds) a process can be idle
 * before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and swap out their stacks.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		/*
		 * Watch out for a process in creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW)
			continue;
		/*
		 * An aio daemon switches its address space while running.
		 * Perform a quick check whether a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		/*
		 * Do not swap out a process that is waiting for VM data
		 * structures, as there is a possible deadlock.  Test this
		 * first as this may block.
		 *
		 * Lock the map until swapout finishes, or a thread of this
		 * process may attempt to alter the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE | P_TRACED | P_SYSTEM |
		    P_WEXIT)) != 0) {
			goto nextproc;
		}
		/*
		 * Only aiod changes its vmspace; however, it will be
		 * skipped because of the P_SYSTEM check above.
		 */
		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
			goto nextproc;

		switch (p->p_state) {
		default:
			/*
			 * Don't swap out processes in any sort of
			 * 'special' state.
			 */
			break;

		case PRS_NORMAL:
			/*
			 * Do not swap out a realtime process.
			 * Check all of its threads.
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swap out a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping out idle processes
				 * that have been asleep for at least
				 * swap_idle_threshold2 seconds, then
				 * swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				if (swapout(p) == 0)
					didswap++;
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
		}
nextproc:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			if (setrunnable(td)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
	    ("swapout: lost a swapout race?"));

	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */