xref: /freebsd/sys/vm/vm_glue.c (revision f0a75d274af375d15b97b830966b99a02b7db911)
1 /*-
2  * Copyright (c) 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 4. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Permission to use, copy, modify and distribute this software and
39  * its documentation is hereby granted, provided that both the copyright
40  * notice and this permission notice appear in all copies of the
41  * software, derivative works or modified versions, and any portions
42  * thereof, and that both notices appear in supporting documentation.
43  *
44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47  *
48  * Carnegie Mellon requests users of this software to return to
49  *
50  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
51  *  School of Computer Science
52  *  Carnegie Mellon University
53  *  Pittsburgh PA 15213-3890
54  *
55  * any improvements or extensions that they make and grant Carnegie the
56  * rights to redistribute these changes.
57  */
58 
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61 
62 #include "opt_vm.h"
63 #include "opt_kstack_pages.h"
64 #include "opt_kstack_max_pages.h"
65 
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/limits.h>
69 #include <sys/lock.h>
70 #include <sys/mutex.h>
71 #include <sys/proc.h>
72 #include <sys/resourcevar.h>
73 #include <sys/sched.h>
74 #include <sys/sf_buf.h>
75 #include <sys/shm.h>
76 #include <sys/vmmeter.h>
77 #include <sys/sx.h>
78 #include <sys/sysctl.h>
79 
80 #include <sys/kernel.h>
81 #include <sys/ktr.h>
82 #include <sys/unistd.h>
83 
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/pmap.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_pager.h>
94 #include <vm/swap_pager.h>
95 
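/*
 * maxslp is the sleep time, in seconds, after which a sleeping or stopped
 * process becomes a prime candidate for swapout; see the policy notes
 * above swapout_procs() below.
 */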
96 extern int maxslp;
97 
98 /*
99  * System initialization
100  *
101  * Note: proc0 from proc.h
102  */
103 static void vm_init_limits(void *);
104 SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)
105 
106 /*
107  * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
108  *
109  * Note: run scheduling should be divorced from the vm system.
110  */
111 static void scheduler(void *);
112 SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)
113 
114 #ifndef NO_SWAPPING
115 static void swapout(struct proc *);
116 #endif
117 
118 
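/*
 * Set by kick_proc0() when thread0 cannot be woken directly; tells the
 * scheduler() loop below to rescan for swapin candidates before it puts
 * itself back to sleep.
 */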
119 static volatile int proc0_rescan;
120 
121 
122 /*
123  * MPSAFE
124  *
125  * WARNING!  This code calls vm_map_check_protection() which only checks
126  * the associated vm_map_entry range.  It does not determine whether the
127  * contents of the memory are actually readable or writable.  In most cases
128  * just checking the vm_map_entry is sufficient within the kernel's address
129  * space.
130  */
131 int
132 kernacc(addr, len, rw)
133 	void *addr;
134 	int len, rw;
135 {
136 	boolean_t rv;
137 	vm_offset_t saddr, eaddr;
138 	vm_prot_t prot;
139 
140 	KASSERT((rw & ~VM_PROT_ALL) == 0,
141 	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
142 
143 	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
144 	    (vm_offset_t)addr + len < (vm_offset_t)addr)
145 		return (FALSE);
146 
147 	prot = rw;
148 	saddr = trunc_page((vm_offset_t)addr);
149 	eaddr = round_page((vm_offset_t)addr + len);
150 	vm_map_lock_read(kernel_map);
151 	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
152 	vm_map_unlock_read(kernel_map);
153 	return (rv == TRUE);
154 }
155 
156 /*
157  * MPSAFE
158  *
159  * WARNING!  This code calls vm_map_check_protection() which only checks
160  * the associated vm_map_entry range.  It does not determine whether the
161  * contents of the memory is actually readable or writable.  vmapbuf(),
162  * vm_fault_quick(), or copyin()/copout()/su*()/fu*() functions should be
163  * used in conjuction with this call.
164  */
165 int
166 useracc(addr, len, rw)
167 	void *addr;
168 	int len, rw;
169 {
170 	boolean_t rv;
171 	vm_prot_t prot;
172 	vm_map_t map;
173 
174 	KASSERT((rw & ~VM_PROT_ALL) == 0,
175 	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
176 	prot = rw;
177 	map = &curproc->p_vmspace->vm_map;
178 	if ((vm_offset_t)addr + len > vm_map_max(map) ||
179 	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
180 		return (FALSE);
181 	}
182 	vm_map_lock_read(map);
183 	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
184 	    round_page((vm_offset_t)addr + len), prot);
185 	vm_map_unlock_read(map);
186 	return (rv == TRUE);
187 }
188 
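/*
 * Wire the pages backing the given range of the current process's address
 * space so that they can be accessed without faulting, subject to the
 * vm_page_max_wired and RLIMIT_MEMLOCK limits checked below.  The range is
 * unwired again by vsunlock().
 */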
189 int
190 vslock(void *addr, size_t len)
191 {
192 	vm_offset_t end, last, start;
193 	vm_size_t npages;
194 	int error;
195 
196 	last = (vm_offset_t)addr + len;
197 	start = trunc_page((vm_offset_t)addr);
198 	end = round_page(last);
199 	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
200 		return (EINVAL);
201 	npages = atop(end - start);
202 	if (npages > vm_page_max_wired)
203 		return (ENOMEM);
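	/*
	 * Charge the request against RLIMIT_MEMLOCK: pages already wired in
	 * this pmap plus the pages about to be wired, compared in bytes.
	 */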
204 	PROC_LOCK(curproc);
205 	if (ptoa(npages +
206 	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
207 	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
208 		PROC_UNLOCK(curproc);
209 		return (ENOMEM);
210 	}
211 	PROC_UNLOCK(curproc);
212 #if 0
213 	/*
214 	 * XXX - not yet
215 	 *
216 	 * The limit for transient usage of wired pages should be
217 	 * larger than for "permanent" wired pages (mlock()).
218 	 *
219 	 * Also, the sysctl code, which is the only present user
220 	 * of vslock(), does a hard loop on EAGAIN.
221 	 */
222 	if (npages + cnt.v_wire_count > vm_page_max_wired)
223 		return (EAGAIN);
224 #endif
225 	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
226 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
227 	/*
228 	 * Return EFAULT on error to match copy{in,out}() behaviour
229 	 * rather than returning ENOMEM like mlock() would.
230 	 */
231 	return (error == KERN_SUCCESS ? 0 : EFAULT);
232 }
233 
234 void
235 vsunlock(void *addr, size_t len)
236 {
237 
238 	/* Rely on the parameter sanity checks performed by vslock(). */
239 	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
240 	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
241 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
242 }
243 
244 /*
245  * Pin the page contained within the given object at the given offset.  If the
246  * page is not resident, allocate and load it using the given object's pager.
247  * Return the pinned page if successful; otherwise, return NULL.
248  */
249 static vm_page_t
250 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
251 {
252 	vm_page_t m, ma[1];
253 	vm_pindex_t pindex;
254 	int rv;
255 
256 	VM_OBJECT_LOCK(object);
257 	pindex = OFF_TO_IDX(offset);
258 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
259 	if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
260 		ma[0] = m;
261 		rv = vm_pager_get_pages(object, ma, 1, 0);
262 		m = vm_page_lookup(object, pindex);
263 		if (m == NULL)
264 			goto out;
265 		if (m->valid == 0 || rv != VM_PAGER_OK) {
266 			vm_page_lock_queues();
267 			vm_page_free(m);
268 			vm_page_unlock_queues();
269 			m = NULL;
270 			goto out;
271 		}
272 	}
273 	vm_page_lock_queues();
274 	vm_page_hold(m);
275 	vm_page_unlock_queues();
276 	vm_page_wakeup(m);
277 out:
278 	VM_OBJECT_UNLOCK(object);
279 	return (m);
280 }
281 
282 /*
283  * Return a CPU private mapping to the page at the given offset within the
284  * given object.  The page is pinned before it is mapped.
285  */
286 struct sf_buf *
287 vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
288 {
289 	vm_page_t m;
290 
291 	m = vm_imgact_hold_page(object, offset);
292 	if (m == NULL)
293 		return (NULL);
294 	sched_pin();
295 	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
296 }
297 
298 /*
299  * Destroy the given CPU private mapping and unpin the page that it mapped.
300  */
301 void
302 vm_imgact_unmap_page(struct sf_buf *sf)
303 {
304 	vm_page_t m;
305 
306 	m = sf_buf_page(sf);
307 	sf_buf_free(sf);
308 	sched_unpin();
309 	vm_page_lock_queues();
310 	vm_page_unhold(m);
311 	vm_page_unlock_queues();
312 }
313 
314 #ifndef KSTACK_MAX_PAGES
315 #define KSTACK_MAX_PAGES 32
316 #endif
317 
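/*
 * A kernel stack is backed by its own VM object and mapped into a KVA range
 * of (KSTACK_GUARD_PAGES + pages) pages.  The guard pages at the low end of
 * the range are left unmapped so that a stack overflow faults instead of
 * silently corrupting adjacent kernel memory.
 */
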
318 /*
319  * Create the kernel stack (including pcb for i386) for a new thread.
320  * This routine directly affects fork performance for a process and
321  * thread creation performance.
322  */
323 void
324 vm_thread_new(struct thread *td, int pages)
325 {
326 	vm_object_t ksobj;
327 	vm_offset_t ks;
328 	vm_page_t m, ma[KSTACK_MAX_PAGES];
329 	int i;
330 
331 	/* Bounds check */
332 	if (pages <= 1)
333 		pages = KSTACK_PAGES;
334 	else if (pages > KSTACK_MAX_PAGES)
335 		pages = KSTACK_MAX_PAGES;
336 	/*
337 	 * Allocate an object for the kstack.
338 	 */
339 	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
340 	td->td_kstack_obj = ksobj;
341 	/*
342 	 * Get a kernel virtual address for this thread's kstack.
343 	 */
344 	ks = kmem_alloc_nofault(kernel_map,
345 	   (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
346 	if (ks == 0)
347 		panic("vm_thread_new: kstack allocation failed");
348 	if (KSTACK_GUARD_PAGES != 0) {
349 		pmap_qremove(ks, KSTACK_GUARD_PAGES);
350 		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
351 	}
352 	td->td_kstack = ks;
353 	/*
354 	 * Knowing the number of pages allocated is useful when you
355 	 * want to deallocate them.
356 	 */
357 	td->td_kstack_pages = pages;
358 	/*
359 	 * For the length of the stack, link in a real page of ram for each
360 	 * page of stack.
361 	 */
362 	VM_OBJECT_LOCK(ksobj);
363 	for (i = 0; i < pages; i++) {
364 		/*
365 		 * Get a kernel stack page.
366 		 */
367 		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
368 		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
369 		ma[i] = m;
370 		m->valid = VM_PAGE_BITS_ALL;
371 	}
372 	VM_OBJECT_UNLOCK(ksobj);
373 	pmap_qenter(ks, ma, pages);
374 }
375 
376 /*
377  * Dispose of a thread's kernel stack.
378  */
379 void
380 vm_thread_dispose(struct thread *td)
381 {
382 	vm_object_t ksobj;
383 	vm_offset_t ks;
384 	vm_page_t m;
385 	int i, pages;
386 
387 	pages = td->td_kstack_pages;
388 	ksobj = td->td_kstack_obj;
389 	ks = td->td_kstack;
390 	pmap_qremove(ks, pages);
391 	VM_OBJECT_LOCK(ksobj);
392 	for (i = 0; i < pages; i++) {
393 		m = vm_page_lookup(ksobj, i);
394 		if (m == NULL)
395 			panic("vm_thread_dispose: kstack already missing?");
396 		vm_page_lock_queues();
397 		vm_page_unwire(m, 0);
398 		vm_page_free(m);
399 		vm_page_unlock_queues();
400 	}
401 	VM_OBJECT_UNLOCK(ksobj);
402 	vm_object_deallocate(ksobj);
403 	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
404 	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
405 }
406 
407 /*
408  * Allow a thread's kernel stack to be paged out.
409  */
410 void
411 vm_thread_swapout(struct thread *td)
412 {
413 	vm_object_t ksobj;
414 	vm_page_t m;
415 	int i, pages;
416 
417 	cpu_thread_swapout(td);
418 	pages = td->td_kstack_pages;
419 	ksobj = td->td_kstack_obj;
420 	pmap_qremove(td->td_kstack, pages);
421 	VM_OBJECT_LOCK(ksobj);
422 	for (i = 0; i < pages; i++) {
423 		m = vm_page_lookup(ksobj, i);
424 		if (m == NULL)
425 			panic("vm_thread_swapout: kstack already missing?");
426 		vm_page_lock_queues();
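		/*
		 * Dirty the page so its contents are written to swap rather
		 * than discarded if the page is reclaimed before swapin.
		 */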
427 		vm_page_dirty(m);
428 		vm_page_unwire(m, 0);
429 		vm_page_unlock_queues();
430 	}
431 	VM_OBJECT_UNLOCK(ksobj);
432 }
433 
434 /*
435  * Bring the kernel stack for a specified thread back in.
436  */
437 void
438 vm_thread_swapin(struct thread *td)
439 {
440 	vm_object_t ksobj;
441 	vm_page_t m, ma[KSTACK_MAX_PAGES];
442 	int i, pages, rv;
443 
444 	pages = td->td_kstack_pages;
445 	ksobj = td->td_kstack_obj;
446 	VM_OBJECT_LOCK(ksobj);
447 	for (i = 0; i < pages; i++) {
448 		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
449 		if (m->valid != VM_PAGE_BITS_ALL) {
450 			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
451 			if (rv != VM_PAGER_OK)
452 				panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
453 			m = vm_page_lookup(ksobj, i);
454 			m->valid = VM_PAGE_BITS_ALL;
455 		}
456 		ma[i] = m;
457 		vm_page_lock_queues();
458 		vm_page_wire(m);
459 		vm_page_unlock_queues();
460 		vm_page_wakeup(m);
461 	}
462 	VM_OBJECT_UNLOCK(ksobj);
463 	pmap_qenter(td->td_kstack, ma, pages);
464 	cpu_thread_swapin(td);
465 }
466 
467 /*
468  * Set up a variable-sized alternate kstack.
469  */
470 void
471 vm_thread_new_altkstack(struct thread *td, int pages)
472 {
473 
474 	td->td_altkstack = td->td_kstack;
475 	td->td_altkstack_obj = td->td_kstack_obj;
476 	td->td_altkstack_pages = td->td_kstack_pages;
477 
478 	vm_thread_new(td, pages);
479 }
480 
481 /*
482  * Restore the original kstack.
483  */
484 void
485 vm_thread_dispose_altkstack(struct thread *td)
486 {
487 
488 	vm_thread_dispose(td);
489 
490 	td->td_kstack = td->td_altkstack;
491 	td->td_kstack_obj = td->td_altkstack_obj;
492 	td->td_kstack_pages = td->td_altkstack_pages;
493 	td->td_altkstack = 0;
494 	td->td_altkstack_obj = NULL;
495 	td->td_altkstack_pages = 0;
496 }
497 
498 /*
499  * Implement fork's actions on an address space.
500  * Here we arrange for the address space to be copied or referenced,
501  * allocate a user struct (pcb and kernel stack), then call the
502  * machine-dependent layer to fill those in and make the new process
503  * ready to run.  The new process is set up so that it returns directly
504  * to user mode to avoid stack copying and relocation problems.
505  */
506 void
507 vm_forkproc(td, p2, td2, flags)
508 	struct thread *td;
509 	struct proc *p2;
510 	struct thread *td2;
511 	int flags;
512 {
513 	struct proc *p1 = td->td_proc;
514 
515 	if ((flags & RFPROC) == 0) {
516 		/*
517 		 * Divorce the memory if it is shared: essentially this
518 		 * changes memory shared amongst threads into memory that
519 		 * is copied on write (COW) locally.
520 		 */
521 		if ((flags & RFMEM) == 0) {
522 			if (p1->p_vmspace->vm_refcnt > 1) {
523 				vmspace_unshare(p1);
524 			}
525 		}
526 		cpu_fork(td, p2, td2, flags);
527 		return;
528 	}
529 
530 	if (flags & RFMEM) {
531 		p2->p_vmspace = p1->p_vmspace;
532 		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
533 	}
534 
535 	while (vm_page_count_severe()) {
536 		VM_WAIT;
537 	}
538 
539 	if ((flags & RFMEM) == 0) {
540 		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
541 		if (p1->p_vmspace->vm_shm)
542 			shmfork(p1, p2);
543 	}
544 
545 	/*
546 	 * cpu_fork will copy and update the pcb, set up the kernel stack,
547 	 * and make the child ready to run.
548 	 */
549 	cpu_fork(td, p2, td2, flags);
550 }
551 
552 /*
553  * Called after the process has been wait(2)'ed upon and is being reaped.
554  * The idea is to reclaim resources that we could not reclaim while
555  * the process was still executing.
556  */
557 void
558 vm_waitproc(p)
559 	struct proc *p;
560 {
561 
562 	vmspace_exitfree(p);		/* and clean-out the vmspace */
563 }
564 
565 /*
566  * Set default limits for VM system.
567  * Called for proc 0, and then inherited by all others.
568  *
569  * XXX should probably act directly on proc0.
570  */
571 static void
572 vm_init_limits(udata)
573 	void *udata;
574 {
575 	struct proc *p = udata;
576 	struct plimit *limp;
577 	int rss_limit;
578 
579 	/*
580 	 * Set up the initial limits on process VM. Set the maximum resident
581 	 * set size to be half of (reasonably) available memory.  Since this
582 	 * is a soft limit, it comes into effect only when the system is out
583 	 * of memory - half of main memory helps to favor smaller processes,
584 	 * and reduces thrashing of the object cache.
585 	 */
586 	limp = p->p_limit;
587 	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
588 	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
589 	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
590 	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
591 	/* limit the limit to no less than 2MB */
592 	rss_limit = max(cnt.v_free_count, 512);
593 	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
594 	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
595 }
596 
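/*
 * Bring a swapped-out process back into memory: swap in each thread's
 * kernel stack, clear the swapped-out state, and let runnable threads be
 * scheduled again.  Called with the process lock held; sleeps if another
 * thread is already swapping the process in.
 */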
597 void
598 faultin(p)
599 	struct proc *p;
600 {
601 #ifdef NO_SWAPPING
602 
603 	PROC_LOCK_ASSERT(p, MA_OWNED);
604 	if ((p->p_sflag & PS_INMEM) == 0)
605 		panic("faultin: proc swapped out with NO_SWAPPING!");
606 #else /* !NO_SWAPPING */
607 	struct thread *td;
608 
609 	PROC_LOCK_ASSERT(p, MA_OWNED);
610 	/*
611 	 * If another process is swapping in this process,
612 	 * just wait until it finishes.
613 	 */
614 	if (p->p_sflag & PS_SWAPPINGIN)
615 		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
616 	else if ((p->p_sflag & PS_INMEM) == 0) {
617 		/*
618 		 * Don't let another thread swap process p out while we are
619 		 * busy swapping it in.
620 		 */
621 		++p->p_lock;
622 		mtx_lock_spin(&sched_lock);
623 		p->p_sflag |= PS_SWAPPINGIN;
624 		mtx_unlock_spin(&sched_lock);
625 		PROC_UNLOCK(p);
626 
627 		FOREACH_THREAD_IN_PROC(p, td)
628 			vm_thread_swapin(td);
629 
630 		PROC_LOCK(p);
631 		mtx_lock_spin(&sched_lock);
632 		p->p_sflag &= ~PS_SWAPPINGIN;
633 		p->p_sflag |= PS_INMEM;
634 		FOREACH_THREAD_IN_PROC(p, td) {
635 			TD_CLR_SWAPPED(td);
636 			if (TD_CAN_RUN(td))
637 				setrunnable(td);
638 		}
639 		mtx_unlock_spin(&sched_lock);
640 
641 		wakeup(&p->p_sflag);
642 
643 		/* Allow other threads to swap p out now. */
644 		--p->p_lock;
645 	}
646 #endif /* NO_SWAPPING */
647 }
648 
649 /*
650  * This swapin algorithm attempts to swap-in processes only if there
651  * is enough space for them.  Of course, if a process waits for a long
652  * time, it will be swapped in anyway.
653  *
654  *  XXXKSE - process with the thread with highest priority counts..
655  *
656  * Giant is held on entry.
657  */
658 /* ARGSUSED*/
659 static void
660 scheduler(dummy)
661 	void *dummy;
662 {
663 	struct proc *p;
664 	struct thread *td;
665 	int pri;
666 	struct proc *pp;
667 	int ppri;
668 
669 	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
670 	mtx_unlock(&Giant);
671 
672 loop:
673 	if (vm_page_count_min()) {
674 		VM_WAIT;
675 		mtx_lock_spin(&sched_lock);
676 		proc0_rescan = 0;
677 		mtx_unlock_spin(&sched_lock);
678 		goto loop;
679 	}
680 
681 	pp = NULL;
682 	ppri = INT_MIN;
683 	sx_slock(&allproc_lock);
684 	FOREACH_PROC_IN_SYSTEM(p) {
685 		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
686 			continue;
687 		}
688 		mtx_lock_spin(&sched_lock);
689 		FOREACH_THREAD_IN_PROC(p, td) {
690 			/*
691 			 * An otherwise runnable thread of a process
692 			 * swapped out has only the TDI_SWAPPED bit set.
693 			 *
694 			 */
695 			if (td->td_inhibitors == TDI_SWAPPED) {
696 				pri = p->p_swtime + td->td_slptime;
697 				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
698 					pri -= p->p_nice * 8;
699 				}
700 
701 				/*
702 				 * if this thread is higher priority
703 				 * and there is enough space, then select
704 				 * this process instead of the previous
705 				 * selection.
706 				 */
707 				if (pri > ppri) {
708 					pp = p;
709 					ppri = pri;
710 				}
711 			}
712 		}
713 		mtx_unlock_spin(&sched_lock);
714 	}
715 	sx_sunlock(&allproc_lock);
716 
717 	/*
718 	 * Nothing to do, back to sleep.
719 	 */
720 	if ((p = pp) == NULL) {
721 		mtx_lock_spin(&sched_lock);
722 		if (!proc0_rescan) {
723 			TD_SET_IWAIT(&thread0);
724 			mi_switch(SW_VOL, NULL);
725 		}
726 		proc0_rescan = 0;
727 		mtx_unlock_spin(&sched_lock);
728 		goto loop;
729 	}
730 	PROC_LOCK(p);
731 
732 	/*
733 	 * Another process may be bringing or may have already
734 	 * brought this process in while we traverse all threads.
735 	 * Or, this process may even be being swapped out again.
736 	 */
737 	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
738 		PROC_UNLOCK(p);
739 		mtx_lock_spin(&sched_lock);
740 		proc0_rescan = 0;
741 		mtx_unlock_spin(&sched_lock);
742 		goto loop;
743 	}
744 
745 	mtx_lock_spin(&sched_lock);
746 	p->p_sflag &= ~PS_SWAPINREQ;
747 	mtx_unlock_spin(&sched_lock);
748 
749 	/*
750 	 * We would like to bring someone in. (only if there is space).
751 	 * [What checks the space? ]
752 	 */
753 	faultin(p);
754 	PROC_UNLOCK(p);
755 	mtx_lock_spin(&sched_lock);
756 	p->p_swtime = 0;
757 	proc0_rescan = 0;
758 	mtx_unlock_spin(&sched_lock);
759 	goto loop;
760 }
761 
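/*
 * Wake the swapper (the scheduler() loop above, running as thread0).  If
 * thread0 is idle waiting for work, put it back on a run queue; otherwise
 * just set proc0_rescan so that the loop re-evaluates its candidate list
 * before sleeping again.
 */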
762 void kick_proc0(void)
763 {
764 	struct thread *td = &thread0;
765 
766 
767 	if (TD_AWAITING_INTR(td)) {
768 		CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0);
769 		TD_CLR_IWAIT(td);
770 		sched_add(td, SRQ_INTR);
771 	} else {
772 		proc0_rescan = 1;
773 		CTR2(KTR_INTR, "%s: state %d",
774 		    __func__, td->td_state);
775 	}
776 
777 }
778 
779 
780 #ifndef NO_SWAPPING
781 
782 /*
783  * Swap_idle_threshold1 is the guaranteed swapped in time for a process
784  */
785 static int swap_idle_threshold1 = 2;
786 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
787     &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
788 
789 /*
790  * Swap_idle_threshold2 is the time that a process can be idle before
791  * it will be swapped out, if idle swapping is enabled.
792  */
793 static int swap_idle_threshold2 = 10;
794 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
795     &swap_idle_threshold2, 0, "Time before a process will be swapped out");
796 
797 /*
798  * Swapout is driven by the pageout daemon.  Very simple, we find eligible
799  * procs and unwire their kernel stacks.  We try to always "swap" at least one
800  * process in case we need the room for a swapin.
801  * If any procs have been sleeping/stopped for at least maxslp seconds,
802  * they are swapped.  Else, we swap the longest-sleeping or stopped process,
803  * if any, otherwise the longest-resident process.
804  */
805 void
806 swapout_procs(action)
807 int action;
808 {
809 	struct proc *p;
810 	struct thread *td;
811 	int didswap = 0;
812 
813 retry:
814 	sx_slock(&allproc_lock);
815 	FOREACH_PROC_IN_SYSTEM(p) {
816 		struct vmspace *vm;
817 		int minslptime = 100000;
818 
819 		/*
820 		 * Watch out for a process in
821 		 * creation.  It may have no
822 		 * address space or lock yet.
823 		 */
824 		mtx_lock_spin(&sched_lock);
825 		if (p->p_state == PRS_NEW) {
826 			mtx_unlock_spin(&sched_lock);
827 			continue;
828 		}
829 		mtx_unlock_spin(&sched_lock);
830 
831 		/*
832 		 * An aio daemon switches its
833 		 * address space while running.
834 		 * Perform a quick check whether
835 		 * a process has P_SYSTEM.
836 		 */
837 		if ((p->p_flag & P_SYSTEM) != 0)
838 			continue;
839 
840 		/*
841 		 * Do not swapout a process that
842 		 * is waiting for VM data
843 		 * structures as there is a possible
844 		 * deadlock.  Test this first as
845 		 * this may block.
846 		 *
847 		 * Lock the map until swapout
848 		 * finishes, or a thread of this
849 		 * process may attempt to alter
850 		 * the map.
851 		 */
852 		vm = vmspace_acquire_ref(p);
853 		if (vm == NULL)
854 			continue;
855 		if (!vm_map_trylock(&vm->vm_map))
856 			goto nextproc1;
857 
858 		PROC_LOCK(p);
859 		if (p->p_lock != 0 ||
860 		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
861 		    ) != 0) {
862 			goto nextproc2;
863 		}
864 		/*
865 		 * only aiod changes vmspace, however it will be
866 		 * skipped because of the if statement above checking
867 		 * for P_SYSTEM
868 		 */
869 		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
870 			goto nextproc2;
871 
872 		switch (p->p_state) {
873 		default:
874 			/* Don't swap out processes in any sort
875 			 * of 'special' state. */
876 			break;
877 
878 		case PRS_NORMAL:
879 			mtx_lock_spin(&sched_lock);
880 			/*
881 			 * Do not swap out a realtime process.
882 			 * Check all of its threads.
883 			 */
884 			FOREACH_THREAD_IN_PROC(p, td) {
885 				if (PRI_IS_REALTIME(td->td_pri_class))
886 					goto nextproc;
887 
888 				/*
889 				 * Guarantee swap_idle_threshold1
890 				 * time in memory.
891 				 */
892 				if (td->td_slptime < swap_idle_threshold1)
893 					goto nextproc;
894 
895 				/*
896 				 * Do not swapout a process if it is
897 				 * waiting on a critical event of some
898 				 * kind or there is a thread whose
899 				 * pageable memory may be accessed.
900 				 *
901 				 * This could be refined to support
902 				 * swapping out a thread.
903 				 */
904 				if ((td->td_priority) < PSOCK ||
905 				    !thread_safetoswapout(td))
906 					goto nextproc;
907 				/*
908 				 * If the system is under memory stress,
909 				 * or if we are swapping
910 				 * idle processes >= swap_idle_threshold2,
911 				 * then swap the process out.
912 				 */
913 				if (((action & VM_SWAP_NORMAL) == 0) &&
914 				    (((action & VM_SWAP_IDLE) == 0) ||
915 				    (td->td_slptime < swap_idle_threshold2)))
916 					goto nextproc;
917 
918 				if (minslptime > td->td_slptime)
919 					minslptime = td->td_slptime;
920 			}
921 
922 			/*
923 			 * If the pageout daemon didn't free enough pages,
924 			 * or if this process is idle and the system is
925 			 * configured to swap proactively, swap it out.
926 			 */
927 			if ((action & VM_SWAP_NORMAL) ||
928 				((action & VM_SWAP_IDLE) &&
929 				 (minslptime > swap_idle_threshold2))) {
930 				swapout(p);
931 				didswap++;
932 				mtx_unlock_spin(&sched_lock);
933 				PROC_UNLOCK(p);
934 				vm_map_unlock(&vm->vm_map);
935 				vmspace_free(vm);
936 				sx_sunlock(&allproc_lock);
937 				goto retry;
938 			}
939 nextproc:
940 			mtx_unlock_spin(&sched_lock);
941 		}
942 nextproc2:
943 		PROC_UNLOCK(p);
944 		vm_map_unlock(&vm->vm_map);
945 nextproc1:
946 		vmspace_free(vm);
947 		continue;
948 	}
949 	sx_sunlock(&allproc_lock);
950 	/*
951 	 * If we swapped something out, and another process needed memory,
952 	 * then wakeup the sched process.
953 	 */
954 	if (didswap)
955 		wakeup(&proc0);
956 }
957 
958 static void
959 swapout(p)
960 	struct proc *p;
961 {
962 	struct thread *td;
963 
964 	PROC_LOCK_ASSERT(p, MA_OWNED);
965 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
966 #if defined(SWAP_DEBUG)
967 	printf("swapping out %d\n", p->p_pid);
968 #endif
969 
970 	/*
971 	 * The states of this process and its threads may have changed
972 	 * by now.  Assuming that there is only one pageout daemon thread,
973 	 * this process should still be in memory.
974 	 */
975 	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
976 		("swapout: lost a swapout race?"));
977 
978 #if defined(INVARIANTS)
979 	/*
980 	 * Make sure that all threads are safe to be swapped out.
981 	 *
982 	 * Alternatively, we could swap out only safe threads.
983 	 */
984 	FOREACH_THREAD_IN_PROC(p, td) {
985 		KASSERT(thread_safetoswapout(td),
986 			("swapout: there is a thread not safe for swapout"));
987 	}
988 #endif /* INVARIANTS */
989 
990 	++p->p_stats->p_ru.ru_nswap;
991 	/*
992 	 * remember the process resident count
993 	 */
994 	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
995 
996 	p->p_sflag &= ~PS_INMEM;
997 	p->p_sflag |= PS_SWAPPINGOUT;
998 	PROC_UNLOCK(p);
999 	FOREACH_THREAD_IN_PROC(p, td)
1000 		TD_SET_SWAPPED(td);
1001 	mtx_unlock_spin(&sched_lock);
1002 
1003 	FOREACH_THREAD_IN_PROC(p, td)
1004 		vm_thread_swapout(td);
1005 
1006 	PROC_LOCK(p);
1007 	mtx_lock_spin(&sched_lock);
1008 	p->p_sflag &= ~PS_SWAPPINGOUT;
1009 	p->p_swtime = 0;
1010 }
1011 #endif /* !NO_SWAPPING */
1012