xref: /freebsd/sys/vm/vm_glue.c (revision 99e8005137088aafb1350e23b113d69b01b0820f)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

#include "opt_rlimit.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

extern int maxslp;

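/*
 * maxslp (nominally MAXSLP, in seconds) bounds how long a process may
 * sleep before the swapper considers it idle; the scheduler loop below
 * also sleeps up to maxslp * hz / 2 ticks between swapin scans.
 */
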
/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)
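
/*
 * SI_SUB_RUN_SCHEDULER sorts last among the SYSINITs, so scheduler()
 * runs as the final step of system initialization.  It never returns,
 * in effect becoming the swapin loop of the swapper (historically the
 * role of process 0).
 */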

static void swapout __P((struct proc *));

int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}
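
/*
 * A sketch of a typical (hypothetical) caller: validate a kernel-space
 * buffer before touching it on a consumer's behalf.  Note that rw is
 * really a VM_PROT_* mask despite the historical parameter name:
 *
 *	if (!kernacc((caddr_t)buf, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 */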

int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_map_entry_t save_hint;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	mtx_lock(&vm_mtx);
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot);
	map->hint = save_hint;
	vm_map_unlock_read(map);
	mtx_unlock(&vm_mtx);

	return (rv == TRUE);
}
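
/*
 * A hypothetical caller sketch: check a user buffer's protection before
 * wiring it for raw I/O, e.g.
 *
 *	if (!useracc(uio->uio_iov->iov_base, cnt, VM_PROT_READ))
 *		return (EFAULT);
 *
 * The check is only a snapshot; it does not wire the pages, so the
 * protection can change before the buffer is actually used.
 */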

void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{

	mtx_lock(&vm_mtx);
	vm_map_pageable(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
	mtx_unlock(&vm_mtx);
}

void
vsunlock(addr, len)
	caddr_t addr;
	u_int len;
{

	mtx_lock(&vm_mtx);
	vm_map_pageable(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), TRUE);
	mtx_unlock(&vm_mtx);
}
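
/*
 * vslock() wires the pages backing [addr, addr + len) into memory
 * (vm_map_pageable with FALSE) and vsunlock() unwires them (TRUE).
 * The pair brackets raw I/O on user buffers, e.g.
 *
 *	vslock(base, len);
 *	... perform the transfer ...
 *	vsunlock(base, len);
 */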

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 *
 * Called without vm_mtx.
 */
void
vm_fork(p1, p2, flags)
	register struct proc *p1, *p2;
	int flags;
{
	register struct user *up;

	mtx_lock(&vm_mtx);
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared.  Essentially
		 * this changes shared memory amongst threads into
		 * local copy-on-write.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(p1, p2, flags);
		mtx_unlock(&vm_mtx);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}
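	/*
	 * VM_WAIT above sleeps until the pageout daemon reports that the
	 * severe free-page shortage has eased, parking the forking
	 * process while memory is critically low.
	 */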

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_new_proc(p2);

	up = p2->p_addr;

	/*
	 * p_stats points at fields within the user struct, reached via
	 * p_addr rather than &u.  Copy parts of p_stats; zero the rest
	 * of p_stats (statistics).
	 *
	 * If p2->p_procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we
	 * don't need to share sigacts, so we use up->u_sigacts instead.
	 */
	p2->p_stats = &up->u_stats;
	if (p2->p_sigacts == NULL) {
		if (p2->p_procsig->ps_refcnt != 1)
			printf("PID:%d NULL sigacts with refcnt not 1!\n", p2->p_pid);
		p2->p_sigacts = &up->u_sigacts;
		up->u_sigacts = *p1->p_sigacts;
	}

	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(p1, p2, flags);
	mtx_unlock(&vm_mtx);
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	register struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 2MB (512 pages at 4K per page) */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

/*
 * Must be called with the proc struct mutex held.
 */
void
faultin(p)
	struct proc *p;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
	if ((p->p_sflag & PS_INMEM) == 0) {

		++p->p_lock;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		mtx_lock(&vm_mtx);
		pmap_swapin_proc(p);
		mtx_unlock(&vm_mtx);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (p->p_stat == SRUN) {
			setrunqueue(p);
		}

		p->p_sflag |= PS_INMEM;

		/* undo the hold on p_lock taken above */
		--p->p_lock;
	}
	mtx_unlock_spin(&sched_lock);
}
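
/*
 * Note that faultin() drops and reacquires the proc lock around the
 * actual swapin, so callers must be prepared for the process state to
 * have changed by the time it returns.
 */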

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry and is dropped before the main loop; it is
 * reacquired only around faultin().
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	mtx_lock(&vm_mtx);
	if (vm_page_count_min()) {
		VM_WAIT;
		mtx_unlock(&vm_mtx);
		goto loop;
	}
	mtx_unlock(&vm_mtx);

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		mtx_lock_spin(&sched_lock);
		if (p->p_stat == SRUN &&
			(p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) {

			pri = p->p_swtime + p->p_slptime;
			if ((p->p_sflag & PS_SWAPINREQ) == 0) {
				pri -= p->p_nice * 8;
			}

			/*
			 * If this process is higher priority and there is
			 * enough space, then select this process instead of
			 * the previous selection.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);
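
	/*
	 * pp now names the swapped-out runnable process with the highest
	 * pri value.  For example, a process swapped out for 100 seconds
	 * and asleep for 20 at nice 0 scores 120 and wins over one that
	 * was only just swapped out.
	 */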

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in (only if there is space).
	 */
	mtx_lock(&Giant);
	PROC_LOCK(p);
	faultin(p);
	PROC_UNLOCK(p);
	mtx_unlock(&Giant);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed time, in seconds, that a
 * process stays resident in memory before it may be swapped out.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * swap_idle_threshold2 is the time, in seconds, that a process can be
 * idle before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Otherwise, we swap the longest-sleeping or stopped
 * process, if any; failing that, the longest-resident process.
 *
 * Can block.
 * Must be called with vm_mtx held; it is dropped and reacquired internally.
 */
void
swapout_procs(action)
	int action;
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	mtx_assert(&vm_mtx, MA_OWNED);
	mtx_unlock(&vm_mtx);
	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		struct vmspace *vm;

		mtx_lock(&vm_mtx);
		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			mtx_unlock(&vm_mtx);
			continue;
		}
		/*
		 * Only aiod changes vmspace; however, it will be skipped
		 * because of the P_SYSTEM check in the if statement above.
		 */
		vm = p->p_vmspace;
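		/*
		 * Skip processes that are already swapped out or are in
		 * transit: PS_INMEM must be set and PS_SWAPPING clear for
		 * the process to be a swapout candidate.
		 */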
		mtx_lock_spin(&sched_lock);
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			mtx_unlock(&vm_mtx);
			continue;
		}

		switch (p->p_stat) {
		default:
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			mtx_unlock(&vm_mtx);
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swapout a realtime process
			 */
			if (PRI_IS_REALTIME(p->p_pri.pri_class)) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				mtx_unlock(&vm_mtx);
				continue;
			}

			/*
			 * Do not swapout a process waiting on a critical
			 * event of some kind.  Also guarantee
			 * swap_idle_threshold1 time in memory.
			 */
			if (((p->p_pri.pri_level) < PSOCK) ||
				(p->p_slptime < swap_idle_threshold1)) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				mtx_unlock(&vm_mtx);
				continue;
			}

			/*
			 * If the system is under memory stress, or if we
			 * are swapping idle processes >= swap_idle_threshold2,
			 * then swap the process out.
			 */
			if (((action & VM_SWAP_NORMAL) == 0) &&
				(((action & VM_SWAP_IDLE) == 0) ||
				  (p->p_slptime < swap_idle_threshold2))) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				mtx_unlock(&vm_mtx);
				continue;
			}
			mtx_unlock_spin(&sched_lock);

			++vm->vm_refcnt;
			/*
			 * Do not swapout a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (lockmgr(&vm->vm_map.lock,
					LK_EXCLUSIVE | LK_NOWAIT,
					NULL, curproc)) {
				vmspace_free(vm);
				PROC_UNLOCK(p);
				mtx_unlock(&vm_mtx);
				continue;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (p->p_slptime > swap_idle_threshold2))) {
				sx_sunlock(&allproc_lock);
				swapout(p);
				vmspace_free(vm);
				didswap++;
				mtx_unlock(&vm_mtx);
				goto retry;
			}
			PROC_UNLOCK(p);
			vmspace_free(vm);
			mtx_unlock(&vm_mtx);
		}
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	mtx_lock(&vm_mtx);
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	register struct proc *p;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPING;
	PROC_UNLOCK_NOSWITCH(p);
	if (p->p_stat == SRUN)
		remrunqueue(p);
	mtx_unlock_spin(&sched_lock);

	pmap_swapout_proc(p);

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPING;
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
}
#endif /* !NO_SWAPPING */
629