xref: /freebsd/sys/vm/vm_glue.c (revision 0640d357f29fb1c0daaaffadd0416c5981413afd)
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.76 1998/09/29 17:33:59 abial Exp $
 */

#include "opt_rlimit.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)


static void swapout __P((struct proc *));

extern char kstack[];

/* vm_map_t upages_map; */

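/*
 * kernacc:
 *
 * Check whether the kernel map allows the requested access (read for
 * B_READ, otherwise write) over the page-rounded range covering
 * [addr, addr + len).
 */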
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

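/*
 * useracc:
 *
 * Check whether the current process may access the user address range
 * [addr, addr + len) with the requested protection.  Ranges extending
 * beyond VM_MAXUSER_ADDRESS, or that wrap around, are rejected outright.
 */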
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
	vm_map_t map;
	vm_map_entry_t save_hint;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot);
	map->hint = save_hint;
	vm_map_unlock_read(map);

	return (rv == TRUE);
}

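/*
 * vslock:
 *
 * Wire the pages backing the range [addr, addr + len) in the current
 * process's map so that they remain resident.
 */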
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

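/*
 * vsunlock:
 *
 * Unwire pages previously wired by vslock().  The "dirtied" argument is
 * currently unused except to quiet lint.
 */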
void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_fork(p1, p2, flags)
	register struct proc *p1, *p2;
	int flags;
{
	register struct user *up;

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		vm_pageout_deficit += (UPAGES + VM_INITIAL_PAGEIN);
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_new_proc(p2);

	up = p2->p_addr;

	/*
	 * p_stats and p_sigacts point at fields in the user struct, which
	 * is mapped at p_addr rather than at the fixed address &u.  Copy
	 * p_sigacts and the preserved parts of p_stats; zero the rest of
	 * p_stats (the statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));


	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(p1, p2);
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	register struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

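/*
 * Bring the user structure of a swapped-out process back into memory and
 * mark the process resident, putting it back on the run queue if it is
 * runnable.  p_lock is bumped across the swapin so that swapout_procs()
 * will not select the process in the meantime.
 */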
void
faultin(p)
	struct proc *p;
{
	int s;

	if ((p->p_flag & P_INMEM) == 0) {

		++p->p_lock;

		pmap_swapin_proc(p);

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* undo the hold on p_lock taken above */
		--p->p_lock;
		splx(s);

	}
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

loop:
	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_stat == SRUN &&
			(p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {

			pri = p->p_swtime + p->p_slptime;
			if ((p->p_flag & P_SWAPINREQ) == 0) {
				pri -= p->p_nice * 8;
			}

			/*
			 * if this process is higher priority and there is
			 * enough space, then select this process instead of
			 * the previous selection.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", 0);
		goto loop;
	}
	p->p_flag &= ~P_SWAPINREQ;

	/*
	 * We would like to bring someone in (only if there is space).
	 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}

#ifndef NO_SWAPPING

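/*
 * A process is considered swappable only when nothing holds its u-area
 * in memory (p_lock == 0) and it is resident (P_INMEM) with none of the
 * flags that forbid swapping set.
 */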
#define	swappable(p) \
	(((p)->p_lock == 0) && \
		((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)


/*
 * Swap_idle_threshold1 is the guaranteed minimum time (in seconds) that a
 * process stays resident before it becomes eligible for swapout.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		struct vmspace *vm;
		if (!swappable(p))
			continue;

		vm = p->p_vmspace;

		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swapout a realtime process
			 */
			if (RTP_PRIO_IS_REALTIME(p->p_rtprio.type))
				continue;

			/*
			 * Do not swapout a process waiting on a critical
			 * event of some kind.  Also guarantee swap_idle_threshold1
			 * time in memory.
			 */
			if (((p->p_priority & 0x7f) < PSOCK) ||
				(p->p_slptime < swap_idle_threshold1))
				continue;

			/*
			 * If the system is under memory stress, or if we are swapping
			 * idle processes >= swap_idle_threshold2, then swap the process
			 * out.
			 */
			if (((action & VM_SWAP_NORMAL) == 0) &&
				(((action & VM_SWAP_IDLE) == 0) ||
				  (p->p_slptime < swap_idle_threshold2)))
				continue;

			++vm->vm_refcnt;
			/*
			 * do not swapout a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (lockmgr(&vm->vm_map.lock,
					LK_EXCLUSIVE | LK_NOWAIT,
					(void *)0, curproc)) {
				vmspace_free(vm);
				continue;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
				((action & VM_SWAP_IDLE) &&
				 (p->p_slptime > swap_idle_threshold2))) {
				swapout(p);
				vmspace_free(vm);
				didswap++;
				goto retry;
			}
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

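/*
 * Swap out a single process: record its resident set size, mark it as no
 * longer in memory, pull it off the run queue if necessary, and let the
 * pmap layer unwire its u-area.
 */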
static void
swapout(p)
	register struct proc *p;
{

#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
	    p->p_vmspace->vm_pmap.pm_stats.resident_count;

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPING;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	pmap_swapout_proc(p);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */
513