/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.53 1996/09/15 11:24:21 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_inherit.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)
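/*
 * For reference: SYSINIT(name, subsys, order, func, ident) arranges for
 * func(ident) to be called during boot, ordered by (subsys, order).
 * Thus vm_init_limits(&proc0) runs while the VM system is configured,
 * and scheduler(NULL) is started last of all; it never returns, turning
 * the booting process into the swapin scheduler loop below.
 */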

static void swapout __P((struct proc *));

extern char kstack[];

/* vm_map_t upages_map; */

int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}
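/*
 * Usage sketch (illustrative, not from this file): a driver might
 * validate a kernel buffer before a raw transfer with
 *
 *	if (!kernacc(bp->b_data, bp->b_bcount, B_WRITE))
 *		panic("driver: bad kernel buffer");
 *
 * bp here is a hypothetical struct buf pointer; B_READ asks for read
 * permission, anything else for write, per the prot mapping above.
 */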

int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + len), prot);
	return (rv == TRUE);
}
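/*
 * Usage sketch (illustrative): syscall and driver code checks a user
 * buffer before touching it, e.g.
 *
 *	if (!useracc(uap->buf, uap->len, B_WRITE))
 *		return (EFAULT);
 *
 * uap->buf and uap->len stand in for hypothetical syscall arguments.
 */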

void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), FALSE);
}

void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), TRUE);
}
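/*
 * Usage sketch (illustrative): the classic physio-style pattern wires a
 * user buffer down for the duration of raw I/O, then unwires it:
 *
 *	vslock(iov->iov_base, iov->iov_len);
 *	... perform the transfer ...
 *	vsunlock(iov->iov_base, iov->iov_len, 1);
 *
 * The final vsunlock() argument reports whether the pages were dirtied;
 * as the lint hack above shows, this implementation ignores it.
 */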

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up;

	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2);

	pmap_new_proc(p2);

	up = p2->p_addr;

	/*
	 * p_stats and p_sigacts point at fields in the user struct, which
	 * is reached through p_addr rather than through &u.  Copy
	 * p_sigacts and the "copy" region of p_stats; zero the rest of
	 * p_stats (the statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the kernel stack and pcb, and make
	 * the child ready to run.  It marks the child so that it can return
	 * differently than the parent.  It returns twice, once in the
	 * parent process and once in the child.
	 */
	return (cpu_fork(p1, p2));
}
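/*
 * Caller sketch (hedged): the machine-independent fork code in
 * kern_fork.c is expected to invoke this once the child's proc
 * structure exists, along the lines of
 *
 *	if (vm_fork(p1, p2))
 *		... we are the new child, take the child return path ...
 *
 * with the second return coming from cpu_fork() as described above;
 * the exact call site and convention vary between releases.
 */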

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	register struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
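/*
 * Worked example: with 4K pages (as on the i386), the 512-page floor
 * above yields ptoa(512) == 512 * 4096 bytes == 2MB, which is where the
 * "no less than 2MB" figure in the comment comes from; with more free
 * memory, the soft RSS limit is simply ptoa(cnt.v_free_count).
 */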

void
faultin(p)
	struct proc *p;
{
	int s;

	if ((p->p_flag & P_INMEM) == 0) {

		++p->p_lock;

		pmap_swapin_proc(p);

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* undo our hold on the process from above */
		--p->p_lock;
		splx(s);

	}
}
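/*
 * Usage note: callers may invoke faultin() unconditionally, as the
 * swapin scheduler below does; the P_INMEM test makes it a no-op for a
 * resident process.  The p_lock increment holds the process in core
 * while pmap_swapin_proc() works, since swapout_procs() (whose
 * swappable() test requires p_lock == 0) will then leave it alone.
 */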

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

	spl0();
loop:
	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_stat == SRUN &&
			(p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			/*
			 * if this process has a higher priority than the
			 * previous selection, select it instead.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", 0);
		goto loop;
	}
	/*
	 * We would like to bring someone in; the check at the top of the
	 * loop guarantees there is space.
	 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}
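/*
 * Worked example for the selection formula above, purely illustrative:
 * a runnable process swapped out for 20 seconds (p_swtime == 20) with
 * p_slptime == 5 scores 20 + 5 - 0 * 8 == 25 at nice 0, but
 * 20 + 5 - 20 * 8 == -135 at nice +20; time spent swapped out and
 * asleep raises a process's claim to come back in, niceness lowers it.
 */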

#ifndef NO_SWAPPING

#define	swappable(p) \
	(((p)->p_lock == 0) && \
		((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)
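/*
 * That is, a process is swappable only when it is resident (P_INMEM is
 * the sole flag set from the group above) and nothing is holding it in
 * core (p_lock == 0).
 */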

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find
 * eligible procs and unwire their u-areas.  We try to always "swap" at
 * least one process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped
 * process, if any, otherwise the longest-resident process.
 */
void
swapout_procs()
{
	register struct proc *p;
	int didswap = 0;

retry:
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		struct vmspace *vm;

		if (!swappable(p))
			continue;

		vm = p->p_vmspace;

		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swap out a realtime process
			 */
			if (p->p_rtprio.type == RTP_PRIO_REALTIME)
				continue;

			/*
			 * do not swap out a process waiting on a critical
			 * event of some kind
			 */
			if (((p->p_priority & 0x7f) < PSOCK) ||
				(p->p_slptime <= 10))
				continue;

			++vm->vm_refcnt;
			vm_map_reference(&vm->vm_map);
			/*
			 * do not swap out a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (!lock_try_write(&vm->vm_map.lock)) {
				vm_map_deallocate(&vm->vm_map);
				vmspace_free(vm);
				continue;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			swapout(p);
			vm_map_deallocate(&vm->vm_map);
			vmspace_free(vm);
			didswap++;
			goto retry;
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}
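/*
 * Caller sketch (hedged): per the comment above, the pageout daemon
 * drives this; when a reclamation pass leaves memory short it is
 * expected to call
 *
 *	swapout_procs();
 *
 * with the exact trigger condition living in vm_pageout.c and varying
 * between releases.
 */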

static void
swapout(p)
	register struct proc *p;
{
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
	    p->p_vmspace->vm_pmap.pm_stats.resident_count;

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPING;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	pmap_swapout_proc(p);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */