/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.50 1996/05/31 00:37:57 dyson Exp $
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_inherit.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

#include <machine/stdarg.h>

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)


static void swapout __P((struct proc *));

extern char kstack[];

/* vm_map_t upages_map; */

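/*
 * kernacc - check whether the kernel may access the range of kernel
 * virtual addresses [addr, addr + len) with the protection implied by
 * rw (B_READ asks for read access, anything else for write access).
 * Returns non-zero only if the whole range has at least that protection
 * in kernel_map.
 */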
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

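/*
 * useracc - like kernacc, but checks the current process' user address
 * space instead.  The range must end at or below VM_MAXUSER_ADDRESS and
 * must not wrap around.
 */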
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + len), prot);
	return (rv == TRUE);
}

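/*
 * vslock/vsunlock - wire and unwire the physical pages backing a range of
 * the current process' address space, via vm_map_pageable().  A typical
 * (purely illustrative) caller is a driver doing raw I/O on a user buffer:
 *
 *	vslock(bp->b_data, bp->b_bcount);
 *	... perform the transfer ...
 *	vsunlock(bp->b_data, bp->b_bcount, (bp->b_flags & B_READ) != 0);
 *
 * The "dirtied" argument to vsunlock() is only referenced under #ifdef lint.
 */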
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), FALSE);
}

void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up;
	int i;
	pmap_t pvp;
	vm_object_t upobj;

	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2);

	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the
	 * process
	 */

	pvp = &p2->p_vmspace->vm_pmap;

	/*
	 * allocate object for the upages
	 */
	p2->p_vmspace->vm_upages_obj = upobj = vm_object_allocate(OBJT_DEFAULT,
		UPAGES);

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
	if (up == NULL)
		panic("vm_fork: u_map allocation failed");

	for (i = 0; i < UPAGES; i++) {
		vm_page_t m;

		/*
		 * Get a kernel stack page
		 */
		while ((m = vm_page_alloc(upobj,
			i, VM_ALLOC_NORMAL)) == NULL) {
			VM_WAIT;
		}

		/*
		 * Wire the page
		 */
		vm_page_wire(m);
		PAGE_WAKEUP(m);

		/*
		 * Enter the page into both the kernel and the process
		 * address space.
		 */
		pmap_enter(pvp, (vm_offset_t) kstack + i * PAGE_SIZE,
			VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, TRUE);
		pmap_kenter(((vm_offset_t) up) + i * PAGE_SIZE,
			VM_PAGE_TO_PHYS(m));
		m->flags &= ~PG_ZERO;
		m->flags |= PG_MAPPED;
		m->valid = VM_PAGE_BITS_ALL;
	}
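
	/*
	 * At this point the child's U-area is wired and mapped twice: at
	 * the fixed kstack address in the child's pmap and at "up" in
	 * kernel virtual space, so the kernel can initialize it below.
	 */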

	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields in the user
	 * struct, but they are reached through p_addr rather than &u.
	 * Copy p_sigacts and parts of p_stats; zero the rest of p_stats
	 * (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));


	/*
	 * cpu_fork will copy and update the kernel stack and pcb, and make
	 * the child ready to run.  It marks the child so that it can return
	 * differently than the parent.  It returns twice, once in the parent
	 * process and once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	register struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* do not let the RSS limit fall below 512 pages (2MB with 4K pages) */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

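/*
 * faultin - bring a swapped-out process' U-area back into memory.  The
 * upages are looked up in (or re-allocated from) the upages object, wired,
 * mapped at kstack in the process' pmap and at p_addr in kernel space, and
 * paged in from the pager if their contents are no longer valid.  Finally
 * the process is marked P_INMEM and put back on the run queue if runnable.
 */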
void
faultin(p)
	struct proc *p;
{
	vm_offset_t i;
	int s;

	if ((p->p_flag & P_INMEM) == 0) {
		pmap_t pmap = &p->p_vmspace->vm_pmap;
		vm_page_t m;
		vm_object_t upobj = p->p_vmspace->vm_upages_obj;

		++p->p_lock;
#if defined(SWAP_DEBUG)
		printf("swapping in %d\n", p->p_pid);
#endif

		for (i = 0; i < UPAGES; i++) {
			int s;
			s = splvm();
retry:
			if ((m = vm_page_lookup(upobj, i)) == NULL) {
				if ((m = vm_page_alloc(upobj, i, VM_ALLOC_NORMAL)) == NULL) {
					VM_WAIT;
					goto retry;
				}
			} else {
				if ((m->flags & PG_BUSY) || m->busy) {
					m->flags |= PG_WANTED;
					tsleep(m, PVM, "swinuw", 0);
					goto retry;
				}
				m->flags |= PG_BUSY;
			}
			vm_page_wire(m);
			splx(s);

			pmap_enter(pmap, (vm_offset_t) kstack + i * PAGE_SIZE,
				VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, TRUE);
			pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
				VM_PAGE_TO_PHYS(m));
			if (m->valid != VM_PAGE_BITS_ALL) {
				int rv;
				rv = vm_pager_get_pages(upobj,
					&m, 1, 0);
				if (rv != VM_PAGER_OK)
					panic("faultin: cannot get upages for proc: %d", p->p_pid);
				m->valid = VM_PAGE_BITS_ALL;
			}
			PAGE_WAKEUP(m);
			m->flags |= PG_MAPPED;
		}

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* release the hold taken on p_lock above */
		--p->p_lock;
		splx(s);

	}
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
/* ARGSUSED */
static void
scheduler(dummy)
	void *dummy;
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

	spl0();
loop:
	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_stat == SRUN &&
			(p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
			int mempri;

			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			mempri = pri > 0 ? pri : 0;
			/*
			 * If this process is higher priority and there is
			 * enough space, then select this process instead of
			 * the previous selection.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", 0);
		goto loop;
	}
	/*
	 * We would like to bring someone in (only if there is space).
	 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}

#ifndef NO_SWAPPING

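/*
 * A process is eligible for swapping only when nothing in the kernel is
 * holding its U-area (p_lock == 0), it is resident (P_INMEM), and none of
 * the flags that forbid swapping (traced, system, exiting, doing physio,
 * already being swapped, or explicitly P_NOSWAP) are set.
 */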
#define	swappable(p) \
	(((p)->p_lock == 0) && \
		((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		struct vmspace *vm;
		if (!swappable(p))
			continue;

		vm = p->p_vmspace;

		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * Do not swap out a realtime process.
			 */
			if (p->p_rtprio.type == RTP_PRIO_REALTIME)
				continue;

			/*
			 * Do not swap out a process waiting on a critical
			 * event of some kind.
			 */
			if (((p->p_priority & 0x7f) < PSOCK) ||
				(p->p_slptime <= 10))
				continue;

			++vm->vm_refcnt;
			vm_map_reference(&vm->vm_map);
			/*
			 * Do not swap out a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (!lock_try_write(&vm->vm_map.lock)) {
				vm_map_deallocate(&vm->vm_map);
				vmspace_free(vm);
				continue;
			}
			vm_map_unlock(&vm->vm_map);
			/*
			 * If the process has been asleep for a while and had
			 * most of its pages taken away already, swap it out.
			 */
			swapout(p);
			vm_map_deallocate(&vm->vm_map);
			vmspace_free(vm);
			didswap++;
			goto retry;
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

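/*
 * swapout - release a process' U-area so the pageout daemon can reclaim it:
 * mark the upages dirty, unwire and deactivate them, drop their kernel and
 * kstack mappings, clear P_INMEM, and take the process off the run queue.
 */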
static void
swapout(p)
	register struct proc *p;
{
	pmap_t pmap = &p->p_vmspace->vm_pmap;
	int i;

#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif
	++p->p_stats->p_ru.ru_nswap;
	/*
	 * Remember the process' resident count.
	 */
	p->p_vmspace->vm_swrss =
	    p->p_vmspace->vm_pmap.pm_stats.resident_count;

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPING;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	/*
	 * Let the upages be paged out.
	 */
	for (i = 0; i < UPAGES; i++) {
		vm_page_t m;
		if ((m = vm_page_lookup(p->p_vmspace->vm_upages_obj, i)) == NULL)
			panic("swapout: upage already missing???");
		m->dirty = VM_PAGE_BITS_ALL;
		vm_page_unwire(m);
		vm_page_deactivate(m);
		pmap_kremove((vm_offset_t) p->p_addr + PAGE_SIZE * i);
	}
	pmap_remove(pmap, (vm_offset_t) kstack,
		(vm_offset_t) kstack + PAGE_SIZE * UPAGES);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */

#ifdef DDB
/*
 * DEBUG stuff
 */

int indent;

#include <machine/stdarg.h>	/* see subr_prf.c */

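/*
 * iprintf - printf that prefixes its output with the current "indent"
 * level (a tab per eight columns, spaces for the remainder); intended for
 * the DDB print routines that dump VM structures.
 */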
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt,...)
#else
iprintf(fmt /* , va_alist */ )
	char *fmt;

 /* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
#endif /* DDB */