/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.11 1995/01/09 16:05:40 davidg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>

#include <machine/stdarg.h>
#include <machine/cpu.h>

extern char kstack[];
int avefree = 0;		/* XXX */
int readbuffers = 0;		/* XXX allow kgdb to read kernel buffer pool */

/* vm_map_t upages_map; */

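/*
 * kernacc:
 *
 * Check whether the kernel address range [addr, addr + len) is mapped
 * with at least the protection implied by rw (B_READ or B_WRITE).
 * A typical caller (sketch, not from this file) would do:
 *
 *	if (!kernacc(ptr, size, B_WRITE))
 *		return (EFAULT);
 */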
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

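/*
 * useracc:
 *
 * Like kernacc(), but checks the range against curproc's user address
 * space map, rejecting ranges that extend past VM_MAXUSER_ADDRESS or
 * that wrap around.
 */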
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + len), prot);
	return (rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so the debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
	    round_page(addr + len), prot, FALSE);
}
#endif
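
/*
 * vslock:
 *
 * Wire down the pages backing the user address range [addr, addr + len)
 * so they cannot be paged out while raw I/O is in progress.  A physio-
 * style caller (sketch, not from this file) would pair it with
 * vsunlock():
 *
 *	vslock(base, len);
 *	... perform device I/O directly on the user buffer ...
 *	vsunlock(base, len, rw == B_READ);
 */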
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), FALSE);
}

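/*
 * vsunlock:
 *
 * Undo a previous vslock(): allow the pages backing the range to be
 * paged out again.  The dirtied argument lets the caller report that
 * the pages were modified, but this implementation does not use it.
 */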
void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr, ptaddr;
	int i;
	struct vm_map *vp;

	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	/*
	 * avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them
	 * non-inheritable
	 */
	(void) vm_map_inherit(&p1->p_vmspace->vm_map,
	    UPT_MIN_ADDRESS - UPAGES * NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the
	 * process.
	 */

	addr = (vm_offset_t) kstack;

	vp = &p2->p_vmspace->vm_map;

	/* ream out old pagetables and kernel stack */
	(void) vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);

	/* get new pagetables and kernel stack */
	(void) vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);

	/* force in the page table encompassing the UPAGES */
	ptaddr = trunc_page((u_int) vtopte(addr));
	vm_map_pageable(vp, ptaddr, ptaddr + NBPG, FALSE);

	/* and force in (demand-zero) the UPAGES */
	vm_map_pageable(vp, addr, addr + UPAGES * NBPG, FALSE);

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * NBPG);

	/* and force-map the upages into the kernel pmap */
	for (i = 0; i < UPAGES; i++)
		pmap_enter(vm_map_pmap(u_map),
		    ((vm_offset_t) up) + NBPG * i,
		    pmap_extract(vp->pmap, addr + NBPG * i),
		    VM_PROT_READ | VM_PROT_WRITE, 1);

	/*
	 * and allow the UPAGES page table entry to be paged (at the vm system
	 * level)
	 */
	vm_map_pageable(vp, ptaddr, ptaddr + NBPG, TRUE);

	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts point at fields within the user struct,
	 * accessed through p_addr rather than the fixed address &u.  Copy
	 * p_sigacts and parts of p_stats; zero the rest of p_stats
	 * (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the kernel stack and pcb, and make
	 * the child ready to run.  It marks the child so that it can return
	 * differently than the parent.  It returns twice, once in the parent
	 * process and once in the child.
	 */
	return (cpu_fork(p1, p2));
}


/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
void
vm_init_limits(p)
	register struct proc *p;
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* never let the RSS limit drop below 512 pages (2MB with 4K pages) */
	rss_limit = max(cnt.v_free_count / 2, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

#ifdef DEBUG
int enableswap = 1;
int swapdebug = 0;

#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

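/*
 * faultin:
 *
 * Bring a swapped-out process back into memory: wire the process's
 * upages in its own map, re-enter them into the kernel pmap at p_addr,
 * mark the process P_INMEM and, if it is runnable, put it back on the
 * run queue.
 */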
void
faultin(p)
	struct proc *p;
{
	vm_offset_t i;
	vm_offset_t ptaddr;
	int s;

	if ((p->p_flag & P_INMEM) == 0) {
		vm_map_t map;

		++p->p_lock;

		map = &p->p_vmspace->vm_map;
		/* force in the page table encompassing the kernel stack (upages) */
		ptaddr = trunc_page((u_int) vtopte(kstack));
		vm_map_pageable(map, ptaddr, ptaddr + NBPG, FALSE);

		/* wire in the UPAGES */
		vm_map_pageable(map, (vm_offset_t) kstack,
		    (vm_offset_t) kstack + UPAGES * NBPG, FALSE);

		/* and map them nicely into the kernel pmap */
		for (i = 0; i < UPAGES; i++) {
			vm_offset_t off = i * NBPG;
			vm_offset_t pa = (vm_offset_t)
			    pmap_extract(&p->p_vmspace->vm_pmap,
				(vm_offset_t) kstack + off);

			pmap_enter(vm_map_pmap(u_map),
			    ((vm_offset_t) p->p_addr) + off,
			    pa, VM_PROT_READ | VM_PROT_WRITE, 1);
		}

		/* and let the page table pages go (at least above pmap level) */
		vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* release the hold taken by incrementing p_lock above */
		--p->p_lock;
		splx(s);
	}
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
void
scheduler()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

loop:
	while ((cnt.v_free_count + cnt.v_cache_count) < (cnt.v_free_reserved + UPAGES + 2)) {
		VM_WAIT;
		tsleep((caddr_t) &proc0, PVM, "schedm", 0);
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (p->p_stat == SRUN && (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			/*
			 * if this process is higher priority than the
			 * previous selection, select it instead
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep((caddr_t) &proc0, PVM, "sched", 0);
		goto loop;
	}
	/*
	 * Bring the selected process in; the free-memory check was
	 * already done at the top of the loop.
	 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}

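/*
 * A process is swappable only when it is not held in core
 * (p_lock == 0) and P_INMEM is the only relevant flag set: not a
 * system process, not being traced, not exiting, not doing physical
 * I/O, not already being swapped, and not marked P_NOSWAP.
 */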
#define	swappable(p) \
	(((p)->p_lock == 0) && \
		((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)

extern int vm_pageout_free_min;

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * In this version, any proc that has been sleeping or stopped for at
 * least maxslp seconds is swapped out.
 */
void
swapout_threads()
{
	register struct proc *p;
	int didswap = 0;
	extern int maxslp;

retry:
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swapout a realtime process
			 */
			if (p->p_rtprio.type == RTP_PRIO_REALTIME)
				continue;

			/*
			 * do not swapout a process waiting on a critical
			 * event of some kind
			 */
			if ((p->p_priority & 0x7f) < PSOCK)
				continue;

			/*
			 * do not swapout a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
				continue;
			}
			vm_map_unlock(&p->p_vmspace->vm_map);
			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			if (p->p_slptime > maxslp) {
				swapout(p);
				didswap++;
				goto retry;
			}
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup((caddr_t) &proc0);
}

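/*
 * swapout:
 *
 * Release a process's u-area: remove its upages from the kernel pmap
 * and unwire them in the process map so the pageout daemon may reclaim
 * them.  The process is taken off the run queue and loses P_INMEM;
 * faultin() later reverses all of this.
 */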
void
swapout(p)
	register struct proc *p;
{
	vm_map_t map = &p->p_vmspace->vm_map;

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
	    p->p_vmspace->vm_pmap.pm_stats.resident_count;

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	p->p_flag |= P_SWAPPING;
	/*
	 * let the upages be paged
	 */
	pmap_remove(vm_map_pmap(u_map),
	    (vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * NBPG);

	vm_map_pageable(map, (vm_offset_t) kstack,
	    (vm_offset_t) kstack + UPAGES * NBPG, TRUE);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}

/*
 * The rest of these routines fake thread handling
 */

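/*
 * assert_wait:
 *
 * Record the event the current process intends to sleep on; the
 * actual sleep happens later in thread_block() or thread_sleep_().
 */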
#ifndef assert_wait
void
assert_wait(event, ruptible)
	int event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}
#endif

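/*
 * thread_block:
 *
 * Sleep on the event previously recorded by assert_wait(), if any.
 */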
void
thread_block(char *msg)
{
	if (curproc->p_thread)
		tsleep((caddr_t) curproc->p_thread, PVM, msg, 0);
}

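/*
 * thread_sleep_:
 *
 * Record the event, release the given simple lock, then sleep on the
 * event.
 */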
void
thread_sleep_(event, lock, wmesg)
	int event;
	simple_lock_t lock;
	char *wmesg;
{
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread) {
		tsleep((caddr_t) event, PVM, wmesg, 0);
	}
}

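/*
 * thread_wakeup:
 *
 * Wake all processes sleeping on the given event.
 */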
#ifndef thread_wakeup
void
thread_wakeup(event)
	int event;
{
	wakeup((caddr_t) event);
}
#endif

/*
 * DEBUG stuff
 */

int indent = 0;

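/*
 * iprintf:
 *
 * printf() prefixed by the current debug indent: one tab for every
 * eight columns of indent, then single spaces.  The "%r" format
 * directive, handled by the kernel printf in subr_prf.c, recursively
 * formats the remaining arguments using the supplied format string
 * and va_list.
 */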
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt,...)
#else
iprintf(fmt /* , va_alist */ )
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}