/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.28 1995/10/16 05:45:49 dyson Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/user.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>

#include <machine/stdarg.h>
#include <machine/cpu.h>

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

extern char kstack[];

/* vm_map_t upages_map; */

/*
 * Check whether the kernel can read (B_READ) or write len bytes at addr;
 * returns non-zero if the whole range is accessible.
 */
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

/*
 * The same check against the current process' user address space.
 */
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - check separately to disallow access to the user area and
	 * user page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was
	 * once only used (as an end address) in trap.c.  Use it as an end
	 * address here too.  This bogusness has spread.  I just fixed
	 * where it was used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + len), prot);
	return (rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so the debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
void
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
	    round_page(addr + len), prot, FALSE);
}
#endif

/*
 * Wire down len bytes of the current process' address space at addr,
 * typically for the duration of a raw transfer.
 */
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), FALSE);
}

/*
 * Undo vslock; the pages become pageable again.
 */
void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), TRUE);
}
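
/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * for using useracc()/vslock()/vsunlock() around a raw transfer, so the
 * user's buffer stays resident while a device writes into it.
 * dev_transfer() is a hypothetical helper, not a real kernel routine.
 */
#if 0
static int
dev_read_sketch(uaddr, len)
	caddr_t uaddr;
	u_int len;
{
	int error;

	if (!useracc(uaddr, len, B_WRITE))	/* device will write the buffer */
		return (EFAULT);
	vslock(uaddr, len);			/* wire the pages down */
	error = dev_transfer(uaddr, len);	/* hypothetical device copy */
	vsunlock(uaddr, len, 1);		/* unwire; 1 = pages dirtied */
	return (error);
}
#endif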

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 *
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr, ptaddr;
	int error, i;
	struct vm_map *vp;

	/* wait until enough pages are free to back the new process */
	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	/*
	 * avoid copying any of the parent's pagetables or other
	 * per-process objects that reside in the map by marking all of
	 * them non-inheritable
	 */
	(void) vm_map_inherit(&p1->p_vmspace->vm_map,
	    UPT_MIN_ADDRESS - UPAGES * PAGE_SIZE, VM_MAX_ADDRESS, VM_INHERIT_NONE);
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the
	 * process.
	 */
	addr = (vm_offset_t) kstack;

	vp = &p2->p_vmspace->vm_map;

	/* get new pagetables and kernel stack */
	(void) vm_map_find(vp, NULL, 0, &addr, UPT_MAX_ADDRESS - addr, FALSE);

	/* force in the page table encompassing the UPAGES */
	ptaddr = trunc_page((u_int) vtopte(addr));
	error = vm_map_pageable(vp, ptaddr, ptaddr + PAGE_SIZE, FALSE);
	if (error)
		panic("vm_fork: wire of PT failed. error=%d", error);

	/* and force in (demand-zero) the UPAGES */
	error = vm_map_pageable(vp, addr, addr + UPAGES * PAGE_SIZE, FALSE);
	if (error)
		panic("vm_fork: wire of UPAGES failed. error=%d", error);

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
	if (up == NULL)
		panic("vm_fork: u_map allocation failed");

	/* and force-map the upages into the kernel pmap */
	for (i = 0; i < UPAGES; i++)
		pmap_enter(vm_map_pmap(u_map),
		    ((vm_offset_t) up) + PAGE_SIZE * i,
		    pmap_extract(vp->pmap, addr + PAGE_SIZE * i),
		    VM_PROT_READ | VM_PROT_WRITE, 1);

	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields in the user
	 * struct but not at &u, instead at p_addr.  Copy p_sigacts and
	 * parts of p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the kernel stack and pcb, and make
	 * the child ready to run.  It marks the child so that it can
	 * return differently than the parent.  It returns twice, once in
	 * the parent process and once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	register struct proc *p = (struct proc *)udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is
	 * out of memory, where it helps to favor smaller processes and
	 * reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 2MB (a 512-page floor) */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
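
/*
 * Worked example, not in the original source (assumes the i386's
 * 4096-byte page): with 2000 pages free at configuration time,
 * rss_limit = max(2000, 512) = 2000, so RLIMIT_RSS starts at
 * ptoa(2000) = 8,192,000 bytes (~8MB); with only 300 pages free the
 * 512-page floor applies and the limit is ptoa(512) = 2MB.
 */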

void
faultin(p)
	struct proc *p;
{
	vm_offset_t i;
	vm_offset_t ptaddr;
	int s;

	if ((p->p_flag & P_INMEM) == 0) {
		vm_map_t map;
		int error;

		++p->p_lock;

		map = &p->p_vmspace->vm_map;
		/* force in the page table encompassing the kernel stack (upages) */
		ptaddr = trunc_page((u_int) vtopte(kstack));
		error = vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, FALSE);
		if (error)
			panic("faultin: wire of PT failed. error=%d", error);

		/* wire in the UPAGES */
		error = vm_map_pageable(map, (vm_offset_t) kstack,
		    (vm_offset_t) kstack + UPAGES * PAGE_SIZE, FALSE);
		if (error)
			panic("faultin: wire of UPAGES failed. error=%d", error);

		/* and map them nicely into the kernel pmap */
		for (i = 0; i < UPAGES; i++) {
			vm_offset_t off = i * PAGE_SIZE;
			vm_offset_t pa = (vm_offset_t)
			    pmap_extract(&p->p_vmspace->vm_pmap,
				(vm_offset_t) kstack + off);

			if (pa == 0)
				panic("faultin: missing page for UPAGES");

			pmap_enter(vm_map_pmap(u_map),
			    ((vm_offset_t) p->p_addr) + off,
			    pa, VM_PROT_READ | VM_PROT_WRITE, 1);
		}

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* undo the hold placed on the process above */
		--p->p_lock;
		splx(s);
	}
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
/* ARGSUSED */
static void
scheduler(udata)
	void *udata;		/* not used */
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

loop:
	/* wait until there is room to fault in a process's upages */
	while ((cnt.v_free_count + cnt.v_cache_count) <
	    (cnt.v_free_reserved + UPAGES + 2)) {
		VM_WAIT;
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (p->p_stat == SRUN &&
		    (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
			/*
			 * A candidate's priority grows with the time it
			 * has been swapped out and the time it has slept,
			 * and shrinks as it is niced.
			 */
			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			/*
			 * if this process is higher priority, then select
			 * this process instead of the previous selection.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", 0);
		goto loop;
	}

	/*
	 * We would like to bring someone in; there is space, so fault in
	 * the winner's upages and rescan.
	 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}
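
/*
 * Worked example of the selection above, not in the original source:
 * a runnable, swapped-out process with p_swtime 30, p_slptime 0 and
 * nice 0 scores 30 + 0 - 0 * 8 = 30; the same process niced to 20
 * scores 30 - 160 = -130, so heavily niced processes are brought
 * back into memory last.
 */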

/*
 * A process may be swapped when it is not held (p_lock == 0), is
 * resident, and none of the exclusion flags are set.
 */
#define	swappable(p) \
	(((p)->p_lock == 0) && \
	    ((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)

extern int vm_pageout_free_min;

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find
 * eligible procs and unwire their u-areas.  We try to always "swap"
 * at least one process in case we need the room for a swapin.
 * Any process that has been sleeping or stopped for several seconds,
 * and is not waiting on a critical event, is a candidate; each
 * eligible process found is swapped out in turn.
 */
void
swapout_procs()
{
	register struct proc *p;
	int didswap = 0;

retry:
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swap out a realtime process
			 */
			if (p->p_rtprio.type == RTP_PRIO_REALTIME)
				continue;

			/*
			 * do not swap out a process waiting on a critical
			 * event of some kind, or one that has not been
			 * asleep long enough
			 */
			if (((p->p_priority & 0x7f) < PSOCK) ||
			    (p->p_slptime <= 4))
				continue;

			vm_map_reference(&p->p_vmspace->vm_map);
			/*
			 * do not swap out a process that is waiting for
			 * VM data structures; there is a possible
			 * deadlock.
			 */
			if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
				vm_map_deallocate(&p->p_vmspace->vm_map);
				continue;
			}
			vm_map_unlock(&p->p_vmspace->vm_map);
			/*
			 * The process has been asleep for awhile; swap
			 * it out and rescan from the start of the list.
			 */
			swapout(p);
			vm_map_deallocate(&p->p_vmspace->vm_map);
			didswap++;
			goto retry;
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

void
swapout(p)
	register struct proc *p;
{
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_offset_t ptaddr;

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
	    p->p_vmspace->vm_pmap.pm_stats.resident_count;

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPING;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	/*
	 * let the upages be paged
	 */
	pmap_remove(vm_map_pmap(u_map),
	    (vm_offset_t) p->p_addr,
	    ((vm_offset_t) p->p_addr) + UPAGES * PAGE_SIZE);

	vm_map_pageable(map, (vm_offset_t) kstack,
	    (vm_offset_t) kstack + UPAGES * PAGE_SIZE, TRUE);

	ptaddr = trunc_page((u_int) vtopte(kstack));
	vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, TRUE);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}

#ifdef DDB
/*
 * DEBUG stuff
 */

int indent;

#include <machine/stdarg.h>	/* see subr_prf.c */

/* ARGSUSED2 */
void
#if __STDC__
iprintf(const char *fmt,...)
#else
iprintf(fmt /* , va_alist */ )
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	/* emit the current indentation: a tab per 8 columns, then spaces */
	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);	/* %r: recursive format (see subr_prf.c) */
	va_end(ap);
}
#endif				/* DDB */
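
/*
 * Illustrative sketch, not part of the original file: how a DDB print
 * routine would use indent/iprintf to show nesting.  The routine is
 * hypothetical; the vm_map fields referenced (nentries, size) are real.
 */
#if 0
static void
example_print_map(map)
	vm_map_t map;
{
	iprintf("map 0x%x: %d entries\n", (u_int) map, map->nentries);
	indent += 2;		/* nested lines print two columns deeper */
	iprintf("size=0x%x\n", (u_int) map->size);
	indent -= 2;
}
#endif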