/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $FreeBSD$
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "opt_compat.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mac.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough that multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init(void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}
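
/*
 * For illustration only (the numbers below are assumptions, not
 * measurements): with a kernel malloc arena (vm_kmem_size) of about 40MB
 * and vm_map_entry structures on the order of 100 bytes, the calculation
 * above yields roughly 40MB / 100 bytes / 100 ~= 4000 map entries per
 * process as the default limit.
 */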

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 */
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	int disablexworkaround;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;
	vm_object_t obj;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	vp = NULL;
	fp = NULL;
	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	}
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr == 0 ||
	    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
	     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);

	mtx_lock(&Giant);	/* syscall marked mp-safe but isn't */
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and make
		 * sure it is of appropriate type.
		 * don't let the descriptor disappear on us if we block
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}

		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = fp->f_data;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error)
			goto done;
		if (vp->v_type != VREG && vp->v_type != VCHR) {
			error = EINVAL;
			goto done;
		}
		if (vp->v_type == VREG) {
			/*
			 * Get the proper underlying object
			 */
			if (VOP_GETVOBJECT(vp, &obj) != 0) {
				error = EINVAL;
				goto done;
			}
			if (obj->handle != vp) {
				vput(vp);
				vp = (struct vnode*)obj->handle;
				vget(vp, LK_EXCLUSIVE, td);
			}
		}
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if ((vp->v_type == VCHR) &&
		    (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			/*
			 * However, for the XIG X server to continue to work,
			 * we should allow the superuser to do it anyway.
			 * We only allow it at securelevel < 1.
			 * (Because the XIG X server writes directly to video
			 * memory via /dev/mem, it should never work at any
			 * other securelevel.)
			 * XXX this will have to go
			 */
			if (securelevel_ge(td->td_ucred, 1))
				disablexworkaround = 1;
			else
				disablexworkaround = suser(td);
			if (vp->v_type == VCHR && disablexworkaround &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 ||
			    (vp->v_type == VCHR && disablexworkaround)) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error =
					    VOP_GETATTR(vp, &va,
						td->td_ucred, td))) {
						goto done;
					}
					if ((va.va_flags &
					    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}

			handle = (void *)vp;
		}
	}

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
		error = ENOMEM;
		goto done;
	}

	mtx_unlock(&Giant);
	error = 0;
#ifdef MAC
	if (handle != NULL && (flags & MAP_SHARED) != 0) {
		error = mac_check_vnode_mmap(td->td_ucred,
		    (struct vnode *)handle, prot);
	}
#endif
	if (error == 0)
		error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
		    flags, handle, pos);
	mtx_lock(&Giant);
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (vp)
		vput(vp);
	mtx_unlock(&Giant);
	if (fp)
		fdrop(fp, td);

	return (error);
}
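
/*
 * Illustrative sketch only, never compiled into the kernel: how the
 * page-offset handling above looks from userland.  Because the mapping
 * actually starts at trunc_page(offset) and the returned pointer is
 * adjusted up by the offset's page remainder, *p below refers to the byte
 * at "offset" within the (hypothetical) file.  Error handling is omitted.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static char
byte_at_offset(const char *path, off_t offset)
{
	int fd;
	char *p, c;

	fd = open(path, O_RDONLY);
	/* offset need not be page aligned when MAP_FIXED is not used */
	p = mmap(NULL, 1, PROT_READ, MAP_SHARED, fd, offset);
	c = *p;
	/* munmap() in this file truncs the address to its containing page */
	(void) munmap(p, 1);
	(void) close(fd);
	return (c);
}
#endif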

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif /* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	mtx_lock(&Giant);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages within the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE) {
			rv = -1;
			goto done2;
		}
		addr = entry->start;
		size = entry->end - entry->start;
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);

done2:
	mtx_unlock(&Giant);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}
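
/*
 * Illustrative sketch only, never compiled into the kernel: from userland,
 * a zero length asks to flush "all modified pages within the region
 * containing addr", which the code above approximates with the containing
 * map entry.  The buffer shown is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>

static void
flush_example(void *base, size_t length)
{
	/* flush an explicit range and wait for the writes to complete */
	(void) msync(base, length, MS_SYNC);

	/* flush whatever mapping contains base; extent is approximate */
	(void) msync(base, 0, MS_SYNC);
}
#endif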

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);

	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(map, addr, addr + size);
	return (0);
}

#if 0
void
munmapfd(td, fd)
	struct thread *td;
	int fd;
{
	/*
	 * XXX should unmap any regions mapped to this file
	 */
	FILEDESC_LOCK(p->p_fd);
	td->td_proc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
	FILEDESC_UNLOCK(p->p_fd);
}
#endif

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
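
/*
 * Illustrative sketch only, never compiled into the kernel: minherit()
 * controls what a child receives across fork().  With INHERIT_SHARE the
 * parent and child see the same pages rather than copy-on-write
 * duplicates.  The region shown is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

static void
share_with_child(void *region, size_t len)
{
	/* the child will share this region instead of inheriting a copy */
	(void) minherit(region, len, INHERIT_SHARE);
	if (fork() == 0) {
		/* writes made here are visible to the parent */
		_exit(0);
	}
}
#endif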

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	vm_map_t map;
	struct proc *p;
	int error;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = suser(td);
		if (error == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			p->p_flag |= P_PROTECTED;
			PROC_UNLOCK(p);
		}
		return (error);
	}
	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}
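
/*
 * Illustrative sketch only, never compiled into the kernel: ordinary
 * advice values are passed through to vm_map_madvise() above, while
 * MADV_PROTECT is handled separately and simply marks a privileged
 * process as exempt from being killed when swap is exhausted.  The buffer
 * shown is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>

static void
advise_example(void *buf, size_t len)
{
	/* hint that the range will be needed soon */
	(void) madvise(buf, len, MADV_WILLNEED);

	/* hint that the contents may be discarded */
	(void) madvise(buf, len, MADV_FREE);
}
#endif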

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	mtx_lock(&Giant);
	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				m = vm_page_lookup(current->object.vm_object,
				    pindex);
				vm_page_lock_queues();
				/*
				 * if the page is resident, then gather
				 * information about it.
				 */
				if (m) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
				vm_page_unlock_queues();
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure
			 * that the byte vector is zeroed for those skipped
			 * entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the
			 * previous output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	mtx_unlock(&Giant);
	return (error);
}
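
/*
 * Illustrative sketch only, never compiled into the kernel: the byte
 * vector filled in above holds one entry per page of the request, with
 * MINCORE_INCORE set for resident pages.  The names below are
 * hypothetical and error handling is minimal.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>

static size_t
count_resident(void *base, size_t len)
{
	size_t pagesize = (size_t)getpagesize();
	size_t npages = (len + pagesize - 1) / pagesize;
	char *vec = malloc(npages);
	size_t i, resident = 0;

	if (vec != NULL && mincore(base, len, vec) == 0)
		for (i = 0; i < npages; i++)
			if (vec[i] & MINCORE_INCORE)
				resident++;
	free(vec);
	return (resident);
}
#endif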

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
	    td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(td);
	if (error)
		return (error);
#endif

	error = vm_map_wire(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	error = suser(td);
	if (error)
		return (error);
#endif

	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
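
/*
 * Illustrative sketch only, never compiled into the kernel: wiring keeps
 * pages resident, e.g. for buffers holding key material.  The request is
 * subject to RLIMIT_MEMLOCK (or requires super-user on platforms without
 * pmap_wired_count), as implemented above.  The buffer is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>

static void
with_wired_secret(char *secret, size_t len)
{
	if (mlock(secret, len) == 0) {
		/* ... use the key material while it cannot be paged out ... */
		memset(secret, 0, len);
		(void) munlock(secret, len);
	}
}
#endif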

/*
 * vm_mmap()
 *
 * MPSAFE
 *
 * Internal version of mmap.  Currently used by mmap, exec, and sys5
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
	objtype_t type;
	int rv = KERN_SUCCESS;
	vm_ooffset_t objsize;
	int docow;
	struct thread *td = curthread;

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

	if (td->td_proc->p_vmspace->vm_map.size + size >
	    td->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		return (ENOMEM);
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		type = OBJT_DEFAULT;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		vp = (struct vnode *) handle;
		mtx_lock(&Giant);
		ASSERT_VOP_LOCKED(vp, "vm_mmap");
		if (vp->v_type == VCHR) {
			type = OBJT_DEVICE;
			handle = (void *)(intptr_t)vp->v_rdev;
		} else {
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat, td->td_ucred, td);
			if (error) {
				mtx_unlock(&Giant);
				return (error);
			}
			objsize = round_page(vat.va_size);
			type = OBJT_VNODE;
			/*
			 * if it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
		mtx_unlock(&Giant);
	}

	if (handle == NULL) {
		object = NULL;
		docow = 0;
	} else {
		object = vm_pager_allocate(type,
		    handle, objsize, prot, foff);
		if (object == NULL) {
			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
		}
		docow = MAP_PREFAULT_PARTIAL;
	}

	/*
	 * Force device mappings to be shared.
	 */
	if (type == OBJT_DEVICE) {
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	}

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit)
		*addr = pmap_addr_hint(object, *addr, size);

	if (flags & MAP_STACK)
		rv = vm_map_stack(map, *addr, size, prot,
		    maxprot, docow);
	else
		rv = vm_map_find(map, object, foff, addr, size, fitit,
		    prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}