/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $FreeBSD$
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking, vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough that multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init(void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}
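
/*
 * Illustrative arithmetic only (the figures below are hypothetical, not
 * measured): with a vm_kmem_size of 64MB and a vm_map_entry of roughly
 * 64 bytes, the division above yields about one million entries, and
 * taking 1/100 of that leaves a per-process budget on the order of
 * 10000 map entries, which mmap() below further scales by vm_refcnt.
 */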

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 || COMPAT_SUNOS */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 */
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	int disablexworkaround;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;
	vm_object_t obj;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	fp = NULL;
	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
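
	/*
	 * Worked example (assuming a 4K PAGE_SIZE; the numbers are purely
	 * illustrative): a call with pos = 0x12345 and len = 0x100 gives
	 * pageoff = 0x345, pos is rounded down to 0x12000, and size becomes
	 * round_page(0x100 + 0x345) = 0x1000.  On success the caller gets
	 * back addr + pageoff, so the returned pointer still refers to the
	 * byte at the requested file offset.
	 */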

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
			return (EINVAL);
#ifndef i386
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
#endif
		if (addr + size < addr)
			return (EINVAL);
	}
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr == 0 ||
	    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
	     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);

	mtx_lock(&Giant);	/* syscall marked mp-safe but isn't */
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping a file: get fp for validation.  Obtain the vnode
		 * and make sure it is of the appropriate type.
		 * Don't let the descriptor disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}

		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_NOSYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = (struct vnode *) fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR) {
			error = EINVAL;
			goto done;
		}
		if (vp->v_type == VREG) {
			/*
			 * Get the proper underlying object
			 */
			if (VOP_GETVOBJECT(vp, &obj) != 0) {
				error = EINVAL;
				goto done;
			}
			vp = (struct vnode *)obj->handle;
		}
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if ((vp->v_type == VCHR) &&
		    (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			/*
			 * However, for the XIG X server to continue to work,
			 * we should allow the superuser to do it anyway.
			 * We only allow it at securelevel < 1.
			 * (Because the XIG X server writes directly to video
			 * memory via /dev/mem, it should never work at any
			 * other securelevel.)
			 * XXX this will have to go
			 */
			if (securelevel_ge(td->td_ucred, 1))
				disablexworkaround = 1;
			else
				disablexworkaround = suser_td(td);
			if (vp->v_type == VCHR && disablexworkaround &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 ||
			    (vp->v_type == VCHR && disablexworkaround)) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error =
					    VOP_GETATTR(vp, &va,
						td->td_ucred, td))) {
						goto done;
					}
					if ((va.va_flags &
					    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}

			handle = (void *)vp;
		}
	}

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
		error = ENOMEM;
		goto done;
	}

	mtx_unlock(&Giant);
	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos);
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
	mtx_lock(&Giant);
done:
	if (fp)
		fdrop(fp, td);
	mtx_unlock(&Giant);
	return (error);
}

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};
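
	/*
	 * The table above maps the old mmap() protection bits onto the
	 * modern PROT_* values; for example (reading straight from the
	 * table), an old prot of 0x5 converts to PROT_EXEC | PROT_READ,
	 * and 0x7 converts to PROT_EXEC | PROT_WRITE | PROT_READ.
	 */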

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif				/* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	mtx_lock(&Giant);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE) {
			rv = -1;
			goto done2;
		}
		addr = entry->start;
		size = entry->end - entry->start;
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);

done2:
	mtx_unlock(&Giant);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
#ifndef i386
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
#endif
	mtx_lock(&Giant);
	map = &td->td_proc->p_vmspace->vm_map;
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE)) {
		mtx_unlock(&Giant);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(map, addr, addr + size);
	mtx_unlock(&Giant);
	return (0);
}

#if 0
void
munmapfd(td, fd)
	struct thread *td;
	int fd;
{
	/*
	 * XXX should unmap any regions mapped to this file
	 */
	FILEDESC_LOCK(td->td_proc->p_fd);
	td->td_proc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
	FILEDESC_UNLOCK(td->td_proc->p_fd);
}
#endif

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;
	int ret;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	mtx_lock(&Giant);
	ret = vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE);
	mtx_unlock(&Giant);
	switch (ret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;
	int ret;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	mtx_lock(&Giant);
	ret = vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr, addr+size,
	    inherit);
	mtx_unlock(&Giant);

	switch (ret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	int ret;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 &&
	    ((vm_offset_t) uap->addr + uap->len) > VM_MAXUSER_ADDRESS)
		return (EINVAL);
#ifndef i386
	if (VM_MIN_ADDRESS > 0 && uap->addr < VM_MIN_ADDRESS)
		return (EINVAL);
#endif
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	mtx_lock(&Giant);
	ret = vm_map_madvise(&td->td_proc->p_vmspace->vm_map, start, end, uap->behav);
	mtx_unlock(&Giant);
	return (ret ? EINVAL : 0);
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (VM_MAXUSER_ADDRESS > 0 && end > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;
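
	/*
	 * Each status byte stored into the user's vector by the scan loop
	 * below is built from the MINCORE_* flags: for instance, a byte of
	 * (MINCORE_INCORE | MINCORE_MODIFIED_OTHER) reports a page that is
	 * resident and dirty.
	 */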

	mtx_lock(&Giant);
	map = &td->td_proc->p_vmspace->vm_map;
	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				m = vm_page_lookup(current->object.vm_object,
				    pindex);
				/*
				 * if the page is resident, then gather
				 * information about it.
				 */
				if (m) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure
			 * that the byte vector is zeroed for those skipped
			 * entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the
			 * previous output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
	    td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser_td(td);
	if (error)
		return (error);
#endif

	mtx_lock(&Giant);
	error = vm_map_user_pageable(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, FALSE);
	mtx_unlock(&Giant);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	error = suser_td(td);
	if (error)
		return (error);
#endif

	mtx_lock(&Giant);
	error = vm_map_user_pageable(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	mtx_unlock(&Giant);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap()
 *
 * MPSAFE
 *
 * Internal version of mmap.  Currently used by mmap, exec, and SysV
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
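/*
 * For example, the mmap() syscall above ends up here as
 * vm_mmap(&vms->vm_map, &addr, size, prot, maxprot, flags, handle, pos),
 * with handle already resolved to a vnode pointer (or NULL for anonymous
 * memory).
 */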
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
	objtype_t type;
	int rv = KERN_SUCCESS;
	vm_ooffset_t objsize;
	int docow;
	struct thread *td = curthread;

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
		mtx_lock(&Giant);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		mtx_lock(&Giant);
		(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		type = OBJT_DEFAULT;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		vp = (struct vnode *) handle;
		if (vp->v_type == VCHR) {
			type = OBJT_DEVICE;
			handle = (void *)(intptr_t)vp->v_rdev;
		} else {
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat, td->td_ucred, td);
			if (error) {
				mtx_unlock(&Giant);
				return (error);
			}
			objsize = round_page(vat.va_size);
			type = OBJT_VNODE;
			/*
			 * if it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
	}

	if (handle == NULL) {
		object = NULL;
		docow = 0;
	} else {
		object = vm_pager_allocate(type,
		    handle, objsize, prot, foff);
		if (object == NULL) {
			mtx_unlock(&Giant);
			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
		}
		docow = MAP_PREFAULT_PARTIAL;
	}

	/*
	 * Force device mappings to be shared.
	 */
	if (type == OBJT_DEVICE || type == OBJT_PHYS) {
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	}

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit)
		*addr = pmap_addr_hint(object, *addr, size);

	if (flags & MAP_STACK)
		rv = vm_map_stack(map, *addr, size, prot,
		    maxprot, docow);
	else
		rv = vm_map_find(map, object, foff, addr, size, fitit,
		    prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}
	mtx_unlock(&Giant);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}