1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1988 University of Utah. 5 * Copyright (c) 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the Systems Programming Group of the University of Utah Computer 10 * Science Department. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$ 37 * 38 * @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94 39 */ 40 41 /* 42 * Mapped file (mmap) interface to VM 43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include "opt_hwpmc_hooks.h" 49 #include "opt_vm.h" 50 51 #include <sys/param.h> 52 #include <sys/systm.h> 53 #include <sys/capsicum.h> 54 #include <sys/kernel.h> 55 #include <sys/lock.h> 56 #include <sys/mutex.h> 57 #include <sys/sysproto.h> 58 #include <sys/filedesc.h> 59 #include <sys/priv.h> 60 #include <sys/proc.h> 61 #include <sys/procctl.h> 62 #include <sys/racct.h> 63 #include <sys/resource.h> 64 #include <sys/resourcevar.h> 65 #include <sys/rwlock.h> 66 #include <sys/sysctl.h> 67 #include <sys/vnode.h> 68 #include <sys/fcntl.h> 69 #include <sys/file.h> 70 #include <sys/mman.h> 71 #include <sys/mount.h> 72 #include <sys/conf.h> 73 #include <sys/stat.h> 74 #include <sys/syscallsubr.h> 75 #include <sys/sysent.h> 76 #include <sys/vmmeter.h> 77 78 #include <security/audit/audit.h> 79 #include <security/mac/mac_framework.h> 80 81 #include <vm/vm.h> 82 #include <vm/vm_param.h> 83 #include <vm/pmap.h> 84 #include <vm/vm_map.h> 85 #include <vm/vm_object.h> 86 #include <vm/vm_page.h> 87 #include <vm/vm_pager.h> 88 #include <vm/vm_pageout.h> 89 #include <vm/vm_extern.h> 90 #include <vm/vm_page.h> 91 #include <vm/vnode_pager.h> 92 93 #ifdef HWPMC_HOOKS 94 #include <sys/pmckern.h> 95 #endif 96 97 int old_mlock = 0; 98 SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0, 99 "Do not apply RLIMIT_MEMLOCK on mlockall"); 100 101 #ifdef MAP_32BIT 102 #define MAP_32BIT_MAX_ADDR ((vm_offset_t)1 << 31) 103 #endif 104 105 #ifndef _SYS_SYSPROTO_H_ 106 struct sbrk_args { 107 int incr; 108 }; 109 #endif 110 111 int 112 sys_sbrk(struct thread *td, struct sbrk_args *uap) 113 { 114 /* Not yet implemented */ 115 return (EOPNOTSUPP); 116 } 117 118 #ifndef _SYS_SYSPROTO_H_ 119 struct sstk_args { 120 int incr; 121 }; 122 #endif 123 124 int 125 sys_sstk(struct thread *td, struct sstk_args *uap) 126 { 127 /* Not yet implemented */ 128 return (EOPNOTSUPP); 129 } 130 131 #if defined(COMPAT_43) 132 #ifndef _SYS_SYSPROTO_H_ 133 struct getpagesize_args { 134 int dummy; 135 }; 136 #endif 137 138 int 139 ogetpagesize(struct thread *td, struct getpagesize_args *uap) 140 { 141 142 td->td_retval[0] = PAGE_SIZE; 143 return (0); 144 } 145 #endif /* COMPAT_43 */ 146 147 148 /* 149 * Memory Map (mmap) system call. Note that the file offset 150 * and address are allowed to be NOT page aligned, though if 151 * the MAP_FIXED flag it set, both must have the same remainder 152 * modulo the PAGE_SIZE (POSIX 1003.1b). If the address is not 153 * page-aligned, the actual mapping starts at trunc_page(addr) 154 * and the return value is adjusted up by the page offset. 155 * 156 * Generally speaking, only character devices which are themselves 157 * memory-based, such as a video framebuffer, can be mmap'd. Otherwise 158 * there would be no cache coherency between a descriptor and a VM mapping 159 * both to the same character device. 160 */ 161 #ifndef _SYS_SYSPROTO_H_ 162 struct mmap_args { 163 void *addr; 164 size_t len; 165 int prot; 166 int flags; 167 int fd; 168 long pad; 169 off_t pos; 170 }; 171 #endif 172 173 int 174 sys_mmap(struct thread *td, struct mmap_args *uap) 175 { 176 177 return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot, 178 uap->flags, uap->fd, uap->pos)); 179 } 180 181 int 182 kern_mmap(struct thread *td, uintptr_t addr0, size_t size, int prot, int flags, 183 int fd, off_t pos) 184 { 185 struct vmspace *vms; 186 struct file *fp; 187 vm_offset_t addr; 188 vm_size_t pageoff; 189 vm_prot_t cap_maxprot; 190 int align, error; 191 cap_rights_t rights; 192 193 vms = td->td_proc->p_vmspace; 194 fp = NULL; 195 AUDIT_ARG_FD(fd); 196 addr = addr0; 197 198 /* 199 * Ignore old flags that used to be defined but did not do anything. 200 */ 201 flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040); 202 203 /* 204 * Enforce the constraints. 205 * Mapping of length 0 is only allowed for old binaries. 206 * Anonymous mapping shall specify -1 as filedescriptor and 207 * zero position for new code. Be nice to ancient a.out 208 * binaries and correct pos for anonymous mapping, since old 209 * ld.so sometimes issues anonymous map requests with non-zero 210 * pos. 211 */ 212 if (!SV_CURPROC_FLAG(SV_AOUT)) { 213 if ((size == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) || 214 ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0))) 215 return (EINVAL); 216 } else { 217 if ((flags & MAP_ANON) != 0) 218 pos = 0; 219 } 220 221 if (flags & MAP_STACK) { 222 if ((fd != -1) || 223 ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE))) 224 return (EINVAL); 225 flags |= MAP_ANON; 226 pos = 0; 227 } 228 if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE | 229 MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE | 230 MAP_PREFAULT_READ | MAP_GUARD | 231 #ifdef MAP_32BIT 232 MAP_32BIT | 233 #endif 234 MAP_ALIGNMENT_MASK)) != 0) 235 return (EINVAL); 236 if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL) 237 return (EINVAL); 238 if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE)) 239 return (EINVAL); 240 if (prot != PROT_NONE && 241 (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0) 242 return (EINVAL); 243 if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 || 244 pos != 0 || (flags & ~(MAP_FIXED | MAP_GUARD | MAP_EXCL | 245 #ifdef MAP_32BIT 246 MAP_32BIT | 247 #endif 248 MAP_ALIGNMENT_MASK)) != 0)) 249 return (EINVAL); 250 251 /* 252 * Align the file position to a page boundary, 253 * and save its page offset component. 254 */ 255 pageoff = (pos & PAGE_MASK); 256 pos -= pageoff; 257 258 /* Adjust size for rounding (on both ends). */ 259 size += pageoff; /* low end... */ 260 size = (vm_size_t) round_page(size); /* hi end */ 261 262 /* Ensure alignment is at least a page and fits in a pointer. */ 263 align = flags & MAP_ALIGNMENT_MASK; 264 if (align != 0 && align != MAP_ALIGNED_SUPER && 265 (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY || 266 align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT)) 267 return (EINVAL); 268 269 /* 270 * Check for illegal addresses. Watch out for address wrap... Note 271 * that VM_*_ADDRESS are not constants due to casts (argh). 272 */ 273 if (flags & MAP_FIXED) { 274 /* 275 * The specified address must have the same remainder 276 * as the file offset taken modulo PAGE_SIZE, so it 277 * should be aligned after adjustment by pageoff. 278 */ 279 addr -= pageoff; 280 if (addr & PAGE_MASK) 281 return (EINVAL); 282 283 /* Address range must be all in user VM space. */ 284 if (addr < vm_map_min(&vms->vm_map) || 285 addr + size > vm_map_max(&vms->vm_map)) 286 return (EINVAL); 287 if (addr + size < addr) 288 return (EINVAL); 289 #ifdef MAP_32BIT 290 if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR) 291 return (EINVAL); 292 } else if (flags & MAP_32BIT) { 293 /* 294 * For MAP_32BIT, override the hint if it is too high and 295 * do not bother moving the mapping past the heap (since 296 * the heap is usually above 2GB). 297 */ 298 if (addr + size > MAP_32BIT_MAX_ADDR) 299 addr = 0; 300 #endif 301 } else { 302 /* 303 * XXX for non-fixed mappings where no hint is provided or 304 * the hint would fall in the potential heap space, 305 * place it after the end of the largest possible heap. 306 * 307 * There should really be a pmap call to determine a reasonable 308 * location. 309 */ 310 if (addr == 0 || 311 (addr >= round_page((vm_offset_t)vms->vm_taddr) && 312 addr < round_page((vm_offset_t)vms->vm_daddr + 313 lim_max(td, RLIMIT_DATA)))) 314 addr = round_page((vm_offset_t)vms->vm_daddr + 315 lim_max(td, RLIMIT_DATA)); 316 } 317 if (size == 0) { 318 /* 319 * Return success without mapping anything for old 320 * binaries that request a page-aligned mapping of 321 * length 0. For modern binaries, this function 322 * returns an error earlier. 323 */ 324 error = 0; 325 } else if ((flags & MAP_GUARD) != 0) { 326 error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE, 327 VM_PROT_NONE, flags, NULL, pos, FALSE, td); 328 } else if ((flags & MAP_ANON) != 0) { 329 /* 330 * Mapping blank space is trivial. 331 * 332 * This relies on VM_PROT_* matching PROT_*. 333 */ 334 error = vm_mmap_object(&vms->vm_map, &addr, size, prot, 335 VM_PROT_ALL, flags, NULL, pos, FALSE, td); 336 } else { 337 /* 338 * Mapping file, get fp for validation and don't let the 339 * descriptor disappear on us if we block. Check capability 340 * rights, but also return the maximum rights to be combined 341 * with maxprot later. 342 */ 343 cap_rights_init(&rights, CAP_MMAP); 344 if (prot & PROT_READ) 345 cap_rights_set(&rights, CAP_MMAP_R); 346 if ((flags & MAP_SHARED) != 0) { 347 if (prot & PROT_WRITE) 348 cap_rights_set(&rights, CAP_MMAP_W); 349 } 350 if (prot & PROT_EXEC) 351 cap_rights_set(&rights, CAP_MMAP_X); 352 error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp); 353 if (error != 0) 354 goto done; 355 if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 && 356 td->td_proc->p_osrel >= P_OSREL_MAP_FSTRICT) { 357 error = EINVAL; 358 goto done; 359 } 360 361 /* This relies on VM_PROT_* matching PROT_*. */ 362 error = fo_mmap(fp, &vms->vm_map, &addr, size, prot, 363 cap_maxprot, flags, pos, td); 364 } 365 366 if (error == 0) 367 td->td_retval[0] = (register_t) (addr + pageoff); 368 done: 369 if (fp) 370 fdrop(fp, td); 371 372 return (error); 373 } 374 375 #if defined(COMPAT_FREEBSD6) 376 int 377 freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap) 378 { 379 380 return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot, 381 uap->flags, uap->fd, uap->pos)); 382 } 383 #endif 384 385 #ifdef COMPAT_43 386 #ifndef _SYS_SYSPROTO_H_ 387 struct ommap_args { 388 caddr_t addr; 389 int len; 390 int prot; 391 int flags; 392 int fd; 393 long pos; 394 }; 395 #endif 396 int 397 ommap(struct thread *td, struct ommap_args *uap) 398 { 399 static const char cvtbsdprot[8] = { 400 0, 401 PROT_EXEC, 402 PROT_WRITE, 403 PROT_EXEC | PROT_WRITE, 404 PROT_READ, 405 PROT_EXEC | PROT_READ, 406 PROT_WRITE | PROT_READ, 407 PROT_EXEC | PROT_WRITE | PROT_READ, 408 }; 409 int flags, prot; 410 411 #define OMAP_ANON 0x0002 412 #define OMAP_COPY 0x0020 413 #define OMAP_SHARED 0x0010 414 #define OMAP_FIXED 0x0100 415 416 prot = cvtbsdprot[uap->prot & 0x7]; 417 #ifdef COMPAT_FREEBSD32 418 #if defined(__amd64__) 419 if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) && 420 prot != 0) 421 prot |= PROT_EXEC; 422 #endif 423 #endif 424 flags = 0; 425 if (uap->flags & OMAP_ANON) 426 flags |= MAP_ANON; 427 if (uap->flags & OMAP_COPY) 428 flags |= MAP_COPY; 429 if (uap->flags & OMAP_SHARED) 430 flags |= MAP_SHARED; 431 else 432 flags |= MAP_PRIVATE; 433 if (uap->flags & OMAP_FIXED) 434 flags |= MAP_FIXED; 435 return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags, 436 uap->fd, uap->pos)); 437 } 438 #endif /* COMPAT_43 */ 439 440 441 #ifndef _SYS_SYSPROTO_H_ 442 struct msync_args { 443 void *addr; 444 size_t len; 445 int flags; 446 }; 447 #endif 448 int 449 sys_msync(struct thread *td, struct msync_args *uap) 450 { 451 452 return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags)); 453 } 454 455 int 456 kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags) 457 { 458 vm_offset_t addr; 459 vm_size_t pageoff; 460 vm_map_t map; 461 int rv; 462 463 addr = addr0; 464 pageoff = (addr & PAGE_MASK); 465 addr -= pageoff; 466 size += pageoff; 467 size = (vm_size_t) round_page(size); 468 if (addr + size < addr) 469 return (EINVAL); 470 471 if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE)) 472 return (EINVAL); 473 474 map = &td->td_proc->p_vmspace->vm_map; 475 476 /* 477 * Clean the pages and interpret the return value. 478 */ 479 rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0, 480 (flags & MS_INVALIDATE) != 0); 481 switch (rv) { 482 case KERN_SUCCESS: 483 return (0); 484 case KERN_INVALID_ADDRESS: 485 return (ENOMEM); 486 case KERN_INVALID_ARGUMENT: 487 return (EBUSY); 488 case KERN_FAILURE: 489 return (EIO); 490 default: 491 return (EINVAL); 492 } 493 } 494 495 #ifndef _SYS_SYSPROTO_H_ 496 struct munmap_args { 497 void *addr; 498 size_t len; 499 }; 500 #endif 501 int 502 sys_munmap(struct thread *td, struct munmap_args *uap) 503 { 504 505 return (kern_munmap(td, (uintptr_t)uap->addr, uap->len)); 506 } 507 508 int 509 kern_munmap(struct thread *td, uintptr_t addr0, size_t size) 510 { 511 #ifdef HWPMC_HOOKS 512 struct pmckern_map_out pkm; 513 vm_map_entry_t entry; 514 bool pmc_handled; 515 #endif 516 vm_offset_t addr; 517 vm_size_t pageoff; 518 vm_map_t map; 519 520 if (size == 0) 521 return (EINVAL); 522 523 addr = addr0; 524 pageoff = (addr & PAGE_MASK); 525 addr -= pageoff; 526 size += pageoff; 527 size = (vm_size_t) round_page(size); 528 if (addr + size < addr) 529 return (EINVAL); 530 531 /* 532 * Check for illegal addresses. Watch out for address wrap... 533 */ 534 map = &td->td_proc->p_vmspace->vm_map; 535 if (addr < vm_map_min(map) || addr + size > vm_map_max(map)) 536 return (EINVAL); 537 vm_map_lock(map); 538 #ifdef HWPMC_HOOKS 539 pmc_handled = false; 540 if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) { 541 pmc_handled = true; 542 /* 543 * Inform hwpmc if the address range being unmapped contains 544 * an executable region. 545 */ 546 pkm.pm_address = (uintptr_t) NULL; 547 if (vm_map_lookup_entry(map, addr, &entry)) { 548 for (; entry->start < addr + size; 549 entry = entry->next) { 550 if (vm_map_check_protection(map, entry->start, 551 entry->end, VM_PROT_EXECUTE) == TRUE) { 552 pkm.pm_address = (uintptr_t) addr; 553 pkm.pm_size = (size_t) size; 554 break; 555 } 556 } 557 } 558 } 559 #endif 560 vm_map_delete(map, addr, addr + size); 561 562 #ifdef HWPMC_HOOKS 563 if (__predict_false(pmc_handled)) { 564 /* downgrade the lock to prevent a LOR with the pmc-sx lock */ 565 vm_map_lock_downgrade(map); 566 if (pkm.pm_address != (uintptr_t) NULL) 567 PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm); 568 vm_map_unlock_read(map); 569 } else 570 #endif 571 vm_map_unlock(map); 572 573 /* vm_map_delete returns nothing but KERN_SUCCESS anyway */ 574 return (0); 575 } 576 577 #ifndef _SYS_SYSPROTO_H_ 578 struct mprotect_args { 579 const void *addr; 580 size_t len; 581 int prot; 582 }; 583 #endif 584 int 585 sys_mprotect(struct thread *td, struct mprotect_args *uap) 586 { 587 588 return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot)); 589 } 590 591 int 592 kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot) 593 { 594 vm_offset_t addr; 595 vm_size_t pageoff; 596 597 addr = addr0; 598 prot = (prot & VM_PROT_ALL); 599 pageoff = (addr & PAGE_MASK); 600 addr -= pageoff; 601 size += pageoff; 602 size = (vm_size_t) round_page(size); 603 if (addr + size < addr) 604 return (EINVAL); 605 606 switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr, 607 addr + size, prot, FALSE)) { 608 case KERN_SUCCESS: 609 return (0); 610 case KERN_PROTECTION_FAILURE: 611 return (EACCES); 612 case KERN_RESOURCE_SHORTAGE: 613 return (ENOMEM); 614 } 615 return (EINVAL); 616 } 617 618 #ifndef _SYS_SYSPROTO_H_ 619 struct minherit_args { 620 void *addr; 621 size_t len; 622 int inherit; 623 }; 624 #endif 625 int 626 sys_minherit(struct thread *td, struct minherit_args *uap) 627 { 628 vm_offset_t addr; 629 vm_size_t size, pageoff; 630 vm_inherit_t inherit; 631 632 addr = (vm_offset_t)uap->addr; 633 size = uap->len; 634 inherit = uap->inherit; 635 636 pageoff = (addr & PAGE_MASK); 637 addr -= pageoff; 638 size += pageoff; 639 size = (vm_size_t) round_page(size); 640 if (addr + size < addr) 641 return (EINVAL); 642 643 switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr, 644 addr + size, inherit)) { 645 case KERN_SUCCESS: 646 return (0); 647 case KERN_PROTECTION_FAILURE: 648 return (EACCES); 649 } 650 return (EINVAL); 651 } 652 653 #ifndef _SYS_SYSPROTO_H_ 654 struct madvise_args { 655 void *addr; 656 size_t len; 657 int behav; 658 }; 659 #endif 660 661 int 662 sys_madvise(struct thread *td, struct madvise_args *uap) 663 { 664 665 return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav)); 666 } 667 668 int 669 kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav) 670 { 671 vm_map_t map; 672 vm_offset_t addr, end, start; 673 int flags; 674 675 /* 676 * Check for our special case, advising the swap pager we are 677 * "immortal." 678 */ 679 if (behav == MADV_PROTECT) { 680 flags = PPROT_SET; 681 return (kern_procctl(td, P_PID, td->td_proc->p_pid, 682 PROC_SPROTECT, &flags)); 683 } 684 685 /* 686 * Check for illegal addresses. Watch out for address wrap... Note 687 * that VM_*_ADDRESS are not constants due to casts (argh). 688 */ 689 map = &td->td_proc->p_vmspace->vm_map; 690 addr = addr0; 691 if (addr < vm_map_min(map) || addr + len > vm_map_max(map)) 692 return (EINVAL); 693 if ((addr + len) < addr) 694 return (EINVAL); 695 696 /* 697 * Since this routine is only advisory, we default to conservative 698 * behavior. 699 */ 700 start = trunc_page(addr); 701 end = round_page(addr + len); 702 703 /* 704 * vm_map_madvise() checks for illegal values of behav. 705 */ 706 return (vm_map_madvise(map, start, end, behav)); 707 } 708 709 #ifndef _SYS_SYSPROTO_H_ 710 struct mincore_args { 711 const void *addr; 712 size_t len; 713 char *vec; 714 }; 715 #endif 716 717 int 718 sys_mincore(struct thread *td, struct mincore_args *uap) 719 { 720 721 return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec)); 722 } 723 724 int 725 kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec) 726 { 727 vm_offset_t addr, first_addr; 728 vm_offset_t end, cend; 729 pmap_t pmap; 730 vm_map_t map; 731 int error = 0; 732 int vecindex, lastvecindex; 733 vm_map_entry_t current; 734 vm_map_entry_t entry; 735 vm_object_t object; 736 vm_paddr_t locked_pa; 737 vm_page_t m; 738 vm_pindex_t pindex; 739 int mincoreinfo; 740 unsigned int timestamp; 741 boolean_t locked; 742 743 /* 744 * Make sure that the addresses presented are valid for user 745 * mode. 746 */ 747 first_addr = addr = trunc_page(addr0); 748 end = addr + (vm_size_t)round_page(len); 749 map = &td->td_proc->p_vmspace->vm_map; 750 if (end > vm_map_max(map) || end < addr) 751 return (ENOMEM); 752 753 pmap = vmspace_pmap(td->td_proc->p_vmspace); 754 755 vm_map_lock_read(map); 756 RestartScan: 757 timestamp = map->timestamp; 758 759 if (!vm_map_lookup_entry(map, addr, &entry)) { 760 vm_map_unlock_read(map); 761 return (ENOMEM); 762 } 763 764 /* 765 * Do this on a map entry basis so that if the pages are not 766 * in the current processes address space, we can easily look 767 * up the pages elsewhere. 768 */ 769 lastvecindex = -1; 770 for (current = entry; current->start < end; current = current->next) { 771 772 /* 773 * check for contiguity 774 */ 775 if (current->end < end && current->next->start > current->end) { 776 vm_map_unlock_read(map); 777 return (ENOMEM); 778 } 779 780 /* 781 * ignore submaps (for now) or null objects 782 */ 783 if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) || 784 current->object.vm_object == NULL) 785 continue; 786 787 /* 788 * limit this scan to the current map entry and the 789 * limits for the mincore call 790 */ 791 if (addr < current->start) 792 addr = current->start; 793 cend = current->end; 794 if (cend > end) 795 cend = end; 796 797 /* 798 * scan this entry one page at a time 799 */ 800 while (addr < cend) { 801 /* 802 * Check pmap first, it is likely faster, also 803 * it can provide info as to whether we are the 804 * one referencing or modifying the page. 805 */ 806 object = NULL; 807 locked_pa = 0; 808 retry: 809 m = NULL; 810 mincoreinfo = pmap_mincore(pmap, addr, &locked_pa); 811 if (locked_pa != 0) { 812 /* 813 * The page is mapped by this process but not 814 * both accessed and modified. It is also 815 * managed. Acquire the object lock so that 816 * other mappings might be examined. 817 */ 818 m = PHYS_TO_VM_PAGE(locked_pa); 819 if (m->object != object) { 820 if (object != NULL) 821 VM_OBJECT_WUNLOCK(object); 822 object = m->object; 823 locked = VM_OBJECT_TRYWLOCK(object); 824 vm_page_unlock(m); 825 if (!locked) { 826 VM_OBJECT_WLOCK(object); 827 vm_page_lock(m); 828 goto retry; 829 } 830 } else 831 vm_page_unlock(m); 832 KASSERT(m->valid == VM_PAGE_BITS_ALL, 833 ("mincore: page %p is mapped but invalid", 834 m)); 835 } else if (mincoreinfo == 0) { 836 /* 837 * The page is not mapped by this process. If 838 * the object implements managed pages, then 839 * determine if the page is resident so that 840 * the mappings might be examined. 841 */ 842 if (current->object.vm_object != object) { 843 if (object != NULL) 844 VM_OBJECT_WUNLOCK(object); 845 object = current->object.vm_object; 846 VM_OBJECT_WLOCK(object); 847 } 848 if (object->type == OBJT_DEFAULT || 849 object->type == OBJT_SWAP || 850 object->type == OBJT_VNODE) { 851 pindex = OFF_TO_IDX(current->offset + 852 (addr - current->start)); 853 m = vm_page_lookup(object, pindex); 854 if (m != NULL && m->valid == 0) 855 m = NULL; 856 if (m != NULL) 857 mincoreinfo = MINCORE_INCORE; 858 } 859 } 860 if (m != NULL) { 861 /* Examine other mappings to the page. */ 862 if (m->dirty == 0 && pmap_is_modified(m)) 863 vm_page_dirty(m); 864 if (m->dirty != 0) 865 mincoreinfo |= MINCORE_MODIFIED_OTHER; 866 /* 867 * The first test for PGA_REFERENCED is an 868 * optimization. The second test is 869 * required because a concurrent pmap 870 * operation could clear the last reference 871 * and set PGA_REFERENCED before the call to 872 * pmap_is_referenced(). 873 */ 874 if ((m->aflags & PGA_REFERENCED) != 0 || 875 pmap_is_referenced(m) || 876 (m->aflags & PGA_REFERENCED) != 0) 877 mincoreinfo |= MINCORE_REFERENCED_OTHER; 878 } 879 if (object != NULL) 880 VM_OBJECT_WUNLOCK(object); 881 882 /* 883 * subyte may page fault. In case it needs to modify 884 * the map, we release the lock. 885 */ 886 vm_map_unlock_read(map); 887 888 /* 889 * calculate index into user supplied byte vector 890 */ 891 vecindex = atop(addr - first_addr); 892 893 /* 894 * If we have skipped map entries, we need to make sure that 895 * the byte vector is zeroed for those skipped entries. 896 */ 897 while ((lastvecindex + 1) < vecindex) { 898 ++lastvecindex; 899 error = subyte(vec + lastvecindex, 0); 900 if (error) { 901 error = EFAULT; 902 goto done2; 903 } 904 } 905 906 /* 907 * Pass the page information to the user 908 */ 909 error = subyte(vec + vecindex, mincoreinfo); 910 if (error) { 911 error = EFAULT; 912 goto done2; 913 } 914 915 /* 916 * If the map has changed, due to the subyte, the previous 917 * output may be invalid. 918 */ 919 vm_map_lock_read(map); 920 if (timestamp != map->timestamp) 921 goto RestartScan; 922 923 lastvecindex = vecindex; 924 addr += PAGE_SIZE; 925 } 926 } 927 928 /* 929 * subyte may page fault. In case it needs to modify 930 * the map, we release the lock. 931 */ 932 vm_map_unlock_read(map); 933 934 /* 935 * Zero the last entries in the byte vector. 936 */ 937 vecindex = atop(end - first_addr); 938 while ((lastvecindex + 1) < vecindex) { 939 ++lastvecindex; 940 error = subyte(vec + lastvecindex, 0); 941 if (error) { 942 error = EFAULT; 943 goto done2; 944 } 945 } 946 947 /* 948 * If the map has changed, due to the subyte, the previous 949 * output may be invalid. 950 */ 951 vm_map_lock_read(map); 952 if (timestamp != map->timestamp) 953 goto RestartScan; 954 vm_map_unlock_read(map); 955 done2: 956 return (error); 957 } 958 959 #ifndef _SYS_SYSPROTO_H_ 960 struct mlock_args { 961 const void *addr; 962 size_t len; 963 }; 964 #endif 965 int 966 sys_mlock(struct thread *td, struct mlock_args *uap) 967 { 968 969 return (kern_mlock(td->td_proc, td->td_ucred, 970 __DECONST(uintptr_t, uap->addr), uap->len)); 971 } 972 973 int 974 kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len) 975 { 976 vm_offset_t addr, end, last, start; 977 vm_size_t npages, size; 978 vm_map_t map; 979 unsigned long nsize; 980 int error; 981 982 error = priv_check_cred(cred, PRIV_VM_MLOCK, 0); 983 if (error) 984 return (error); 985 addr = addr0; 986 size = len; 987 last = addr + size; 988 start = trunc_page(addr); 989 end = round_page(last); 990 if (last < addr || end < addr) 991 return (EINVAL); 992 npages = atop(end - start); 993 if (npages > vm_page_max_wired) 994 return (ENOMEM); 995 map = &proc->p_vmspace->vm_map; 996 PROC_LOCK(proc); 997 nsize = ptoa(npages + pmap_wired_count(map->pmap)); 998 if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) { 999 PROC_UNLOCK(proc); 1000 return (ENOMEM); 1001 } 1002 PROC_UNLOCK(proc); 1003 if (npages + vm_wire_count() > vm_page_max_wired) 1004 return (EAGAIN); 1005 #ifdef RACCT 1006 if (racct_enable) { 1007 PROC_LOCK(proc); 1008 error = racct_set(proc, RACCT_MEMLOCK, nsize); 1009 PROC_UNLOCK(proc); 1010 if (error != 0) 1011 return (ENOMEM); 1012 } 1013 #endif 1014 error = vm_map_wire(map, start, end, 1015 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 1016 #ifdef RACCT 1017 if (racct_enable && error != KERN_SUCCESS) { 1018 PROC_LOCK(proc); 1019 racct_set(proc, RACCT_MEMLOCK, 1020 ptoa(pmap_wired_count(map->pmap))); 1021 PROC_UNLOCK(proc); 1022 } 1023 #endif 1024 return (error == KERN_SUCCESS ? 0 : ENOMEM); 1025 } 1026 1027 #ifndef _SYS_SYSPROTO_H_ 1028 struct mlockall_args { 1029 int how; 1030 }; 1031 #endif 1032 1033 int 1034 sys_mlockall(struct thread *td, struct mlockall_args *uap) 1035 { 1036 vm_map_t map; 1037 int error; 1038 1039 map = &td->td_proc->p_vmspace->vm_map; 1040 error = priv_check(td, PRIV_VM_MLOCK); 1041 if (error) 1042 return (error); 1043 1044 if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0)) 1045 return (EINVAL); 1046 1047 /* 1048 * If wiring all pages in the process would cause it to exceed 1049 * a hard resource limit, return ENOMEM. 1050 */ 1051 if (!old_mlock && uap->how & MCL_CURRENT) { 1052 PROC_LOCK(td->td_proc); 1053 if (map->size > lim_cur(td, RLIMIT_MEMLOCK)) { 1054 PROC_UNLOCK(td->td_proc); 1055 return (ENOMEM); 1056 } 1057 PROC_UNLOCK(td->td_proc); 1058 } 1059 #ifdef RACCT 1060 if (racct_enable) { 1061 PROC_LOCK(td->td_proc); 1062 error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size); 1063 PROC_UNLOCK(td->td_proc); 1064 if (error != 0) 1065 return (ENOMEM); 1066 } 1067 #endif 1068 1069 if (uap->how & MCL_FUTURE) { 1070 vm_map_lock(map); 1071 vm_map_modflags(map, MAP_WIREFUTURE, 0); 1072 vm_map_unlock(map); 1073 error = 0; 1074 } 1075 1076 if (uap->how & MCL_CURRENT) { 1077 /* 1078 * P1003.1-2001 mandates that all currently mapped pages 1079 * will be memory resident and locked (wired) upon return 1080 * from mlockall(). vm_map_wire() will wire pages, by 1081 * calling vm_fault_wire() for each page in the region. 1082 */ 1083 error = vm_map_wire(map, vm_map_min(map), vm_map_max(map), 1084 VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK); 1085 error = (error == KERN_SUCCESS ? 0 : EAGAIN); 1086 } 1087 #ifdef RACCT 1088 if (racct_enable && error != KERN_SUCCESS) { 1089 PROC_LOCK(td->td_proc); 1090 racct_set(td->td_proc, RACCT_MEMLOCK, 1091 ptoa(pmap_wired_count(map->pmap))); 1092 PROC_UNLOCK(td->td_proc); 1093 } 1094 #endif 1095 1096 return (error); 1097 } 1098 1099 #ifndef _SYS_SYSPROTO_H_ 1100 struct munlockall_args { 1101 register_t dummy; 1102 }; 1103 #endif 1104 1105 int 1106 sys_munlockall(struct thread *td, struct munlockall_args *uap) 1107 { 1108 vm_map_t map; 1109 int error; 1110 1111 map = &td->td_proc->p_vmspace->vm_map; 1112 error = priv_check(td, PRIV_VM_MUNLOCK); 1113 if (error) 1114 return (error); 1115 1116 /* Clear the MAP_WIREFUTURE flag from this vm_map. */ 1117 vm_map_lock(map); 1118 vm_map_modflags(map, 0, MAP_WIREFUTURE); 1119 vm_map_unlock(map); 1120 1121 /* Forcibly unwire all pages. */ 1122 error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map), 1123 VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK); 1124 #ifdef RACCT 1125 if (racct_enable && error == KERN_SUCCESS) { 1126 PROC_LOCK(td->td_proc); 1127 racct_set(td->td_proc, RACCT_MEMLOCK, 0); 1128 PROC_UNLOCK(td->td_proc); 1129 } 1130 #endif 1131 1132 return (error); 1133 } 1134 1135 #ifndef _SYS_SYSPROTO_H_ 1136 struct munlock_args { 1137 const void *addr; 1138 size_t len; 1139 }; 1140 #endif 1141 int 1142 sys_munlock(struct thread *td, struct munlock_args *uap) 1143 { 1144 1145 return (kern_munlock(td, (uintptr_t)uap->addr, uap->len)); 1146 } 1147 1148 int 1149 kern_munlock(struct thread *td, uintptr_t addr0, size_t size) 1150 { 1151 vm_offset_t addr, end, last, start; 1152 #ifdef RACCT 1153 vm_map_t map; 1154 #endif 1155 int error; 1156 1157 error = priv_check(td, PRIV_VM_MUNLOCK); 1158 if (error) 1159 return (error); 1160 addr = addr0; 1161 last = addr + size; 1162 start = trunc_page(addr); 1163 end = round_page(last); 1164 if (last < addr || end < addr) 1165 return (EINVAL); 1166 error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end, 1167 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 1168 #ifdef RACCT 1169 if (racct_enable && error == KERN_SUCCESS) { 1170 PROC_LOCK(td->td_proc); 1171 map = &td->td_proc->p_vmspace->vm_map; 1172 racct_set(td->td_proc, RACCT_MEMLOCK, 1173 ptoa(pmap_wired_count(map->pmap))); 1174 PROC_UNLOCK(td->td_proc); 1175 } 1176 #endif 1177 return (error == KERN_SUCCESS ? 0 : ENOMEM); 1178 } 1179 1180 /* 1181 * vm_mmap_vnode() 1182 * 1183 * Helper function for vm_mmap. Perform sanity check specific for mmap 1184 * operations on vnodes. 1185 */ 1186 int 1187 vm_mmap_vnode(struct thread *td, vm_size_t objsize, 1188 vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp, 1189 struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp, 1190 boolean_t *writecounted) 1191 { 1192 struct vattr va; 1193 vm_object_t obj; 1194 vm_ooffset_t foff; 1195 struct ucred *cred; 1196 int error, flags, locktype; 1197 1198 cred = td->td_ucred; 1199 if ((*maxprotp & VM_PROT_WRITE) && (*flagsp & MAP_SHARED)) 1200 locktype = LK_EXCLUSIVE; 1201 else 1202 locktype = LK_SHARED; 1203 if ((error = vget(vp, locktype, td)) != 0) 1204 return (error); 1205 AUDIT_ARG_VNODE1(vp); 1206 foff = *foffp; 1207 flags = *flagsp; 1208 obj = vp->v_object; 1209 if (vp->v_type == VREG) { 1210 /* 1211 * Get the proper underlying object 1212 */ 1213 if (obj == NULL) { 1214 error = EINVAL; 1215 goto done; 1216 } 1217 if (obj->type == OBJT_VNODE && obj->handle != vp) { 1218 vput(vp); 1219 vp = (struct vnode *)obj->handle; 1220 /* 1221 * Bypass filesystems obey the mpsafety of the 1222 * underlying fs. Tmpfs never bypasses. 1223 */ 1224 error = vget(vp, locktype, td); 1225 if (error != 0) 1226 return (error); 1227 } 1228 if (locktype == LK_EXCLUSIVE) { 1229 *writecounted = TRUE; 1230 vnode_pager_update_writecount(obj, 0, objsize); 1231 } 1232 } else { 1233 error = EINVAL; 1234 goto done; 1235 } 1236 if ((error = VOP_GETATTR(vp, &va, cred))) 1237 goto done; 1238 #ifdef MAC 1239 /* This relies on VM_PROT_* matching PROT_*. */ 1240 error = mac_vnode_check_mmap(cred, vp, (int)prot, flags); 1241 if (error != 0) 1242 goto done; 1243 #endif 1244 if ((flags & MAP_SHARED) != 0) { 1245 if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) { 1246 if (prot & VM_PROT_WRITE) { 1247 error = EPERM; 1248 goto done; 1249 } 1250 *maxprotp &= ~VM_PROT_WRITE; 1251 } 1252 } 1253 /* 1254 * If it is a regular file without any references 1255 * we do not need to sync it. 1256 * Adjust object size to be the size of actual file. 1257 */ 1258 objsize = round_page(va.va_size); 1259 if (va.va_nlink == 0) 1260 flags |= MAP_NOSYNC; 1261 if (obj->type == OBJT_VNODE) { 1262 obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, 1263 cred); 1264 if (obj == NULL) { 1265 error = ENOMEM; 1266 goto done; 1267 } 1268 } else { 1269 KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP, 1270 ("wrong object type")); 1271 VM_OBJECT_WLOCK(obj); 1272 vm_object_reference_locked(obj); 1273 #if VM_NRESERVLEVEL > 0 1274 vm_object_color(obj, 0); 1275 #endif 1276 VM_OBJECT_WUNLOCK(obj); 1277 } 1278 *objp = obj; 1279 *flagsp = flags; 1280 1281 vfs_mark_atime(vp, cred); 1282 1283 done: 1284 if (error != 0 && *writecounted) { 1285 *writecounted = FALSE; 1286 vnode_pager_update_writecount(obj, objsize, 0); 1287 } 1288 vput(vp); 1289 return (error); 1290 } 1291 1292 /* 1293 * vm_mmap_cdev() 1294 * 1295 * Helper function for vm_mmap. Perform sanity check specific for mmap 1296 * operations on cdevs. 1297 */ 1298 int 1299 vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot, 1300 vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw, 1301 vm_ooffset_t *foff, vm_object_t *objp) 1302 { 1303 vm_object_t obj; 1304 int error, flags; 1305 1306 flags = *flagsp; 1307 1308 if (dsw->d_flags & D_MMAP_ANON) { 1309 *objp = NULL; 1310 *foff = 0; 1311 *maxprotp = VM_PROT_ALL; 1312 *flagsp |= MAP_ANON; 1313 return (0); 1314 } 1315 /* 1316 * cdevs do not provide private mappings of any kind. 1317 */ 1318 if ((*maxprotp & VM_PROT_WRITE) == 0 && 1319 (prot & VM_PROT_WRITE) != 0) 1320 return (EACCES); 1321 if (flags & (MAP_PRIVATE|MAP_COPY)) 1322 return (EINVAL); 1323 /* 1324 * Force device mappings to be shared. 1325 */ 1326 flags |= MAP_SHARED; 1327 #ifdef MAC_XXX 1328 error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot); 1329 if (error != 0) 1330 return (error); 1331 #endif 1332 /* 1333 * First, try d_mmap_single(). If that is not implemented 1334 * (returns ENODEV), fall back to using the device pager. 1335 * Note that d_mmap_single() must return a reference to the 1336 * object (it needs to bump the reference count of the object 1337 * it returns somehow). 1338 * 1339 * XXX assumes VM_PROT_* == PROT_* 1340 */ 1341 error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot); 1342 if (error != ENODEV) 1343 return (error); 1344 obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff, 1345 td->td_ucred); 1346 if (obj == NULL) 1347 return (EINVAL); 1348 *objp = obj; 1349 *flagsp = flags; 1350 return (0); 1351 } 1352 1353 /* 1354 * vm_mmap() 1355 * 1356 * Internal version of mmap used by exec, sys5 shared memory, and 1357 * various device drivers. Handle is either a vnode pointer, a 1358 * character device, or NULL for MAP_ANON. 1359 */ 1360 int 1361 vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, 1362 vm_prot_t maxprot, int flags, 1363 objtype_t handle_type, void *handle, 1364 vm_ooffset_t foff) 1365 { 1366 vm_object_t object; 1367 struct thread *td = curthread; 1368 int error; 1369 boolean_t writecounted; 1370 1371 if (size == 0) 1372 return (EINVAL); 1373 1374 size = round_page(size); 1375 object = NULL; 1376 writecounted = FALSE; 1377 1378 /* 1379 * Lookup/allocate object. 1380 */ 1381 switch (handle_type) { 1382 case OBJT_DEVICE: { 1383 struct cdevsw *dsw; 1384 struct cdev *cdev; 1385 int ref; 1386 1387 cdev = handle; 1388 dsw = dev_refthread(cdev, &ref); 1389 if (dsw == NULL) 1390 return (ENXIO); 1391 error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev, 1392 dsw, &foff, &object); 1393 dev_relthread(cdev, ref); 1394 break; 1395 } 1396 case OBJT_VNODE: 1397 error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, 1398 handle, &foff, &object, &writecounted); 1399 break; 1400 case OBJT_DEFAULT: 1401 if (handle == NULL) { 1402 error = 0; 1403 break; 1404 } 1405 /* FALLTHROUGH */ 1406 default: 1407 error = EINVAL; 1408 break; 1409 } 1410 if (error) 1411 return (error); 1412 1413 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1414 foff, writecounted, td); 1415 if (error != 0 && object != NULL) { 1416 /* 1417 * If this mapping was accounted for in the vnode's 1418 * writecount, then undo that now. 1419 */ 1420 if (writecounted) 1421 vnode_pager_release_writecount(object, 0, size); 1422 vm_object_deallocate(object); 1423 } 1424 return (error); 1425 } 1426 1427 /* 1428 * Internal version of mmap that maps a specific VM object into an 1429 * map. Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap. 1430 */ 1431 int 1432 vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, 1433 vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff, 1434 boolean_t writecounted, struct thread *td) 1435 { 1436 boolean_t curmap, fitit; 1437 vm_offset_t max_addr; 1438 int docow, error, findspace, rv; 1439 1440 curmap = map == &td->td_proc->p_vmspace->vm_map; 1441 if (curmap) { 1442 PROC_LOCK(td->td_proc); 1443 if (map->size + size > lim_cur_proc(td->td_proc, RLIMIT_VMEM)) { 1444 PROC_UNLOCK(td->td_proc); 1445 return (ENOMEM); 1446 } 1447 if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) { 1448 PROC_UNLOCK(td->td_proc); 1449 return (ENOMEM); 1450 } 1451 if (!old_mlock && map->flags & MAP_WIREFUTURE) { 1452 if (ptoa(pmap_wired_count(map->pmap)) + size > 1453 lim_cur_proc(td->td_proc, RLIMIT_MEMLOCK)) { 1454 racct_set_force(td->td_proc, RACCT_VMEM, 1455 map->size); 1456 PROC_UNLOCK(td->td_proc); 1457 return (ENOMEM); 1458 } 1459 error = racct_set(td->td_proc, RACCT_MEMLOCK, 1460 ptoa(pmap_wired_count(map->pmap)) + size); 1461 if (error != 0) { 1462 racct_set_force(td->td_proc, RACCT_VMEM, 1463 map->size); 1464 PROC_UNLOCK(td->td_proc); 1465 return (error); 1466 } 1467 } 1468 PROC_UNLOCK(td->td_proc); 1469 } 1470 1471 /* 1472 * We currently can only deal with page aligned file offsets. 1473 * The mmap() system call already enforces this by subtracting 1474 * the page offset from the file offset, but checking here 1475 * catches errors in device drivers (e.g. d_single_mmap() 1476 * callbacks) and other internal mapping requests (such as in 1477 * exec). 1478 */ 1479 if (foff & PAGE_MASK) 1480 return (EINVAL); 1481 1482 if ((flags & MAP_FIXED) == 0) { 1483 fitit = TRUE; 1484 *addr = round_page(*addr); 1485 } else { 1486 if (*addr != trunc_page(*addr)) 1487 return (EINVAL); 1488 fitit = FALSE; 1489 } 1490 1491 if (flags & MAP_ANON) { 1492 if (object != NULL || foff != 0) 1493 return (EINVAL); 1494 docow = 0; 1495 } else if (flags & MAP_PREFAULT_READ) 1496 docow = MAP_PREFAULT; 1497 else 1498 docow = MAP_PREFAULT_PARTIAL; 1499 1500 if ((flags & (MAP_ANON|MAP_SHARED)) == 0) 1501 docow |= MAP_COPY_ON_WRITE; 1502 if (flags & MAP_NOSYNC) 1503 docow |= MAP_DISABLE_SYNCER; 1504 if (flags & MAP_NOCORE) 1505 docow |= MAP_DISABLE_COREDUMP; 1506 /* Shared memory is also shared with children. */ 1507 if (flags & MAP_SHARED) 1508 docow |= MAP_INHERIT_SHARE; 1509 if (writecounted) 1510 docow |= MAP_VN_WRITECOUNT; 1511 if (flags & MAP_STACK) { 1512 if (object != NULL) 1513 return (EINVAL); 1514 docow |= MAP_STACK_GROWS_DOWN; 1515 } 1516 if ((flags & MAP_EXCL) != 0) 1517 docow |= MAP_CHECK_EXCL; 1518 if ((flags & MAP_GUARD) != 0) 1519 docow |= MAP_CREATE_GUARD; 1520 1521 if (fitit) { 1522 if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER) 1523 findspace = VMFS_SUPER_SPACE; 1524 else if ((flags & MAP_ALIGNMENT_MASK) != 0) 1525 findspace = VMFS_ALIGNED_SPACE(flags >> 1526 MAP_ALIGNMENT_SHIFT); 1527 else 1528 findspace = VMFS_OPTIMAL_SPACE; 1529 max_addr = 0; 1530 #ifdef MAP_32BIT 1531 if ((flags & MAP_32BIT) != 0) 1532 max_addr = MAP_32BIT_MAX_ADDR; 1533 #endif 1534 if (curmap) { 1535 rv = vm_map_find_min(map, object, foff, addr, size, 1536 round_page((vm_offset_t)td->td_proc->p_vmspace-> 1537 vm_daddr + lim_max(td, RLIMIT_DATA)), max_addr, 1538 findspace, prot, maxprot, docow); 1539 } else { 1540 rv = vm_map_find(map, object, foff, addr, size, 1541 max_addr, findspace, prot, maxprot, docow); 1542 } 1543 } else { 1544 rv = vm_map_fixed(map, object, foff, *addr, size, 1545 prot, maxprot, docow); 1546 } 1547 1548 if (rv == KERN_SUCCESS) { 1549 /* 1550 * If the process has requested that all future mappings 1551 * be wired, then heed this. 1552 */ 1553 if (map->flags & MAP_WIREFUTURE) { 1554 vm_map_wire(map, *addr, *addr + size, 1555 VM_MAP_WIRE_USER | ((flags & MAP_STACK) ? 1556 VM_MAP_WIRE_HOLESOK : VM_MAP_WIRE_NOHOLES)); 1557 } 1558 } 1559 return (vm_mmap_to_errno(rv)); 1560 } 1561 1562 /* 1563 * Translate a Mach VM return code to zero on success or the appropriate errno 1564 * on failure. 1565 */ 1566 int 1567 vm_mmap_to_errno(int rv) 1568 { 1569 1570 switch (rv) { 1571 case KERN_SUCCESS: 1572 return (0); 1573 case KERN_INVALID_ADDRESS: 1574 case KERN_NO_SPACE: 1575 return (ENOMEM); 1576 case KERN_PROTECTION_FAILURE: 1577 return (EACCES); 1578 default: 1579 return (EINVAL); 1580 } 1581 } 1582