/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

int old_mlock = 0;
SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
    "Do not apply RLIMIT_MEMLOCK on mlockall");

#ifdef MAP_32BIT
#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

int
sys_sbrk(struct thread *td, struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

int
sys_sstk(struct thread *td, struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

int
ogetpagesize(struct thread *td, struct getpagesize_args *uap)
{

	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */

/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
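 *
 * An illustrative example, assuming PAGE_SIZE == 4096: a call such as
 *
 *	mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 0x1234)
 *
 * has a page offset of 0x234, so the kernel maps whole pages starting
 * at file offset 0x1000 and returns base + 0x234, the address that
 * corresponds to file offset 0x1234.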
 */
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

int
sys_mmap(struct thread *td, struct mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}

int
kern_mmap(struct thread *td, uintptr_t addr0, size_t size, int prot, int flags,
    int fd, off_t pos)
{
	struct vmspace *vms;
	struct file *fp;
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_prot_t cap_maxprot;
	int align, error;
	cap_rights_t rights;

	vms = td->td_proc->p_vmspace;
	fp = NULL;
	AUDIT_ARG_FD(fd);
	addr = addr0;

	/*
	 * Ignore old flags that used to be defined but did not do anything.
	 */
	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);

	/*
	 * Enforce the constraints.
	 * Mapping of length 0 is only allowed for old binaries.
	 * Anonymous mapping shall specify -1 as file descriptor and
	 * zero position for new code.  Be nice to ancient a.out
	 * binaries and correct pos for anonymous mapping, since old
	 * ld.so sometimes issues anonymous map requests with non-zero
	 * pos.
	 */
	if (!SV_CURPROC_FLAG(SV_AOUT)) {
		if ((size == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
			return (EINVAL);
	} else {
		if ((flags & MAP_ANON) != 0)
			pos = 0;
	}

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) !=
		    (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}
	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
	    MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
	    MAP_PREFAULT_READ | MAP_GUARD |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);
	if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
		return (EINVAL);
	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
		return (EINVAL);
	if (prot != PROT_NONE &&
	    (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
		return (EINVAL);
	if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
	    pos != 0 || (flags & ~(MAP_FIXED | MAP_GUARD | MAP_EXCL |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0))
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/* Ensure alignment is at least a page and fits in a pointer. */
	align = flags & MAP_ALIGNMENT_MASK;
	if (align != 0 && align != MAP_ALIGNED_SUPER &&
	    (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
	    align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
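		 * An illustrative check, assuming PAGE_SIZE == 4096:
		 * addr == 0x10234 and pos == 0x5234 share the remainder
		 * 0x234, so addr - pageoff == 0x10000 passes the
		 * alignment test below.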
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
#ifdef MAP_32BIT
		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
			return (EINVAL);
	} else if (flags & MAP_32BIT) {
		/*
		 * For MAP_32BIT, override the hint if it is too high and
		 * do not bother moving the mapping past the heap (since
		 * the heap is usually above 2GB).
		 */
		if (addr + size > MAP_32BIT_MAX_ADDR)
			addr = 0;
#endif
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA));
	}
	if (size == 0) {
		/*
		 * Return success without mapping anything for old
		 * binaries that request a page-aligned mapping of
		 * length 0.  For modern binaries, this function
		 * returns an error earlier.
		 */
		error = 0;
	} else if ((flags & MAP_GUARD) != 0) {
		error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
		    VM_PROT_NONE, flags, NULL, pos, FALSE, td);
	} else if ((flags & MAP_ANON) != 0) {
		/*
		 * Mapping blank space is trivial.
		 *
		 * This relies on VM_PROT_* matching PROT_*.
		 */
		error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
		    VM_PROT_ALL, flags, NULL, pos, FALSE, td);
	} else {
		/*
		 * Mapping file, get fp for validation and don't let the
		 * descriptor disappear on us if we block.  Check capability
		 * rights, but also return the maximum rights to be combined
		 * with maxprot later.
		 */
		cap_rights_init(&rights, CAP_MMAP);
		if (prot & PROT_READ)
			cap_rights_set(&rights, CAP_MMAP_R);
		if ((flags & MAP_SHARED) != 0) {
			if (prot & PROT_WRITE)
				cap_rights_set(&rights, CAP_MMAP_W);
		}
		if (prot & PROT_EXEC)
			cap_rights_set(&rights, CAP_MMAP_X);
		error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
		if (error != 0)
			goto done;
		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
		    td->td_proc->p_osrel >= P_OSREL_MAP_FSTRICT) {
			error = EINVAL;
			goto done;
		}

		/* This relies on VM_PROT_* matching PROT_*. */
		error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
		    cap_maxprot, flags, pos, td);
	}

	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}
#endif

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(struct thread *td, struct ommap_args *uap)
{
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};
	int flags, prot;

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	prot = cvtbsdprot[uap->prot & 0x7];
#ifdef COMPAT_FREEBSD32
#if defined(__amd64__)
	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
	    prot != 0)
		prot |= PROT_EXEC;
#endif
#endif
	flags = 0;
	if (uap->flags & OMAP_ANON)
		flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		flags |= MAP_SHARED;
	else
		flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		flags |= MAP_FIXED;
	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
	    uap->fd, uap->pos));
}
#endif				/* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
int
sys_msync(struct thread *td, struct msync_args *uap)
{

	return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
}

int
kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
{
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;
	int rv;

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
int
sys_munmap(struct thread *td, struct munmap_args *uap)
{

	return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
	bool pmc_handled;
#endif
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;

	if (size == 0)
		return (EINVAL);

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	pmc_handled = false;
	if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
		pmc_handled = true;
		/*
		 * Inform hwpmc if the address range being unmapped contains
		 * an executable region.
		 */
		pkm.pm_address = (uintptr_t) NULL;
		if (vm_map_lookup_entry(map, addr, &entry)) {
			for (; entry->start < addr + size;
			    entry = entry->next) {
				if (vm_map_check_protection(map, entry->start,
				    entry->end, VM_PROT_EXECUTE) == TRUE) {
					pkm.pm_address = (uintptr_t) addr;
					pkm.pm_size = (size_t) size;
					break;
				}
			}
		}
	}
#endif
	vm_map_delete(map, addr, addr + size);

#ifdef HWPMC_HOOKS
	if (__predict_false(pmc_handled)) {
		/* downgrade the lock to prevent a LOR with the pmc-sx lock */
		vm_map_lock_downgrade(map);
		if (pkm.pm_address != (uintptr_t) NULL)
			PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
		vm_map_unlock_read(map);
	} else
#endif
		vm_map_unlock(map);

	/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
int
sys_mprotect(struct thread *td, struct mprotect_args *uap)
{

	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
}

int
kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
{
	vm_offset_t addr;
	vm_size_t pageoff;

	addr = addr0;
	prot = (prot & VM_PROT_ALL);
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
int
sys_minherit(struct thread *td, struct minherit_args *uap)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

int
sys_madvise(struct thread *td, struct madvise_args *uap)
{

	return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
}

int
kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
{
	vm_map_t map;
	vm_offset_t addr, end, start;
	int flags;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (behav == MADV_PROTECT) {
		flags = PPROT_SET;
		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
		    PROC_SPROTECT, &flags));
	}

	/*
	 * Check for illegal behavior
	 */
	if (behav < 0 || behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	addr = addr0;
	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
		return (EINVAL);
	if ((addr + len) < addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page(addr);
	end = round_page(addr + len);

	if (vm_map_madvise(map, start, end, behav))
		return (EINVAL);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

int
sys_mincore(struct thread *td, struct mincore_args *uap)
{

	return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
}

int
kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_paddr_t locked_pa;
	vm_page_t m;
	vm_pindex_t pindex;
	int mincoreinfo;
	unsigned int timestamp;
	boolean_t locked;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page(addr0);
	end = addr + (vm_size_t)round_page(len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry; current->start < end; current = current->next) {

		/*
		 * check for contiguity
		 */
		if (current->end < end && current->next->start > current->end) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			object = NULL;
			locked_pa = 0;
		retry:
			m = NULL;
			mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
			if (locked_pa != 0) {
				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
				 * other mappings might be examined.
				 */
				m = PHYS_TO_VM_PAGE(locked_pa);
				if (m->object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = m->object;
					locked = VM_OBJECT_TRYWLOCK(object);
					vm_page_unlock(m);
					if (!locked) {
						VM_OBJECT_WLOCK(object);
						vm_page_lock(m);
						goto retry;
					}
				} else
					vm_page_unlock(m);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("mincore: page %p is mapped but invalid",
				    m));
			} else if (mincoreinfo == 0) {
				/*
				 * The page is not mapped by this process.  If
				 * the object implements managed pages, then
				 * determine if the page is resident so that
				 * the mappings might be examined.
				 */
				if (current->object.vm_object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = current->object.vm_object;
					VM_OBJECT_WLOCK(object);
				}
				if (object->type == OBJT_DEFAULT ||
				    object->type == OBJT_SWAP ||
				    object->type == OBJT_VNODE) {
					pindex = OFF_TO_IDX(current->offset +
					    (addr - current->start));
					m = vm_page_lookup(object, pindex);
					if (m != NULL && m->valid == 0)
						m = NULL;
					if (m != NULL)
						mincoreinfo = MINCORE_INCORE;
				}
			}
			if (m != NULL) {
				/* Examine other mappings to the page. */
				if (m->dirty == 0 && pmap_is_modified(m))
					vm_page_dirty(m);
				if (m->dirty != 0)
					mincoreinfo |= MINCORE_MODIFIED_OTHER;
				/*
				 * The first test for PGA_REFERENCED is an
				 * optimization.  The second test is
				 * required because a concurrent pmap
				 * operation could clear the last reference
				 * and set PGA_REFERENCED before the call to
				 * pmap_is_referenced().
				 */
				if ((m->aflags & PGA_REFERENCED) != 0 ||
				    pmap_is_referenced(m) ||
				    (m->aflags & PGA_REFERENCED) != 0)
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
			}
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = atop(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure
			 * that the byte vector is zeroed for those skipped
			 * entries.
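			 * For example, if pages at indices 3 and 4 were
			 * skipped because their map entry has no backing
			 * object, vec[3] and vec[4] are zeroed here.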
			 */
			while ((lastvecindex + 1) < vecindex) {
				++lastvecindex;
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the
			 * previous output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = atop(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
int
sys_mlock(struct thread *td, struct mlock_args *uap)
{

	return (kern_mlock(td->td_proc, td->td_ucred,
	    __DECONST(uintptr_t, uap->addr), uap->len));
}

int
kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
{
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	vm_map_t map;
	unsigned long nsize;
	int error;

	error = priv_check_cred(cred, PRIV_VM_MLOCK, 0);
	if (error)
		return (error);
	addr = addr0;
	size = len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	map = &proc->p_vmspace->vm_map;
	PROC_LOCK(proc);
	nsize = ptoa(npages + pmap_wired_count(map->pmap));
	if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + vm_wire_count() > vm_page_max_wired)
		return (EAGAIN);
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(proc);
		error = racct_set(proc, RACCT_MEMLOCK, nsize);
		PROC_UNLOCK(proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif
	error = vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(proc);
		racct_set(proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(proc);
	}
#endif
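	/* vm_map_wire() returns a Mach KERN_* code; any failure is ENOMEM. */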
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

int
sys_mlockall(struct thread *td, struct mlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	if (!old_mlock && uap->how & MCL_CURRENT) {
		PROC_LOCK(td->td_proc);
		if (map->size > lim_cur(td, RLIMIT_MEMLOCK)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		PROC_UNLOCK(td->td_proc);
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
		PROC_UNLOCK(td->td_proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall().  vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

int
sys_munlockall(struct thread *td, struct munlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
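	/* Holes in the address range are tolerated (VM_MAP_WIRE_HOLESOK). */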
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK, 0);
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
int
sys_munlock(struct thread *td, struct munlock_args *uap)
{

	return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
{
	vm_offset_t addr, end, last, start;
#ifdef RACCT
	vm_map_t map;
#endif
	int error;

	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);
	addr = addr0;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		map = &td->td_proc->p_vmspace->vm_map;
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * Helper function for vm_mmap.  Perform sanity checks specific to mmap
 * operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
    boolean_t *writecounted)
{
	struct vattr va;
	vm_object_t obj;
	vm_ooffset_t foff;
	struct ucred *cred;
	int error, flags, locktype;

	cred = td->td_ucred;
	if ((*maxprotp & VM_PROT_WRITE) && (*flagsp & MAP_SHARED))
		locktype = LK_EXCLUSIVE;
	else
		locktype = LK_SHARED;
	if ((error = vget(vp, locktype, td)) != 0)
		return (error);
	AUDIT_ARG_VNODE1(vp);
	foff = *foffp;
	flags = *flagsp;
	obj = vp->v_object;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (obj == NULL) {
			error = EINVAL;
			goto done;
		}
		if (obj->type == OBJT_VNODE && obj->handle != vp) {
			vput(vp);
			vp = (struct vnode *)obj->handle;
			/*
			 * Bypass filesystems obey the mpsafety of the
			 * underlying fs.  Tmpfs never bypasses.
			 */
			error = vget(vp, locktype, td);
			if (error != 0)
				return (error);
		}
		if (locktype == LK_EXCLUSIVE) {
			*writecounted = TRUE;
			vnode_pager_update_writecount(obj, 0, objsize);
		}
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, cred)))
		goto done;
#ifdef MAC
	/* This relies on VM_PROT_* matching PROT_*. */
	error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
	if (error != 0)
		goto done;
#endif
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & VM_PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
	}
	/*
	 * If it is a regular file without any references,
	 * we do not need to sync it.
	 * Adjust object size to be the size of the actual file.
	 */
	objsize = round_page(va.va_size);
	if (va.va_nlink == 0)
		flags |= MAP_NOSYNC;
	if (obj->type == OBJT_VNODE) {
		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
		    cred);
		if (obj == NULL) {
			error = ENOMEM;
			goto done;
		}
	} else {
		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
		    ("wrong object type"));
		VM_OBJECT_WLOCK(obj);
		vm_object_reference_locked(obj);
#if VM_NRESERVLEVEL > 0
		vm_object_color(obj, 0);
#endif
		VM_OBJECT_WUNLOCK(obj);
	}
	*objp = obj;
	*flagsp = flags;

	vfs_mark_atime(vp, cred);

done:
	if (error != 0 && *writecounted) {
		*writecounted = FALSE;
		vnode_pager_update_writecount(obj, objsize, 0);
	}
	vput(vp);
	return (error);
}

/*
 * vm_mmap_cdev()
 *
 * Helper function for vm_mmap.  Perform sanity checks specific to mmap
 * operations on cdevs.
 */
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
    vm_ooffset_t *foff, vm_object_t *objp)
{
	vm_object_t obj;
	int error, flags;

	flags = *flagsp;

	if (dsw->d_flags & D_MMAP_ANON) {
		*objp = NULL;
		*foff = 0;
		*maxprotp = VM_PROT_ALL;
		*flagsp |= MAP_ANON;
		return (0);
	}
	/*
	 * cdevs do not provide private mappings of any kind.
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if (flags & (MAP_PRIVATE|MAP_COPY))
		return (EINVAL);
	/*
	 * Force device mappings to be shared.
	 */
	flags |= MAP_SHARED;
#ifdef MAC_XXX
	error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
	if (error != 0)
		return (error);
#endif
	/*
	 * First, try d_mmap_single().  If that is not implemented
	 * (returns ENODEV), fall back to using the device pager.
	 * Note that d_mmap_single() must return a reference to the
	 * object (it needs to bump the reference count of the object
	 * it returns somehow).
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
	if (error != ENODEV)
		return (error);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    td->td_ucred);
	if (obj == NULL)
		return (EINVAL);
	*objp = obj;
	*flagsp = flags;
	return (0);
}

/*
 * vm_mmap()
 *
 * Internal version of mmap used by exec, sys5 shared memory, and
 * various device drivers.  Handle is either a vnode pointer, a
 * character device, or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags,
    objtype_t handle_type, void *handle,
    vm_ooffset_t foff)
{
	vm_object_t object;
	struct thread *td = curthread;
	int error;
	boolean_t writecounted;

	if (size == 0)
		return (EINVAL);

	size = round_page(size);
	object = NULL;
	writecounted = FALSE;

	/*
	 * Lookup/allocate object.
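	 * The handle type selects the helper: OBJT_DEVICE goes through
	 * vm_mmap_cdev(), OBJT_VNODE through vm_mmap_vnode(), and
	 * OBJT_DEFAULT with a NULL handle means anonymous memory.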
	 */
	switch (handle_type) {
	case OBJT_DEVICE: {
		struct cdevsw *dsw;
		struct cdev *cdev;
		int ref;

		cdev = handle;
		dsw = dev_refthread(cdev, &ref);
		if (dsw == NULL)
			return (ENXIO);
		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
		    dsw, &foff, &object);
		dev_relthread(cdev, ref);
		break;
	}
	case OBJT_VNODE:
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object, &writecounted);
		break;
	case OBJT_DEFAULT:
		if (handle == NULL) {
			error = 0;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = EINVAL;
		break;
	}
	if (error)
		return (error);

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, writecounted, td);
	if (error != 0 && object != NULL) {
		/*
		 * If this mapping was accounted for in the vnode's
		 * writecount, then undo that now.
		 */
		if (writecounted)
			vnode_pager_release_writecount(object, 0, size);
		vm_object_deallocate(object);
	}
	return (error);
}

/*
 * Internal version of mmap that maps a specific VM object into a
 * map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
 */
int
vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
    boolean_t writecounted, struct thread *td)
{
	boolean_t curmap, fitit;
	vm_offset_t max_addr;
	int docow, error, findspace, rv;

	curmap = map == &td->td_proc->p_vmspace->vm_map;
	if (curmap) {
		PROC_LOCK(td->td_proc);
		if (map->size + size > lim_cur_proc(td->td_proc, RLIMIT_VMEM)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (!old_mlock && map->flags & MAP_WIREFUTURE) {
			if (ptoa(pmap_wired_count(map->pmap)) + size >
			    lim_cur_proc(td->td_proc, RLIMIT_MEMLOCK)) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				PROC_UNLOCK(td->td_proc);
				return (ENOMEM);
			}
			error = racct_set(td->td_proc, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)) + size);
			if (error != 0) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				PROC_UNLOCK(td->td_proc);
				return (error);
			}
		}
		PROC_UNLOCK(td->td_proc);
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The mmap() system call already enforces this by subtracting
	 * the page offset from the file offset, but checking here
	 * catches errors in device drivers (e.g. d_mmap_single()
	 * callbacks) and other internal mapping requests (such as in
	 * exec).
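	 * As an illustrative case, assuming 4 KB pages, a driver that
	 * hands back foff == 0x1234 would be rejected with EINVAL below.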
1482 */ 1483 if (foff & PAGE_MASK) 1484 return (EINVAL); 1485 1486 if ((flags & MAP_FIXED) == 0) { 1487 fitit = TRUE; 1488 *addr = round_page(*addr); 1489 } else { 1490 if (*addr != trunc_page(*addr)) 1491 return (EINVAL); 1492 fitit = FALSE; 1493 } 1494 1495 if (flags & MAP_ANON) { 1496 if (object != NULL || foff != 0) 1497 return (EINVAL); 1498 docow = 0; 1499 } else if (flags & MAP_PREFAULT_READ) 1500 docow = MAP_PREFAULT; 1501 else 1502 docow = MAP_PREFAULT_PARTIAL; 1503 1504 if ((flags & (MAP_ANON|MAP_SHARED)) == 0) 1505 docow |= MAP_COPY_ON_WRITE; 1506 if (flags & MAP_NOSYNC) 1507 docow |= MAP_DISABLE_SYNCER; 1508 if (flags & MAP_NOCORE) 1509 docow |= MAP_DISABLE_COREDUMP; 1510 /* Shared memory is also shared with children. */ 1511 if (flags & MAP_SHARED) 1512 docow |= MAP_INHERIT_SHARE; 1513 if (writecounted) 1514 docow |= MAP_VN_WRITECOUNT; 1515 if (flags & MAP_STACK) { 1516 if (object != NULL) 1517 return (EINVAL); 1518 docow |= MAP_STACK_GROWS_DOWN; 1519 } 1520 if ((flags & MAP_EXCL) != 0) 1521 docow |= MAP_CHECK_EXCL; 1522 if ((flags & MAP_GUARD) != 0) 1523 docow |= MAP_CREATE_GUARD; 1524 1525 if (fitit) { 1526 if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER) 1527 findspace = VMFS_SUPER_SPACE; 1528 else if ((flags & MAP_ALIGNMENT_MASK) != 0) 1529 findspace = VMFS_ALIGNED_SPACE(flags >> 1530 MAP_ALIGNMENT_SHIFT); 1531 else 1532 findspace = VMFS_OPTIMAL_SPACE; 1533 max_addr = 0; 1534 #ifdef MAP_32BIT 1535 if ((flags & MAP_32BIT) != 0) 1536 max_addr = MAP_32BIT_MAX_ADDR; 1537 #endif 1538 if (curmap) { 1539 rv = vm_map_find_min(map, object, foff, addr, size, 1540 round_page((vm_offset_t)td->td_proc->p_vmspace-> 1541 vm_daddr + lim_max(td, RLIMIT_DATA)), max_addr, 1542 findspace, prot, maxprot, docow); 1543 } else { 1544 rv = vm_map_find(map, object, foff, addr, size, 1545 max_addr, findspace, prot, maxprot, docow); 1546 } 1547 } else { 1548 rv = vm_map_fixed(map, object, foff, *addr, size, 1549 prot, maxprot, docow); 1550 } 1551 1552 if (rv == KERN_SUCCESS) { 1553 /* 1554 * If the process has requested that all future mappings 1555 * be wired, then heed this. 1556 */ 1557 if (map->flags & MAP_WIREFUTURE) { 1558 vm_map_wire(map, *addr, *addr + size, 1559 VM_MAP_WIRE_USER | ((flags & MAP_STACK) ? 1560 VM_MAP_WIRE_HOLESOK : VM_MAP_WIRE_NOHOLES)); 1561 } 1562 } 1563 return (vm_mmap_to_errno(rv)); 1564 } 1565 1566 /* 1567 * Translate a Mach VM return code to zero on success or the appropriate errno 1568 * on failure. 1569 */ 1570 int 1571 vm_mmap_to_errno(int rv) 1572 { 1573 1574 switch (rv) { 1575 case KERN_SUCCESS: 1576 return (0); 1577 case KERN_INVALID_ADDRESS: 1578 case KERN_NO_SPACE: 1579 return (ENOMEM); 1580 case KERN_PROTECTION_FAILURE: 1581 return (EACCES); 1582 default: 1583 return (EINVAL); 1584 } 1585 } 1586