/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/elf.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#if defined(__amd64__) || defined(__i386__) /* for i386_read_exec */
#include <machine/md_var.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

int old_mlock = 0;
SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
    "Do not apply RLIMIT_MEMLOCK on mlockall");
static int mincore_mapped = 1;
SYSCTL_INT(_vm, OID_AUTO, mincore_mapped, CTLFLAG_RWTUN, &mincore_mapped, 0,
    "mincore reports mappings, not residency");
static int imply_prot_max = 0;
SYSCTL_INT(_vm, OID_AUTO, imply_prot_max, CTLFLAG_RWTUN, &imply_prot_max, 0,
    "Imply maximum page permissions in mmap() when none are specified");

#ifdef MAP_32BIT
#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

int
sys_sbrk(struct thread *td, struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

int
sys_sstk(struct thread *td, struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
int
ogetpagesize(struct thread *td, struct ogetpagesize_args *uap)
{

	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
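 *
 * Illustrative sketch (editor's addition, not from the original
 * source): with a 4 KiB page size, a userland call such as
 *
 *	p = mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 4096 + 123);
 *
 * maps the page containing file offset 4096 and, on success, returns
 * a pointer 123 bytes past the start of that mapping.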
 */
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

int
sys_mmap(struct thread *td, struct mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}

int
kern_mmap_maxprot(struct proc *p, int prot)
{

	if ((p->p_flag2 & P2_PROTMAX_DISABLE) != 0 ||
	    (p->p_fctl0 & NT_FREEBSD_FCTL_PROTMAX_DISABLE) != 0)
		return (_PROT_ALL);
	if (((p->p_flag2 & P2_PROTMAX_ENABLE) != 0 || imply_prot_max) &&
	    prot != PROT_NONE)
		return (prot);
	return (_PROT_ALL);
}

int
kern_mmap(struct thread *td, uintptr_t addr0, size_t len, int prot, int flags,
    int fd, off_t pos)
{
	struct vmspace *vms;
	struct file *fp;
	struct proc *p;
	vm_offset_t addr;
	vm_size_t pageoff, size;
	vm_prot_t cap_maxprot;
	int align, error, max_prot;
	cap_rights_t rights;

	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
		return (EINVAL);
	max_prot = PROT_MAX_EXTRACT(prot);
	prot = PROT_EXTRACT(prot);
	if (max_prot != 0 && (max_prot & prot) != prot)
		return (EINVAL);

	p = td->td_proc;

	/*
	 * Always honor PROT_MAX if set.  If not, default to all
	 * permissions unless we're implying maximum permissions.
	 */
	if (max_prot == 0)
		max_prot = kern_mmap_maxprot(p, prot);

	vms = p->p_vmspace;
	fp = NULL;
	AUDIT_ARG_FD(fd);
	addr = addr0;

	/*
	 * Ignore old flags that used to be defined but did not do anything.
	 */
	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);

	/*
	 * Enforce the constraints.
	 * Mapping of length 0 is only allowed for old binaries.
	 * Anonymous mapping shall specify -1 as file descriptor and
	 * zero position for new code.  Be nice to ancient a.out
	 * binaries and correct pos for anonymous mapping, since old
	 * ld.so sometimes issues anonymous map requests with non-zero
	 * pos.
	 */
	if (!SV_CURPROC_FLAG(SV_AOUT)) {
		if ((len == 0 && p->p_osrel >= P_OSREL_MAP_ANON) ||
		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
			return (EINVAL);
	} else {
		if ((flags & MAP_ANON) != 0)
			pos = 0;
	}

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}
	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
	    MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
	    MAP_PREFAULT_READ | MAP_GUARD |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);
	if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
		return (EINVAL);
	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
		return (EINVAL);
	if (prot != PROT_NONE &&
	    (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
		return (EINVAL);
	if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
	    pos != 0 || (flags & ~(MAP_FIXED | MAP_GUARD | MAP_EXCL |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0))
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;
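
	/*
	 * Worked example (editor's illustration, not from the original
	 * source): with PAGE_SIZE 4096, a request of pos = 8300 and
	 * len = 100 yields pageoff = 108, so pos is rewound to 8192 and
	 * the rounding below computes size = round_page(100 + 108) = 4096.
	 */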

	/* Compute size from len by rounding (on both ends). */
	size = len + pageoff;			/* low end... */
	size = round_page(size);		/* hi end */
	/* Check for rounding up to zero. */
	if (len > size)
		return (ENOMEM);

	/* Ensure alignment is at least a page and fits in a pointer. */
	align = flags & MAP_ALIGNMENT_MASK;
	if (align != 0 && align != MAP_ALIGNED_SUPER &&
	    (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
	    align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
#ifdef MAP_32BIT
		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
			return (EINVAL);
	} else if (flags & MAP_32BIT) {
		/*
		 * For MAP_32BIT, override the hint if it is too high and
		 * do not bother moving the mapping past the heap (since
		 * the heap is usually above 2GB).
		 */
		if (addr + size > MAP_32BIT_MAX_ADDR)
			addr = 0;
#endif
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA));
	}
	if (len == 0) {
		/*
		 * Return success without mapping anything for old
		 * binaries that request a page-aligned mapping of
		 * length 0.  For modern binaries, this function
		 * returns an error earlier.
		 */
		error = 0;
	} else if ((flags & MAP_GUARD) != 0) {
		error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
		    VM_PROT_NONE, flags, NULL, pos, FALSE, td);
	} else if ((flags & MAP_ANON) != 0) {
		/*
		 * Mapping blank space is trivial.
		 *
		 * This relies on VM_PROT_* matching PROT_*.
		 */
		error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
		    max_prot, flags, NULL, pos, FALSE, td);
	} else {
		/*
		 * Mapping file, get fp for validation and don't let the
		 * descriptor disappear on us if we block.  Check capability
		 * rights, but also return the maximum rights to be combined
		 * with maxprot later.
		 */
		cap_rights_init(&rights, CAP_MMAP);
		if (prot & PROT_READ)
			cap_rights_set(&rights, CAP_MMAP_R);
		if ((flags & MAP_SHARED) != 0) {
			if (prot & PROT_WRITE)
				cap_rights_set(&rights, CAP_MMAP_W);
		}
		if (prot & PROT_EXEC)
			cap_rights_set(&rights, CAP_MMAP_X);
		error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
		if (error != 0)
			goto done;
		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
		    p->p_osrel >= P_OSREL_MAP_FSTRICT) {
			error = EINVAL;
			goto done;
		}
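
		/*
		 * Editor's illustration (not from the original source):
		 * for a MAP_SHARED, PROT_READ | PROT_WRITE request on a
		 * descriptor holding only CAP_MMAP_R, fget_mmap() above
		 * fails the rights check; with CAP_MMAP_RW it succeeds,
		 * and cap_maxprot then limits the mapping through the
		 * max_prot & cap_maxprot argument below.
		 */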

		/* This relies on VM_PROT_* matching PROT_*. */
		error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
		    max_prot & cap_maxprot, flags, pos, td);
	}

	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}
#endif

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(struct thread *td, struct ommap_args *uap)
{
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};
	int flags, prot;

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	prot = cvtbsdprot[uap->prot & 0x7];
#if (defined(COMPAT_FREEBSD32) && defined(__amd64__)) || defined(__i386__)
	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
	    prot != 0)
		prot |= PROT_EXEC;
#endif
	flags = 0;
	if (uap->flags & OMAP_ANON)
		flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		flags |= MAP_SHARED;
	else
		flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		flags |= MAP_FIXED;
	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
	    uap->fd, uap->pos));
}
#endif				/* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
int
sys_msync(struct thread *td, struct msync_args *uap)
{

	return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
}

int
kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
{
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;
	int rv;

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;
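
	/*
	 * Editor's illustration (not from the original source): a call
	 * such as msync(p, len, MS_SYNC) requests a synchronous flush of
	 * the pages backing [p, p + len), while MS_ASYNC only schedules
	 * the writes; combining MS_ASYNC with MS_INVALIDATE was rejected
	 * above.
	 */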

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
int
sys_munmap(struct thread *td, struct munmap_args *uap)
{

	return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
	bool pmc_handled;
#endif
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;

	if (size == 0)
		return (EINVAL);

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	pmc_handled = false;
	if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
		pmc_handled = true;
		/*
		 * Inform hwpmc if the address range being unmapped contains
		 * an executable region.
		 */
		pkm.pm_address = (uintptr_t) NULL;
		if (vm_map_lookup_entry(map, addr, &entry)) {
			for (; entry->start < addr + size;
			    entry = entry->next) {
				if (vm_map_check_protection(map, entry->start,
				    entry->end, VM_PROT_EXECUTE) == TRUE) {
					pkm.pm_address = (uintptr_t) addr;
					pkm.pm_size = (size_t) size;
					break;
				}
			}
		}
	}
#endif
	vm_map_delete(map, addr, addr + size);

#ifdef HWPMC_HOOKS
	if (__predict_false(pmc_handled)) {
		/* downgrade the lock to prevent a LOR with the pmc-sx lock */
		vm_map_lock_downgrade(map);
		if (pkm.pm_address != (uintptr_t) NULL)
			PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
		vm_map_unlock_read(map);
	} else
#endif
		vm_map_unlock(map);

	/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
int
sys_mprotect(struct thread *td, struct mprotect_args *uap)
{

	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
}

int
kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
{
	vm_offset_t addr;
	vm_size_t pageoff;
	int vm_error, max_prot;

	addr = addr0;
	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
		return (EINVAL);
	max_prot = PROT_MAX_EXTRACT(prot);
	prot = PROT_EXTRACT(prot);
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		if (((addr + size) & 0xffffffff) < addr)
			return (EINVAL);
	} else
#endif
	if (addr + size < addr)
		return (EINVAL);
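
	/*
	 * Editor's illustration (not from the original source): a call
	 * such as mprotect(p, n, PROT_READ | PROT_MAX(PROT_READ |
	 * PROT_WRITE)) first raises the range's maximum protection to
	 * read/write below, then sets the current protection to
	 * read-only; a later request for PROT_EXEC on the range would
	 * then fail with EACCES.
	 */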
	vm_error = KERN_SUCCESS;
	if (max_prot != 0) {
		if ((max_prot & prot) != prot)
			return (EINVAL);
		vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
		    addr, addr + size, max_prot, TRUE);
	}
	if (vm_error == KERN_SUCCESS)
		vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
		    addr, addr + size, prot, FALSE);

	switch (vm_error) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
int
sys_minherit(struct thread *td, struct minherit_args *uap)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

int
sys_madvise(struct thread *td, struct madvise_args *uap)
{

	return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
}

int
kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
{
	vm_map_t map;
	vm_offset_t addr, end, start;
	int flags;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (behav == MADV_PROTECT) {
		flags = PPROT_SET;
		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
		    PROC_SPROTECT, &flags));
	}

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	addr = addr0;
	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
		return (EINVAL);
	if ((addr + len) < addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page(addr);
	end = round_page(addr + len);

	/*
	 * vm_map_madvise() checks for illegal values of behav.
	 */
	return (vm_map_madvise(map, start, end, behav));
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

int
sys_mincore(struct thread *td, struct mincore_args *uap)
{

	return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
}

int
kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
{
	pmap_t pmap;
	vm_map_t map;
	vm_map_entry_t current, entry;
	vm_object_t object;
	vm_offset_t addr, cend, end, first_addr;
	vm_paddr_t pa;
	vm_page_t m;
	vm_pindex_t pindex;
	int error, lastvecindex, mincoreinfo, vecindex;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
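	 *
	 * Editor's note (illustration, not from the original source):
	 * vec must supply one status byte per page of the rounded
	 * range, i.e. atop(round_page(addr0 + len) - trunc_page(addr0))
	 * bytes.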
	 */
	first_addr = addr = trunc_page(addr0);
	end = round_page(addr0 + len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry; current->start < end; current = current->next) {

		/*
		 * check for contiguity
		 */
		if (current->end < end && current->next->start > current->end) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		for (; addr < cend; addr += PAGE_SIZE) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			m = NULL;
			object = NULL;
retry:
			pa = 0;
			mincoreinfo = pmap_mincore(pmap, addr, &pa);
			if (mincore_mapped) {
				/*
				 * We only care about this pmap's
				 * mapping of the page, if any.
				 */
				;
			} else if (pa != 0) {
				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
				 * other mappings might be examined.  The page's
				 * identity may change at any point before its
				 * object lock is acquired, so re-validate if
				 * necessary.
				 */
				m = PHYS_TO_VM_PAGE(pa);
				while (object == NULL || m->object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = (vm_object_t)atomic_load_ptr(
					    &m->object);
					if (object == NULL)
						goto retry;
					VM_OBJECT_WLOCK(object);
				}
				if (pa != pmap_extract(pmap, addr))
					goto retry;
				KASSERT(vm_page_all_valid(m),
				    ("mincore: page %p is mapped but invalid",
				    m));
			} else if (mincoreinfo == 0) {
				/*
				 * The page is not mapped by this process.  If
				 * the object implements managed pages, then
				 * determine if the page is resident so that
				 * the mappings might be examined.
				 */
				if (current->object.vm_object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = current->object.vm_object;
					VM_OBJECT_WLOCK(object);
				}
				if (object->type == OBJT_DEFAULT ||
				    object->type == OBJT_SWAP ||
				    object->type == OBJT_VNODE) {
					pindex = OFF_TO_IDX(current->offset +
					    (addr - current->start));
					m = vm_page_lookup(object, pindex);
					if (m != NULL && vm_page_none_valid(m))
						m = NULL;
					if (m != NULL)
						mincoreinfo = MINCORE_INCORE;
				}
			}
			if (m != NULL) {
				VM_OBJECT_ASSERT_WLOCKED(m->object);

				/* Examine other mappings of the page. */
				if (m->dirty == 0 && pmap_is_modified(m))
					vm_page_dirty(m);
				if (m->dirty != 0)
					mincoreinfo |= MINCORE_MODIFIED_OTHER;

				/*
				 * The first test for PGA_REFERENCED is an
				 * optimization.  The second test is
				 * required because a concurrent pmap
				 * operation could clear the last reference
				 * and set PGA_REFERENCED before the call to
				 * pmap_is_referenced().
				 */
				if ((m->aflags & PGA_REFERENCED) != 0 ||
				    pmap_is_referenced(m) ||
				    (m->aflags & PGA_REFERENCED) != 0)
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
			}
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = atop(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				++lastvecindex;
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = atop(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
int
sys_mlock(struct thread *td, struct mlock_args *uap)
{

	return (kern_mlock(td->td_proc, td->td_ucred,
	    __DECONST(uintptr_t, uap->addr), uap->len));
}

int
kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
{
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	vm_map_t map;
	unsigned long nsize;
	int error;

	error = priv_check_cred(cred, PRIV_VM_MLOCK);
	if (error)
		return (error);
	addr = addr0;
	size = len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_user_wired)
		return (ENOMEM);
	map = &proc->p_vmspace->vm_map;
	PROC_LOCK(proc);
	nsize = ptoa(npages + pmap_wired_count(map->pmap));
	if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(proc);
		error = racct_set(proc, RACCT_MEMLOCK, nsize);
		PROC_UNLOCK(proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif
	error = vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(proc);
		racct_set(proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

int
sys_mlockall(struct thread *td, struct mlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	if (!old_mlock && uap->how & MCL_CURRENT) {
		if (map->size > lim_cur(td, RLIMIT_MEMLOCK))
			return (ENOMEM);
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
		PROC_UNLOCK(td->td_proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}
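
	/*
	 * Editor's illustration (not from the original source): a
	 * process calling mlockall(MCL_CURRENT | MCL_FUTURE) both wires
	 * everything currently mapped (below) and sets MAP_WIREFUTURE
	 * (above) so that later mappings are wired as they are created.
	 */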
	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall().  vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		if (error == KERN_SUCCESS)
			error = 0;
		else if (error == KERN_RESOURCE_SHORTAGE)
			error = ENOMEM;
		else
			error = EAGAIN;
	}
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

int
sys_munlockall(struct thread *td, struct munlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK, 0);
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
int
sys_munlock(struct thread *td, struct munlock_args *uap)
{

	return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
{
	vm_offset_t addr, end, last, start;
#ifdef RACCT
	vm_map_t map;
#endif
	int error;

	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);
	addr = addr0;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		map = &td->td_proc->p_vmspace->vm_map;
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
    boolean_t *writecounted)
{
	struct vattr va;
	vm_object_t obj;
	vm_ooffset_t foff;
	struct ucred *cred;
	int error, flags;
	bool writex;

	cred = td->td_ucred;
	writex = (*maxprotp & VM_PROT_WRITE) != 0 &&
	    (*flagsp & MAP_SHARED) != 0;
	if ((error = vget(vp, LK_SHARED, td)) != 0)
		return (error);
	AUDIT_ARG_VNODE1(vp);
	foff = *foffp;
	flags = *flagsp;
	obj = vp->v_object;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (obj == NULL) {
			error = EINVAL;
			goto done;
		}
		if (obj->type == OBJT_VNODE && obj->handle != vp) {
			vput(vp);
			vp = (struct vnode *)obj->handle;
			/*
			 * Bypass filesystems obey the mpsafety of the
			 * underlying fs.  Tmpfs never bypasses.
			 */
			error = vget(vp, LK_SHARED, td);
			if (error != 0)
				return (error);
		}
		if (writex) {
			*writecounted = TRUE;
			vm_pager_update_writecount(obj, 0, objsize);
		}
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, cred)))
		goto done;
#ifdef MAC
	/* This relies on VM_PROT_* matching PROT_*. */
	error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
	if (error != 0)
		goto done;
#endif
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & VM_PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
	}
	/*
	 * If it is a regular file without any references
	 * we do not need to sync it.
	 * Adjust object size to be the size of actual file.
	 */
	objsize = round_page(va.va_size);
	if (va.va_nlink == 0)
		flags |= MAP_NOSYNC;
	if (obj->type == OBJT_VNODE) {
		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
		    cred);
		if (obj == NULL) {
			error = ENOMEM;
			goto done;
		}
	} else {
		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
		    ("wrong object type"));
		VM_OBJECT_WLOCK(obj);
		vm_object_reference_locked(obj);
#if VM_NRESERVLEVEL > 0
		vm_object_color(obj, 0);
#endif
		VM_OBJECT_WUNLOCK(obj);
	}
	*objp = obj;
	*flagsp = flags;

	vfs_mark_atime(vp, cred);

done:
	if (error != 0 && *writecounted) {
		*writecounted = FALSE;
		vm_pager_update_writecount(obj, objsize, 0);
	}
	vput(vp);
	return (error);
}

/*
 * vm_mmap_cdev()
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on cdevs.
 */
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
    vm_ooffset_t *foff, vm_object_t *objp)
{
	vm_object_t obj;
	int error, flags;

	flags = *flagsp;

	if (dsw->d_flags & D_MMAP_ANON) {
		*objp = NULL;
		*foff = 0;
		*maxprotp = VM_PROT_ALL;
		*flagsp |= MAP_ANON;
		return (0);
	}
	/*
	 * cdevs do not provide private mappings of any kind.
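	 *
	 * Editor's illustration (not from the original source): a request
	 * like mmap(NULL, len, PROT_READ, MAP_PRIVATE, devfd, 0) on a
	 * plain cdev is therefore rejected with EINVAL just below.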
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if (flags & (MAP_PRIVATE|MAP_COPY))
		return (EINVAL);
	/*
	 * Force device mappings to be shared.
	 */
	flags |= MAP_SHARED;
#ifdef MAC_XXX
	error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
	if (error != 0)
		return (error);
#endif
	/*
	 * First, try d_mmap_single().  If that is not implemented
	 * (returns ENODEV), fall back to using the device pager.
	 * Note that d_mmap_single() must return a reference to the
	 * object (it needs to bump the reference count of the object
	 * it returns somehow).
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
	if (error != ENODEV)
		return (error);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    td->td_ucred);
	if (obj == NULL)
		return (EINVAL);
	*objp = obj;
	*flagsp = flags;
	return (0);
}

/*
 * vm_mmap()
 *
 * Internal version of mmap used by exec, sys5 shared memory, and
 * various device drivers.  Handle is either a vnode pointer, a
 * character device, or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags,
    objtype_t handle_type, void *handle,
    vm_ooffset_t foff)
{
	vm_object_t object;
	struct thread *td = curthread;
	int error;
	boolean_t writecounted;

	if (size == 0)
		return (EINVAL);

	size = round_page(size);
	object = NULL;
	writecounted = FALSE;

	/*
	 * Lookup/allocate object.
	 */
	switch (handle_type) {
	case OBJT_DEVICE: {
		struct cdevsw *dsw;
		struct cdev *cdev;
		int ref;

		cdev = handle;
		dsw = dev_refthread(cdev, &ref);
		if (dsw == NULL)
			return (ENXIO);
		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
		    dsw, &foff, &object);
		dev_relthread(cdev, ref);
		break;
	}
	case OBJT_VNODE:
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object, &writecounted);
		break;
	case OBJT_DEFAULT:
		if (handle == NULL) {
			error = 0;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = EINVAL;
		break;
	}
	if (error)
		return (error);

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, writecounted, td);
	if (error != 0 && object != NULL) {
		/*
		 * If this mapping was accounted for in the vnode's
		 * writecount, then undo that now.
		 */
		if (writecounted)
			vm_pager_release_writecount(object, 0, size);
		vm_object_deallocate(object);
	}
	return (error);
}

/*
 * Internal version of mmap that maps a specific VM object into a
 * map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
 */
int
vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
    boolean_t writecounted, struct thread *td)
{
	boolean_t curmap, fitit;
	vm_offset_t max_addr;
	int docow, error, findspace, rv;

	curmap = map == &td->td_proc->p_vmspace->vm_map;
	if (curmap) {
		RACCT_PROC_LOCK(td->td_proc);
		if (map->size + size > lim_cur(td, RLIMIT_VMEM)) {
			RACCT_PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
			RACCT_PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (!old_mlock && map->flags & MAP_WIREFUTURE) {
			if (ptoa(pmap_wired_count(map->pmap)) + size >
			    lim_cur(td, RLIMIT_MEMLOCK)) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				RACCT_PROC_UNLOCK(td->td_proc);
				return (ENOMEM);
			}
			error = racct_set(td->td_proc, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)) + size);
			if (error != 0) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				RACCT_PROC_UNLOCK(td->td_proc);
				return (error);
			}
		}
		RACCT_PROC_UNLOCK(td->td_proc);
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The mmap() system call already enforces this by subtracting
	 * the page offset from the file offset, but checking here
	 * catches errors in device drivers (e.g. d_mmap_single()
	 * callbacks) and other internal mapping requests (such as in
	 * exec).
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
	}

	if (flags & MAP_ANON) {
		if (object != NULL || foff != 0)
			return (EINVAL);
		docow = 0;
	} else if (flags & MAP_PREFAULT_READ)
		docow = MAP_PREFAULT;
	else
		docow = MAP_PREFAULT_PARTIAL;

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;
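
	/*
	 * Editor's note (illustration, not from the original source):
	 * MAP_INHERIT_SHARE below makes a fork()ed child share the
	 * mapping with its parent rather than receive a copy-on-write
	 * copy, matching MAP_SHARED semantics across fork().
	 */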
	/* Shared memory is also shared with children. */
	if (flags & MAP_SHARED)
		docow |= MAP_INHERIT_SHARE;
	if (writecounted)
		docow |= MAP_WRITECOUNT;
	if (flags & MAP_STACK) {
		if (object != NULL)
			return (EINVAL);
		docow |= MAP_STACK_GROWS_DOWN;
	}
	if ((flags & MAP_EXCL) != 0)
		docow |= MAP_CHECK_EXCL;
	if ((flags & MAP_GUARD) != 0)
		docow |= MAP_CREATE_GUARD;

	if (fitit) {
		if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
			findspace = VMFS_SUPER_SPACE;
		else if ((flags & MAP_ALIGNMENT_MASK) != 0)
			findspace = VMFS_ALIGNED_SPACE(flags >>
			    MAP_ALIGNMENT_SHIFT);
		else
			findspace = VMFS_OPTIMAL_SPACE;
		max_addr = 0;
#ifdef MAP_32BIT
		if ((flags & MAP_32BIT) != 0)
			max_addr = MAP_32BIT_MAX_ADDR;
#endif
		if (curmap) {
			rv = vm_map_find_min(map, object, foff, addr, size,
			    round_page((vm_offset_t)td->td_proc->p_vmspace->
			    vm_daddr + lim_max(td, RLIMIT_DATA)), max_addr,
			    findspace, prot, maxprot, docow);
		} else {
			rv = vm_map_find(map, object, foff, addr, size,
			    max_addr, findspace, prot, maxprot, docow);
		}
	} else {
		rv = vm_map_fixed(map, object, foff, *addr, size,
		    prot, maxprot, docow);
	}

	if (rv == KERN_SUCCESS) {
		/*
		 * If the process has requested that all future mappings
		 * be wired, then heed this.
		 */
		if ((map->flags & MAP_WIREFUTURE) != 0) {
			vm_map_lock(map);
			if ((map->flags & MAP_WIREFUTURE) != 0)
				(void)vm_map_wire_locked(map, *addr,
				    *addr + size, VM_MAP_WIRE_USER |
				    ((flags & MAP_STACK) ? VM_MAP_WIRE_HOLESOK :
				    VM_MAP_WIRE_NOHOLES));
			vm_map_unlock(map);
		}
	}
	return (vm_mmap_to_errno(rv));
}

/*
 * Translate a Mach VM return code to zero on success or the appropriate errno
 * on failure.
 */
int
vm_mmap_to_errno(int rv)
{

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}