/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mac.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel running out of resources
 * if attacked from a compromised user account but generous enough such that
 * multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init(void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}
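
/*
 * For illustration only (the figures below are assumed, not measured):
 * with a vm_kmem_size of 128MB and a vm_map_entry of roughly 64 bytes,
 * the computation above gives
 *
 *	max_proc_mmap = (128 * 1024 * 1024 / 64) / 100	(about 21000)
 *
 * entries per process; the check in mmap() below further scales this
 * by the number of processes sharing the vmspace (vm_refcnt).
 */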

static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct vnode *, vm_ooffset_t, vm_object_t *);

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif /* COMPAT_43 */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 */
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
	struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	fp = NULL;
	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		PROC_LOCK(td->td_proc);
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td->td_proc, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td->td_proc, RLIMIT_DATA));
		PROC_UNLOCK(td->td_proc);
	}
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and make
		 * sure it is of appropriate type.
		 * don't let the descriptor disappear on us if we block
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = fp->f_vnode;
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination? What if
		 * proc does a setuid?
		 */
		if (vp->v_mount != NULL && vp->v_mount->mnt_flag & MNT_NOEXEC)
			maxprot = VM_PROT_NONE;
		else
			maxprot = VM_PROT_EXECUTE;
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			error = EACCES;
			goto done;
		}
		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.
		 */
		if ((flags & MAP_SHARED) != 0) {
			if ((fp->f_flag & FWRITE) != 0) {
				maxprot |= VM_PROT_WRITE;
			} else if ((prot & PROT_WRITE) != 0) {
				error = EACCES;
				goto done;
			}
		} else if (vp->v_type != VCHR || (fp->f_flag & FWRITE) != 0) {
			maxprot |= VM_PROT_WRITE;
		}
		handle = (void *)vp;
	}

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
		error = ENOMEM;
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos);
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}
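
/*
 * An illustrative userland sketch of the alignment rules described above
 * (the file name and offset are arbitrary, not taken from these sources):
 * mapping at file offset 100 succeeds even though 100 is not page
 * aligned; the kernel maps from the containing page boundary and returns
 * a pointer adjusted up by the page offset, so *p is byte 100 of the file.
 *
 *	int fd = open("/tmp/data", O_RDONLY);
 *	char *p = mmap(NULL, 4, PROT_READ, MAP_SHARED, fd, 100);
 *	if (p == MAP_FAILED)
 *		err(1, "mmap");
 */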

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif /* COMPAT_43 */
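
/*
 * The cvtbsdprot[] table above decodes the old 4.3BSD protection
 * encoding, in which bit 0 requests execute, bit 1 write, and bit 2
 * read.  As a worked example, an old prot value of 5 (binary 101,
 * read + execute) indexes to PROT_EXEC | PROT_READ.
 */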

#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	default:
		return (EINVAL);
	}
}
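
/*
 * Illustrative userland sketch (names arbitrary): synchronously flush
 * a dirty shared mapping back to its file.  Note that MS_ASYNC and
 * MS_INVALIDATE may not be combined, as checked above.
 *
 *	if (msync(p, len, MS_SYNC) == -1)
 *		err(1, "msync");
 */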

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	if (size == 0)
		return (EINVAL);

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_delete(map, addr, addr + size);
	vm_map_unlock(map);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
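
/*
 * Illustrative sketch: revoke write access from an existing read/write
 * mapping; addr and len are rounded to page boundaries as above.  On
 * platforms defining VM_PROT_READ_IS_EXEC, readable pages implicitly
 * become executable as well.
 *
 *	if (mprotect(p, len, PROT_READ) == -1)
 *		err(1, "mprotect");
 */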

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
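
/*
 * Illustrative sketch: mark a region so that children created with
 * fork(2) share it with the parent instead of receiving a
 * copy-on-write copy.
 *
 *	if (minherit(p, len, INHERIT_SHARE) == -1)
 *		err(1, "minherit");
 */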

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	vm_map_t map;
	struct proc *p;
	int error;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = suser(td);
		if (error == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			p->p_flag |= P_PROTECTED;
			PROC_UNLOCK(p);
		}
		return (error);
	}
	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}
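
/*
 * Illustrative sketch: hint that a region will be read sequentially.
 * The call is purely advisory, as noted above, so failure is rarely
 * worth more than a warning.
 *
 *	if (madvise(p, len, MADV_SEQUENTIAL) == -1)
 *		warn("madvise");
 */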

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				VM_OBJECT_LOCK(current->object.vm_object);
				m = vm_page_lookup(current->object.vm_object,
				    pindex);
				/*
				 * if the page is resident, then gather
				 * information about it.
				 */
				if (m != NULL && m->valid != 0) {
					mincoreinfo = MINCORE_INCORE;
					vm_page_lock_queues();
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
					vm_page_unlock_queues();
				}
				VM_OBJECT_UNLOCK(current->object.vm_object);
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}
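
/*
 * Illustrative userland sketch: query residency, one byte of output
 * per page.  The vector size here assumes len is a multiple of the
 * page size.
 *
 *	char *vec = malloc(len / getpagesize());
 *	if (mincore(p, len, vec) == -1)
 *		err(1, "mincore");
 *	if (vec[0] & MINCORE_INCORE)
 *		printf("first page is resident\n");
 */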

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	struct proc *proc;
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	int error;

	error = suser(td);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	proc = td->td_proc;
	PROC_LOCK(proc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
	    lim_cur(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
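
/*
 * Illustrative sketch (the suser() and RLIMIT_MEMLOCK checks above
 * apply): wire a buffer so it cannot be paged out.
 *
 *	if (mlock(buf, buflen) == -1)
 *		err(1, "mlock");
 */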

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = 0;

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

#if 0
	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	PROC_LOCK(td->td_proc);
	if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map))) >
	    lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);
#else
	error = suser(td);
	if (error)
		return (error);
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall().  vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = suser(td);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);

	return (error);
}
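
/*
 * Illustrative sketch: wire everything currently mapped and ask that
 * future mappings be wired as well; the MAP_WIREFUTURE flag set here
 * is honored by vm_mmap() below when new mappings are created.
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");
 */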

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr, end, last, start;
	vm_size_t size;
	int error;

	error = suser(td);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Perform sanity check specific for mmap
 * operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t foff, vm_object_t *objp)
{
	struct vattr va;
	void *handle;
	vm_object_t obj;
	int error, flags, type;

	mtx_lock(&Giant);
	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0) {
		mtx_unlock(&Giant);
		return (error);
	}
	flags = *flagsp;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (VOP_GETVOBJECT(vp, &obj) != 0) {
			error = EINVAL;
			goto done;
		}
		if (obj->handle != vp) {
			vput(vp);
			vp = (struct vnode*)obj->handle;
			vget(vp, LK_EXCLUSIVE, td);
		}
		type = OBJT_VNODE;
		handle = vp;
	} else if (vp->v_type == VCHR) {
		type = OBJT_DEVICE;
		handle = vp->v_rdev;

		if (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON) {
			*maxprotp = VM_PROT_ALL;
			*flagsp |= MAP_ANON;
			error = 0;
			goto done;
		}
		/*
		 * cdevs do not provide private mappings of any kind.
		 */
		if ((*maxprotp & VM_PROT_WRITE) == 0 &&
		    (prot & PROT_WRITE) != 0) {
			error = EACCES;
			goto done;
		}
		if (flags & (MAP_PRIVATE|MAP_COPY)) {
			error = EINVAL;
			goto done;
		}
		/*
		 * Force device mappings to be shared.
		 */
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, td->td_ucred, td))) {
		goto done;
	}
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
#ifdef MAC
		error = mac_check_vnode_mmap(td->td_ucred, vp, prot);
		if (error != 0)
			goto done;
#endif
	}
	/*
	 * If it is a regular file without any references
	 * we do not need to sync it.
	 * Adjust object size to be the size of actual file.
	 */
	if (vp->v_type == VREG) {
		objsize = round_page(va.va_size);
		if (va.va_nlink == 0)
			flags |= MAP_NOSYNC;
	}
	obj = vm_pager_allocate(type, handle, objsize, prot, foff);
	if (obj == NULL) {
		error = (type == OBJT_DEVICE ? EINVAL : ENOMEM);
		goto done;
	}
	*objp = obj;
	*flagsp = flags;
done:
	vput(vp);
	mtx_unlock(&Giant);
	return (error);
}
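
/*
 * For example, a memory-like character device such as /dev/zero can
 * advertise D_MMAP_ANON in its cdevsw (an assumption about the driver,
 * not shown in this file), in which case the code above rewrites the
 * request into an ordinary anonymous mapping with full protection.
 */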

/*
 * vm_mmap()
 *
 * MPSAFE
 *
 * Internal version of mmap.  Currently used by mmap, exec, and sys5
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	int rv = KERN_SUCCESS;
	vm_ooffset_t objsize;
	int docow, error;
	struct thread *td = curthread;

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

	PROC_LOCK(td->td_proc);
	if (td->td_proc->p_vmspace->vm_map.size + size >
	    lim_cur(td->td_proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}
	/*
	 * Lookup/allocate object.
	 */
	if (handle != NULL) {
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, foff, &object);
		if (error) {
			return (error);
		}
	}
	if (flags & MAP_ANON) {
		object = NULL;
		docow = 0;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		docow = MAP_PREFAULT_PARTIAL;
	}

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit)
		*addr = pmap_addr_hint(object, *addr, size);

	if (flags & MAP_STACK)
		rv = vm_map_stack(map, *addr, size, prot, maxprot,
		    docow | MAP_STACK_GROWS_DOWN);
	else
		rv = vm_map_find(map, object, foff, addr, size, fitit,
		    prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * If the process has requested that all future mappings
	 * be wired, then heed this.
	 */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_wire(map, *addr, *addr + size,
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
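
/*
 * Sketch of a hypothetical in-kernel caller (for illustration only; the
 * real callers are the mmap() syscall above, exec, and SysV shared
 * memory): create one page of anonymous memory in a target map, letting
 * the kernel choose the address.
 *
 *	vm_offset_t addr = 0;
 *	error = vm_mmap(&vmspace->vm_map, &addr, PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, MAP_ANON,
 *	    NULL, 0);
 */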