/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mac.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough that multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init(void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}

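/*
 * Illustrative note (not compiled): a rough worked example of the limit
 * computed above, using hypothetical numbers.  If vm_kmem_size were
 * 320 MB and a vm_map_entry were about 100 bytes, then
 *
 *	vm_kmem_size / sizeof(struct vm_map_entry)	~= 3,350,000
 *	max_proc_mmap = 3,350,000 / 100			~=    33,500
 *
 * i.e. each process could create on the order of tens of thousands of
 * map entries before mmap() starts failing with ENOMEM.  The real values
 * depend on the platform and on how vm_kmem_size was sized at boot.
 */
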
static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct vnode *, vm_ooffset_t, vm_object_t *);
static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct cdev *, vm_ooffset_t, vm_object_t *);

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 */
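
/*
 * Illustrative userland sketch (not part of the kernel): how the
 * alignment rules above look from the caller's side.  The file name,
 * offset and sizes below are made up, and error checking is omitted.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/example.dat", O_RDONLY);
 *	// 0x1234 is not page aligned: the mapping starts at
 *	// trunc_page(0x1234) and the returned pointer is advanced by
 *	// 0x234 (assuming 4 KB pages), so *p is the byte at offset 0x1234.
 *	char *p = mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 0x1234);
 *	// With MAP_FIXED the hint and the offset must agree modulo
 *	// PAGE_SIZE, otherwise the call fails with EINVAL.
 */
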
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
	struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	objtype_t handle_type;
	int flags, error;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	fp = NULL;
	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		PROC_LOCK(td->td_proc);
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td->td_proc, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td->td_proc, RLIMIT_DATA));
		PROC_UNLOCK(td->td_proc);
	}
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		handle_type = OBJT_DEFAULT;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and
		 * make sure it is of appropriate type.  Don't let the
		 * descriptor disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_NOSYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = fp->f_vnode;
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination? What if
		 * proc does a setuid?
		 */
		if (vp->v_mount != NULL && vp->v_mount->mnt_flag & MNT_NOEXEC)
			maxprot = VM_PROT_NONE;
		else
			maxprot = VM_PROT_EXECUTE;
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			error = EACCES;
			goto done;
		}
		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.
		 */
		if ((flags & MAP_SHARED) != 0) {
			if ((fp->f_flag & FWRITE) != 0) {
				maxprot |= VM_PROT_WRITE;
			} else if ((prot & PROT_WRITE) != 0) {
				error = EACCES;
				goto done;
			}
		} else if (vp->v_type != VCHR || (fp->f_flag & FWRITE) != 0) {
			maxprot |= VM_PROT_WRITE;
		}
		handle = (void *)vp;
		handle_type = OBJT_VNODE;
	}

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
		error = ENOMEM;
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
	    flags, handle_type, handle, pos);
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif				/* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	default:
		return (EINVAL);
	}
}

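/*
 * Illustrative userland sketch (not part of the kernel): the flag and
 * range checks above as seen by a caller.  buf and len are hypothetical.
 *
 *	msync(buf, len, MS_SYNC);			// waits for the writes
 *	msync(buf, len, MS_ASYNC | MS_INVALIDATE);	// EINVAL, mutually exclusive
 *	msync(buf, (size_t)-1, MS_SYNC);		// EINVAL, addr + size wraps
 */
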
#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	if (size == 0)
		return (EINVAL);

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_delete(map, addr, addr + size);
	vm_map_unlock(map);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	vm_map_t map;
	struct proc *p;
	int error;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = suser(td);
		if (error == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			p->p_flag |= P_PROTECTED;
			PROC_UNLOCK(p);
		}
		return (error);
	}
	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}

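/*
 * Illustrative userland sketch (not part of the kernel): the two paths
 * through madvise() above.  MADV_PROTECT takes the early, privileged
 * path and never looks at the address range; every other behavior must
 * fall in [0, MADV_CORE] and is handed to vm_map_madvise().  The region
 * below is hypothetical.
 *
 *	madvise(region, len, MADV_WILLNEED);	// ordinary advice, any user
 *	madvise(NULL, 0, MADV_PROTECT);		// superuser only; sets P_PROTECTED
 */
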
#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				VM_OBJECT_LOCK(current->object.vm_object);
				m = vm_page_lookup(current->object.vm_object,
				    pindex);
				/*
				 * if the page is resident, then gather
				 * information about it.
				 */
				if (m != NULL && m->valid != 0) {
					mincoreinfo = MINCORE_INCORE;
					vm_page_lock_queues();
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
					vm_page_unlock_queues();
				}
				VM_OBJECT_UNLOCK(current->object.vm_object);
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure
			 * that the byte vector is zeroed for those skipped
			 * entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}

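/*
 * Illustrative userland sketch (not part of the kernel): the caller's
 * view of the scan above.  The vector receives one status byte per page
 * of the request, so it is sized from the rounded page count.  The
 * mapping below is hypothetical and error checking is omitted.
 *
 *	char *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
 *	size_t npages = (len + getpagesize() - 1) / getpagesize();
 *	char *vec = malloc(npages);
 *	if (mincore(base, len, vec) == 0 && (vec[0] & MINCORE_INCORE))
 *		;	// first page is resident
 */
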
#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	struct proc *proc;
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	int error;

	error = suser(td);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	proc = td->td_proc;
	PROC_LOCK(proc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
	    lim_cur(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

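/*
 * Illustrative note (not compiled): the rounding and the order of the
 * checks above, with hypothetical numbers.  Locking 8 KB starting 16
 * bytes into a page touches three pages:
 *
 *	addr  = 0x10010, len = 0x2000
 *	start = trunc_page(addr)       = 0x10000
 *	end   = round_page(addr + len) = 0x13000	-> npages = 3
 *
 * The request is refused with ENOMEM if npages alone exceeds
 * vm_page_max_wired or the process would exceed RLIMIT_MEMLOCK, and
 * with EAGAIN if it would push the global wired page count over
 * vm_page_max_wired.
 */
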
#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = 0;

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

#if 0
	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	PROC_LOCK(td->td_proc);
	if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map)) >
	    lim_cur(td->td_proc, RLIMIT_MEMLOCK))) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);
#else
	error = suser(td);
	if (error)
		return (error);
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall(). vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = suser(td);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr, end, last, start;
	vm_size_t size;
	int error;

	error = suser(td);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t foff, vm_object_t *objp)
{
	struct vattr va;
	void *handle;
	vm_object_t obj;
	struct mount *mp;
	int error, flags, type;
	int vfslocked;

	mp = vp->v_mount;
	vfslocked = VFS_LOCK_GIANT(mp);
	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0) {
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}
	flags = *flagsp;
	obj = vp->v_object;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (obj == NULL) {
			error = EINVAL;
			goto done;
		}
		if (obj->handle != vp) {
			vput(vp);
			vp = (struct vnode*)obj->handle;
			vget(vp, LK_EXCLUSIVE, td);
		}
		type = OBJT_VNODE;
		handle = vp;
	} else if (vp->v_type == VCHR) {
		type = OBJT_DEVICE;
		handle = vp->v_rdev;

		/* XXX: lack threadref on device */
		if (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON) {
			*maxprotp = VM_PROT_ALL;
			*flagsp |= MAP_ANON;
			error = 0;
			goto done;
		}
		/*
		 * cdevs do not provide private mappings of any kind.
		 */
		if ((*maxprotp & VM_PROT_WRITE) == 0 &&
		    (prot & PROT_WRITE) != 0) {
			error = EACCES;
			goto done;
		}
		if (flags & (MAP_PRIVATE|MAP_COPY)) {
			error = EINVAL;
			goto done;
		}
		/*
		 * Force device mappings to be shared.
		 */
		flags |= MAP_SHARED;
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, td->td_ucred, td))) {
		goto done;
	}
#ifdef MAC
	error = mac_check_vnode_mmap(td->td_ucred, vp, prot, flags);
	if (error != 0)
		goto done;
#endif
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
	}
	/*
	 * If it is a regular file with no links remaining, we do not
	 * need to sync it.
	 * Adjust the object size to be the size of the actual file.
	 */
	if (vp->v_type == VREG) {
		objsize = round_page(va.va_size);
		if (va.va_nlink == 0)
			flags |= MAP_NOSYNC;
	}
	obj = vm_pager_allocate(type, handle, objsize, prot, foff);
	if (obj == NULL) {
		error = (type == OBJT_DEVICE ? EINVAL : ENOMEM);
		goto done;
	}
	*objp = obj;
	*flagsp = flags;
done:
	vput(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * vm_mmap_cdev()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on cdevs.
 */
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct cdev *cdev, vm_ooffset_t foff, vm_object_t *objp)
{
	vm_object_t obj;
	int flags;

	flags = *flagsp;

	/* XXX: lack threadref on device */
	if (cdev->si_devsw->d_flags & D_MMAP_ANON) {
		*maxprotp = VM_PROT_ALL;
		*flagsp |= MAP_ANON;
		return (0);
	}
	/*
	 * cdevs do not provide private mappings of any kind.
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & PROT_WRITE) != 0)
		return (EACCES);
	if (flags & (MAP_PRIVATE|MAP_COPY))
		return (EINVAL);
	/*
	 * Force device mappings to be shared.
	 */
	flags |= MAP_SHARED;
#ifdef MAC_XXX
	error = mac_check_cdev_mmap(td->td_ucred, cdev, prot);
	if (error != 0)
		return (error);
#endif
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, foff);
	if (obj == NULL)
		return (EINVAL);
	*objp = obj;
	*flagsp = flags;
	return (0);
}

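/*
 * Illustrative note (not compiled): the D_MMAP_ANON short cut above is
 * what lets a memory-like device be mapped as anonymous memory.  For a
 * driver whose cdevsw sets D_MMAP_ANON (the zero(4)-style devices are
 * the traditional example), a call along the lines of
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE, fd_of_such_a_device, 0);
 *
 * comes back with maxprot forced to VM_PROT_ALL and MAP_ANON turned on,
 * so no device pager object is created for it at all.
 */
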
122298df9218SJohn Baldwin /*
1223d2c60af8SMatthew Dillon  * vm_mmap()
1224d2c60af8SMatthew Dillon  *
1225d2c60af8SMatthew Dillon  * MPSAFE
1226d2c60af8SMatthew Dillon  *
1227d2c60af8SMatthew Dillon  * Internal version of mmap.  Currently used by mmap, exec, and System V
1228d2c60af8SMatthew Dillon  * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
1229df8bae1dSRodney W. Grimes  */
1230df8bae1dSRodney W. Grimes int
1231b9dcd593SBruce Evans vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1232b9dcd593SBruce Evans     vm_prot_t maxprot, int flags,
123398df9218SJohn Baldwin     objtype_t handle_type, void *handle,
1234b9dcd593SBruce Evans     vm_ooffset_t foff)
1235df8bae1dSRodney W. Grimes {
1236df8bae1dSRodney W. Grimes 	boolean_t fitit;
1237fcae040bSJohn Dyson 	vm_object_t object;
1238df8bae1dSRodney W. Grimes 	int rv = KERN_SUCCESS;
1239bd7e5f99SJohn Dyson 	vm_ooffset_t objsize;
124020eec4bbSAlan Cox 	int docow, error;
1241b40ce416SJulian Elischer 	struct thread *td = curthread;
1242df8bae1dSRodney W. Grimes
1243df8bae1dSRodney W. Grimes 	if (size == 0)
1244df8bae1dSRodney W. Grimes 		return (0);
1245df8bae1dSRodney W. Grimes
124606cb7259SDavid Greenman 	objsize = size = round_page(size);
1247df8bae1dSRodney W. Grimes
124891d5354aSJohn Baldwin 	PROC_LOCK(td->td_proc);
1249070f64feSMatthew Dillon 	if (td->td_proc->p_vmspace->vm_map.size + size >
125091d5354aSJohn Baldwin 	    lim_cur(td->td_proc, RLIMIT_VMEM)) {
125191d5354aSJohn Baldwin 		PROC_UNLOCK(td->td_proc);
1252070f64feSMatthew Dillon 		return (ENOMEM);
1253070f64feSMatthew Dillon 	}
125491d5354aSJohn Baldwin 	PROC_UNLOCK(td->td_proc);
1255070f64feSMatthew Dillon
1256df8bae1dSRodney W. Grimes 	/*
1257bc9ad247SDavid Greenman 	 * We can currently only deal with page-aligned file offsets.
1258bc9ad247SDavid Greenman 	 * The check is here rather than in the syscall because the
1259bc9ad247SDavid Greenman 	 * kernel calls this function internally for other mmapping
1260bc9ad247SDavid Greenman 	 * operations (such as in exec) and non-aligned offsets will
1261bc9ad247SDavid Greenman 	 * cause pmap inconsistencies, so we want to be sure to
1262bc9ad247SDavid Greenman 	 * disallow this in all cases.
1263bc9ad247SDavid Greenman 	 */
1264bc9ad247SDavid Greenman 	if (foff & PAGE_MASK)
1265bc9ad247SDavid Greenman 		return (EINVAL);
1266bc9ad247SDavid Greenman
126706cb7259SDavid Greenman 	if ((flags & MAP_FIXED) == 0) {
126806cb7259SDavid Greenman 		fitit = TRUE;
126906cb7259SDavid Greenman 		*addr = round_page(*addr);
127006cb7259SDavid Greenman 	} else {
127106cb7259SDavid Greenman 		if (*addr != trunc_page(*addr))
127206cb7259SDavid Greenman 			return (EINVAL);
127306cb7259SDavid Greenman 		fitit = FALSE;
127406cb7259SDavid Greenman 		(void) vm_map_remove(map, *addr, *addr + size);
127506cb7259SDavid Greenman 	}
1276bc9ad247SDavid Greenman 	/*
127724a1cce3SDavid Greenman 	 * Lookup/allocate object.
1278df8bae1dSRodney W. Grimes 	 */
127998df9218SJohn Baldwin 	switch (handle_type) {
128098df9218SJohn Baldwin 	case OBJT_DEVICE:
128198df9218SJohn Baldwin 		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags,
128298df9218SJohn Baldwin 		    handle, foff, &object);
128398df9218SJohn Baldwin 		break;
128498df9218SJohn Baldwin 	case OBJT_VNODE:
1285c8daea13SAlexander Kabaev 		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
1286c8daea13SAlexander Kabaev 		    handle, foff, &object);
128798df9218SJohn Baldwin 		break;
128898df9218SJohn Baldwin 	case OBJT_DEFAULT:
128998df9218SJohn Baldwin 		if (handle == NULL) {
129098df9218SJohn Baldwin 			error = 0;
129198df9218SJohn Baldwin 			break;
129298df9218SJohn Baldwin 		}
129398df9218SJohn Baldwin 		/* FALLTHROUGH */
129498df9218SJohn Baldwin 	default:
129598df9218SJohn Baldwin 		error = EINVAL;
129698df9218SJohn Baldwin 	}
129798df9218SJohn Baldwin 	if (error)
1298c8daea13SAlexander Kabaev 		return (error);
12995f55e841SDavid Greenman 	if (flags & MAP_ANON) {
1300c8daea13SAlexander Kabaev 		object = NULL;
1301c8daea13SAlexander Kabaev 		docow = 0;
13025f55e841SDavid Greenman 		/*
13035f55e841SDavid Greenman 		 * Unnamed anonymous regions always start at 0.
13045f55e841SDavid Greenman 		 */
130567bf6868SJohn Dyson 		if (handle == 0)
13065f55e841SDavid Greenman 			foff = 0;
13075f55e841SDavid Greenman 	} else {
13084738fa09SAlan Cox 		docow = MAP_PREFAULT_PARTIAL;
130994328e90SJohn Dyson 	}
1310df8bae1dSRodney W. Grimes
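	/*
	 * Translate the remaining mmap(2) flags into vm_map "docow" flags:
	 * a private file mapping (neither MAP_ANON nor MAP_SHARED) is
	 * copy-on-write, MAP_NOSYNC keeps the syncer away from the range,
	 * and MAP_NOCORE excludes the range from core dumps.
	 */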
13114f79d873SMatthew Dillon 	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
13124738fa09SAlan Cox 		docow |= MAP_COPY_ON_WRITE;
13134f79d873SMatthew Dillon 	if (flags & MAP_NOSYNC)
13144f79d873SMatthew Dillon 		docow |= MAP_DISABLE_SYNCER;
13159730a5daSPaul Saab 	if (flags & MAP_NOCORE)
13169730a5daSPaul Saab 		docow |= MAP_DISABLE_COREDUMP;
13175850152dSJohn Dyson
1318d0aea04fSJohn Dyson #if defined(VM_PROT_READ_IS_EXEC)
1319d0aea04fSJohn Dyson 	if (prot & VM_PROT_READ)
1320d0aea04fSJohn Dyson 		prot |= VM_PROT_EXECUTE;
1321d0aea04fSJohn Dyson
1322d0aea04fSJohn Dyson 	if (maxprot & VM_PROT_READ)
1323d0aea04fSJohn Dyson 		maxprot |= VM_PROT_EXECUTE;
1324d0aea04fSJohn Dyson #endif
1325d0aea04fSJohn Dyson
1326e4ca250dSJohn Baldwin 	if (fitit)
13270a0a85b3SJohn Dyson 		*addr = pmap_addr_hint(object, *addr, size);
13280a0a85b3SJohn Dyson
13292267af78SJulian Elischer 	if (flags & MAP_STACK)
1330fd75d710SMarcel Moolenaar 		rv = vm_map_stack(map, *addr, size, prot, maxprot,
1331fd75d710SMarcel Moolenaar 		    docow | MAP_STACK_GROWS_DOWN);
13322267af78SJulian Elischer 	else
1333bd7e5f99SJohn Dyson 		rv = vm_map_find(map, object, foff, addr, size, fitit,
1334bd7e5f99SJohn Dyson 		    prot, maxprot, docow);
1335bd7e5f99SJohn Dyson
1336d2c60af8SMatthew Dillon 	if (rv != KERN_SUCCESS) {
13377fb0c17eSDavid Greenman 		/*
133824a1cce3SDavid Greenman 		 * Drop the object reference.  This will destroy the object
133924a1cce3SDavid Greenman 		 * if it is an unnamed anonymous mapping or a named anonymous
134024a1cce3SDavid Greenman 		 * mapping with no other references.
13417fb0c17eSDavid Greenman 		 */
1342df8bae1dSRodney W. Grimes 		vm_object_deallocate(object);
1343d2c60af8SMatthew Dillon 	} else if (flags & MAP_SHARED) {
1344df8bae1dSRodney W. Grimes 		/*
1345df8bae1dSRodney W. Grimes 		 * Shared memory is also shared with children.
1346df8bae1dSRodney W. Grimes 		 */
1347df8bae1dSRodney W. Grimes 		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
1348e4ca250dSJohn Baldwin 		if (rv != KERN_SUCCESS)
13497fb0c17eSDavid Greenman 			(void) vm_map_remove(map, *addr, *addr + size);
1350df8bae1dSRodney W. Grimes 	}
1351abd498aaSBruce M Simpson
1352abd498aaSBruce M Simpson 	/*
1353abd498aaSBruce M Simpson 	 * If the process has requested that all future mappings
1354abd498aaSBruce M Simpson 	 * be wired, then heed this.
1355abd498aaSBruce M Simpson 	 */
1356abd498aaSBruce M Simpson 	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
1357abd498aaSBruce M Simpson 		vm_map_wire(map, *addr, *addr + size,
1358abd498aaSBruce M Simpson 		    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
1359abd498aaSBruce M Simpson
1360df8bae1dSRodney W. Grimes 	switch (rv) {
1361df8bae1dSRodney W. Grimes 	case KERN_SUCCESS:
1362df8bae1dSRodney W. Grimes 		return (0);
1363df8bae1dSRodney W. Grimes 	case KERN_INVALID_ADDRESS:
1364df8bae1dSRodney W. Grimes 	case KERN_NO_SPACE:
1365df8bae1dSRodney W. Grimes 		return (ENOMEM);
1366df8bae1dSRodney W. Grimes 	case KERN_PROTECTION_FAILURE:
1367df8bae1dSRodney W. Grimes 		return (EACCES);
1368df8bae1dSRodney W. Grimes 	default:
1369df8bae1dSRodney W. Grimes 		return (EINVAL);
1370df8bae1dSRodney W. Grimes 	}
1371df8bae1dSRodney W. Grimes }
1372
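/*
 * Illustrative sketch (hypothetical, not part of the original file): how an
 * in-kernel caller, e.g. the System V shared memory code mentioned in the
 * vm_mmap() header comment, might establish an anonymous mapping in the
 * current process.  "len" is assumed to be supplied by the caller.
 */
#if 0
	vm_map_t map = &curthread->td_proc->p_vmspace->vm_map;
	vm_offset_t addr = 0;
	int error;

	error = vm_mmap(map, &addr, round_page(len),
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, MAP_ANON,
	    OBJT_DEFAULT, NULL, 0);
	if (error != 0)
		return (error);		/* ENOMEM, EACCES, or EINVAL */
	/* On success, "addr" holds the address chosen by vm_map_find(). */
#endif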