/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mac.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel running out of resources
 * if attacked from a compromised user account but generous enough such that
 * multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init(void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}

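/*
 * Illustrative sizing only (assumed figures, not measured): with a
 * vm_kmem_size of 64 MB and vm_map_entry structures of roughly 64 bytes
 * each, the computation above allows on the order of 10000 map entries
 * per process before mmap() starts failing with ENOMEM.
 */
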
/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 */
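/*
 * Illustrative example of the alignment rule above, assuming 4 KB pages:
 * a userland call such as
 *
 *	p = mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 0x1234);
 *
 * maps the file starting at trunc_page(0x1234) == 0x1000 and returns a
 * pointer whose low bits are 0x234, so *p refers to the byte at file
 * offset 0x1234.
 */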
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	int disablexworkaround;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;
	vm_object_t obj;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	vp = NULL;
	fp = NULL;
	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	}
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr == 0 ||
	    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
	     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);

	mtx_lock(&Giant);	/* syscall marked mp-safe but isn't */
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and make
		 * sure it is of appropriate type.  Don't let the descriptor
		 * disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}

		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_NOSYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = fp->f_data;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error)
			goto done;
		if (vp->v_type != VREG && vp->v_type != VCHR) {
			error = EINVAL;
			goto done;
		}
		if (vp->v_type == VREG) {
			/*
			 * Get the proper underlying object
			 */
			if (VOP_GETVOBJECT(vp, &obj) != 0) {
				error = EINVAL;
				goto done;
			}
			if (obj->handle != vp) {
				vput(vp);
				vp = (struct vnode*)obj->handle;
				vget(vp, LK_EXCLUSIVE, td);
			}
		}
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if ((vp->v_type == VCHR) &&
		    (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			/*
			 * However, for XIG X server to continue to work,
			 * we should allow the superuser to do it anyway.
			 * We only allow it at securelevel < 1.
			 * (Because the XIG X server writes directly to video
			 * memory via /dev/mem, it should never work at any
			 * other securelevel.)
			 * XXX this will have to go
			 */
			if (securelevel_ge(td->td_ucred, 1))
				disablexworkaround = 1;
			else
				disablexworkaround = suser(td);
			if (vp->v_type == VCHR && disablexworkaround &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 ||
			    (vp->v_type == VCHR && disablexworkaround)) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error =
					    VOP_GETATTR(vp, &va,
						td->td_ucred, td))) {
						goto done;
					}
					if ((va.va_flags &
					    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}

			handle = (void *)vp;
		}
	}

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
		error = ENOMEM;
		goto done;
	}

	mtx_unlock(&Giant);
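	/*
	 * All checks passed: optionally consult the MAC policy for shared
	 * vnode mappings, then hand the request to vm_mmap(), which builds
	 * the backing VM object and the map entry itself.
	 */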
	error = 0;
#ifdef MAC
	if (handle != NULL && (flags & MAP_SHARED) != 0) {
		error = mac_check_vnode_mmap(td->td_ucred,
		    (struct vnode *)handle, prot);
	}
#endif
	if (error == 0)
		error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
		    flags, handle, pos);
	mtx_lock(&Giant);
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (vp)
		vput(vp);
	mtx_unlock(&Giant);
	if (fp)
		fdrop(fp, td);

	return (error);
}

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
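	/*
	 * Protection translation table for the old 4.3BSD interface: in the
	 * historic encoding bit 0x1 requested execute, 0x2 write and 0x4
	 * read permission; each combination is mapped below onto the
	 * corresponding modern PROT_* value.
	 */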
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif				/* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	mtx_lock(&Giant);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages within the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE) {
			rv = -1;
			goto done2;
		}
		addr = entry->start;
		size = entry->end - entry->start;
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);

done2:
	mtx_unlock(&Giant);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);

	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(map, addr, addr + size);
	return (0);
}

#if 0
void
munmapfd(td, fd)
	struct thread *td;
	int fd;
{
	/*
	 * XXX should unmap any regions mapped to this file
	 */
	FILEDESC_LOCK(p->p_fd);
	td->td_proc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
	FILEDESC_UNLOCK(p->p_fd);
}
#endif

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	vm_map_t map;
	struct proc *p;
	int error;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = suser(td);
		if (error == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			p->p_flag |= P_PROTECTED;
			PROC_UNLOCK(p);
		}
		return (error);
	}
	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif
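/*
 * mincore reports one status byte per page of the requested range in the
 * user-supplied vector: MINCORE_INCORE for resident pages, plus
 * MINCORE_MODIFIED_* and MINCORE_REFERENCED_* bits gathered from the pmap
 * and from the page found in the backing VM object.
 */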

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	mtx_lock(&Giant);
	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				VM_OBJECT_LOCK(current->object.vm_object);
				m = vm_page_lookup(current->object.vm_object,
					pindex);
				VM_OBJECT_UNLOCK(current->object.vm_object);
				vm_page_lock_queues();
				/*
				 * if the page is resident, then gather information about
				 * it.
				 */
				if (m) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
						pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
						pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
				vm_page_unlock_queues();
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

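	/*
	 * Deny the request if it would take the process past its
	 * RLIMIT_MEMLOCK limit on wired memory; on platforms without a
	 * pmap wired-page count we fall back to requiring superuser
	 * privilege instead.
	 */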
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
	    td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(td);
	if (error)
		return (error);
#endif

	error = vm_map_wire(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	error = suser(td);
	if (error)
		return (error);
#endif

	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap()
 *
 * MPSAFE
 *
 * Internal version of mmap.  Currently used by mmap, exec, and sys5
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
	objtype_t type;
	int rv = KERN_SUCCESS;
	vm_ooffset_t objsize;
	int docow;
	struct thread *td = curthread;

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

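	/*
	 * Refuse the mapping if it would push the total size of the
	 * process's address space past its RLIMIT_VMEM limit.
	 */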
	if (td->td_proc->p_vmspace->vm_map.size + size >
	    td->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		return(ENOMEM);
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmaping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		type = OBJT_DEFAULT;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		vp = (struct vnode *) handle;
		mtx_lock(&Giant);
		ASSERT_VOP_LOCKED(vp, "vm_mmap");
		if (vp->v_type == VCHR) {
			type = OBJT_DEVICE;
			handle = (void *)(intptr_t)vp->v_rdev;
		} else {
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat, td->td_ucred, td);
			if (error) {
				mtx_unlock(&Giant);
				return (error);
			}
			objsize = round_page(vat.va_size);
			type = OBJT_VNODE;
			/*
			 * if it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
		mtx_unlock(&Giant);
	}

	if (handle == NULL) {
		object = NULL;
		docow = 0;
	} else {
		object = vm_pager_allocate(type,
			handle, objsize, prot, foff);
		if (object == NULL) {
			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
		}
		docow = MAP_PREFAULT_PARTIAL;
	}

	/*
	 * Force device mappings to be shared.
	 */
	if (type == OBJT_DEVICE) {
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	}

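	/*
	 * Translate the remaining mmap flags into copy-on-write and
	 * sync/coredump control bits for the new map entry.
	 */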
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit)
		*addr = pmap_addr_hint(object, *addr, size);

	if (flags & MAP_STACK)
		rv = vm_map_stack (map, *addr, size, prot,
				   maxprot, docow);
	else
		rv = vm_map_find(map, object, foff, addr, size, fitit,
				 prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}