/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mac.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough that multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init(void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}
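
/*
 * Illustrative arithmetic for the initialization above (the concrete
 * numbers are assumptions; the real values depend on the machine): with
 * vm_kmem_size = 40MB and sizeof(struct vm_map_entry) = 48 bytes, the
 * division yields 41943040 / 48 = 873813 entries, and the /100 scaling
 * leaves max_proc_mmap = 8738 map entries per process (further scaled
 * by vm_refcnt in the check in mmap() below).
 */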

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 || COMPAT_SUNOS */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache
 * coherency is maintained as long as you do not write directly to the
 * underlying character device.
 */
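
/*
 * Illustrative userland sketch of the alignment rules described above
 * (not part of this file; PAGE_SIZE is assumed to be 4096 = 0x1000 and
 * error handling is omitted):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/data", O_RDWR);	// file assumed to exist
 *	// Page-aligned offset, NULL hint: the kernel picks the placement
 *	// and returns a page-aligned address.
 *	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 8192);
 *	// With MAP_FIXED, hint and offset must agree modulo PAGE_SIZE:
 *	// (addr 0x10100, pos 0x2100) is accepted, since both leave
 *	// remainder 0x100; (addr 0x10100, pos 0x2000) fails with EINVAL.
 */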
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	int disablexworkaround;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;
	vm_object_t obj;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	vp = NULL;
	fp = NULL;
	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
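
	/*
	 * Worked example of the adjustment above (assuming PAGE_SIZE =
	 * 4096 = 0x1000): for pos = 0x2100 and len = 0x3000, pageoff =
	 * 0x100, pos becomes 0x2000, and size = round_page(0x3000 +
	 * 0x100) = 0x4000, so the mapping covers the four pages holding
	 * file bytes [0x2100, 0x5100).
	 */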

	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	}
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr == 0 ||
	    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
	     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);

	mtx_lock(&Giant);	/* syscall marked mp-safe but isn't */
	do {
		if (flags & MAP_ANON) {
			/*
			 * Mapping blank space is trivial.
			 */
			handle = NULL;
			maxprot = VM_PROT_ALL;
			pos = 0;
			break;
		}
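		/*
		 * Illustrative userland call that reaches the MAP_ANON
		 * branch above (a sketch, not part of this file): the fd
		 * must be -1 and the offset is ignored (forced to 0):
		 *
		 *	char *p = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
		 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
		 */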
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and
		 * make sure it is of appropriate type.  Don't let the
		 * descriptor disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}

		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_NOSYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = fp->f_data;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error)
			goto done;
		if (vp->v_type != VREG && vp->v_type != VCHR) {
			error = EINVAL;
			goto done;
		}
		if (vp->v_type == VREG) {
			/*
			 * Get the proper underlying object
			 */
			if (VOP_GETVOBJECT(vp, &obj) != 0) {
				error = EINVAL;
				goto done;
			}
			if (obj->handle != vp) {
				vput(vp);
				vp = (struct vnode*)obj->handle;
				vget(vp, LK_EXCLUSIVE, td);
			}
		}
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory
		 * (ala SunOS).
		 */
		if ((vp->v_type == VCHR) &&
		    (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
			break;
		}
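		/*
		 * Sketch of the /dev/zero path handled above (userland,
		 * illustrative): mapping a D_MMAP_ANON character device
		 * is converted into an anonymous mapping, SunOS-style:
		 *
		 *	int fd = open("/dev/zero", O_RDWR);
		 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 *	    MAP_PRIVATE, fd, 0);	// acts like MAP_ANON
		 */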
		/*
		 * cdevs do not provide private mappings of any kind.
		 */
		/*
		 * However, for the XIG X server to continue to work,
		 * we should allow the superuser to do it anyway.
		 * We only allow it at securelevel < 1.
		 * (Because the XIG X server writes directly to video
		 * memory via /dev/mem, it should never work at any
		 * other securelevel.)
		 * XXX this will have to go
		 */
		if (securelevel_ge(td->td_ucred, 1))
			disablexworkaround = 1;
		else
			disablexworkaround = suser(td);
		if (vp->v_type == VCHR && disablexworkaround &&
		    (flags & (MAP_PRIVATE|MAP_COPY))) {
			error = EINVAL;
			goto done;
		}
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination?  What if
		 * proc does a setuid?
		 */
		maxprot = VM_PROT_EXECUTE;	/* ??? */
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			error = EACCES;
			goto done;
		}
		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.  Check for superuser, only if
		 * we're at securelevel < 1, to allow the XIG X server
		 * to continue to work.
		 */
		if ((flags & MAP_SHARED) != 0 ||
		    (vp->v_type == VCHR && disablexworkaround)) {
			if ((fp->f_flag & FWRITE) != 0) {
				struct vattr va;
				if ((error =
				    VOP_GETATTR(vp, &va,
						td->td_ucred, td))) {
					goto done;
				}
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0) {
					maxprot |= VM_PROT_WRITE;
				} else if (prot & PROT_WRITE) {
					error = EPERM;
					goto done;
				}
			} else if ((prot & PROT_WRITE) != 0) {
				error = EACCES;
				goto done;
			}
		} else {
			maxprot |= VM_PROT_WRITE;
		}

		handle = (void *)vp;
	} while (0);
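
	/*
	 * Consequence of the checks above (illustrative): a descriptor
	 * opened O_RDONLY can be mapped MAP_SHARED with PROT_READ, but
	 * asking for PROT_WRITE on it fails with EACCES, while a
	 * MAP_PRIVATE mapping of the same descriptor may still get
	 * PROT_WRITE, since writes then go to private copy-on-write
	 * pages rather than the file.
	 */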

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
		error = ENOMEM;
		goto done;
	}

	mtx_unlock(&Giant);
	error = 0;
#ifdef MAC
	if (handle != NULL && (flags & MAP_SHARED) != 0) {
		error = mac_check_vnode_mmap(td->td_ucred,
		    (struct vnode *)handle, prot);
	}
#endif
	if (error == 0)
		error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
		    flags, handle, pos);
	mtx_lock(&Giant);
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (vp)
		vput(vp);
	mtx_unlock(&Giant);
	if (fp)
		fdrop(fp, td);

	return (error);
}

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif /* COMPAT_43 */
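
/*
 * Worked conversion example for ommap() above (illustrative): an old
 * prot value of 5 (binary 101) indexes cvtbsdprot[5] and becomes
 * PROT_EXEC | PROT_READ, and old flags OMAP_ANON | OMAP_FIXED (0x0102)
 * translate to MAP_ANON | MAP_FIXED | MAP_PRIVATE, since OMAP_SHARED
 * is not set.
 */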

#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	mtx_lock(&Giant);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages within the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.  This
	 * can be incorrect if the region splits or is coalesced with a
	 * neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE) {
			rv = -1;
			goto done2;
		}
		addr = entry->start;
		size = entry->end - entry->start;
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);

done2:
	mtx_unlock(&Giant);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}
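
/*
 * Illustrative userland use of the interface above (a sketch): flushing
 * dirty pages of a shared file mapping back to disk.  Passing a length
 * of 0 flushes the whole map entry containing addr, per the XXX note in
 * msync() above.
 *
 *	msync(p, 8192, MS_SYNC);	// synchronous flush of two pages
 *	msync(p, 0, MS_ASYNC);		// start flushing the whole region
 */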

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);

	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(map, addr, addr + size);
	return (0);
}

#if 0
void
munmapfd(td, fd)
	struct thread *td;
	int fd;
{
	/*
	 * XXX should unmap any regions mapped to this file
	 */
	FILEDESC_LOCK(p->p_fd);
	td->td_proc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
	FILEDESC_UNLOCK(p->p_fd);
}
#endif
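
/*
 * Behavioral note with a small example (illustrative): because of the
 * vm_map_check_protection() test above, munmap() of a range containing
 * an unmapped hole fails with EINVAL, while unmapping the middle pages
 * of one large mapping succeeds and splits the remaining map entry:
 *
 *	char *p = mmap(NULL, 3 * 4096, PROT_READ,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	munmap(p + 4096, 4096);		// ok: leaves two 1-page pieces
 *	munmap(p, 3 * 4096);		// EINVAL: range now has a hole
 */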

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
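
/*
 * Illustrative use of the two calls above (userland sketch): on
 * architectures defining VM_PROT_READ_IS_EXEC, mprotect(p, len,
 * PROT_READ) also grants execute permission, as the code shows.
 * minherit() controls what a child sees across fork()/rfork(), e.g.:
 *
 *	minherit(p, 4096, INHERIT_SHARE);	// page stays shared with
 *						// children after fork()
 */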

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	vm_map_t map;
	struct proc *p;
	int error;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = suser(td);
		if (error == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			p->p_flag |= P_PROTECTED;
			PROC_UNLOCK(p);
		}
		return (error);
	}
	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}
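
/*
 * Illustrative calls accepted by the checks above (userland sketch;
 * behaviors between 0 and MADV_CORE pass the range test, and
 * MADV_PROTECT is special-cased before it):
 *
 *	madvise(p, len, MADV_SEQUENTIAL);  // expect sequential access
 *	madvise(p, len, MADV_FREE);        // pages may be discarded
 *	madvise(0, 0, MADV_PROTECT);       // superuser only: mark the
 *	                                   // process "immortal" to the
 *	                                   // swap pager
 */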

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	mtx_lock(&Giant);
	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				VM_OBJECT_LOCK(current->object.vm_object);
				m = vm_page_lookup(current->object.vm_object,
					pindex);
				VM_OBJECT_UNLOCK(current->object.vm_object);
				vm_page_lock_queues();
				/*
				 * if the page is resident, then gather
				 * information about it.
				 */
				if (m) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
				vm_page_unlock_queues();
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	mtx_unlock(&Giant);
	return (error);
}
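
/*
 * Userland sketch of the interface implemented above (illustrative):
 * one status byte is written per page of the queried range.
 *
 *	char vec[4];				// 4 pages = 16KB here
 *	mincore(p, 4 * 4096, vec);
 *	if (vec[0] & MINCORE_INCORE)		// first page is resident
 *		...
 *	if (vec[0] & MINCORE_MODIFIED_OTHER)	// page modified by someone
 *		...
 */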

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
	    td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(td);
	if (error)
		return (error);
#endif

	error = vm_map_wire(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
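
/*
 * Illustrative consequence of the limit checks above (the rlimit value
 * is an assumption): with a RLIMIT_MEMLOCK soft limit of 64KB, a
 * process that already has 60KB wired gets ENOMEM when it tries to
 * mlock() another 8KB, and any request that would push the system past
 * vm_page_max_wired fails with EAGAIN:
 *
 *	mlock(p, 8192);		// ENOMEM once the rlimit is exceeded
 */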

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	error = suser(td);
	if (error)
		return (error);
#endif

	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap()
 *
 * MPSAFE
 *
 * Internal version of mmap.  Currently used by mmap, exec, and sys5
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
	objtype_t type;
	int rv = KERN_SUCCESS;
	vm_ooffset_t objsize;
	int docow;
	struct thread *td = curthread;

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

	if (td->td_proc->p_vmspace->vm_map.size + size >
	    td->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		return (ENOMEM);
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmaping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		type = OBJT_DEFAULT;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		vp = (struct vnode *) handle;
		mtx_lock(&Giant);
		ASSERT_VOP_LOCKED(vp, "vm_mmap");
		if (vp->v_type == VCHR) {
			type = OBJT_DEVICE;
			handle = (void *)(intptr_t)vp->v_rdev;
		} else {
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat, td->td_ucred, td);
			if (error) {
				mtx_unlock(&Giant);
				return (error);
			}
			objsize = round_page(vat.va_size);
			type = OBJT_VNODE;
			/*
			 * if it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
		mtx_unlock(&Giant);
	}

	if (handle == NULL) {
		object = NULL;
		docow = 0;
	} else {
		object = vm_pager_allocate(type,
			handle, objsize, prot, foff);
		if (object == NULL) {
			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
		}
		docow = MAP_PREFAULT_PARTIAL;
	}

	/*
	 * Force device mappings to be shared.
	 */
	if (type == OBJT_DEVICE) {
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	}

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit)
		*addr = pmap_addr_hint(object, *addr, size);

	if (flags & MAP_STACK)
		rv = vm_map_stack (map, *addr, size, prot,
		    maxprot, docow);
	else
		rv = vm_map_find(map, object, foff, addr, size, fitit,
		    prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
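
/*
 * Sketch of an in-kernel caller of vm_mmap() above (illustrative, not
 * part of this file): mapping zero-filled anonymous memory into a map,
 * roughly what the sys5 shared memory and exec paths do.  The names
 * used exist in this file or the VM headers; the surrounding function
 * and the proc pointer "p" are hypothetical.
 *
 *	vm_offset_t addr = 0;
 *	int error;
 *
 *	error = vm_mmap(&p->p_vmspace->vm_map, &addr, PAGE_SIZE,
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_ANON, NULL, 0);
 *	// On success, addr holds the address the VM system chose (the
 *	// fitit case, since MAP_FIXED was not passed).
 */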