/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mac.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough that multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init(void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}
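
/*
 * Illustrative arithmetic (not from the source; the numbers are
 * hypothetical): with a vm_kmem_size of 200 MB and a vm_map_entry of
 * roughly 100 bytes, the pool could hold about 2,000,000 entries, so the
 * 1/100 rule above yields a default max_proc_mmap of about 20,000 map
 * entries per process.  mmap() below additionally scales this limit by
 * vm_refcnt, the number of processes sharing the vmspace.
 */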

static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct vnode *, vm_ooffset_t, vm_object_t *);

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 */
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
	struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	fp = NULL;
	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		PROC_LOCK(td->td_proc);
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td->td_proc, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td->td_proc, RLIMIT_DATA));
		PROC_UNLOCK(td->td_proc);
	}
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping a file, so get fp for validation.  Obtain the vnode
		 * and make sure it is of the appropriate type.  Don't let the
		 * descriptor disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = fp->f_vnode;
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination?  What if
		 * proc does a setuid?
		 */
		if (vp->v_mount != NULL && vp->v_mount->mnt_flag & MNT_NOEXEC)
			maxprot = VM_PROT_NONE;
		else
			maxprot = VM_PROT_EXECUTE;
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			error = EACCES;
			goto done;
		}
		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.
		 */
		if ((flags & MAP_SHARED) != 0) {
			if ((fp->f_flag & FWRITE) != 0) {
				maxprot |= VM_PROT_WRITE;
			} else if ((prot & PROT_WRITE) != 0) {
				error = EACCES;
				goto done;
			}
		} else if (vp->v_type != VCHR || (fp->f_flag & FWRITE) != 0) {
			maxprot |= VM_PROT_WRITE;
		}
		handle = (void *)vp;
	}

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
		error = ENOMEM;
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos);
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}
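
/*
 * Illustrative userland sketch (not part of this file) of the alignment
 * rules described in the comment above mmap(): a non-page-aligned offset
 * is accepted, the mapping itself starts at trunc_page() of the requested
 * position, and the returned pointer is bumped by the page offset.  The
 * file name, length, and page size below are hypothetical.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/example.dat", O_RDONLY);	// hypothetical file
 *	off_t off = 100;				// not page aligned
 *	// Without MAP_FIXED this is fine: the kernel maps starting at the
 *	// page containing 'off' and returns base + (off & PAGE_MASK).
 *	char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, off);
 *	if (p == MAP_FAILED)
 *		err(1, "mmap");
 *	// With MAP_FIXED, addr and off must agree modulo PAGE_SIZE, or the
 *	// call fails with EINVAL (see the MAP_FIXED branch above).
 */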

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif				/* COMPAT_43 */
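
/*
 * Illustrative note (not from the source): the old 4.3BSD protection
 * encoding decoded by cvtbsdprot above uses 0x1 for execute, 0x2 for
 * write, and 0x4 for read.  For example, an old prot value of 0x6
 * (read | write) indexes entry 6 of the table and is translated to
 * PROT_WRITE | PROT_READ before being handed to the new mmap().
 */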


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	default:
		return (EINVAL);
	}
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	if (size == 0)
		return (EINVAL);

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_delete(map, addr, addr + size);
	vm_map_unlock(map);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
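
/*
 * Illustrative sketch (not part of this file): because the code above
 * truncates addr to a page boundary and rounds len up, protection changes
 * always cover whole pages.  Hypothetical userland fragment, assuming a
 * 4096-byte page:
 *
 *	char *buf = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	// Asking for a single byte still write-protects the entire first
 *	// page: buf[0] through buf[4095] become read-only.
 *	mprotect(buf + 10, 1, PROT_READ);
 */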

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
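
/*
 * Illustrative sketch (not part of this file): a userland use of
 * minherit(2) as handled above, marking a scratch region so that it is
 * not duplicated into children on fork().  Region size is hypothetical.
 *
 *	void *scratch = mmap(NULL, 4 * getpagesize(),
 *	    PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
 *	if (minherit(scratch, 4 * getpagesize(), INHERIT_NONE) == -1)
 *		err(1, "minherit");
 *	// After fork(), the child sees this range as unmapped.
 */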

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	vm_map_t map;
	struct proc *p;
	int error;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = suser(td);
		if (error == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			p->p_flag |= P_PROTECTED;
			PROC_UNLOCK(p);
		}
		return (error);
	}
	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				VM_OBJECT_LOCK(current->object.vm_object);
				m = vm_page_lookup(current->object.vm_object,
				    pindex);
				/*
				 * if the page is resident, then gather information about
				 * it.
				 */
				if (m != NULL && m->valid != 0) {
					mincoreinfo = MINCORE_INCORE;
					vm_page_lock_queues();
					if (m->dirty ||
					    pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
					vm_page_unlock_queues();
				}
				VM_OBJECT_UNLOCK(current->object.vm_object);
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}
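
/*
 * Illustrative sketch (not part of this file): how the byte vector filled
 * in above is consumed from userland.  One status byte is produced per
 * page of the rounded range; values combine MINCORE_INCORE with the
 * MODIFIED/REFERENCED bits gathered from the pmap or the resident page.
 * The buffer size and region below are hypothetical.
 *
 *	char vec[16];
 *	char *base = ...;		// some mapped region of 16 pages
 *	if (mincore(base, 16 * getpagesize(), vec) == 0) {
 *		for (int i = 0; i < 16; i++)
 *			if (vec[i] & MINCORE_INCORE)
 *				printf("page %d is resident\n", i);
 *	}
 */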

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	struct proc *proc;
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	int error;

	error = suser(td);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	proc = td->td_proc;
	PROC_LOCK(proc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
	    lim_cur(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = 0;

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

#if 0
	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	PROC_LOCK(td->td_proc);
	if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map)) >
	    lim_cur(td->td_proc, RLIMIT_MEMLOCK))) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);
#else
	error = suser(td);
	if (error)
		return (error);
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall().  vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = suser(td);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr, end, last, start;
	vm_size_t size;
	int error;

	error = suser(td);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t foff, vm_object_t *objp)
{
	struct vattr va;
	void *handle;
	vm_object_t obj;
	int error, flags, type;

	mtx_lock(&Giant);
	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0) {
		mtx_unlock(&Giant);
		return (error);
	}
	flags = *flagsp;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (VOP_GETVOBJECT(vp, &obj) != 0) {
			error = EINVAL;
			goto done;
		}
		if (obj->handle != vp) {
			vput(vp);
			vp = (struct vnode*)obj->handle;
			vget(vp, LK_EXCLUSIVE, td);
		}
		type = OBJT_VNODE;
		handle = vp;
	} else if (vp->v_type == VCHR) {
		type = OBJT_DEVICE;
		handle = vp->v_rdev;

		/* XXX: lacks a thread ref on the device */
		if (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON) {
			*maxprotp = VM_PROT_ALL;
			*flagsp |= MAP_ANON;
			error = 0;
			goto done;
		}
		/*
		 * cdevs do not provide private mappings of any kind.
		 */
		if ((*maxprotp & VM_PROT_WRITE) == 0 &&
		    (prot & PROT_WRITE) != 0) {
			error = EACCES;
			goto done;
		}
		if (flags & (MAP_PRIVATE|MAP_COPY)) {
			error = EINVAL;
			goto done;
		}
		/*
		 * Force device mappings to be shared.
		 */
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, td->td_ucred, td))) {
		goto done;
	}
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
#ifdef MAC
		error = mac_check_vnode_mmap(td->td_ucred, vp, prot);
		if (error != 0)
			goto done;
#endif
	}
	/*
	 * If it is a regular file without any references
	 * we do not need to sync it.
	 * Adjust object size to be the size of actual file.
	 */
	if (vp->v_type == VREG) {
		objsize = round_page(va.va_size);
		if (va.va_nlink == 0)
			flags |= MAP_NOSYNC;
	}
	obj = vm_pager_allocate(type, handle, objsize, prot, foff);
	if (obj == NULL) {
		error = (type == OBJT_DEVICE ? EINVAL : ENOMEM);
		goto done;
	}
	*objp = obj;
	*flagsp = flags;
done:
	vput(vp);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * vm_mmap()
 *
 * MPSAFE
 *
 * Internal version of mmap.  Currently used by mmap, exec, and sys5
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	int rv = KERN_SUCCESS;
	vm_ooffset_t objsize;
	int docow, error;
	struct thread *td = curthread;

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

	PROC_LOCK(td->td_proc);
	if (td->td_proc->p_vmspace->vm_map.size + size >
	    lim_cur(td->td_proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmaping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}
	/*
	 * Lookup/allocate object.
	 */
	if (handle != NULL) {
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, foff, &object);
		if (error) {
			return (error);
		}
	}
	if (flags & MAP_ANON) {
		object = NULL;
		docow = 0;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		docow = MAP_PREFAULT_PARTIAL;
	}

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit)
		*addr = pmap_addr_hint(object, *addr, size);

	if (flags & MAP_STACK)
		rv = vm_map_stack(map, *addr, size, prot, maxprot,
		    docow | MAP_STACK_GROWS_DOWN);
	else
		rv = vm_map_find(map, object, foff, addr, size, fitit,
		    prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * If the process has requested that all future mappings
	 * be wired, then heed this.
	 */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_wire(map, *addr, *addr + size,
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
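
/*
 * Illustrative sketch (not part of this file): how an in-kernel caller
 * might use vm_mmap() to create an anonymous mapping, as the SysV shared
 * memory and exec paths do.  Variable names are hypothetical; the call
 * uses the signature defined above.  A NULL handle together with MAP_ANON
 * selects an anonymous mapping and forces the offset to 0.
 *
 *	vm_offset_t attach_addr = 0;		// let the kernel pick
 *	vm_size_t len = round_page(request);	// request: hypothetical size
 *	int error;
 *
 *	error = vm_mmap(&p->p_vmspace->vm_map, &attach_addr, len,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, MAP_ANON,
 *	    NULL, 0);
 *	if (error)
 *		return (error);			// already an errno value
 */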