/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $Id: vm_mmap.c,v 1.84 1998/10/13 08:24:44 dg Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */
#include "opt_compat.h"
#include "opt_rlimit.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_inherit.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

/* ARGSUSED */
int
sbrk(p, uap)
	struct proc *p;
	struct sbrk_args *uap;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/* ARGSUSED */
int
sstk(p, uap)
	struct proc *p;
	struct sstk_args *uap;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(p, uap)
	struct proc *p;
	struct getpagesize_args *uap;
{

	p->p_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 || COMPAT_SUNOS */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 */
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

int
mmap(p, uap)
	struct proc *p;
	register struct mmap_args *uap;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	int disablexworkaround;
	off_t pos;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
			return (EINVAL);
#ifndef i386
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
#endif
		if (addr + size < addr)
			return (EINVAL);
	}
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < round_page((vm_offset_t)p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr + MAXDSIZ);

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		if (((unsigned) uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE)
			return (EINVAL);
		vp = (struct vnode *) fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return (EINVAL);
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			/*
			 * However, for the XIG X server to continue to work,
			 * we should allow the superuser to do it anyway.
			 * We only allow it at securelevel < 1.
			 * (Because the XIG X server writes directly to video
			 * memory via /dev/mem, it should never work at any
			 * other securelevel.
			 * XXX this will have to go
			 */
			if (securelevel >= 1)
				disablexworkaround = 1;
			else
				disablexworkaround = suser(p->p_ucred,
				    &p->p_acflag);
			if (vp->v_type == VCHR && disablexworkaround &&
			    (flags & (MAP_PRIVATE|MAP_COPY)))
				return (EINVAL);
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ)
				return (EACCES);
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */

			if ((flags & MAP_SHARED) != 0 ||
			    (vp->v_type == VCHR && disablexworkaround)) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error =
					    VOP_GETATTR(vp, &va,
						p->p_ucred, p)))
						return (error);
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0)
						maxprot |= VM_PROT_WRITE;
					else if (prot & PROT_WRITE)
						return (EPERM);
				} else if ((prot & PROT_WRITE) != 0)
					return (EACCES);
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
		}
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos);
	if (error == 0)
		p->p_retval[0] = (register_t) (addr + pageoff);
	return (error);
}

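/*
 * Worked example of the alignment rules enforced in mmap() above.  This
 * is illustrative only and assumes a PAGE_SIZE of 0x1000; the numbers are
 * not taken from any real caller.  For a call such as
 *
 *	mmap((void *)0x20123, 0x2000, PROT_READ, MAP_SHARED, fd, 0x3123)
 *
 * pageoff = 0x3123 & PAGE_MASK = 0x123, so pos is truncated to 0x3000 and
 * size grows to round_page(0x2000 + 0x123) = 0x3000.  The mapping itself
 * is established on page boundaries, and the value returned to the caller
 * is the mapped base address plus 0x123.  With MAP_FIXED the same
 * remainder rule applies: addr and pos must be congruent modulo PAGE_SIZE
 * or the call fails with EINVAL.
 */
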
#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(p, uap)
	struct proc *p;
	register struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100
#define	OMAP_INHERIT	0x0800

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	if (uap->flags & OMAP_INHERIT)
		nargs.flags |= MAP_INHERIT;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(p, &nargs));
}
#endif				/* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
int
msync(p, uap)
	struct proc *p;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE)
			return (EINVAL);
		addr = entry->start;
		size = entry->end - entry->start;
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
int
munmap(p, uap)
	register struct proc *p;
	register struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
#ifndef i386
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
#endif
	map = &p->p_vmspace->vm_map;
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(map, addr, addr + size);
	return (0);
}

void
munmapfd(p, fd)
	struct proc *p;
	int fd;
{
	/*
	 * XXX should unmap any regions mapped to this file
	 */
	p->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
int
mprotect(p, uap)
	struct proc *p;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	register vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
int
minherit(p, uap)
	struct proc *p;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	register vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
	    inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/* ARGSUSED */
int
madvise(p, uap)
	struct proc *p;
	struct madvise_args *uap;
{
	vm_map_t map;
	pmap_t pmap;
	vm_offset_t start, end;

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 &&
	    ((vm_offset_t) uap->addr + uap->len) > VM_MAXUSER_ADDRESS)
		return (EINVAL);
#ifndef i386
	if (VM_MIN_ADDRESS > 0 && uap->addr < VM_MIN_ADDRESS)
		return (EINVAL);
#endif
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	map = &p->p_vmspace->vm_map;
	pmap = &p->p_vmspace->vm_pmap;

	vm_map_madvise(map, pmap, start, end, uap->behav);

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/* ARGSUSED */
int
mincore(p, uap)
	struct proc *p;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error;
	int vecindex, lastvecindex;
	register vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (VM_MAXUSER_ADDRESS > 0 && end > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = &p->p_vmspace->vm_map;
	pmap = &p->p_vmspace->vm_pmap;

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				m = vm_page_lookup(current->object.vm_object,
				    pindex);
				/*
				 * if the page is resident, then gather
				 * information about it.
				 */
				if (m) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
					    pmap_is_modified(VM_PAGE_TO_PHYS(m)))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
			}

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure
			 * that the byte vector is zeroed for those skipped
			 * entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					vm_map_unlock(map);
					return (EFAULT);
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				vm_map_unlock(map);
				return (EFAULT);
			}
			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			vm_map_unlock(map);
			return (EFAULT);
		}
		++lastvecindex;
	}

	vm_map_unlock(map);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
int
mlock(p, uap)
	struct proc *p;
	struct mlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif

	error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
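
/*
 * Worked example for the wiring limits checked in mlock() above; the
 * numbers are illustrative only and assume a 4K PAGE_SIZE.  Locking a
 * 1MB range wires atop(1MB) = 256 pages, which must fit under
 * vm_page_max_wired together with the pages already wired system-wide
 * (cnt.v_wire_count), or the call fails with EAGAIN.  Where
 * pmap_wired_count is available, the bytes already wired by this pmap
 * plus the new range must also fit under the process's RLIMIT_MEMLOCK;
 * on pmaps without pmap_wired_count the caller must instead be the
 * superuser.
 */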

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

int
mlockall(p, uap)
	struct proc *p;
	struct mlockall_args *uap;
{
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	int how;
};
#endif

int
munlockall(p, uap)
	struct proc *p;
	struct munlockall_args *uap;
{
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
int
munlock(p, uap)
	struct proc *p;
	struct munlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif

	error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags,
    void *handle,
    vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
	objtype_t type;
	int rv = KERN_SUCCESS;
	vm_ooffset_t objsize;
	int docow;
	struct proc *p = curproc;

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		type = OBJT_DEFAULT;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		vp = (struct vnode *) handle;
		if (vp->v_type == VCHR) {
			type = OBJT_DEVICE;
			handle = (void *)(intptr_t)vp->v_rdev;
		} else {
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat, p->p_ucred, p);
			if (error)
				return (error);
			objsize = round_page(vat.va_size);
			type = OBJT_VNODE;
		}
	}

	if (handle == NULL) {
		object = NULL;
	} else {
		object = vm_pager_allocate(type,
		    handle, objsize, prot, foff);
		if (object == NULL)
			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
	}

	/*
	 * Force device mappings to be shared.
	 */
	if (type == OBJT_DEVICE) {
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	}

	docow = 0;
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
		docow = MAP_COPY_ON_WRITE | MAP_COPY_NEEDED;
	}

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit) {
		*addr = pmap_addr_hint(object, *addr, size);
	}

	rv = vm_map_find(map, object, foff, addr, size, fitit,
	    prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
		goto out;
	}

	/*
	 * "Pre-fault" resident pages.
	 */
	if ((map->pmap != NULL) && (object != NULL)) {
		pmap_object_init_pt(map->pmap, *addr,
		    object, (vm_pindex_t) OFF_TO_IDX(foff), size, 1);
	}

	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}
out:
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
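
/*
 * Sketch of an internal call, for illustration only (the identifiers
 * `vm' and `len' below are hypothetical and do not appear elsewhere in
 * this file).  Per the comment above, a caller that wants anonymous
 * memory passes MAP_ANON with a NULL handle:
 *
 *	vm_offset_t va = 0;
 *	int error = vm_mmap(&vm->vm_map, &va, round_page(len),
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_ANON, NULL, 0);
 *
 * With a NULL handle no pager object is allocated, vm_map_find() is
 * handed a NULL object, and backing pages are filled in lazily on first
 * fault.
 */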