/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $Id: vm_mmap.c,v 1.79 1998/06/21 18:02:47 bde Exp $
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include "opt_compat.h"
#include "opt_rlimit.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_inherit.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

/* ARGSUSED */
int
sbrk(p, uap)
	struct proc *p;
	struct sbrk_args *uap;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/* ARGSUSED */
int
sstk(p, uap)
	struct proc *p;
	struct sstk_args *uap;
{

	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(p, uap)
	struct proc *p;
	struct getpagesize_args *uap;
{

	p->p_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 || COMPAT_SUNOS */


/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 */
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

int
mmap(p, uap)
	struct proc *p;
	register struct mmap_args *uap;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	int disablexworkaround;
	off_t pos;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	/* make sure mapping fits into numeric range etc */
	if ((pos + size > (vm_offset_t)-PAGE_SIZE) ||
	    (ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
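		 *
		 * Illustrative example only (assumes the usual 4K
		 * PAGE_SIZE; the values are not taken from any real
		 * caller): addr 0x1003 with pos 0x2003 is accepted,
		 * since both leave remainder 3 modulo PAGE_SIZE, while
		 * addr 0x1003 with pos 0x2000 fails the PAGE_MASK check
		 * below and returns EINVAL.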
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
			return (EINVAL);
#ifndef i386
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
#endif
		if (addr + size < addr)
			return (EINVAL);
	}
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ);

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		if (((unsigned) uap->fd) >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[uap->fd]) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE)
			return (EINVAL);
		vp = (struct vnode *) fp->f_data;
		if (vp->v_type != VREG && vp->v_type != VCHR)
			return (EINVAL);
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			/*
			 * However, for the XIG X server to continue to work,
			 * we should allow the superuser to do it anyway.
			 * We only allow it at securelevel < 1.
			 * (Because the XIG X server writes directly to video
			 * memory via /dev/mem, it should never work at any
			 * other securelevel.)
			 * XXX this will have to go
			 */
			if (securelevel >= 1)
				disablexworkaround = 1;
			else
				disablexworkaround = suser(p->p_ucred,
				    &p->p_acflag);
			if (vp->v_type == VCHR && disablexworkaround &&
			    (flags & (MAP_PRIVATE|MAP_COPY)))
				return (EINVAL);
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ)
				return (EACCES);
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */

			if ((flags & MAP_SHARED) != 0 ||
			    (vp->v_type == VCHR && disablexworkaround)) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error =
					    VOP_GETATTR(vp, &va,
						p->p_ucred, p)))
						return (error);
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0)
						maxprot |= VM_PROT_WRITE;
					else if (prot & PROT_WRITE)
						return (EPERM);
				} else if ((prot & PROT_WRITE) != 0)
					return (EACCES);
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
		}
	}
	error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos);
	if (error == 0)
		p->p_retval[0] = (register_t) (addr + pageoff);
	return (error);
}

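/*
 * Old mmap compatibility entry point (4.3BSD).  The wrapper below
 * converts the historic protection index through cvtbsdprot[] and the
 * OMAP_* flag bits into a struct mmap_args, then simply hands off to
 * mmap() above.
 */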
#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(p, uap)
	struct proc *p;
	register struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100
#define	OMAP_INHERIT	0x0800

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	if (uap->flags & OMAP_INHERIT)
		nargs.flags |= MAP_INHERIT;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(p, &nargs));
}
#endif				/* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
int
msync(p, uap)
	struct proc *p;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

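	/*
	 * Pull in the user's arguments, round the range to page
	 * boundaries, and reject ranges that wrap the address space.
	 */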
	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &p->p_vmspace->vm_map;

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr. This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE)
			return (EINVAL);
		addr = entry->start;
		size = entry->end - entry->start;
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);

	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
int
munmap(p, uap)
	register struct proc *p;
	register struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

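	/*
	 * Round the request to page boundaries; a wrapping range is an
	 * error and a zero-length request is a no-op.
	 */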
	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
#ifndef i386
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
#endif
	map = &p->p_vmspace->vm_map;
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(map, addr, addr + size);
	return (0);
}

void
munmapfd(p, fd)
	struct proc *p;
	int fd;
{
	/*
	 * XXX should unmap any regions mapped to this file
	 */
	p->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
int
mprotect(p, uap)
	struct proc *p;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	register vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

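	/*
	 * Apply the new protection and map the vm_map_protect() status
	 * onto an errno value.
	 */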
	switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
	    FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
int
minherit(p, uap)
	struct proc *p;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	register vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
	    inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/* ARGSUSED */
int
madvise(p, uap)
	struct proc *p;
	struct madvise_args *uap;
{
	vm_map_t map;
	pmap_t pmap;
	vm_offset_t start, end;
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 &&
	    ((vm_offset_t) uap->addr + uap->len) > VM_MAXUSER_ADDRESS)
		return (EINVAL);
#ifndef i386
	if (VM_MIN_ADDRESS > 0 && uap->addr < VM_MIN_ADDRESS)
		return (EINVAL);
#endif
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	map = &p->p_vmspace->vm_map;
	pmap = &p->p_vmspace->vm_pmap;

	vm_map_madvise(map, pmap, start, end, uap->behav);

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/* ARGSUSED */
int
mincore(p, uap)
	struct proc *p;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error;
	int vecindex, lastvecindex;
	register vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (VM_MAXUSER_ADDRESS > 0 && end > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = &p->p_vmspace->vm_map;
	pmap = &p->p_vmspace->vm_pmap;

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;
				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				m = vm_page_lookup(current->object.vm_object,
				    pindex);
				/*
				 * if the page is resident, then gather
				 * information about it.
				 */
				if (m) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
					    pmap_is_modified(VM_PAGE_TO_PHYS(m)))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
					    pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
						m->flags |= PG_REFERENCED;
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
			}

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					vm_map_unlock(map);
					return (EFAULT);
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				vm_map_unlock(map);
				return (EFAULT);
			}
			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			vm_map_unlock(map);
			return (EFAULT);
		}
		++lastvecindex;
	}

	vm_map_unlock(map);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
int
mlock(p, uap)
	struct proc *p;
	struct mlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif

	error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr, addr + size, FALSE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

int
mlockall(p, uap)
	struct proc *p;
	struct mlockall_args *uap;
{
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	int how;
};
#endif

int
munlockall(p, uap)
	struct proc *p;
	struct munlockall_args *uap;
{
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
int
munlock(p, uap)
	struct proc *p;
	struct munlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif

	error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr, addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
	objtype_t type;
	int rv = KERN_SUCCESS;
	vm_ooffset_t objsize;
	int docow;
	struct proc *p = curproc;

	if (size == 0)
		return (0);

	objsize = size = round_page(size);

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}

	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		type = OBJT_DEFAULT;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		vp = (struct vnode *) handle;
		if (vp->v_type == VCHR) {
			type = OBJT_DEVICE;
			handle = (void *)(long)vp->v_rdev;
		} else {
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat, p->p_ucred, p);
			if (error)
				return (error);
			objsize = round_page(vat.va_size);
			type = OBJT_VNODE;
		}
	}

	if (handle == NULL) {
		object = NULL;
	} else {
		object = vm_pager_allocate(type,
		    handle, OFF_TO_IDX(objsize), prot, foff);
		if (object == NULL)
			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
	}

	/*
	 * Force device mappings to be shared.
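	 * (As noted in mmap() above, cdevs provide no private mappings of
	 * any kind, so MAP_PRIVATE/MAP_COPY requests on OBJT_DEVICE
	 * objects are quietly converted to MAP_SHARED here.)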
	 */
	if (type == OBJT_DEVICE) {
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	}

	docow = 0;
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
		docow = MAP_COPY_ON_WRITE | MAP_COPY_NEEDED;
	}

#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit) {
		*addr = pmap_addr_hint(object, *addr, size);
	}

	rv = vm_map_find(map, object, foff, addr, size, fitit,
	    prot, maxprot, docow);

	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference. Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
		goto out;
	}

	/*
	 * "Pre-fault" resident pages.
	 */
	if ((map->pmap != NULL) && (object != NULL)) {
		pmap_object_init_pt(map->pmap, *addr,
		    object, (vm_pindex_t) OFF_TO_IDX(foff), size, 1);
	}

	/*
	 * Shared memory is also shared with children.
	 */
	if (flags & (MAP_SHARED|MAP_INHERIT)) {
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS) {
			(void) vm_map_remove(map, *addr, *addr + size);
			goto out;
		}
	}
out:
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}