160727d8bSWarner Losh /*- 2df8bae1dSRodney W. Grimes * Copyright (c) 1988 University of Utah. 3df8bae1dSRodney W. Grimes * Copyright (c) 1991, 1993 4df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 5df8bae1dSRodney W. Grimes * 6df8bae1dSRodney W. Grimes * This code is derived from software contributed to Berkeley by 7df8bae1dSRodney W. Grimes * the Systems Programming Group of the University of Utah Computer 8df8bae1dSRodney W. Grimes * Science Department. 9df8bae1dSRodney W. Grimes * 10df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 11df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 12df8bae1dSRodney W. Grimes * are met: 13df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 14df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 15df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 16df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 17df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 18df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 19df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 20df8bae1dSRodney W. Grimes * without specific prior written permission. 21df8bae1dSRodney W. Grimes * 22df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32df8bae1dSRodney W. Grimes * SUCH DAMAGE. 33df8bae1dSRodney W. Grimes * 34df8bae1dSRodney W. Grimes * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$ 35df8bae1dSRodney W. Grimes * 36df8bae1dSRodney W. Grimes * @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94 37df8bae1dSRodney W. Grimes */ 38df8bae1dSRodney W. Grimes 39df8bae1dSRodney W. Grimes /* 40df8bae1dSRodney W. Grimes * Mapped file (mmap) interface to VM 41df8bae1dSRodney W. Grimes */ 42df8bae1dSRodney W. Grimes 43874651b1SDavid E. O'Brien #include <sys/cdefs.h> 44874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$"); 45874651b1SDavid E. O'Brien 465591b823SEivind Eklund #include "opt_compat.h" 4749874f6eSJoseph Koshy #include "opt_hwpmc_hooks.h" 48e9822d92SJoerg Wunsch 49df8bae1dSRodney W. Grimes #include <sys/param.h> 50df8bae1dSRodney W. Grimes #include <sys/systm.h> 51fb919e4dSMark Murray #include <sys/kernel.h> 52fb919e4dSMark Murray #include <sys/lock.h> 5323955314SAlfred Perlstein #include <sys/mutex.h> 54d2d3e875SBruce Evans #include <sys/sysproto.h> 55df8bae1dSRodney W. Grimes #include <sys/filedesc.h> 56acd3428bSRobert Watson #include <sys/priv.h> 57df8bae1dSRodney W. Grimes #include <sys/proc.h> 58070f64feSMatthew Dillon #include <sys/resource.h> 59070f64feSMatthew Dillon #include <sys/resourcevar.h> 60df8bae1dSRodney W. Grimes #include <sys/vnode.h> 613ac4d1efSBruce Evans #include <sys/fcntl.h> 62df8bae1dSRodney W. Grimes #include <sys/file.h> 63df8bae1dSRodney W. Grimes #include <sys/mman.h> 64b483c7f6SGuido van Rooij #include <sys/mount.h> 65df8bae1dSRodney W. Grimes #include <sys/conf.h> 664183b6b6SPeter Wemm #include <sys/stat.h> 67497a8238SKonstantin Belousov #include <sys/sysent.h> 68efeaf95aSDavid Greenman #include <sys/vmmeter.h> 691f6889a1SMatthew Dillon #include <sys/sysctl.h> 70df8bae1dSRodney W. Grimes 71aed55708SRobert Watson #include <security/mac/mac_framework.h> 72aed55708SRobert Watson 73df8bae1dSRodney W. Grimes #include <vm/vm.h> 74efeaf95aSDavid Greenman #include <vm/vm_param.h> 75efeaf95aSDavid Greenman #include <vm/pmap.h> 76efeaf95aSDavid Greenman #include <vm/vm_map.h> 77efeaf95aSDavid Greenman #include <vm/vm_object.h> 781c7c3c6aSMatthew Dillon #include <vm/vm_page.h> 79df8bae1dSRodney W. Grimes #include <vm/vm_pager.h> 80b5e8ce9fSBruce Evans #include <vm/vm_pageout.h> 81efeaf95aSDavid Greenman #include <vm/vm_extern.h> 82867a482dSJohn Dyson #include <vm/vm_page.h> 831f6889a1SMatthew Dillon #include <vm/vm_kern.h> 84df8bae1dSRodney W. Grimes 8549874f6eSJoseph Koshy #ifdef HWPMC_HOOKS 8649874f6eSJoseph Koshy #include <sys/pmckern.h> 8749874f6eSJoseph Koshy #endif 8849874f6eSJoseph Koshy 89d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 90df8bae1dSRodney W. Grimes struct sbrk_args { 91df8bae1dSRodney W. Grimes int incr; 92df8bae1dSRodney W. Grimes }; 93d2d3e875SBruce Evans #endif 940d94caffSDavid Greenman 951f6889a1SMatthew Dillon static int max_proc_mmap; 966bd9cb1cSTom Rhodes SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, 976bd9cb1cSTom Rhodes "Maximum number of memory-mapped files per process"); 981f6889a1SMatthew Dillon 991f6889a1SMatthew Dillon /* 1001f6889a1SMatthew Dillon * Set the maximum number of vm_map_entry structures per process. Roughly 1011f6889a1SMatthew Dillon * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100 1021f6889a1SMatthew Dillon * of our KVM malloc space still results in generous limits. We want a 1031f6889a1SMatthew Dillon * default that is good enough to prevent the kernel running out of resources 1041f6889a1SMatthew Dillon * if attacked from compromised user account but generous enough such that 1051f6889a1SMatthew Dillon * multi-threaded processes are not unduly inconvenienced. 1061f6889a1SMatthew Dillon */ 10711caded3SAlfred Perlstein static void vmmapentry_rsrc_init(void *); 108237fdd78SRobert Watson SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, 109237fdd78SRobert Watson NULL); 1101f6889a1SMatthew Dillon 1111f6889a1SMatthew Dillon static void 1121f6889a1SMatthew Dillon vmmapentry_rsrc_init(dummy) 1131f6889a1SMatthew Dillon void *dummy; 1141f6889a1SMatthew Dillon { 1151f6889a1SMatthew Dillon max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry); 1161f6889a1SMatthew Dillon max_proc_mmap /= 100; 1171f6889a1SMatthew Dillon } 1181f6889a1SMatthew Dillon 119c8daea13SAlexander Kabaev static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *, 12064345f0bSJohn Baldwin int *, struct vnode *, vm_ooffset_t *, vm_object_t *); 12198df9218SJohn Baldwin static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *, 12264345f0bSJohn Baldwin int *, struct cdev *, vm_ooffset_t *, vm_object_t *); 1238e38aeffSJohn Baldwin static int vm_mmap_shm(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *, 1248e38aeffSJohn Baldwin int *, struct shmfd *, vm_ooffset_t, vm_object_t *); 125c8daea13SAlexander Kabaev 126d2c60af8SMatthew Dillon /* 127d2c60af8SMatthew Dillon * MPSAFE 128d2c60af8SMatthew Dillon */ 129df8bae1dSRodney W. Grimes /* ARGSUSED */ 130df8bae1dSRodney W. Grimes int 131b40ce416SJulian Elischer sbrk(td, uap) 132b40ce416SJulian Elischer struct thread *td; 133df8bae1dSRodney W. Grimes struct sbrk_args *uap; 134df8bae1dSRodney W. Grimes { 135df8bae1dSRodney W. Grimes /* Not yet implemented */ 136df8bae1dSRodney W. Grimes return (EOPNOTSUPP); 137df8bae1dSRodney W. Grimes } 138df8bae1dSRodney W. Grimes 139d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 140df8bae1dSRodney W. Grimes struct sstk_args { 141df8bae1dSRodney W. Grimes int incr; 142df8bae1dSRodney W. Grimes }; 143d2d3e875SBruce Evans #endif 1440d94caffSDavid Greenman 145d2c60af8SMatthew Dillon /* 146d2c60af8SMatthew Dillon * MPSAFE 147d2c60af8SMatthew Dillon */ 148df8bae1dSRodney W. Grimes /* ARGSUSED */ 149df8bae1dSRodney W. Grimes int 150b40ce416SJulian Elischer sstk(td, uap) 151b40ce416SJulian Elischer struct thread *td; 152df8bae1dSRodney W. Grimes struct sstk_args *uap; 153df8bae1dSRodney W. Grimes { 154df8bae1dSRodney W. Grimes /* Not yet implemented */ 155df8bae1dSRodney W. Grimes return (EOPNOTSUPP); 156df8bae1dSRodney W. Grimes } 157df8bae1dSRodney W. Grimes 1581930e303SPoul-Henning Kamp #if defined(COMPAT_43) 159d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 160df8bae1dSRodney W. Grimes struct getpagesize_args { 161df8bae1dSRodney W. Grimes int dummy; 162df8bae1dSRodney W. Grimes }; 163d2d3e875SBruce Evans #endif 1640d94caffSDavid Greenman 165df8bae1dSRodney W. Grimes /* ARGSUSED */ 166df8bae1dSRodney W. Grimes int 167b40ce416SJulian Elischer ogetpagesize(td, uap) 168b40ce416SJulian Elischer struct thread *td; 169df8bae1dSRodney W. Grimes struct getpagesize_args *uap; 170df8bae1dSRodney W. Grimes { 1710cddd8f0SMatthew Dillon /* MP SAFE */ 172b40ce416SJulian Elischer td->td_retval[0] = PAGE_SIZE; 173df8bae1dSRodney W. Grimes return (0); 174df8bae1dSRodney W. Grimes } 1751930e303SPoul-Henning Kamp #endif /* COMPAT_43 */ 176df8bae1dSRodney W. Grimes 17754f42e4bSPeter Wemm 17854f42e4bSPeter Wemm /* 17954f42e4bSPeter Wemm * Memory Map (mmap) system call. Note that the file offset 18054f42e4bSPeter Wemm * and address are allowed to be NOT page aligned, though if 18154f42e4bSPeter Wemm * the MAP_FIXED flag it set, both must have the same remainder 18254f42e4bSPeter Wemm * modulo the PAGE_SIZE (POSIX 1003.1b). If the address is not 18354f42e4bSPeter Wemm * page-aligned, the actual mapping starts at trunc_page(addr) 18454f42e4bSPeter Wemm * and the return value is adjusted up by the page offset. 185b4309055SMatthew Dillon * 186b4309055SMatthew Dillon * Generally speaking, only character devices which are themselves 187b4309055SMatthew Dillon * memory-based, such as a video framebuffer, can be mmap'd. Otherwise 188b4309055SMatthew Dillon * there would be no cache coherency between a descriptor and a VM mapping 189b4309055SMatthew Dillon * both to the same character device. 19054f42e4bSPeter Wemm */ 191d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 192df8bae1dSRodney W. Grimes struct mmap_args { 193651bb817SAlexander Langer void *addr; 194df8bae1dSRodney W. Grimes size_t len; 195df8bae1dSRodney W. Grimes int prot; 196df8bae1dSRodney W. Grimes int flags; 197df8bae1dSRodney W. Grimes int fd; 198df8bae1dSRodney W. Grimes long pad; 199df8bae1dSRodney W. Grimes off_t pos; 200df8bae1dSRodney W. Grimes }; 201d2d3e875SBruce Evans #endif 202df8bae1dSRodney W. Grimes 203d2c60af8SMatthew Dillon /* 204d2c60af8SMatthew Dillon * MPSAFE 205d2c60af8SMatthew Dillon */ 206df8bae1dSRodney W. Grimes int 207b40ce416SJulian Elischer mmap(td, uap) 208b40ce416SJulian Elischer struct thread *td; 20954d92145SMatthew Dillon struct mmap_args *uap; 210df8bae1dSRodney W. Grimes { 21149874f6eSJoseph Koshy #ifdef HWPMC_HOOKS 21249874f6eSJoseph Koshy struct pmckern_map_in pkm; 21349874f6eSJoseph Koshy #endif 214c8daea13SAlexander Kabaev struct file *fp; 215df8bae1dSRodney W. Grimes struct vnode *vp; 216df8bae1dSRodney W. Grimes vm_offset_t addr; 2179154ee6aSPeter Wemm vm_size_t size, pageoff; 218df8bae1dSRodney W. Grimes vm_prot_t prot, maxprot; 219651bb817SAlexander Langer void *handle; 22098df9218SJohn Baldwin objtype_t handle_type; 221df8bae1dSRodney W. Grimes int flags, error; 22254f42e4bSPeter Wemm off_t pos; 223b40ce416SJulian Elischer struct vmspace *vms = td->td_proc->p_vmspace; 224df8bae1dSRodney W. Grimes 22554f42e4bSPeter Wemm addr = (vm_offset_t) uap->addr; 22654f42e4bSPeter Wemm size = uap->len; 227df8bae1dSRodney W. Grimes prot = uap->prot & VM_PROT_ALL; 228df8bae1dSRodney W. Grimes flags = uap->flags; 22954f42e4bSPeter Wemm pos = uap->pos; 23054f42e4bSPeter Wemm 231426da3bcSAlfred Perlstein fp = NULL; 23227bfa958SSimon L. B. Nielsen 23327bfa958SSimon L. B. Nielsen /* Make sure mapping fits into numeric range, etc. */ 234497a8238SKonstantin Belousov if ((uap->len == 0 && !SV_CURPROC_FLAG(SV_AOUT) && 235497a8238SKonstantin Belousov curproc->p_osrel >= 800104) || 2365711bf30SJohn Baldwin ((flags & MAP_ANON) && (uap->fd != -1 || pos != 0))) 237df8bae1dSRodney W. Grimes return (EINVAL); 2389154ee6aSPeter Wemm 2392267af78SJulian Elischer if (flags & MAP_STACK) { 2402267af78SJulian Elischer if ((uap->fd != -1) || 2412267af78SJulian Elischer ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE))) 2422267af78SJulian Elischer return (EINVAL); 2432267af78SJulian Elischer flags |= MAP_ANON; 2442267af78SJulian Elischer pos = 0; 2452907af2aSJulian Elischer } 2462907af2aSJulian Elischer 2479154ee6aSPeter Wemm /* 24854f42e4bSPeter Wemm * Align the file position to a page boundary, 24954f42e4bSPeter Wemm * and save its page offset component. 2509154ee6aSPeter Wemm */ 25154f42e4bSPeter Wemm pageoff = (pos & PAGE_MASK); 25254f42e4bSPeter Wemm pos -= pageoff; 25354f42e4bSPeter Wemm 25454f42e4bSPeter Wemm /* Adjust size for rounding (on both ends). */ 25554f42e4bSPeter Wemm size += pageoff; /* low end... */ 25654f42e4bSPeter Wemm size = (vm_size_t) round_page(size); /* hi end */ 2579154ee6aSPeter Wemm 258df8bae1dSRodney W. Grimes /* 2590d94caffSDavid Greenman * Check for illegal addresses. Watch out for address wrap... Note 2600d94caffSDavid Greenman * that VM_*_ADDRESS are not constants due to casts (argh). 261df8bae1dSRodney W. Grimes */ 262df8bae1dSRodney W. Grimes if (flags & MAP_FIXED) { 26354f42e4bSPeter Wemm /* 26454f42e4bSPeter Wemm * The specified address must have the same remainder 26554f42e4bSPeter Wemm * as the file offset taken modulo PAGE_SIZE, so it 26654f42e4bSPeter Wemm * should be aligned after adjustment by pageoff. 26754f42e4bSPeter Wemm */ 26854f42e4bSPeter Wemm addr -= pageoff; 26954f42e4bSPeter Wemm if (addr & PAGE_MASK) 27054f42e4bSPeter Wemm return (EINVAL); 27127bfa958SSimon L. B. Nielsen 27254f42e4bSPeter Wemm /* Address range must be all in user VM space. */ 27305ba50f5SJake Burkholder if (addr < vm_map_min(&vms->vm_map) || 27405ba50f5SJake Burkholder addr + size > vm_map_max(&vms->vm_map)) 275df8bae1dSRodney W. Grimes return (EINVAL); 276bbc0ec52SDavid Greenman if (addr + size < addr) 277df8bae1dSRodney W. Grimes return (EINVAL); 27891d5354aSJohn Baldwin } else { 279df8bae1dSRodney W. Grimes /* 28054f42e4bSPeter Wemm * XXX for non-fixed mappings where no hint is provided or 28154f42e4bSPeter Wemm * the hint would fall in the potential heap space, 28254f42e4bSPeter Wemm * place it after the end of the largest possible heap. 283df8bae1dSRodney W. Grimes * 28454f42e4bSPeter Wemm * There should really be a pmap call to determine a reasonable 28554f42e4bSPeter Wemm * location. 286df8bae1dSRodney W. Grimes */ 28791d5354aSJohn Baldwin PROC_LOCK(td->td_proc); 28891d5354aSJohn Baldwin if (addr == 0 || 2891f6889a1SMatthew Dillon (addr >= round_page((vm_offset_t)vms->vm_taddr) && 290c460ac3aSPeter Wemm addr < round_page((vm_offset_t)vms->vm_daddr + 29191d5354aSJohn Baldwin lim_max(td->td_proc, RLIMIT_DATA)))) 292c460ac3aSPeter Wemm addr = round_page((vm_offset_t)vms->vm_daddr + 29391d5354aSJohn Baldwin lim_max(td->td_proc, RLIMIT_DATA)); 29491d5354aSJohn Baldwin PROC_UNLOCK(td->td_proc); 29591d5354aSJohn Baldwin } 296df8bae1dSRodney W. Grimes if (flags & MAP_ANON) { 297df8bae1dSRodney W. Grimes /* 298df8bae1dSRodney W. Grimes * Mapping blank space is trivial. 299df8bae1dSRodney W. Grimes */ 300df8bae1dSRodney W. Grimes handle = NULL; 30198df9218SJohn Baldwin handle_type = OBJT_DEFAULT; 302df8bae1dSRodney W. Grimes maxprot = VM_PROT_ALL; 30330d4dd7eSAlexander Kabaev } else { 304df8bae1dSRodney W. Grimes /* 3058e38aeffSJohn Baldwin * Mapping file, get fp for validation and 3068e38aeffSJohn Baldwin * don't let the descriptor disappear on us if we block. 307df8bae1dSRodney W. Grimes */ 308a4db4953SAlfred Perlstein if ((error = fget(td, uap->fd, &fp)) != 0) 309426da3bcSAlfred Perlstein goto done; 3108e38aeffSJohn Baldwin if (fp->f_type == DTYPE_SHM) { 3118e38aeffSJohn Baldwin handle = fp->f_data; 3128e38aeffSJohn Baldwin handle_type = OBJT_SWAP; 3138e38aeffSJohn Baldwin maxprot = VM_PROT_NONE; 3148e38aeffSJohn Baldwin 3158e38aeffSJohn Baldwin /* FREAD should always be set. */ 3168e38aeffSJohn Baldwin if (fp->f_flag & FREAD) 3178e38aeffSJohn Baldwin maxprot |= VM_PROT_EXECUTE | VM_PROT_READ; 3188e38aeffSJohn Baldwin if (fp->f_flag & FWRITE) 3198e38aeffSJohn Baldwin maxprot |= VM_PROT_WRITE; 3208e38aeffSJohn Baldwin goto map; 3218e38aeffSJohn Baldwin } 322e4ca250dSJohn Baldwin if (fp->f_type != DTYPE_VNODE) { 32389eae00bSTom Rhodes error = ENODEV; 324426da3bcSAlfred Perlstein goto done; 325e4ca250dSJohn Baldwin } 3268e38aeffSJohn Baldwin #if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \ 3278e38aeffSJohn Baldwin defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) 328279d7226SMatthew Dillon /* 329aa543039SGarrett Wollman * POSIX shared-memory objects are defined to have 330aa543039SGarrett Wollman * kernel persistence, and are not defined to support 331aa543039SGarrett Wollman * read(2)/write(2) -- or even open(2). Thus, we can 332aa543039SGarrett Wollman * use MAP_ASYNC to trade on-disk coherence for speed. 333aa543039SGarrett Wollman * The shm_open(3) library routine turns on the FPOSIXSHM 334aa543039SGarrett Wollman * flag to request this behavior. 335aa543039SGarrett Wollman */ 336aa543039SGarrett Wollman if (fp->f_flag & FPOSIXSHM) 337aa543039SGarrett Wollman flags |= MAP_NOSYNC; 3388e38aeffSJohn Baldwin #endif 3393b6d9652SPoul-Henning Kamp vp = fp->f_vnode; 340c8bdd56bSGuido van Rooij /* 341df8bae1dSRodney W. Grimes * Ensure that file and memory protections are 342df8bae1dSRodney W. Grimes * compatible. Note that we only worry about 343df8bae1dSRodney W. Grimes * writability if mapping is shared; in this case, 344df8bae1dSRodney W. Grimes * current and max prot are dictated by the open file. 345df8bae1dSRodney W. Grimes * XXX use the vnode instead? Problem is: what 3460d94caffSDavid Greenman * credentials do we use for determination? What if 3470d94caffSDavid Greenman * proc does a setuid? 348df8bae1dSRodney W. Grimes */ 3498eec77b0STim J. Robbins if (vp->v_mount != NULL && vp->v_mount->mnt_flag & MNT_NOEXEC) 350b483c7f6SGuido van Rooij maxprot = VM_PROT_NONE; 351b483c7f6SGuido van Rooij else 352b483c7f6SGuido van Rooij maxprot = VM_PROT_EXECUTE; 353279d7226SMatthew Dillon if (fp->f_flag & FREAD) { 354df8bae1dSRodney W. Grimes maxprot |= VM_PROT_READ; 355279d7226SMatthew Dillon } else if (prot & PROT_READ) { 356279d7226SMatthew Dillon error = EACCES; 357279d7226SMatthew Dillon goto done; 358279d7226SMatthew Dillon } 359c8bdd56bSGuido van Rooij /* 360c8bdd56bSGuido van Rooij * If we are sharing potential changes (either via 361c8bdd56bSGuido van Rooij * MAP_SHARED or via the implicit sharing of character 362c8bdd56bSGuido van Rooij * device mappings), and we are trying to get write 363c8bdd56bSGuido van Rooij * permission although we opened it without asking 364c8daea13SAlexander Kabaev * for it, bail out. 365c8bdd56bSGuido van Rooij */ 366ce7a036dSAlexander Kabaev if ((flags & MAP_SHARED) != 0) { 36705feb99fSGuido van Rooij if ((fp->f_flag & FWRITE) != 0) { 368df8bae1dSRodney W. Grimes maxprot |= VM_PROT_WRITE; 369279d7226SMatthew Dillon } else if ((prot & PROT_WRITE) != 0) { 370279d7226SMatthew Dillon error = EACCES; 371279d7226SMatthew Dillon goto done; 372279d7226SMatthew Dillon } 373ce7a036dSAlexander Kabaev } else if (vp->v_type != VCHR || (fp->f_flag & FWRITE) != 0) { 37405feb99fSGuido van Rooij maxprot |= VM_PROT_WRITE; 375279d7226SMatthew Dillon } 376651bb817SAlexander Langer handle = (void *)vp; 37798df9218SJohn Baldwin handle_type = OBJT_VNODE; 37830d4dd7eSAlexander Kabaev } 3798e38aeffSJohn Baldwin map: 3801f6889a1SMatthew Dillon 3811f6889a1SMatthew Dillon /* 3821f6889a1SMatthew Dillon * Do not allow more then a certain number of vm_map_entry structures 3831f6889a1SMatthew Dillon * per process. Scale with the number of rforks sharing the map 3841f6889a1SMatthew Dillon * to make the limit reasonable for threads. 3851f6889a1SMatthew Dillon */ 3861f6889a1SMatthew Dillon if (max_proc_mmap && 3871f6889a1SMatthew Dillon vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) { 388279d7226SMatthew Dillon error = ENOMEM; 389279d7226SMatthew Dillon goto done; 3901f6889a1SMatthew Dillon } 3911f6889a1SMatthew Dillon 39236b90789SKonstantin Belousov td->td_fpop = fp; 3931f6889a1SMatthew Dillon error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot, 39498df9218SJohn Baldwin flags, handle_type, handle, pos); 39536b90789SKonstantin Belousov td->td_fpop = NULL; 39649874f6eSJoseph Koshy #ifdef HWPMC_HOOKS 39749874f6eSJoseph Koshy /* inform hwpmc(4) if an executable is being mapped */ 39849874f6eSJoseph Koshy if (error == 0 && handle_type == OBJT_VNODE && 39949874f6eSJoseph Koshy (prot & PROT_EXEC)) { 40049874f6eSJoseph Koshy pkm.pm_file = handle; 40149874f6eSJoseph Koshy pkm.pm_address = (uintptr_t) addr; 40249874f6eSJoseph Koshy PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm); 40349874f6eSJoseph Koshy } 40449874f6eSJoseph Koshy #endif 405df8bae1dSRodney W. Grimes if (error == 0) 406b40ce416SJulian Elischer td->td_retval[0] = (register_t) (addr + pageoff); 407279d7226SMatthew Dillon done: 408279d7226SMatthew Dillon if (fp) 409b40ce416SJulian Elischer fdrop(fp, td); 410f6b5b182SJeff Roberson 411df8bae1dSRodney W. Grimes return (error); 412df8bae1dSRodney W. Grimes } 413df8bae1dSRodney W. Grimes 414c2815ad5SPeter Wemm int 415c2815ad5SPeter Wemm freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap) 416c2815ad5SPeter Wemm { 417c2815ad5SPeter Wemm struct mmap_args oargs; 418c2815ad5SPeter Wemm 419c2815ad5SPeter Wemm oargs.addr = uap->addr; 420c2815ad5SPeter Wemm oargs.len = uap->len; 421c2815ad5SPeter Wemm oargs.prot = uap->prot; 422c2815ad5SPeter Wemm oargs.flags = uap->flags; 423c2815ad5SPeter Wemm oargs.fd = uap->fd; 424c2815ad5SPeter Wemm oargs.pos = uap->pos; 425c2815ad5SPeter Wemm return (mmap(td, &oargs)); 426c2815ad5SPeter Wemm } 427c2815ad5SPeter Wemm 42805f0fdd2SPoul-Henning Kamp #ifdef COMPAT_43 429d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 43005f0fdd2SPoul-Henning Kamp struct ommap_args { 43105f0fdd2SPoul-Henning Kamp caddr_t addr; 43205f0fdd2SPoul-Henning Kamp int len; 43305f0fdd2SPoul-Henning Kamp int prot; 43405f0fdd2SPoul-Henning Kamp int flags; 43505f0fdd2SPoul-Henning Kamp int fd; 43605f0fdd2SPoul-Henning Kamp long pos; 43705f0fdd2SPoul-Henning Kamp }; 438d2d3e875SBruce Evans #endif 43905f0fdd2SPoul-Henning Kamp int 440b40ce416SJulian Elischer ommap(td, uap) 441b40ce416SJulian Elischer struct thread *td; 44254d92145SMatthew Dillon struct ommap_args *uap; 44305f0fdd2SPoul-Henning Kamp { 44405f0fdd2SPoul-Henning Kamp struct mmap_args nargs; 44505f0fdd2SPoul-Henning Kamp static const char cvtbsdprot[8] = { 44605f0fdd2SPoul-Henning Kamp 0, 44705f0fdd2SPoul-Henning Kamp PROT_EXEC, 44805f0fdd2SPoul-Henning Kamp PROT_WRITE, 44905f0fdd2SPoul-Henning Kamp PROT_EXEC | PROT_WRITE, 45005f0fdd2SPoul-Henning Kamp PROT_READ, 45105f0fdd2SPoul-Henning Kamp PROT_EXEC | PROT_READ, 45205f0fdd2SPoul-Henning Kamp PROT_WRITE | PROT_READ, 45305f0fdd2SPoul-Henning Kamp PROT_EXEC | PROT_WRITE | PROT_READ, 45405f0fdd2SPoul-Henning Kamp }; 4550d94caffSDavid Greenman 45605f0fdd2SPoul-Henning Kamp #define OMAP_ANON 0x0002 45705f0fdd2SPoul-Henning Kamp #define OMAP_COPY 0x0020 45805f0fdd2SPoul-Henning Kamp #define OMAP_SHARED 0x0010 45905f0fdd2SPoul-Henning Kamp #define OMAP_FIXED 0x0100 46005f0fdd2SPoul-Henning Kamp 46105f0fdd2SPoul-Henning Kamp nargs.addr = uap->addr; 46205f0fdd2SPoul-Henning Kamp nargs.len = uap->len; 46305f0fdd2SPoul-Henning Kamp nargs.prot = cvtbsdprot[uap->prot & 0x7]; 46405f0fdd2SPoul-Henning Kamp nargs.flags = 0; 46505f0fdd2SPoul-Henning Kamp if (uap->flags & OMAP_ANON) 46605f0fdd2SPoul-Henning Kamp nargs.flags |= MAP_ANON; 46705f0fdd2SPoul-Henning Kamp if (uap->flags & OMAP_COPY) 46805f0fdd2SPoul-Henning Kamp nargs.flags |= MAP_COPY; 46905f0fdd2SPoul-Henning Kamp if (uap->flags & OMAP_SHARED) 47005f0fdd2SPoul-Henning Kamp nargs.flags |= MAP_SHARED; 47105f0fdd2SPoul-Henning Kamp else 47205f0fdd2SPoul-Henning Kamp nargs.flags |= MAP_PRIVATE; 47305f0fdd2SPoul-Henning Kamp if (uap->flags & OMAP_FIXED) 47405f0fdd2SPoul-Henning Kamp nargs.flags |= MAP_FIXED; 47505f0fdd2SPoul-Henning Kamp nargs.fd = uap->fd; 47605f0fdd2SPoul-Henning Kamp nargs.pos = uap->pos; 477b40ce416SJulian Elischer return (mmap(td, &nargs)); 47805f0fdd2SPoul-Henning Kamp } 47905f0fdd2SPoul-Henning Kamp #endif /* COMPAT_43 */ 48005f0fdd2SPoul-Henning Kamp 48105f0fdd2SPoul-Henning Kamp 482d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 483df8bae1dSRodney W. Grimes struct msync_args { 484651bb817SAlexander Langer void *addr; 485c899450bSPeter Wemm size_t len; 486e6c6af11SDavid Greenman int flags; 487df8bae1dSRodney W. Grimes }; 488d2d3e875SBruce Evans #endif 489d2c60af8SMatthew Dillon /* 490d2c60af8SMatthew Dillon * MPSAFE 491d2c60af8SMatthew Dillon */ 492df8bae1dSRodney W. Grimes int 493b40ce416SJulian Elischer msync(td, uap) 494b40ce416SJulian Elischer struct thread *td; 495df8bae1dSRodney W. Grimes struct msync_args *uap; 496df8bae1dSRodney W. Grimes { 497df8bae1dSRodney W. Grimes vm_offset_t addr; 498dabee6feSPeter Wemm vm_size_t size, pageoff; 499e6c6af11SDavid Greenman int flags; 500df8bae1dSRodney W. Grimes vm_map_t map; 501df8bae1dSRodney W. Grimes int rv; 502df8bae1dSRodney W. Grimes 503df8bae1dSRodney W. Grimes addr = (vm_offset_t) uap->addr; 5049154ee6aSPeter Wemm size = uap->len; 505e6c6af11SDavid Greenman flags = uap->flags; 506e6c6af11SDavid Greenman 507dabee6feSPeter Wemm pageoff = (addr & PAGE_MASK); 508dabee6feSPeter Wemm addr -= pageoff; 509dabee6feSPeter Wemm size += pageoff; 510dabee6feSPeter Wemm size = (vm_size_t) round_page(size); 5119154ee6aSPeter Wemm if (addr + size < addr) 512dabee6feSPeter Wemm return (EINVAL); 513dabee6feSPeter Wemm 514dabee6feSPeter Wemm if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE)) 5151e62bc63SDavid Greenman return (EINVAL); 5161e62bc63SDavid Greenman 517b40ce416SJulian Elischer map = &td->td_proc->p_vmspace->vm_map; 5189154ee6aSPeter Wemm 519df8bae1dSRodney W. Grimes /* 520df8bae1dSRodney W. Grimes * Clean the pages and interpret the return value. 521df8bae1dSRodney W. Grimes */ 522950f8459SAlan Cox rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0, 523e6c6af11SDavid Greenman (flags & MS_INVALIDATE) != 0); 524df8bae1dSRodney W. Grimes switch (rv) { 525df8bae1dSRodney W. Grimes case KERN_SUCCESS: 526d2c60af8SMatthew Dillon return (0); 527df8bae1dSRodney W. Grimes case KERN_INVALID_ADDRESS: 528df8bae1dSRodney W. Grimes return (EINVAL); /* Sun returns ENOMEM? */ 529b7b7cd44SAlan Cox case KERN_INVALID_ARGUMENT: 530b7b7cd44SAlan Cox return (EBUSY); 531df8bae1dSRodney W. Grimes default: 532df8bae1dSRodney W. Grimes return (EINVAL); 533df8bae1dSRodney W. Grimes } 534df8bae1dSRodney W. Grimes } 535df8bae1dSRodney W. Grimes 536d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 537df8bae1dSRodney W. Grimes struct munmap_args { 538651bb817SAlexander Langer void *addr; 5399154ee6aSPeter Wemm size_t len; 540df8bae1dSRodney W. Grimes }; 541d2d3e875SBruce Evans #endif 542d2c60af8SMatthew Dillon /* 543d2c60af8SMatthew Dillon * MPSAFE 544d2c60af8SMatthew Dillon */ 545df8bae1dSRodney W. Grimes int 546b40ce416SJulian Elischer munmap(td, uap) 547b40ce416SJulian Elischer struct thread *td; 54854d92145SMatthew Dillon struct munmap_args *uap; 549df8bae1dSRodney W. Grimes { 55049874f6eSJoseph Koshy #ifdef HWPMC_HOOKS 55149874f6eSJoseph Koshy struct pmckern_map_out pkm; 55249874f6eSJoseph Koshy vm_map_entry_t entry; 55349874f6eSJoseph Koshy #endif 554df8bae1dSRodney W. Grimes vm_offset_t addr; 555dabee6feSPeter Wemm vm_size_t size, pageoff; 556df8bae1dSRodney W. Grimes vm_map_t map; 557df8bae1dSRodney W. Grimes 558df8bae1dSRodney W. Grimes addr = (vm_offset_t) uap->addr; 5599154ee6aSPeter Wemm size = uap->len; 560d8834602SAlan Cox if (size == 0) 561d8834602SAlan Cox return (EINVAL); 562dabee6feSPeter Wemm 563dabee6feSPeter Wemm pageoff = (addr & PAGE_MASK); 564dabee6feSPeter Wemm addr -= pageoff; 565dabee6feSPeter Wemm size += pageoff; 566dabee6feSPeter Wemm size = (vm_size_t) round_page(size); 5679154ee6aSPeter Wemm if (addr + size < addr) 568df8bae1dSRodney W. Grimes return (EINVAL); 5699154ee6aSPeter Wemm 570df8bae1dSRodney W. Grimes /* 57105ba50f5SJake Burkholder * Check for illegal addresses. Watch out for address wrap... 572df8bae1dSRodney W. Grimes */ 573b40ce416SJulian Elischer map = &td->td_proc->p_vmspace->vm_map; 57405ba50f5SJake Burkholder if (addr < vm_map_min(map) || addr + size > vm_map_max(map)) 57505ba50f5SJake Burkholder return (EINVAL); 576d8834602SAlan Cox vm_map_lock(map); 57749874f6eSJoseph Koshy #ifdef HWPMC_HOOKS 57849874f6eSJoseph Koshy /* 57949874f6eSJoseph Koshy * Inform hwpmc if the address range being unmapped contains 58049874f6eSJoseph Koshy * an executable region. 58149874f6eSJoseph Koshy */ 5820d419640SRyan Stone pkm.pm_address = (uintptr_t) NULL; 58349874f6eSJoseph Koshy if (vm_map_lookup_entry(map, addr, &entry)) { 58449874f6eSJoseph Koshy for (; 58549874f6eSJoseph Koshy entry != &map->header && entry->start < addr + size; 58649874f6eSJoseph Koshy entry = entry->next) { 58749874f6eSJoseph Koshy if (vm_map_check_protection(map, entry->start, 58849874f6eSJoseph Koshy entry->end, VM_PROT_EXECUTE) == TRUE) { 58949874f6eSJoseph Koshy pkm.pm_address = (uintptr_t) addr; 59049874f6eSJoseph Koshy pkm.pm_size = (size_t) size; 59149874f6eSJoseph Koshy break; 59249874f6eSJoseph Koshy } 59349874f6eSJoseph Koshy } 59449874f6eSJoseph Koshy } 59549874f6eSJoseph Koshy #endif 596655c3490SKonstantin Belousov vm_map_delete(map, addr, addr + size); 5970d419640SRyan Stone 5980d419640SRyan Stone #ifdef HWPMC_HOOKS 5990d419640SRyan Stone /* downgrade the lock to prevent a LOR with the pmc-sx lock */ 6000d419640SRyan Stone vm_map_lock_downgrade(map); 601d473d3a1SRyan Stone if (pkm.pm_address != (uintptr_t) NULL) 6020d419640SRyan Stone PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm); 6030d419640SRyan Stone vm_map_unlock_read(map); 6040d419640SRyan Stone #else 605d8834602SAlan Cox vm_map_unlock(map); 6060d419640SRyan Stone #endif 6070d419640SRyan Stone /* vm_map_delete returns nothing but KERN_SUCCESS anyway */ 608df8bae1dSRodney W. Grimes return (0); 609df8bae1dSRodney W. Grimes } 610df8bae1dSRodney W. Grimes 611d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 612df8bae1dSRodney W. Grimes struct mprotect_args { 613651bb817SAlexander Langer const void *addr; 6149154ee6aSPeter Wemm size_t len; 615df8bae1dSRodney W. Grimes int prot; 616df8bae1dSRodney W. Grimes }; 617d2d3e875SBruce Evans #endif 618d2c60af8SMatthew Dillon /* 619d2c60af8SMatthew Dillon * MPSAFE 620d2c60af8SMatthew Dillon */ 621df8bae1dSRodney W. Grimes int 622b40ce416SJulian Elischer mprotect(td, uap) 623b40ce416SJulian Elischer struct thread *td; 624df8bae1dSRodney W. Grimes struct mprotect_args *uap; 625df8bae1dSRodney W. Grimes { 626df8bae1dSRodney W. Grimes vm_offset_t addr; 627dabee6feSPeter Wemm vm_size_t size, pageoff; 62854d92145SMatthew Dillon vm_prot_t prot; 629df8bae1dSRodney W. Grimes 630df8bae1dSRodney W. Grimes addr = (vm_offset_t) uap->addr; 6319154ee6aSPeter Wemm size = uap->len; 632df8bae1dSRodney W. Grimes prot = uap->prot & VM_PROT_ALL; 633df8bae1dSRodney W. Grimes 634dabee6feSPeter Wemm pageoff = (addr & PAGE_MASK); 635dabee6feSPeter Wemm addr -= pageoff; 636dabee6feSPeter Wemm size += pageoff; 637dabee6feSPeter Wemm size = (vm_size_t) round_page(size); 6389154ee6aSPeter Wemm if (addr + size < addr) 639dabee6feSPeter Wemm return (EINVAL); 640dabee6feSPeter Wemm 64143285049SAlan Cox switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr, 64243285049SAlan Cox addr + size, prot, FALSE)) { 643df8bae1dSRodney W. Grimes case KERN_SUCCESS: 644df8bae1dSRodney W. Grimes return (0); 645df8bae1dSRodney W. Grimes case KERN_PROTECTION_FAILURE: 646df8bae1dSRodney W. Grimes return (EACCES); 6473364c323SKonstantin Belousov case KERN_RESOURCE_SHORTAGE: 6483364c323SKonstantin Belousov return (ENOMEM); 649df8bae1dSRodney W. Grimes } 650df8bae1dSRodney W. Grimes return (EINVAL); 651df8bae1dSRodney W. Grimes } 652df8bae1dSRodney W. Grimes 653d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 654dabee6feSPeter Wemm struct minherit_args { 655651bb817SAlexander Langer void *addr; 6569154ee6aSPeter Wemm size_t len; 657dabee6feSPeter Wemm int inherit; 658dabee6feSPeter Wemm }; 659dabee6feSPeter Wemm #endif 660d2c60af8SMatthew Dillon /* 661d2c60af8SMatthew Dillon * MPSAFE 662d2c60af8SMatthew Dillon */ 663dabee6feSPeter Wemm int 664b40ce416SJulian Elischer minherit(td, uap) 665b40ce416SJulian Elischer struct thread *td; 666dabee6feSPeter Wemm struct minherit_args *uap; 667dabee6feSPeter Wemm { 668dabee6feSPeter Wemm vm_offset_t addr; 669dabee6feSPeter Wemm vm_size_t size, pageoff; 67054d92145SMatthew Dillon vm_inherit_t inherit; 671dabee6feSPeter Wemm 672dabee6feSPeter Wemm addr = (vm_offset_t)uap->addr; 6739154ee6aSPeter Wemm size = uap->len; 674dabee6feSPeter Wemm inherit = uap->inherit; 675dabee6feSPeter Wemm 676dabee6feSPeter Wemm pageoff = (addr & PAGE_MASK); 677dabee6feSPeter Wemm addr -= pageoff; 678dabee6feSPeter Wemm size += pageoff; 679dabee6feSPeter Wemm size = (vm_size_t) round_page(size); 6809154ee6aSPeter Wemm if (addr + size < addr) 681dabee6feSPeter Wemm return (EINVAL); 682dabee6feSPeter Wemm 683e0be79afSAlan Cox switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr, 684e0be79afSAlan Cox addr + size, inherit)) { 685dabee6feSPeter Wemm case KERN_SUCCESS: 686dabee6feSPeter Wemm return (0); 687dabee6feSPeter Wemm case KERN_PROTECTION_FAILURE: 688dabee6feSPeter Wemm return (EACCES); 689dabee6feSPeter Wemm } 690dabee6feSPeter Wemm return (EINVAL); 691dabee6feSPeter Wemm } 692dabee6feSPeter Wemm 693dabee6feSPeter Wemm #ifndef _SYS_SYSPROTO_H_ 694df8bae1dSRodney W. Grimes struct madvise_args { 695651bb817SAlexander Langer void *addr; 6969154ee6aSPeter Wemm size_t len; 697df8bae1dSRodney W. Grimes int behav; 698df8bae1dSRodney W. Grimes }; 699d2d3e875SBruce Evans #endif 7000d94caffSDavid Greenman 701d2c60af8SMatthew Dillon /* 702d2c60af8SMatthew Dillon * MPSAFE 703d2c60af8SMatthew Dillon */ 704df8bae1dSRodney W. Grimes /* ARGSUSED */ 705df8bae1dSRodney W. Grimes int 706b40ce416SJulian Elischer madvise(td, uap) 707b40ce416SJulian Elischer struct thread *td; 708df8bae1dSRodney W. Grimes struct madvise_args *uap; 709df8bae1dSRodney W. Grimes { 710f35329acSJohn Dyson vm_offset_t start, end; 71105ba50f5SJake Burkholder vm_map_t map; 712f4cf2141SWes Peters struct proc *p; 713f4cf2141SWes Peters int error; 714b4309055SMatthew Dillon 715b4309055SMatthew Dillon /* 716f4cf2141SWes Peters * Check for our special case, advising the swap pager we are 717f4cf2141SWes Peters * "immortal." 718f4cf2141SWes Peters */ 719f4cf2141SWes Peters if (uap->behav == MADV_PROTECT) { 720acd3428bSRobert Watson error = priv_check(td, PRIV_VM_MADV_PROTECT); 72169297bf8SJohn Baldwin if (error == 0) { 722f4cf2141SWes Peters p = td->td_proc; 723f4cf2141SWes Peters PROC_LOCK(p); 724f4cf2141SWes Peters p->p_flag |= P_PROTECTED; 725f4cf2141SWes Peters PROC_UNLOCK(p); 72669297bf8SJohn Baldwin } 727f4cf2141SWes Peters return (error); 728f4cf2141SWes Peters } 729f4cf2141SWes Peters /* 730b4309055SMatthew Dillon * Check for illegal behavior 731b4309055SMatthew Dillon */ 7329730a5daSPaul Saab if (uap->behav < 0 || uap->behav > MADV_CORE) 733b4309055SMatthew Dillon return (EINVAL); 734867a482dSJohn Dyson /* 735867a482dSJohn Dyson * Check for illegal addresses. Watch out for address wrap... Note 736867a482dSJohn Dyson * that VM_*_ADDRESS are not constants due to casts (argh). 737867a482dSJohn Dyson */ 73805ba50f5SJake Burkholder map = &td->td_proc->p_vmspace->vm_map; 73905ba50f5SJake Burkholder if ((vm_offset_t)uap->addr < vm_map_min(map) || 74005ba50f5SJake Burkholder (vm_offset_t)uap->addr + uap->len > vm_map_max(map)) 741867a482dSJohn Dyson return (EINVAL); 742867a482dSJohn Dyson if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr) 743867a482dSJohn Dyson return (EINVAL); 744867a482dSJohn Dyson 745867a482dSJohn Dyson /* 746867a482dSJohn Dyson * Since this routine is only advisory, we default to conservative 747867a482dSJohn Dyson * behavior. 748867a482dSJohn Dyson */ 749cd6eea25SDavid Greenman start = trunc_page((vm_offset_t) uap->addr); 750cd6eea25SDavid Greenman end = round_page((vm_offset_t) uap->addr + uap->len); 751867a482dSJohn Dyson 75205ba50f5SJake Burkholder if (vm_map_madvise(map, start, end, uap->behav)) 753094f6d26SAlan Cox return (EINVAL); 754094f6d26SAlan Cox return (0); 755df8bae1dSRodney W. Grimes } 756df8bae1dSRodney W. Grimes 757d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 758df8bae1dSRodney W. Grimes struct mincore_args { 759651bb817SAlexander Langer const void *addr; 7609154ee6aSPeter Wemm size_t len; 761df8bae1dSRodney W. Grimes char *vec; 762df8bae1dSRodney W. Grimes }; 763d2d3e875SBruce Evans #endif 7640d94caffSDavid Greenman 765d2c60af8SMatthew Dillon /* 766d2c60af8SMatthew Dillon * MPSAFE 767d2c60af8SMatthew Dillon */ 768df8bae1dSRodney W. Grimes /* ARGSUSED */ 769df8bae1dSRodney W. Grimes int 770b40ce416SJulian Elischer mincore(td, uap) 771b40ce416SJulian Elischer struct thread *td; 772df8bae1dSRodney W. Grimes struct mincore_args *uap; 773df8bae1dSRodney W. Grimes { 774867a482dSJohn Dyson vm_offset_t addr, first_addr; 775867a482dSJohn Dyson vm_offset_t end, cend; 776867a482dSJohn Dyson pmap_t pmap; 777867a482dSJohn Dyson vm_map_t map; 77802c04a2fSJohn Dyson char *vec; 779d2c60af8SMatthew Dillon int error = 0; 780867a482dSJohn Dyson int vecindex, lastvecindex; 78154d92145SMatthew Dillon vm_map_entry_t current; 782867a482dSJohn Dyson vm_map_entry_t entry; 783567e51e1SAlan Cox vm_object_t object; 784567e51e1SAlan Cox vm_paddr_t locked_pa; 785567e51e1SAlan Cox vm_page_t m; 786567e51e1SAlan Cox vm_pindex_t pindex; 787867a482dSJohn Dyson int mincoreinfo; 788dd2622a8SAlan Cox unsigned int timestamp; 789567e51e1SAlan Cox boolean_t locked; 790df8bae1dSRodney W. Grimes 791867a482dSJohn Dyson /* 792867a482dSJohn Dyson * Make sure that the addresses presented are valid for user 793867a482dSJohn Dyson * mode. 794867a482dSJohn Dyson */ 795867a482dSJohn Dyson first_addr = addr = trunc_page((vm_offset_t) uap->addr); 7969154ee6aSPeter Wemm end = addr + (vm_size_t)round_page(uap->len); 79705ba50f5SJake Burkholder map = &td->td_proc->p_vmspace->vm_map; 79805ba50f5SJake Burkholder if (end > vm_map_max(map) || end < addr) 799455dd7d4SKonstantin Belousov return (ENOMEM); 80002c04a2fSJohn Dyson 801867a482dSJohn Dyson /* 802867a482dSJohn Dyson * Address of byte vector 803867a482dSJohn Dyson */ 80402c04a2fSJohn Dyson vec = uap->vec; 805867a482dSJohn Dyson 806b40ce416SJulian Elischer pmap = vmspace_pmap(td->td_proc->p_vmspace); 807867a482dSJohn Dyson 808eff50fcdSAlan Cox vm_map_lock_read(map); 809dd2622a8SAlan Cox RestartScan: 810dd2622a8SAlan Cox timestamp = map->timestamp; 811867a482dSJohn Dyson 812455dd7d4SKonstantin Belousov if (!vm_map_lookup_entry(map, addr, &entry)) { 813455dd7d4SKonstantin Belousov vm_map_unlock_read(map); 814455dd7d4SKonstantin Belousov return (ENOMEM); 815455dd7d4SKonstantin Belousov } 816867a482dSJohn Dyson 817867a482dSJohn Dyson /* 818867a482dSJohn Dyson * Do this on a map entry basis so that if the pages are not 819867a482dSJohn Dyson * in the current processes address space, we can easily look 820867a482dSJohn Dyson * up the pages elsewhere. 821867a482dSJohn Dyson */ 822867a482dSJohn Dyson lastvecindex = -1; 823867a482dSJohn Dyson for (current = entry; 824867a482dSJohn Dyson (current != &map->header) && (current->start < end); 825867a482dSJohn Dyson current = current->next) { 826867a482dSJohn Dyson 827867a482dSJohn Dyson /* 828455dd7d4SKonstantin Belousov * check for contiguity 829455dd7d4SKonstantin Belousov */ 830455dd7d4SKonstantin Belousov if (current->end < end && 831455dd7d4SKonstantin Belousov (entry->next == &map->header || 832455dd7d4SKonstantin Belousov current->next->start > current->end)) { 833455dd7d4SKonstantin Belousov vm_map_unlock_read(map); 834455dd7d4SKonstantin Belousov return (ENOMEM); 835455dd7d4SKonstantin Belousov } 836455dd7d4SKonstantin Belousov 837455dd7d4SKonstantin Belousov /* 838867a482dSJohn Dyson * ignore submaps (for now) or null objects 839867a482dSJohn Dyson */ 8409fdfe602SMatthew Dillon if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) || 841867a482dSJohn Dyson current->object.vm_object == NULL) 842867a482dSJohn Dyson continue; 843867a482dSJohn Dyson 844867a482dSJohn Dyson /* 845867a482dSJohn Dyson * limit this scan to the current map entry and the 846867a482dSJohn Dyson * limits for the mincore call 847867a482dSJohn Dyson */ 848867a482dSJohn Dyson if (addr < current->start) 849867a482dSJohn Dyson addr = current->start; 850867a482dSJohn Dyson cend = current->end; 851867a482dSJohn Dyson if (cend > end) 852867a482dSJohn Dyson cend = end; 853867a482dSJohn Dyson 854867a482dSJohn Dyson /* 855867a482dSJohn Dyson * scan this entry one page at a time 856867a482dSJohn Dyson */ 857867a482dSJohn Dyson while (addr < cend) { 858867a482dSJohn Dyson /* 859867a482dSJohn Dyson * Check pmap first, it is likely faster, also 860867a482dSJohn Dyson * it can provide info as to whether we are the 861867a482dSJohn Dyson * one referencing or modifying the page. 862867a482dSJohn Dyson */ 863567e51e1SAlan Cox object = NULL; 864567e51e1SAlan Cox locked_pa = 0; 865567e51e1SAlan Cox retry: 866567e51e1SAlan Cox m = NULL; 867567e51e1SAlan Cox mincoreinfo = pmap_mincore(pmap, addr, &locked_pa); 868567e51e1SAlan Cox if (locked_pa != 0) { 869867a482dSJohn Dyson /* 870567e51e1SAlan Cox * The page is mapped by this process but not 871567e51e1SAlan Cox * both accessed and modified. It is also 872567e51e1SAlan Cox * managed. Acquire the object lock so that 873567e51e1SAlan Cox * other mappings might be examined. 874867a482dSJohn Dyson */ 875567e51e1SAlan Cox m = PHYS_TO_VM_PAGE(locked_pa); 876567e51e1SAlan Cox if (m->object != object) { 877567e51e1SAlan Cox if (object != NULL) 878567e51e1SAlan Cox VM_OBJECT_UNLOCK(object); 879567e51e1SAlan Cox object = m->object; 880567e51e1SAlan Cox locked = VM_OBJECT_TRYLOCK(object); 881567e51e1SAlan Cox vm_page_unlock(m); 882567e51e1SAlan Cox if (!locked) { 883567e51e1SAlan Cox VM_OBJECT_LOCK(object); 8842965a453SKip Macy vm_page_lock(m); 885567e51e1SAlan Cox goto retry; 886567e51e1SAlan Cox } 887567e51e1SAlan Cox } else 888567e51e1SAlan Cox vm_page_unlock(m); 889567e51e1SAlan Cox KASSERT(m->valid == VM_PAGE_BITS_ALL, 890567e51e1SAlan Cox ("mincore: page %p is mapped but invalid", 891567e51e1SAlan Cox m)); 892567e51e1SAlan Cox } else if (mincoreinfo == 0) { 893567e51e1SAlan Cox /* 894567e51e1SAlan Cox * The page is not mapped by this process. If 895567e51e1SAlan Cox * the object implements managed pages, then 896567e51e1SAlan Cox * determine if the page is resident so that 897567e51e1SAlan Cox * the mappings might be examined. 898567e51e1SAlan Cox */ 899567e51e1SAlan Cox if (current->object.vm_object != object) { 900567e51e1SAlan Cox if (object != NULL) 901567e51e1SAlan Cox VM_OBJECT_UNLOCK(object); 902567e51e1SAlan Cox object = current->object.vm_object; 903567e51e1SAlan Cox VM_OBJECT_LOCK(object); 904567e51e1SAlan Cox } 905567e51e1SAlan Cox if (object->type == OBJT_DEFAULT || 906567e51e1SAlan Cox object->type == OBJT_SWAP || 907567e51e1SAlan Cox object->type == OBJT_VNODE) { 908567e51e1SAlan Cox pindex = OFF_TO_IDX(current->offset + 909567e51e1SAlan Cox (addr - current->start)); 910567e51e1SAlan Cox m = vm_page_lookup(object, pindex); 911567e51e1SAlan Cox if (m != NULL && m->valid == 0) 912567e51e1SAlan Cox m = NULL; 913567e51e1SAlan Cox if (m != NULL) 914567e51e1SAlan Cox mincoreinfo = MINCORE_INCORE; 915567e51e1SAlan Cox } 916567e51e1SAlan Cox } 917567e51e1SAlan Cox if (m != NULL) { 918567e51e1SAlan Cox /* Examine other mappings to the page. */ 919567e51e1SAlan Cox if (m->dirty == 0 && pmap_is_modified(m)) 920567e51e1SAlan Cox vm_page_dirty(m); 921567e51e1SAlan Cox if (m->dirty != 0) 922867a482dSJohn Dyson mincoreinfo |= MINCORE_MODIFIED_OTHER; 923c46b90e9SAlan Cox /* 924c46b90e9SAlan Cox * The first test for PG_REFERENCED is an 925c46b90e9SAlan Cox * optimization. The second test is 926c46b90e9SAlan Cox * required because a concurrent pmap 927c46b90e9SAlan Cox * operation could clear the last reference 928c46b90e9SAlan Cox * and set PG_REFERENCED before the call to 929c46b90e9SAlan Cox * pmap_is_referenced(). 930c46b90e9SAlan Cox */ 931567e51e1SAlan Cox if ((m->flags & PG_REFERENCED) != 0 || 932c46b90e9SAlan Cox pmap_is_referenced(m) || 933c46b90e9SAlan Cox (m->flags & PG_REFERENCED) != 0) 934867a482dSJohn Dyson mincoreinfo |= MINCORE_REFERENCED_OTHER; 9359b5a5d81SJohn Dyson } 936567e51e1SAlan Cox if (object != NULL) 937567e51e1SAlan Cox VM_OBJECT_UNLOCK(object); 938867a482dSJohn Dyson 939867a482dSJohn Dyson /* 940dd2622a8SAlan Cox * subyte may page fault. In case it needs to modify 941dd2622a8SAlan Cox * the map, we release the lock. 942dd2622a8SAlan Cox */ 943dd2622a8SAlan Cox vm_map_unlock_read(map); 944dd2622a8SAlan Cox 945dd2622a8SAlan Cox /* 946867a482dSJohn Dyson * calculate index into user supplied byte vector 947867a482dSJohn Dyson */ 948867a482dSJohn Dyson vecindex = OFF_TO_IDX(addr - first_addr); 949867a482dSJohn Dyson 950867a482dSJohn Dyson /* 951867a482dSJohn Dyson * If we have skipped map entries, we need to make sure that 952867a482dSJohn Dyson * the byte vector is zeroed for those skipped entries. 953867a482dSJohn Dyson */ 954867a482dSJohn Dyson while ((lastvecindex + 1) < vecindex) { 955867a482dSJohn Dyson error = subyte(vec + lastvecindex, 0); 956867a482dSJohn Dyson if (error) { 957d2c60af8SMatthew Dillon error = EFAULT; 958d2c60af8SMatthew Dillon goto done2; 959867a482dSJohn Dyson } 960867a482dSJohn Dyson ++lastvecindex; 961867a482dSJohn Dyson } 962867a482dSJohn Dyson 963867a482dSJohn Dyson /* 964867a482dSJohn Dyson * Pass the page information to the user 965867a482dSJohn Dyson */ 966867a482dSJohn Dyson error = subyte(vec + vecindex, mincoreinfo); 967867a482dSJohn Dyson if (error) { 968d2c60af8SMatthew Dillon error = EFAULT; 969d2c60af8SMatthew Dillon goto done2; 970867a482dSJohn Dyson } 971dd2622a8SAlan Cox 972dd2622a8SAlan Cox /* 973dd2622a8SAlan Cox * If the map has changed, due to the subyte, the previous 974dd2622a8SAlan Cox * output may be invalid. 975dd2622a8SAlan Cox */ 976dd2622a8SAlan Cox vm_map_lock_read(map); 977dd2622a8SAlan Cox if (timestamp != map->timestamp) 978dd2622a8SAlan Cox goto RestartScan; 979dd2622a8SAlan Cox 980867a482dSJohn Dyson lastvecindex = vecindex; 98102c04a2fSJohn Dyson addr += PAGE_SIZE; 98202c04a2fSJohn Dyson } 983867a482dSJohn Dyson } 984867a482dSJohn Dyson 985867a482dSJohn Dyson /* 986dd2622a8SAlan Cox * subyte may page fault. In case it needs to modify 987dd2622a8SAlan Cox * the map, we release the lock. 988dd2622a8SAlan Cox */ 989dd2622a8SAlan Cox vm_map_unlock_read(map); 990dd2622a8SAlan Cox 991dd2622a8SAlan Cox /* 992867a482dSJohn Dyson * Zero the last entries in the byte vector. 993867a482dSJohn Dyson */ 994867a482dSJohn Dyson vecindex = OFF_TO_IDX(end - first_addr); 995867a482dSJohn Dyson while ((lastvecindex + 1) < vecindex) { 996867a482dSJohn Dyson error = subyte(vec + lastvecindex, 0); 997867a482dSJohn Dyson if (error) { 998d2c60af8SMatthew Dillon error = EFAULT; 999d2c60af8SMatthew Dillon goto done2; 1000867a482dSJohn Dyson } 1001867a482dSJohn Dyson ++lastvecindex; 1002867a482dSJohn Dyson } 1003867a482dSJohn Dyson 1004dd2622a8SAlan Cox /* 1005dd2622a8SAlan Cox * If the map has changed, due to the subyte, the previous 1006dd2622a8SAlan Cox * output may be invalid. 1007dd2622a8SAlan Cox */ 1008dd2622a8SAlan Cox vm_map_lock_read(map); 1009dd2622a8SAlan Cox if (timestamp != map->timestamp) 1010dd2622a8SAlan Cox goto RestartScan; 1011eff50fcdSAlan Cox vm_map_unlock_read(map); 1012d2c60af8SMatthew Dillon done2: 1013d2c60af8SMatthew Dillon return (error); 1014df8bae1dSRodney W. Grimes } 1015df8bae1dSRodney W. Grimes 1016d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 1017df8bae1dSRodney W. Grimes struct mlock_args { 1018651bb817SAlexander Langer const void *addr; 1019df8bae1dSRodney W. Grimes size_t len; 1020df8bae1dSRodney W. Grimes }; 1021d2d3e875SBruce Evans #endif 1022d2c60af8SMatthew Dillon /* 1023d2c60af8SMatthew Dillon * MPSAFE 1024d2c60af8SMatthew Dillon */ 1025df8bae1dSRodney W. Grimes int 1026b40ce416SJulian Elischer mlock(td, uap) 1027b40ce416SJulian Elischer struct thread *td; 1028df8bae1dSRodney W. Grimes struct mlock_args *uap; 1029df8bae1dSRodney W. Grimes { 1030f0ea4612SDon Lewis struct proc *proc; 1031bb734798SDon Lewis vm_offset_t addr, end, last, start; 1032bb734798SDon Lewis vm_size_t npages, size; 1033bb734798SDon Lewis int error; 1034df8bae1dSRodney W. Grimes 1035acd3428bSRobert Watson error = priv_check(td, PRIV_VM_MLOCK); 103647934cefSDon Lewis if (error) 103747934cefSDon Lewis return (error); 103816929939SDon Lewis addr = (vm_offset_t)uap->addr; 103916929939SDon Lewis size = uap->len; 1040bb734798SDon Lewis last = addr + size; 104116929939SDon Lewis start = trunc_page(addr); 1042bb734798SDon Lewis end = round_page(last); 1043bb734798SDon Lewis if (last < addr || end < addr) 1044df8bae1dSRodney W. Grimes return (EINVAL); 104516929939SDon Lewis npages = atop(end - start); 104616929939SDon Lewis if (npages > vm_page_max_wired) 104716929939SDon Lewis return (ENOMEM); 1048f0ea4612SDon Lewis proc = td->td_proc; 104947934cefSDon Lewis PROC_LOCK(proc); 1050bb734798SDon Lewis if (ptoa(npages + 1051bb734798SDon Lewis pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) > 1052bb734798SDon Lewis lim_cur(proc, RLIMIT_MEMLOCK)) { 105347934cefSDon Lewis PROC_UNLOCK(proc); 10544a40e3d4SJohn Dyson return (ENOMEM); 105591d5354aSJohn Baldwin } 105647934cefSDon Lewis PROC_UNLOCK(proc); 10572feb50bfSAttilio Rao if (npages + cnt.v_wire_count > vm_page_max_wired) 105816929939SDon Lewis return (EAGAIN); 105916929939SDon Lewis error = vm_map_wire(&proc->p_vmspace->vm_map, start, end, 106016929939SDon Lewis VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 1061df8bae1dSRodney W. Grimes return (error == KERN_SUCCESS ? 0 : ENOMEM); 1062df8bae1dSRodney W. Grimes } 1063df8bae1dSRodney W. Grimes 1064d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_ 10654a40e3d4SJohn Dyson struct mlockall_args { 10664a40e3d4SJohn Dyson int how; 10674a40e3d4SJohn Dyson }; 10684a40e3d4SJohn Dyson #endif 10694a40e3d4SJohn Dyson 1070d2c60af8SMatthew Dillon /* 1071d2c60af8SMatthew Dillon * MPSAFE 1072d2c60af8SMatthew Dillon */ 10734a40e3d4SJohn Dyson int 1074b40ce416SJulian Elischer mlockall(td, uap) 1075b40ce416SJulian Elischer struct thread *td; 10764a40e3d4SJohn Dyson struct mlockall_args *uap; 10774a40e3d4SJohn Dyson { 1078abd498aaSBruce M Simpson vm_map_t map; 1079abd498aaSBruce M Simpson int error; 1080abd498aaSBruce M Simpson 1081abd498aaSBruce M Simpson map = &td->td_proc->p_vmspace->vm_map; 1082abd498aaSBruce M Simpson error = 0; 1083abd498aaSBruce M Simpson 1084abd498aaSBruce M Simpson if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0)) 1085abd498aaSBruce M Simpson return (EINVAL); 1086abd498aaSBruce M Simpson 108711f7ddc5SBruce M Simpson #if 0 1088abd498aaSBruce M Simpson /* 1089abd498aaSBruce M Simpson * If wiring all pages in the process would cause it to exceed 1090abd498aaSBruce M Simpson * a hard resource limit, return ENOMEM. 1091abd498aaSBruce M Simpson */ 109291d5354aSJohn Baldwin PROC_LOCK(td->td_proc); 1093fd6f4ffbSEdward Tomasz Napierala if (map->size > lim_cur(td->td_proc, RLIMIT_MEMLOCK)) { 109491d5354aSJohn Baldwin PROC_UNLOCK(td->td_proc); 1095abd498aaSBruce M Simpson return (ENOMEM); 109691d5354aSJohn Baldwin } 109791d5354aSJohn Baldwin PROC_UNLOCK(td->td_proc); 1098abd498aaSBruce M Simpson #else 1099acd3428bSRobert Watson error = priv_check(td, PRIV_VM_MLOCK); 1100abd498aaSBruce M Simpson if (error) 1101abd498aaSBruce M Simpson return (error); 1102abd498aaSBruce M Simpson #endif 1103abd498aaSBruce M Simpson 1104abd498aaSBruce M Simpson if (uap->how & MCL_FUTURE) { 1105abd498aaSBruce M Simpson vm_map_lock(map); 1106abd498aaSBruce M Simpson vm_map_modflags(map, MAP_WIREFUTURE, 0); 1107abd498aaSBruce M Simpson vm_map_unlock(map); 1108abd498aaSBruce M Simpson error = 0; 1109abd498aaSBruce M Simpson } 1110abd498aaSBruce M Simpson 1111abd498aaSBruce M Simpson if (uap->how & MCL_CURRENT) { 1112abd498aaSBruce M Simpson /* 1113abd498aaSBruce M Simpson * P1003.1-2001 mandates that all currently mapped pages 1114abd498aaSBruce M Simpson * will be memory resident and locked (wired) upon return 1115abd498aaSBruce M Simpson * from mlockall(). vm_map_wire() will wire pages, by 1116abd498aaSBruce M Simpson * calling vm_fault_wire() for each page in the region. 1117abd498aaSBruce M Simpson */ 1118abd498aaSBruce M Simpson error = vm_map_wire(map, vm_map_min(map), vm_map_max(map), 1119abd498aaSBruce M Simpson VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK); 1120abd498aaSBruce M Simpson error = (error == KERN_SUCCESS ? 0 : EAGAIN); 1121abd498aaSBruce M Simpson } 1122abd498aaSBruce M Simpson 1123abd498aaSBruce M Simpson return (error); 11244a40e3d4SJohn Dyson } 11254a40e3d4SJohn Dyson 11264a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_ 1127fa721254SAlfred Perlstein struct munlockall_args { 1128abd498aaSBruce M Simpson register_t dummy; 11294a40e3d4SJohn Dyson }; 11304a40e3d4SJohn Dyson #endif 11314a40e3d4SJohn Dyson 1132d2c60af8SMatthew Dillon /* 1133d2c60af8SMatthew Dillon * MPSAFE 1134d2c60af8SMatthew Dillon */ 11354a40e3d4SJohn Dyson int 1136b40ce416SJulian Elischer munlockall(td, uap) 1137b40ce416SJulian Elischer struct thread *td; 11384a40e3d4SJohn Dyson struct munlockall_args *uap; 11394a40e3d4SJohn Dyson { 1140abd498aaSBruce M Simpson vm_map_t map; 1141abd498aaSBruce M Simpson int error; 1142abd498aaSBruce M Simpson 1143abd498aaSBruce M Simpson map = &td->td_proc->p_vmspace->vm_map; 1144acd3428bSRobert Watson error = priv_check(td, PRIV_VM_MUNLOCK); 1145abd498aaSBruce M Simpson if (error) 1146abd498aaSBruce M Simpson return (error); 1147abd498aaSBruce M Simpson 1148abd498aaSBruce M Simpson /* Clear the MAP_WIREFUTURE flag from this vm_map. */ 1149abd498aaSBruce M Simpson vm_map_lock(map); 1150abd498aaSBruce M Simpson vm_map_modflags(map, 0, MAP_WIREFUTURE); 1151abd498aaSBruce M Simpson vm_map_unlock(map); 1152abd498aaSBruce M Simpson 1153abd498aaSBruce M Simpson /* Forcibly unwire all pages. */ 1154abd498aaSBruce M Simpson error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map), 1155abd498aaSBruce M Simpson VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK); 1156abd498aaSBruce M Simpson 1157abd498aaSBruce M Simpson return (error); 11584a40e3d4SJohn Dyson } 11594a40e3d4SJohn Dyson 11604a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_ 1161df8bae1dSRodney W. Grimes struct munlock_args { 1162651bb817SAlexander Langer const void *addr; 1163df8bae1dSRodney W. Grimes size_t len; 1164df8bae1dSRodney W. Grimes }; 1165d2d3e875SBruce Evans #endif 1166d2c60af8SMatthew Dillon /* 1167d2c60af8SMatthew Dillon * MPSAFE 1168d2c60af8SMatthew Dillon */ 1169df8bae1dSRodney W. Grimes int 1170b40ce416SJulian Elischer munlock(td, uap) 1171b40ce416SJulian Elischer struct thread *td; 1172df8bae1dSRodney W. Grimes struct munlock_args *uap; 1173df8bae1dSRodney W. Grimes { 1174bb734798SDon Lewis vm_offset_t addr, end, last, start; 117516929939SDon Lewis vm_size_t size; 1176df8bae1dSRodney W. Grimes int error; 1177df8bae1dSRodney W. Grimes 1178acd3428bSRobert Watson error = priv_check(td, PRIV_VM_MUNLOCK); 117947934cefSDon Lewis if (error) 118047934cefSDon Lewis return (error); 118116929939SDon Lewis addr = (vm_offset_t)uap->addr; 118216929939SDon Lewis size = uap->len; 1183bb734798SDon Lewis last = addr + size; 118416929939SDon Lewis start = trunc_page(addr); 1185bb734798SDon Lewis end = round_page(last); 1186bb734798SDon Lewis if (last < addr || end < addr) 1187df8bae1dSRodney W. Grimes return (EINVAL); 118816929939SDon Lewis error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end, 118916929939SDon Lewis VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES); 1190df8bae1dSRodney W. Grimes return (error == KERN_SUCCESS ? 0 : ENOMEM); 1191df8bae1dSRodney W. Grimes } 1192df8bae1dSRodney W. Grimes 1193df8bae1dSRodney W. Grimes /* 1194c8daea13SAlexander Kabaev * vm_mmap_vnode() 1195c8daea13SAlexander Kabaev * 1196c8daea13SAlexander Kabaev * MPSAFE 1197c8daea13SAlexander Kabaev * 1198c8daea13SAlexander Kabaev * Helper function for vm_mmap. Perform sanity check specific for mmap 1199c8daea13SAlexander Kabaev * operations on vnodes. 1200c8daea13SAlexander Kabaev */ 1201c8daea13SAlexander Kabaev int 1202c8daea13SAlexander Kabaev vm_mmap_vnode(struct thread *td, vm_size_t objsize, 1203c8daea13SAlexander Kabaev vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp, 120464345f0bSJohn Baldwin struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp) 1205c8daea13SAlexander Kabaev { 1206c8daea13SAlexander Kabaev struct vattr va; 1207c8daea13SAlexander Kabaev vm_object_t obj; 120864345f0bSJohn Baldwin vm_offset_t foff; 1209ae51ff11SJeff Roberson struct mount *mp; 12100359a12eSAttilio Rao struct ucred *cred; 121164345f0bSJohn Baldwin int error, flags; 1212ae51ff11SJeff Roberson int vfslocked; 1213c8daea13SAlexander Kabaev 1214ae51ff11SJeff Roberson mp = vp->v_mount; 12150359a12eSAttilio Rao cred = td->td_ucred; 1216ae51ff11SJeff Roberson vfslocked = VFS_LOCK_GIANT(mp); 1217fa3de770SJohn Baldwin if ((error = vget(vp, LK_SHARED, td)) != 0) { 1218ae51ff11SJeff Roberson VFS_UNLOCK_GIANT(vfslocked); 1219c8daea13SAlexander Kabaev return (error); 1220c8daea13SAlexander Kabaev } 122164345f0bSJohn Baldwin foff = *foffp; 1222c8daea13SAlexander Kabaev flags = *flagsp; 12238516dd18SPoul-Henning Kamp obj = vp->v_object; 1224c8daea13SAlexander Kabaev if (vp->v_type == VREG) { 1225c8daea13SAlexander Kabaev /* 1226c8daea13SAlexander Kabaev * Get the proper underlying object 1227c8daea13SAlexander Kabaev */ 12288516dd18SPoul-Henning Kamp if (obj == NULL) { 1229c8daea13SAlexander Kabaev error = EINVAL; 1230c8daea13SAlexander Kabaev goto done; 1231c8daea13SAlexander Kabaev } 1232c8daea13SAlexander Kabaev if (obj->handle != vp) { 1233c8daea13SAlexander Kabaev vput(vp); 1234c8daea13SAlexander Kabaev vp = (struct vnode*)obj->handle; 1235fa3de770SJohn Baldwin vget(vp, LK_SHARED, td); 1236c8daea13SAlexander Kabaev } 1237c8daea13SAlexander Kabaev } else if (vp->v_type == VCHR) { 123864345f0bSJohn Baldwin error = vm_mmap_cdev(td, objsize, prot, maxprotp, flagsp, 123964345f0bSJohn Baldwin vp->v_rdev, foffp, objp); 124064345f0bSJohn Baldwin if (error == 0) 124164345f0bSJohn Baldwin goto mark_atime; 124291a35e78SKonstantin Belousov goto done; 1243c8daea13SAlexander Kabaev } else { 1244c8daea13SAlexander Kabaev error = EINVAL; 1245c8daea13SAlexander Kabaev goto done; 1246c8daea13SAlexander Kabaev } 12470359a12eSAttilio Rao if ((error = VOP_GETATTR(vp, &va, cred))) 1248c8daea13SAlexander Kabaev goto done; 1249c92163dcSChristian S.J. Peron #ifdef MAC 12500359a12eSAttilio Rao error = mac_vnode_check_mmap(cred, vp, prot, flags); 1251c92163dcSChristian S.J. Peron if (error != 0) 1252c92163dcSChristian S.J. Peron goto done; 1253c92163dcSChristian S.J. Peron #endif 1254c8daea13SAlexander Kabaev if ((flags & MAP_SHARED) != 0) { 1255c8daea13SAlexander Kabaev if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) { 1256c8daea13SAlexander Kabaev if (prot & PROT_WRITE) { 1257c8daea13SAlexander Kabaev error = EPERM; 1258c8daea13SAlexander Kabaev goto done; 1259c8daea13SAlexander Kabaev } 1260c8daea13SAlexander Kabaev *maxprotp &= ~VM_PROT_WRITE; 1261c8daea13SAlexander Kabaev } 1262c8daea13SAlexander Kabaev } 1263c8daea13SAlexander Kabaev /* 1264c8daea13SAlexander Kabaev * If it is a regular file without any references 1265c8daea13SAlexander Kabaev * we do not need to sync it. 1266c8daea13SAlexander Kabaev * Adjust object size to be the size of actual file. 1267c8daea13SAlexander Kabaev */ 1268c8daea13SAlexander Kabaev objsize = round_page(va.va_size); 1269c8daea13SAlexander Kabaev if (va.va_nlink == 0) 1270c8daea13SAlexander Kabaev flags |= MAP_NOSYNC; 12713364c323SKonstantin Belousov obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred); 1272c8daea13SAlexander Kabaev if (obj == NULL) { 127364345f0bSJohn Baldwin error = ENOMEM; 1274c8daea13SAlexander Kabaev goto done; 1275c8daea13SAlexander Kabaev } 1276c8daea13SAlexander Kabaev *objp = obj; 1277c8daea13SAlexander Kabaev *flagsp = flags; 127864345f0bSJohn Baldwin 127964345f0bSJohn Baldwin mark_atime: 12800359a12eSAttilio Rao vfs_mark_atime(vp, cred); 12811e309003SDiomidis Spinellis 1282c8daea13SAlexander Kabaev done: 1283c8daea13SAlexander Kabaev vput(vp); 1284ae51ff11SJeff Roberson VFS_UNLOCK_GIANT(vfslocked); 1285c8daea13SAlexander Kabaev return (error); 1286c8daea13SAlexander Kabaev } 1287c8daea13SAlexander Kabaev 1288c8daea13SAlexander Kabaev /* 128998df9218SJohn Baldwin * vm_mmap_cdev() 129098df9218SJohn Baldwin * 129198df9218SJohn Baldwin * MPSAFE 129298df9218SJohn Baldwin * 129398df9218SJohn Baldwin * Helper function for vm_mmap. Perform sanity check specific for mmap 129498df9218SJohn Baldwin * operations on cdevs. 129598df9218SJohn Baldwin */ 129698df9218SJohn Baldwin int 129798df9218SJohn Baldwin vm_mmap_cdev(struct thread *td, vm_size_t objsize, 129898df9218SJohn Baldwin vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp, 129964345f0bSJohn Baldwin struct cdev *cdev, vm_ooffset_t *foff, vm_object_t *objp) 130098df9218SJohn Baldwin { 130198df9218SJohn Baldwin vm_object_t obj; 130291a35e78SKonstantin Belousov struct cdevsw *dsw; 13033979450bSKonstantin Belousov int error, flags, ref; 130498df9218SJohn Baldwin 130598df9218SJohn Baldwin flags = *flagsp; 130698df9218SJohn Baldwin 13073979450bSKonstantin Belousov dsw = dev_refthread(cdev, &ref); 130891a35e78SKonstantin Belousov if (dsw == NULL) 130991a35e78SKonstantin Belousov return (ENXIO); 131091a35e78SKonstantin Belousov if (dsw->d_flags & D_MMAP_ANON) { 13113979450bSKonstantin Belousov dev_relthread(cdev, ref); 131298df9218SJohn Baldwin *maxprotp = VM_PROT_ALL; 131398df9218SJohn Baldwin *flagsp |= MAP_ANON; 131498df9218SJohn Baldwin return (0); 131598df9218SJohn Baldwin } 131698df9218SJohn Baldwin /* 131764345f0bSJohn Baldwin * cdevs do not provide private mappings of any kind. 131898df9218SJohn Baldwin */ 131998df9218SJohn Baldwin if ((*maxprotp & VM_PROT_WRITE) == 0 && 132064345f0bSJohn Baldwin (prot & PROT_WRITE) != 0) { 13213979450bSKonstantin Belousov dev_relthread(cdev, ref); 132298df9218SJohn Baldwin return (EACCES); 132364345f0bSJohn Baldwin } 132464345f0bSJohn Baldwin if (flags & (MAP_PRIVATE|MAP_COPY)) { 13253979450bSKonstantin Belousov dev_relthread(cdev, ref); 132698df9218SJohn Baldwin return (EINVAL); 132764345f0bSJohn Baldwin } 132898df9218SJohn Baldwin /* 132998df9218SJohn Baldwin * Force device mappings to be shared. 133098df9218SJohn Baldwin */ 133198df9218SJohn Baldwin flags |= MAP_SHARED; 133298df9218SJohn Baldwin #ifdef MAC_XXX 133364345f0bSJohn Baldwin error = mac_cdev_check_mmap(td->td_ucred, cdev, prot); 133464345f0bSJohn Baldwin if (error != 0) { 13353979450bSKonstantin Belousov dev_relthread(cdev, ref); 133698df9218SJohn Baldwin return (error); 133764345f0bSJohn Baldwin } 133898df9218SJohn Baldwin #endif 133964345f0bSJohn Baldwin /* 134064345f0bSJohn Baldwin * First, try d_mmap_single(). If that is not implemented 134164345f0bSJohn Baldwin * (returns ENODEV), fall back to using the device pager. 134264345f0bSJohn Baldwin * Note that d_mmap_single() must return a reference to the 134364345f0bSJohn Baldwin * object (it needs to bump the reference count of the object 134464345f0bSJohn Baldwin * it returns somehow). 134564345f0bSJohn Baldwin * 134664345f0bSJohn Baldwin * XXX assumes VM_PROT_* == PROT_* 134764345f0bSJohn Baldwin */ 134864345f0bSJohn Baldwin error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot); 13493979450bSKonstantin Belousov dev_relthread(cdev, ref); 135064345f0bSJohn Baldwin if (error != ENODEV) 135164345f0bSJohn Baldwin return (error); 13523364c323SKonstantin Belousov obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff, 13533364c323SKonstantin Belousov td->td_ucred); 135498df9218SJohn Baldwin if (obj == NULL) 135598df9218SJohn Baldwin return (EINVAL); 135698df9218SJohn Baldwin *objp = obj; 135798df9218SJohn Baldwin *flagsp = flags; 135898df9218SJohn Baldwin return (0); 135998df9218SJohn Baldwin } 136098df9218SJohn Baldwin 136198df9218SJohn Baldwin /* 13628e38aeffSJohn Baldwin * vm_mmap_shm() 13638e38aeffSJohn Baldwin * 13648e38aeffSJohn Baldwin * MPSAFE 13658e38aeffSJohn Baldwin * 13668e38aeffSJohn Baldwin * Helper function for vm_mmap. Perform sanity check specific for mmap 13678e38aeffSJohn Baldwin * operations on shm file descriptors. 13688e38aeffSJohn Baldwin */ 13698e38aeffSJohn Baldwin int 13708e38aeffSJohn Baldwin vm_mmap_shm(struct thread *td, vm_size_t objsize, 13718e38aeffSJohn Baldwin vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp, 13728e38aeffSJohn Baldwin struct shmfd *shmfd, vm_ooffset_t foff, vm_object_t *objp) 13738e38aeffSJohn Baldwin { 13748e38aeffSJohn Baldwin int error; 13758e38aeffSJohn Baldwin 1376*da048309SAlan Cox if ((*flagsp & MAP_SHARED) != 0 && 1377*da048309SAlan Cox (*maxprotp & VM_PROT_WRITE) == 0 && 13788e38aeffSJohn Baldwin (prot & PROT_WRITE) != 0) 13798e38aeffSJohn Baldwin return (EACCES); 13808e38aeffSJohn Baldwin #ifdef MAC 13818e38aeffSJohn Baldwin error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, *flagsp); 13828e38aeffSJohn Baldwin if (error != 0) 13838e38aeffSJohn Baldwin return (error); 13848e38aeffSJohn Baldwin #endif 13858e38aeffSJohn Baldwin error = shm_mmap(shmfd, objsize, foff, objp); 13868e38aeffSJohn Baldwin if (error) 13878e38aeffSJohn Baldwin return (error); 13888e38aeffSJohn Baldwin return (0); 13898e38aeffSJohn Baldwin } 13908e38aeffSJohn Baldwin 13918e38aeffSJohn Baldwin /* 1392d2c60af8SMatthew Dillon * vm_mmap() 1393d2c60af8SMatthew Dillon * 1394d2c60af8SMatthew Dillon * MPSAFE 1395d2c60af8SMatthew Dillon * 1396d2c60af8SMatthew Dillon * Internal version of mmap. Currently used by mmap, exec, and sys5 1397d2c60af8SMatthew Dillon * shared memory. Handle is either a vnode pointer or NULL for MAP_ANON. 1398df8bae1dSRodney W. Grimes */ 1399df8bae1dSRodney W. Grimes int 1400b9dcd593SBruce Evans vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, 1401b9dcd593SBruce Evans vm_prot_t maxprot, int flags, 140298df9218SJohn Baldwin objtype_t handle_type, void *handle, 1403b9dcd593SBruce Evans vm_ooffset_t foff) 1404df8bae1dSRodney W. Grimes { 1405df8bae1dSRodney W. Grimes boolean_t fitit; 14066bda842dSMatt Jacob vm_object_t object = NULL; 1407df8bae1dSRodney W. Grimes int rv = KERN_SUCCESS; 140820eec4bbSAlan Cox int docow, error; 1409b40ce416SJulian Elischer struct thread *td = curthread; 1410df8bae1dSRodney W. Grimes 1411df8bae1dSRodney W. Grimes if (size == 0) 1412df8bae1dSRodney W. Grimes return (0); 1413df8bae1dSRodney W. Grimes 1414749474f2SPeter Wemm size = round_page(size); 1415df8bae1dSRodney W. Grimes 141691d5354aSJohn Baldwin PROC_LOCK(td->td_proc); 1417070f64feSMatthew Dillon if (td->td_proc->p_vmspace->vm_map.size + size > 141891d5354aSJohn Baldwin lim_cur(td->td_proc, RLIMIT_VMEM)) { 141991d5354aSJohn Baldwin PROC_UNLOCK(td->td_proc); 1420070f64feSMatthew Dillon return(ENOMEM); 1421070f64feSMatthew Dillon } 142291d5354aSJohn Baldwin PROC_UNLOCK(td->td_proc); 1423070f64feSMatthew Dillon 1424df8bae1dSRodney W. Grimes /* 1425bc9ad247SDavid Greenman * We currently can only deal with page aligned file offsets. 1426bc9ad247SDavid Greenman * The check is here rather than in the syscall because the 1427bc9ad247SDavid Greenman * kernel calls this function internally for other mmaping 1428bc9ad247SDavid Greenman * operations (such as in exec) and non-aligned offsets will 1429bc9ad247SDavid Greenman * cause pmap inconsistencies...so we want to be sure to 1430bc9ad247SDavid Greenman * disallow this in all cases. 1431bc9ad247SDavid Greenman */ 1432bc9ad247SDavid Greenman if (foff & PAGE_MASK) 1433bc9ad247SDavid Greenman return (EINVAL); 1434bc9ad247SDavid Greenman 143506cb7259SDavid Greenman if ((flags & MAP_FIXED) == 0) { 143606cb7259SDavid Greenman fitit = TRUE; 143706cb7259SDavid Greenman *addr = round_page(*addr); 143806cb7259SDavid Greenman } else { 143906cb7259SDavid Greenman if (*addr != trunc_page(*addr)) 144006cb7259SDavid Greenman return (EINVAL); 144106cb7259SDavid Greenman fitit = FALSE; 144206cb7259SDavid Greenman } 1443bc9ad247SDavid Greenman /* 144424a1cce3SDavid Greenman * Lookup/allocate object. 1445df8bae1dSRodney W. Grimes */ 144698df9218SJohn Baldwin switch (handle_type) { 144798df9218SJohn Baldwin case OBJT_DEVICE: 144898df9218SJohn Baldwin error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, 144964345f0bSJohn Baldwin handle, &foff, &object); 145098df9218SJohn Baldwin break; 145198df9218SJohn Baldwin case OBJT_VNODE: 1452c8daea13SAlexander Kabaev error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, 145364345f0bSJohn Baldwin handle, &foff, &object); 145498df9218SJohn Baldwin break; 14558e38aeffSJohn Baldwin case OBJT_SWAP: 14568e38aeffSJohn Baldwin error = vm_mmap_shm(td, size, prot, &maxprot, &flags, 14578e38aeffSJohn Baldwin handle, foff, &object); 14588e38aeffSJohn Baldwin break; 145998df9218SJohn Baldwin case OBJT_DEFAULT: 146098df9218SJohn Baldwin if (handle == NULL) { 146198df9218SJohn Baldwin error = 0; 146298df9218SJohn Baldwin break; 146398df9218SJohn Baldwin } 146498df9218SJohn Baldwin /* FALLTHROUGH */ 146598df9218SJohn Baldwin default: 146698df9218SJohn Baldwin error = EINVAL; 14676bda842dSMatt Jacob break; 146898df9218SJohn Baldwin } 146998df9218SJohn Baldwin if (error) 1470c8daea13SAlexander Kabaev return (error); 14715f55e841SDavid Greenman if (flags & MAP_ANON) { 1472c8daea13SAlexander Kabaev object = NULL; 1473c8daea13SAlexander Kabaev docow = 0; 14745f55e841SDavid Greenman /* 14755f55e841SDavid Greenman * Unnamed anonymous regions always start at 0. 14765f55e841SDavid Greenman */ 147767bf6868SJohn Dyson if (handle == 0) 14785f55e841SDavid Greenman foff = 0; 147974ffb9afSAlan Cox } else if (flags & MAP_PREFAULT_READ) 148074ffb9afSAlan Cox docow = MAP_PREFAULT; 148174ffb9afSAlan Cox else 14824738fa09SAlan Cox docow = MAP_PREFAULT_PARTIAL; 1483df8bae1dSRodney W. Grimes 14844f79d873SMatthew Dillon if ((flags & (MAP_ANON|MAP_SHARED)) == 0) 14854738fa09SAlan Cox docow |= MAP_COPY_ON_WRITE; 14864f79d873SMatthew Dillon if (flags & MAP_NOSYNC) 14874f79d873SMatthew Dillon docow |= MAP_DISABLE_SYNCER; 14889730a5daSPaul Saab if (flags & MAP_NOCORE) 14899730a5daSPaul Saab docow |= MAP_DISABLE_COREDUMP; 14905850152dSJohn Dyson 14912267af78SJulian Elischer if (flags & MAP_STACK) 1492fd75d710SMarcel Moolenaar rv = vm_map_stack(map, *addr, size, prot, maxprot, 1493fd75d710SMarcel Moolenaar docow | MAP_STACK_GROWS_DOWN); 1494d239bd3cSKonstantin Belousov else if (fitit) 1495d0a83a83SAlan Cox rv = vm_map_find(map, object, foff, addr, size, 1496d0a83a83SAlan Cox object != NULL && object->type == OBJT_DEVICE ? 1497d0a83a83SAlan Cox VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, prot, maxprot, docow); 14982267af78SJulian Elischer else 1499b8ca4ef2SAlan Cox rv = vm_map_fixed(map, object, foff, *addr, size, 1500bd7e5f99SJohn Dyson prot, maxprot, docow); 1501bd7e5f99SJohn Dyson 1502d2c60af8SMatthew Dillon if (rv != KERN_SUCCESS) { 15037fb0c17eSDavid Greenman /* 150424a1cce3SDavid Greenman * Lose the object reference. Will destroy the 150524a1cce3SDavid Greenman * object if it's an unnamed anonymous mapping 150624a1cce3SDavid Greenman * or named anonymous without other references. 15077fb0c17eSDavid Greenman */ 1508df8bae1dSRodney W. Grimes vm_object_deallocate(object); 1509d2c60af8SMatthew Dillon } else if (flags & MAP_SHARED) { 1510df8bae1dSRodney W. Grimes /* 1511df8bae1dSRodney W. Grimes * Shared memory is also shared with children. 1512df8bae1dSRodney W. Grimes */ 1513df8bae1dSRodney W. Grimes rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE); 1514e4ca250dSJohn Baldwin if (rv != KERN_SUCCESS) 15157fb0c17eSDavid Greenman (void) vm_map_remove(map, *addr, *addr + size); 1516df8bae1dSRodney W. Grimes } 1517abd498aaSBruce M Simpson 1518abd498aaSBruce M Simpson /* 1519abd498aaSBruce M Simpson * If the process has requested that all future mappings 1520abd498aaSBruce M Simpson * be wired, then heed this. 1521abd498aaSBruce M Simpson */ 1522abd498aaSBruce M Simpson if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE)) 1523abd498aaSBruce M Simpson vm_map_wire(map, *addr, *addr + size, 1524abd498aaSBruce M Simpson VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); 1525abd498aaSBruce M Simpson 1526df8bae1dSRodney W. Grimes switch (rv) { 1527df8bae1dSRodney W. Grimes case KERN_SUCCESS: 1528df8bae1dSRodney W. Grimes return (0); 1529df8bae1dSRodney W. Grimes case KERN_INVALID_ADDRESS: 1530df8bae1dSRodney W. Grimes case KERN_NO_SPACE: 1531df8bae1dSRodney W. Grimes return (ENOMEM); 1532df8bae1dSRodney W. Grimes case KERN_PROTECTION_FAILURE: 1533df8bae1dSRodney W. Grimes return (EACCES); 1534df8bae1dSRodney W. Grimes default: 1535df8bae1dSRodney W. Grimes return (EINVAL); 1536df8bae1dSRodney W. Grimes } 1537df8bae1dSRodney W. Grimes } 1538