/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0,
    "Maximum number of memory-mapped files per process");

/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account, but generous
 * enough that multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init(void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init,
    NULL);

static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}

static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct vnode *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct cdev *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_shm(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct shmfd *, vm_ooffset_t, vm_object_t *);

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif /* COMPAT_43 */

/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_in pkm;
#endif
	struct file *fp;
	struct vnode *vp;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	objtype_t handle_type;
	int flags, error;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	fp = NULL;

	/* Make sure mapping fits into numeric range, etc. */
	if ((uap->len == 0 && !SV_CURPROC_FLAG(SV_AOUT) &&
	     curproc->p_osrel >= 800104) ||
	    ((flags & MAP_ANON) && (uap->fd != -1 || pos != 0)))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		PROC_LOCK(td->td_proc);
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td->td_proc, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td->td_proc, RLIMIT_DATA));
		PROC_UNLOCK(td->td_proc);
	}
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		handle_type = OBJT_DEFAULT;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation and
		 * don't let the descriptor disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type == DTYPE_SHM) {
			handle = fp->f_data;
			handle_type = OBJT_SWAP;
			maxprot = VM_PROT_NONE;

			/* FREAD should always be set. */
			if (fp->f_flag & FREAD)
				maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
			if (fp->f_flag & FWRITE)
				maxprot |= VM_PROT_WRITE;
			goto map;
		}
		if (fp->f_type != DTYPE_VNODE) {
			error = ENODEV;
			goto done;
		}
#if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \
    defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4)
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
#endif
		vp = fp->f_vnode;
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination? What if
		 * proc does a setuid?
		 */
		if (vp->v_mount != NULL && vp->v_mount->mnt_flag & MNT_NOEXEC)
			maxprot = VM_PROT_NONE;
		else
			maxprot = VM_PROT_EXECUTE;
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			error = EACCES;
			goto done;
		}
		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.
		 */
		if ((flags & MAP_SHARED) != 0) {
			if ((fp->f_flag & FWRITE) != 0) {
				maxprot |= VM_PROT_WRITE;
			} else if ((prot & PROT_WRITE) != 0) {
				error = EACCES;
				goto done;
			}
		} else if (vp->v_type != VCHR || (fp->f_flag & FWRITE) != 0) {
			maxprot |= VM_PROT_WRITE;
		}
		handle = (void *)vp;
		handle_type = OBJT_VNODE;
	}
map:

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap &&
	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
		error = ENOMEM;
		goto done;
	}

	td->td_fpop = fp;
	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
	    flags, handle_type, handle, pos);
	td->td_fpop = NULL;
#ifdef HWPMC_HOOKS
	/* inform hwpmc(4) if an executable is being mapped */
	if (error == 0 && handle_type == OBJT_VNODE &&
	    (prot & PROT_EXEC)) {
		pkm.pm_file = handle;
		pkm.pm_address = (uintptr_t) addr;
		PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm);
	}
#endif
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

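/*
 * Compatibility entry point kept for binaries built against the older
 * mmap(2) ABI: it simply repacks the caller's arguments into a
 * struct mmap_args and dispatches to mmap() above.
 */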
int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{
	struct mmap_args oargs;

	oargs.addr = uap->addr;
	oargs.len = uap->len;
	oargs.prot = uap->prot;
	oargs.flags = uap->flags;
	oargs.fd = uap->fd;
	oargs.pos = uap->pos;
	return (mmap(td, &oargs));
}

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif				/* COMPAT_43 */


#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
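/*
 * msync(2): flush any dirty pages backing the given address range to
 * their pager and, with MS_INVALIDATE, discard cached pages.  MS_ASYNC
 * and MS_INVALIDATE may not be combined.
 */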
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	default:
		return (EINVAL);
	}
}

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
#endif
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	if (size == 0)
		return (EINVAL);

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	/*
	 * Inform hwpmc if the address range being unmapped contains
	 * an executable region.
	 */
	if (vm_map_lookup_entry(map, addr, &entry)) {
		for (;
		     entry != &map->header && entry->start < addr + size;
		     entry = entry->next) {
			if (vm_map_check_protection(map, entry->start,
				entry->end, VM_PROT_EXECUTE) == TRUE) {
				pkm.pm_address = (uintptr_t) addr;
				pkm.pm_size = (size_t) size;
				PMC_CALL_HOOK(td, PMC_FN_MUNMAP,
				    (void *) &pkm);
				break;
			}
		}
	}
#endif
	/* returns nothing but KERN_SUCCESS anyway */
	vm_map_delete(map, addr, addr + size);
	vm_map_unlock(map);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}

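/*
 * minherit(2): set the inheritance attribute (shared, copy, or none)
 * that the given address range will have in a child created by fork(2).
 */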
#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;
	vm_map_t map;
	struct proc *p;
	int error;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = priv_check(td, PRIV_VM_MADV_PROTECT);
		if (error == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			p->p_flag |= P_PROTECTED;
			PROC_UNLOCK(p);
		}
		return (error);
	}
	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
		return (EINVAL);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_paddr_t locked_pa;
	vm_page_t m;
	vm_pindex_t pindex;
	int mincoreinfo;
	unsigned int timestamp;
	boolean_t locked;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * check for contiguity
		 */
		if (current->end < end &&
		    (entry->next == &map->header ||
		     current->next->start > current->end)) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			object = NULL;
			locked_pa = 0;
		retry:
			m = NULL;
			mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
			if (locked_pa != 0) {
				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
				 * other mappings might be examined.
				 */
				m = PHYS_TO_VM_PAGE(locked_pa);
				if (m->object != object) {
					if (object != NULL)
						VM_OBJECT_UNLOCK(object);
					object = m->object;
					locked = VM_OBJECT_TRYLOCK(object);
					vm_page_unlock(m);
					if (!locked) {
						VM_OBJECT_LOCK(object);
						vm_page_lock(m);
						goto retry;
					}
				} else
					vm_page_unlock(m);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("mincore: page %p is mapped but invalid",
				    m));
			} else if (mincoreinfo == 0) {
				/*
				 * The page is not mapped by this process.  If
				 * the object implements managed pages, then
				 * determine if the page is resident so that
				 * the mappings might be examined.
				 */
				if (current->object.vm_object != object) {
					if (object != NULL)
						VM_OBJECT_UNLOCK(object);
					object = current->object.vm_object;
					VM_OBJECT_LOCK(object);
				}
				if (object->type == OBJT_DEFAULT ||
				    object->type == OBJT_SWAP ||
				    object->type == OBJT_VNODE) {
					pindex = OFF_TO_IDX(current->offset +
					    (addr - current->start));
					m = vm_page_lookup(object, pindex);
					if (m != NULL && m->valid == 0)
						m = NULL;
					if (m != NULL)
						mincoreinfo = MINCORE_INCORE;
				}
			}
			if (m != NULL) {
				/* Examine other mappings to the page. */
				if (m->dirty == 0 && pmap_is_modified(m))
					vm_page_dirty(m);
				if (m->dirty != 0)
					mincoreinfo |= MINCORE_MODIFIED_OTHER;
				/*
				 * The first test for PG_REFERENCED is an
				 * optimization.  The second test is
				 * required because a concurrent pmap
				 * operation could clear the last reference
				 * and set PG_REFERENCED before the call to
				 * pmap_is_referenced().
				 */
				if ((m->flags & PG_REFERENCED) != 0 ||
				    pmap_is_referenced(m) ||
				    (m->flags & PG_REFERENCED) != 0)
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
			}
			if (object != NULL)
				VM_OBJECT_UNLOCK(object);

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}

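/*
 * mlock(2): wire the pages backing the given address range into memory.
 * The request is checked against RLIMIT_MEMLOCK and against the global
 * vm_page_max_wired limit before any pages are wired.
 */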
#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	struct proc *proc;
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	int error;

	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	proc = td->td_proc;
	PROC_LOCK(proc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
	    lim_cur(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = 0;

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

#if 0
	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	PROC_LOCK(td->td_proc);
	if (map->size > lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(td->td_proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(td->td_proc);
#else
	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall(). vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);

	return (error);
}

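/*
 * munlock(2): unwire the pages backing the given address range so that
 * they may again be paged out.
 */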
#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr, end, last, start;
	vm_size_t size;
	int error;

	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);
	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * MPSAFE
 *
 * Helper function for vm_mmap.  Perform the sanity checks specific to
 * mmap operations on vnodes.
 */
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp)
{
	struct vattr va;
	vm_object_t obj;
	vm_offset_t foff;
	struct mount *mp;
	struct ucred *cred;
	int error, flags;
	int vfslocked;

	mp = vp->v_mount;
	cred = td->td_ucred;
	vfslocked = VFS_LOCK_GIANT(mp);
	if ((error = vget(vp, LK_SHARED, td)) != 0) {
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}
	foff = *foffp;
	flags = *flagsp;
	obj = vp->v_object;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (obj == NULL) {
			error = EINVAL;
			goto done;
		}
		if (obj->handle != vp) {
			vput(vp);
			vp = (struct vnode*)obj->handle;
			vget(vp, LK_SHARED, td);
		}
	} else if (vp->v_type == VCHR) {
		error = vm_mmap_cdev(td, objsize, prot, maxprotp, flagsp,
flagsp, 123164345f0bSJohn Baldwin vp->v_rdev, foffp, objp); 123264345f0bSJohn Baldwin if (error == 0) 123364345f0bSJohn Baldwin goto mark_atime; 123491a35e78SKonstantin Belousov goto done; 1235c8daea13SAlexander Kabaev } else { 1236c8daea13SAlexander Kabaev error = EINVAL; 1237c8daea13SAlexander Kabaev goto done; 1238c8daea13SAlexander Kabaev } 12390359a12eSAttilio Rao if ((error = VOP_GETATTR(vp, &va, cred))) 1240c8daea13SAlexander Kabaev goto done; 1241c92163dcSChristian S.J. Peron #ifdef MAC 12420359a12eSAttilio Rao error = mac_vnode_check_mmap(cred, vp, prot, flags); 1243c92163dcSChristian S.J. Peron if (error != 0) 1244c92163dcSChristian S.J. Peron goto done; 1245c92163dcSChristian S.J. Peron #endif 1246c8daea13SAlexander Kabaev if ((flags & MAP_SHARED) != 0) { 1247c8daea13SAlexander Kabaev if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) { 1248c8daea13SAlexander Kabaev if (prot & PROT_WRITE) { 1249c8daea13SAlexander Kabaev error = EPERM; 1250c8daea13SAlexander Kabaev goto done; 1251c8daea13SAlexander Kabaev } 1252c8daea13SAlexander Kabaev *maxprotp &= ~VM_PROT_WRITE; 1253c8daea13SAlexander Kabaev } 1254c8daea13SAlexander Kabaev } 1255c8daea13SAlexander Kabaev /* 1256c8daea13SAlexander Kabaev * If it is a regular file without any references 1257c8daea13SAlexander Kabaev * we do not need to sync it. 1258c8daea13SAlexander Kabaev * Adjust object size to be the size of actual file. 1259c8daea13SAlexander Kabaev */ 1260c8daea13SAlexander Kabaev objsize = round_page(va.va_size); 1261c8daea13SAlexander Kabaev if (va.va_nlink == 0) 1262c8daea13SAlexander Kabaev flags |= MAP_NOSYNC; 12633364c323SKonstantin Belousov obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred); 1264c8daea13SAlexander Kabaev if (obj == NULL) { 126564345f0bSJohn Baldwin error = ENOMEM; 1266c8daea13SAlexander Kabaev goto done; 1267c8daea13SAlexander Kabaev } 1268c8daea13SAlexander Kabaev *objp = obj; 1269c8daea13SAlexander Kabaev *flagsp = flags; 127064345f0bSJohn Baldwin 127164345f0bSJohn Baldwin mark_atime: 12720359a12eSAttilio Rao vfs_mark_atime(vp, cred); 12731e309003SDiomidis Spinellis 1274c8daea13SAlexander Kabaev done: 1275c8daea13SAlexander Kabaev vput(vp); 1276ae51ff11SJeff Roberson VFS_UNLOCK_GIANT(vfslocked); 1277c8daea13SAlexander Kabaev return (error); 1278c8daea13SAlexander Kabaev } 1279c8daea13SAlexander Kabaev 1280c8daea13SAlexander Kabaev /* 128198df9218SJohn Baldwin * vm_mmap_cdev() 128298df9218SJohn Baldwin * 128398df9218SJohn Baldwin * MPSAFE 128498df9218SJohn Baldwin * 128598df9218SJohn Baldwin * Helper function for vm_mmap. Perform sanity check specific for mmap 128698df9218SJohn Baldwin * operations on cdevs. 
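 *
 * Illustrative sketch (hypothetical driver name, for exposition only): a
 * character driver that wants mmap() callers to receive plain anonymous
 * memory only needs to set D_MMAP_ANON in its cdevsw, e.g.
 *
 *	static struct cdevsw foo_cdevsw = {
 *		.d_version = D_VERSION,
 *		.d_name    = "foo",
 *		.d_flags   = D_MMAP_ANON,
 *	};
 *
 * Any other character device is forced to a shared mapping and is backed
 * either by an object returned from d_mmap_single() or, failing that, by
 * the device pager, as the function body below shows.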
128798df9218SJohn Baldwin */ 128898df9218SJohn Baldwin int 128998df9218SJohn Baldwin vm_mmap_cdev(struct thread *td, vm_size_t objsize, 129098df9218SJohn Baldwin vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp, 129164345f0bSJohn Baldwin struct cdev *cdev, vm_ooffset_t *foff, vm_object_t *objp) 129298df9218SJohn Baldwin { 129398df9218SJohn Baldwin vm_object_t obj; 129491a35e78SKonstantin Belousov struct cdevsw *dsw; 1295*3979450bSKonstantin Belousov int error, flags, ref; 129698df9218SJohn Baldwin 129798df9218SJohn Baldwin flags = *flagsp; 129898df9218SJohn Baldwin 1299*3979450bSKonstantin Belousov dsw = dev_refthread(cdev, &ref); 130091a35e78SKonstantin Belousov if (dsw == NULL) 130191a35e78SKonstantin Belousov return (ENXIO); 130291a35e78SKonstantin Belousov if (dsw->d_flags & D_MMAP_ANON) { 1303*3979450bSKonstantin Belousov dev_relthread(cdev, ref); 130498df9218SJohn Baldwin *maxprotp = VM_PROT_ALL; 130598df9218SJohn Baldwin *flagsp |= MAP_ANON; 130698df9218SJohn Baldwin return (0); 130798df9218SJohn Baldwin } 130898df9218SJohn Baldwin /* 130964345f0bSJohn Baldwin * cdevs do not provide private mappings of any kind. 131098df9218SJohn Baldwin */ 131198df9218SJohn Baldwin if ((*maxprotp & VM_PROT_WRITE) == 0 && 131264345f0bSJohn Baldwin (prot & PROT_WRITE) != 0) { 1313*3979450bSKonstantin Belousov dev_relthread(cdev, ref); 131498df9218SJohn Baldwin return (EACCES); 131564345f0bSJohn Baldwin } 131664345f0bSJohn Baldwin if (flags & (MAP_PRIVATE|MAP_COPY)) { 1317*3979450bSKonstantin Belousov dev_relthread(cdev, ref); 131898df9218SJohn Baldwin return (EINVAL); 131964345f0bSJohn Baldwin } 132098df9218SJohn Baldwin /* 132198df9218SJohn Baldwin * Force device mappings to be shared. 132298df9218SJohn Baldwin */ 132398df9218SJohn Baldwin flags |= MAP_SHARED; 132498df9218SJohn Baldwin #ifdef MAC_XXX 132564345f0bSJohn Baldwin error = mac_cdev_check_mmap(td->td_ucred, cdev, prot); 132664345f0bSJohn Baldwin if (error != 0) { 1327*3979450bSKonstantin Belousov dev_relthread(cdev, ref); 132898df9218SJohn Baldwin return (error); 132964345f0bSJohn Baldwin } 133098df9218SJohn Baldwin #endif 133164345f0bSJohn Baldwin /* 133264345f0bSJohn Baldwin * First, try d_mmap_single(). If that is not implemented 133364345f0bSJohn Baldwin * (returns ENODEV), fall back to using the device pager. 133464345f0bSJohn Baldwin * Note that d_mmap_single() must return a reference to the 133564345f0bSJohn Baldwin * object (it needs to bump the reference count of the object 133664345f0bSJohn Baldwin * it returns somehow). 133764345f0bSJohn Baldwin * 133864345f0bSJohn Baldwin * XXX assumes VM_PROT_* == PROT_* 133964345f0bSJohn Baldwin */ 134064345f0bSJohn Baldwin error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot); 1341*3979450bSKonstantin Belousov dev_relthread(cdev, ref); 134264345f0bSJohn Baldwin if (error != ENODEV) 134364345f0bSJohn Baldwin return (error); 13443364c323SKonstantin Belousov obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff, 13453364c323SKonstantin Belousov td->td_ucred); 134698df9218SJohn Baldwin if (obj == NULL) 134798df9218SJohn Baldwin return (EINVAL); 134898df9218SJohn Baldwin *objp = obj; 134998df9218SJohn Baldwin *flagsp = flags; 135098df9218SJohn Baldwin return (0); 135198df9218SJohn Baldwin } 135298df9218SJohn Baldwin 135398df9218SJohn Baldwin /* 13548e38aeffSJohn Baldwin * vm_mmap_shm() 13558e38aeffSJohn Baldwin * 13568e38aeffSJohn Baldwin * MPSAFE 13578e38aeffSJohn Baldwin * 13588e38aeffSJohn Baldwin * Helper function for vm_mmap. 
Perform sanity check specific for mmap 13598e38aeffSJohn Baldwin * operations on shm file descriptors. 13608e38aeffSJohn Baldwin */ 13618e38aeffSJohn Baldwin int 13628e38aeffSJohn Baldwin vm_mmap_shm(struct thread *td, vm_size_t objsize, 13638e38aeffSJohn Baldwin vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp, 13648e38aeffSJohn Baldwin struct shmfd *shmfd, vm_ooffset_t foff, vm_object_t *objp) 13658e38aeffSJohn Baldwin { 13668e38aeffSJohn Baldwin int error; 13678e38aeffSJohn Baldwin 13688e38aeffSJohn Baldwin if ((*maxprotp & VM_PROT_WRITE) == 0 && 13698e38aeffSJohn Baldwin (prot & PROT_WRITE) != 0) 13708e38aeffSJohn Baldwin return (EACCES); 13718e38aeffSJohn Baldwin #ifdef MAC 13728e38aeffSJohn Baldwin error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, *flagsp); 13738e38aeffSJohn Baldwin if (error != 0) 13748e38aeffSJohn Baldwin return (error); 13758e38aeffSJohn Baldwin #endif 13768e38aeffSJohn Baldwin error = shm_mmap(shmfd, objsize, foff, objp); 13778e38aeffSJohn Baldwin if (error) 13788e38aeffSJohn Baldwin return (error); 13798e38aeffSJohn Baldwin return (0); 13808e38aeffSJohn Baldwin } 13818e38aeffSJohn Baldwin 13828e38aeffSJohn Baldwin /* 1383d2c60af8SMatthew Dillon * vm_mmap() 1384d2c60af8SMatthew Dillon * 1385d2c60af8SMatthew Dillon * MPSAFE 1386d2c60af8SMatthew Dillon * 1387d2c60af8SMatthew Dillon * Internal version of mmap. Currently used by mmap, exec, and sys5 1388d2c60af8SMatthew Dillon * shared memory. Handle is either a vnode pointer or NULL for MAP_ANON. 1389df8bae1dSRodney W. Grimes */ 1390df8bae1dSRodney W. Grimes int 1391b9dcd593SBruce Evans vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, 1392b9dcd593SBruce Evans vm_prot_t maxprot, int flags, 139398df9218SJohn Baldwin objtype_t handle_type, void *handle, 1394b9dcd593SBruce Evans vm_ooffset_t foff) 1395df8bae1dSRodney W. Grimes { 1396df8bae1dSRodney W. Grimes boolean_t fitit; 13976bda842dSMatt Jacob vm_object_t object = NULL; 1398df8bae1dSRodney W. Grimes int rv = KERN_SUCCESS; 139920eec4bbSAlan Cox int docow, error; 1400b40ce416SJulian Elischer struct thread *td = curthread; 1401df8bae1dSRodney W. Grimes 1402df8bae1dSRodney W. Grimes if (size == 0) 1403df8bae1dSRodney W. Grimes return (0); 1404df8bae1dSRodney W. Grimes 1405749474f2SPeter Wemm size = round_page(size); 1406df8bae1dSRodney W. Grimes 140791d5354aSJohn Baldwin PROC_LOCK(td->td_proc); 1408070f64feSMatthew Dillon if (td->td_proc->p_vmspace->vm_map.size + size > 140991d5354aSJohn Baldwin lim_cur(td->td_proc, RLIMIT_VMEM)) { 141091d5354aSJohn Baldwin PROC_UNLOCK(td->td_proc); 1411070f64feSMatthew Dillon return(ENOMEM); 1412070f64feSMatthew Dillon } 141391d5354aSJohn Baldwin PROC_UNLOCK(td->td_proc); 1414070f64feSMatthew Dillon 1415df8bae1dSRodney W. Grimes /* 1416bc9ad247SDavid Greenman * We currently can only deal with page aligned file offsets. 1417bc9ad247SDavid Greenman * The check is here rather than in the syscall because the 1418bc9ad247SDavid Greenman * kernel calls this function internally for other mmaping 1419bc9ad247SDavid Greenman * operations (such as in exec) and non-aligned offsets will 1420bc9ad247SDavid Greenman * cause pmap inconsistencies...so we want to be sure to 1421bc9ad247SDavid Greenman * disallow this in all cases. 
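 *
 * For example (illustrative value only), an offset such as foff = 0x1234
 * fails the PAGE_MASK check below and the call returns EINVAL; a caller is
 * expected to hand in a page-aligned offset instead, along the lines of
 *
 *	foff = trunc_page(foff);
 *
 * and to account for the discarded sub-page remainder itself.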
1422bc9ad247SDavid Greenman */ 1423bc9ad247SDavid Greenman if (foff & PAGE_MASK) 1424bc9ad247SDavid Greenman return (EINVAL); 1425bc9ad247SDavid Greenman 142606cb7259SDavid Greenman if ((flags & MAP_FIXED) == 0) { 142706cb7259SDavid Greenman fitit = TRUE; 142806cb7259SDavid Greenman *addr = round_page(*addr); 142906cb7259SDavid Greenman } else { 143006cb7259SDavid Greenman if (*addr != trunc_page(*addr)) 143106cb7259SDavid Greenman return (EINVAL); 143206cb7259SDavid Greenman fitit = FALSE; 143306cb7259SDavid Greenman } 1434bc9ad247SDavid Greenman /* 143524a1cce3SDavid Greenman * Lookup/allocate object. 1436df8bae1dSRodney W. Grimes */ 143798df9218SJohn Baldwin switch (handle_type) { 143898df9218SJohn Baldwin case OBJT_DEVICE: 143998df9218SJohn Baldwin error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, 144064345f0bSJohn Baldwin handle, &foff, &object); 144198df9218SJohn Baldwin break; 144298df9218SJohn Baldwin case OBJT_VNODE: 1443c8daea13SAlexander Kabaev error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, 144464345f0bSJohn Baldwin handle, &foff, &object); 144598df9218SJohn Baldwin break; 14468e38aeffSJohn Baldwin case OBJT_SWAP: 14478e38aeffSJohn Baldwin error = vm_mmap_shm(td, size, prot, &maxprot, &flags, 14488e38aeffSJohn Baldwin handle, foff, &object); 14498e38aeffSJohn Baldwin break; 145098df9218SJohn Baldwin case OBJT_DEFAULT: 145198df9218SJohn Baldwin if (handle == NULL) { 145298df9218SJohn Baldwin error = 0; 145398df9218SJohn Baldwin break; 145498df9218SJohn Baldwin } 145598df9218SJohn Baldwin /* FALLTHROUGH */ 145698df9218SJohn Baldwin default: 145798df9218SJohn Baldwin error = EINVAL; 14586bda842dSMatt Jacob break; 145998df9218SJohn Baldwin } 146098df9218SJohn Baldwin if (error) 1461c8daea13SAlexander Kabaev return (error); 14625f55e841SDavid Greenman if (flags & MAP_ANON) { 1463c8daea13SAlexander Kabaev object = NULL; 1464c8daea13SAlexander Kabaev docow = 0; 14655f55e841SDavid Greenman /* 14665f55e841SDavid Greenman * Unnamed anonymous regions always start at 0. 14675f55e841SDavid Greenman */ 146867bf6868SJohn Dyson if (handle == 0) 14695f55e841SDavid Greenman foff = 0; 14705f55e841SDavid Greenman } else { 14714738fa09SAlan Cox docow = MAP_PREFAULT_PARTIAL; 147294328e90SJohn Dyson } 1473df8bae1dSRodney W. Grimes 14744f79d873SMatthew Dillon if ((flags & (MAP_ANON|MAP_SHARED)) == 0) 14754738fa09SAlan Cox docow |= MAP_COPY_ON_WRITE; 14764f79d873SMatthew Dillon if (flags & MAP_NOSYNC) 14774f79d873SMatthew Dillon docow |= MAP_DISABLE_SYNCER; 14789730a5daSPaul Saab if (flags & MAP_NOCORE) 14799730a5daSPaul Saab docow |= MAP_DISABLE_COREDUMP; 14805850152dSJohn Dyson 14812267af78SJulian Elischer if (flags & MAP_STACK) 1482fd75d710SMarcel Moolenaar rv = vm_map_stack(map, *addr, size, prot, maxprot, 1483fd75d710SMarcel Moolenaar docow | MAP_STACK_GROWS_DOWN); 1484d239bd3cSKonstantin Belousov else if (fitit) 1485d0a83a83SAlan Cox rv = vm_map_find(map, object, foff, addr, size, 1486d0a83a83SAlan Cox object != NULL && object->type == OBJT_DEVICE ? 1487d0a83a83SAlan Cox VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, prot, maxprot, docow); 14882267af78SJulian Elischer else 1489b8ca4ef2SAlan Cox rv = vm_map_fixed(map, object, foff, *addr, size, 1490bd7e5f99SJohn Dyson prot, maxprot, docow); 1491bd7e5f99SJohn Dyson 1492d2c60af8SMatthew Dillon if (rv != KERN_SUCCESS) { 14937fb0c17eSDavid Greenman /* 149424a1cce3SDavid Greenman * Lose the object reference. 
Will destroy the 149524a1cce3SDavid Greenman * object if it's an unnamed anonymous mapping 149624a1cce3SDavid Greenman * or named anonymous without other references. 14977fb0c17eSDavid Greenman */ 1498df8bae1dSRodney W. Grimes vm_object_deallocate(object); 1499d2c60af8SMatthew Dillon } else if (flags & MAP_SHARED) { 1500df8bae1dSRodney W. Grimes /* 1501df8bae1dSRodney W. Grimes * Shared memory is also shared with children. 1502df8bae1dSRodney W. Grimes */ 1503df8bae1dSRodney W. Grimes rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE); 1504e4ca250dSJohn Baldwin if (rv != KERN_SUCCESS) 15057fb0c17eSDavid Greenman (void) vm_map_remove(map, *addr, *addr + size); 1506df8bae1dSRodney W. Grimes } 1507abd498aaSBruce M Simpson 1508abd498aaSBruce M Simpson /* 1509abd498aaSBruce M Simpson * If the process has requested that all future mappings 1510abd498aaSBruce M Simpson * be wired, then heed this. 1511abd498aaSBruce M Simpson */ 1512abd498aaSBruce M Simpson if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE)) 1513abd498aaSBruce M Simpson vm_map_wire(map, *addr, *addr + size, 1514abd498aaSBruce M Simpson VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES); 1515abd498aaSBruce M Simpson 1516df8bae1dSRodney W. Grimes switch (rv) { 1517df8bae1dSRodney W. Grimes case KERN_SUCCESS: 1518df8bae1dSRodney W. Grimes return (0); 1519df8bae1dSRodney W. Grimes case KERN_INVALID_ADDRESS: 1520df8bae1dSRodney W. Grimes case KERN_NO_SPACE: 1521df8bae1dSRodney W. Grimes return (ENOMEM); 1522df8bae1dSRodney W. Grimes case KERN_PROTECTION_FAILURE: 1523df8bae1dSRodney W. Grimes return (EACCES); 1524df8bae1dSRodney W. Grimes default: 1525df8bae1dSRodney W. Grimes return (EINVAL); 1526df8bae1dSRodney W. Grimes } 1527df8bae1dSRodney W. Grimes } 1528