xref: /freebsd/sys/vm/vm_mmap.c (revision 7ebcee376a940186bceeaad0c0a33d9a15638483)
1df8bae1dSRodney W. Grimes /*
2df8bae1dSRodney W. Grimes  * Copyright (c) 1988 University of Utah.
3df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
4df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
5df8bae1dSRodney W. Grimes  *
6df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
7df8bae1dSRodney W. Grimes  * the Systems Programming Group of the University of Utah Computer
8df8bae1dSRodney W. Grimes  * Science Department.
9df8bae1dSRodney W. Grimes  *
10df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
11df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
12df8bae1dSRodney W. Grimes  * are met:
13df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
15df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
17df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
18df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
195929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
20df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
21df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
22df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
23df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
24df8bae1dSRodney W. Grimes  *    without specific prior written permission.
25df8bae1dSRodney W. Grimes  *
26df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
39df8bae1dSRodney W. Grimes  *
40df8bae1dSRodney W. Grimes  *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
41df8bae1dSRodney W. Grimes  */
42df8bae1dSRodney W. Grimes 
43df8bae1dSRodney W. Grimes /*
44df8bae1dSRodney W. Grimes  * Mapped file (mmap) interface to VM
45df8bae1dSRodney W. Grimes  */
46df8bae1dSRodney W. Grimes 
47874651b1SDavid E. O'Brien #include <sys/cdefs.h>
48874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
49874651b1SDavid E. O'Brien 
505591b823SEivind Eklund #include "opt_compat.h"
513e732e7dSRobert Watson #include "opt_mac.h"
52e9822d92SJoerg Wunsch 
53df8bae1dSRodney W. Grimes #include <sys/param.h>
54df8bae1dSRodney W. Grimes #include <sys/systm.h>
55fb919e4dSMark Murray #include <sys/kernel.h>
56fb919e4dSMark Murray #include <sys/lock.h>
5723955314SAlfred Perlstein #include <sys/mutex.h>
58d2d3e875SBruce Evans #include <sys/sysproto.h>
59df8bae1dSRodney W. Grimes #include <sys/filedesc.h>
60df8bae1dSRodney W. Grimes #include <sys/proc.h>
61070f64feSMatthew Dillon #include <sys/resource.h>
62070f64feSMatthew Dillon #include <sys/resourcevar.h>
63df8bae1dSRodney W. Grimes #include <sys/vnode.h>
643ac4d1efSBruce Evans #include <sys/fcntl.h>
65df8bae1dSRodney W. Grimes #include <sys/file.h>
663e732e7dSRobert Watson #include <sys/mac.h>
67df8bae1dSRodney W. Grimes #include <sys/mman.h>
68df8bae1dSRodney W. Grimes #include <sys/conf.h>
694183b6b6SPeter Wemm #include <sys/stat.h>
70efeaf95aSDavid Greenman #include <sys/vmmeter.h>
711f6889a1SMatthew Dillon #include <sys/sysctl.h>
72df8bae1dSRodney W. Grimes 
73df8bae1dSRodney W. Grimes #include <vm/vm.h>
74efeaf95aSDavid Greenman #include <vm/vm_param.h>
75efeaf95aSDavid Greenman #include <vm/pmap.h>
76efeaf95aSDavid Greenman #include <vm/vm_map.h>
77efeaf95aSDavid Greenman #include <vm/vm_object.h>
781c7c3c6aSMatthew Dillon #include <vm/vm_page.h>
79df8bae1dSRodney W. Grimes #include <vm/vm_pager.h>
80b5e8ce9fSBruce Evans #include <vm/vm_pageout.h>
81efeaf95aSDavid Greenman #include <vm/vm_extern.h>
82867a482dSJohn Dyson #include <vm/vm_page.h>
831f6889a1SMatthew Dillon #include <vm/vm_kern.h>
84df8bae1dSRodney W. Grimes 
85d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
86df8bae1dSRodney W. Grimes struct sbrk_args {
87df8bae1dSRodney W. Grimes 	int incr;
88df8bae1dSRodney W. Grimes };
89d2d3e875SBruce Evans #endif
900d94caffSDavid Greenman 
911f6889a1SMatthew Dillon static int max_proc_mmap;
921f6889a1SMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");
931f6889a1SMatthew Dillon 
941f6889a1SMatthew Dillon /*
951f6889a1SMatthew Dillon  * Set the maximum number of vm_map_entry structures per process.  Roughly
961f6889a1SMatthew Dillon  * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
971f6889a1SMatthew Dillon  * of our KVM malloc space still results in generous limits.  We want a
981f6889a1SMatthew Dillon  * default that is good enough to prevent the kernel from running out of resources
991f6889a1SMatthew Dillon  * if attacked from a compromised user account but generous enough such that
1001f6889a1SMatthew Dillon  * multi-threaded processes are not unduly inconvenienced.
1011f6889a1SMatthew Dillon  */
10211caded3SAlfred Perlstein static void vmmapentry_rsrc_init(void *);
1031f6889a1SMatthew Dillon SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)
1041f6889a1SMatthew Dillon 
1051f6889a1SMatthew Dillon static void
1061f6889a1SMatthew Dillon vmmapentry_rsrc_init(dummy)
1071f6889a1SMatthew Dillon         void *dummy;
1081f6889a1SMatthew Dillon {
1091f6889a1SMatthew Dillon     max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
1101f6889a1SMatthew Dillon     max_proc_mmap /= 100;
1111f6889a1SMatthew Dillon }
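
/*
 * Illustrative sketch of the default computed above (the sizes are assumed
 * for the example, not taken from this file): with vm_kmem_size = 40 MB and
 * sizeof(struct vm_map_entry) of roughly 64 bytes,
 *
 *	max_proc_mmap = (40 * 1024 * 1024) / 64 / 100 ~= 6553
 *
 * i.e. on the order of a few thousand map entries per process, tunable at
 * run time through the vm.max_proc_mmap sysctl declared above.
 */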
1121f6889a1SMatthew Dillon 
113d2c60af8SMatthew Dillon /*
114d2c60af8SMatthew Dillon  * MPSAFE
115d2c60af8SMatthew Dillon  */
116df8bae1dSRodney W. Grimes /* ARGSUSED */
117df8bae1dSRodney W. Grimes int
118b40ce416SJulian Elischer sbrk(td, uap)
119b40ce416SJulian Elischer 	struct thread *td;
120df8bae1dSRodney W. Grimes 	struct sbrk_args *uap;
121df8bae1dSRodney W. Grimes {
122df8bae1dSRodney W. Grimes 	/* Not yet implemented */
1230cddd8f0SMatthew Dillon 	/* mtx_lock(&Giant); */
1240cddd8f0SMatthew Dillon 	/* mtx_unlock(&Giant); */
125df8bae1dSRodney W. Grimes 	return (EOPNOTSUPP);
126df8bae1dSRodney W. Grimes }
127df8bae1dSRodney W. Grimes 
128d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
129df8bae1dSRodney W. Grimes struct sstk_args {
130df8bae1dSRodney W. Grimes 	int incr;
131df8bae1dSRodney W. Grimes };
132d2d3e875SBruce Evans #endif
1330d94caffSDavid Greenman 
134d2c60af8SMatthew Dillon /*
135d2c60af8SMatthew Dillon  * MPSAFE
136d2c60af8SMatthew Dillon  */
137df8bae1dSRodney W. Grimes /* ARGSUSED */
138df8bae1dSRodney W. Grimes int
139b40ce416SJulian Elischer sstk(td, uap)
140b40ce416SJulian Elischer 	struct thread *td;
141df8bae1dSRodney W. Grimes 	struct sstk_args *uap;
142df8bae1dSRodney W. Grimes {
143df8bae1dSRodney W. Grimes 	/* Not yet implemented */
1440cddd8f0SMatthew Dillon 	/* mtx_lock(&Giant); */
1450cddd8f0SMatthew Dillon 	/* mtx_unlock(&Giant); */
146df8bae1dSRodney W. Grimes 	return (EOPNOTSUPP);
147df8bae1dSRodney W. Grimes }
148df8bae1dSRodney W. Grimes 
149df8bae1dSRodney W. Grimes #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
150d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
151df8bae1dSRodney W. Grimes struct getpagesize_args {
152df8bae1dSRodney W. Grimes 	int dummy;
153df8bae1dSRodney W. Grimes };
154d2d3e875SBruce Evans #endif
1550d94caffSDavid Greenman 
156df8bae1dSRodney W. Grimes /* ARGSUSED */
157df8bae1dSRodney W. Grimes int
158b40ce416SJulian Elischer ogetpagesize(td, uap)
159b40ce416SJulian Elischer 	struct thread *td;
160df8bae1dSRodney W. Grimes 	struct getpagesize_args *uap;
161df8bae1dSRodney W. Grimes {
1620cddd8f0SMatthew Dillon 	/* MP SAFE */
163b40ce416SJulian Elischer 	td->td_retval[0] = PAGE_SIZE;
164df8bae1dSRodney W. Grimes 	return (0);
165df8bae1dSRodney W. Grimes }
166df8bae1dSRodney W. Grimes #endif				/* COMPAT_43 || COMPAT_SUNOS */
167df8bae1dSRodney W. Grimes 
16854f42e4bSPeter Wemm 
16954f42e4bSPeter Wemm /*
17054f42e4bSPeter Wemm  * Memory Map (mmap) system call.  Note that the file offset
17154f42e4bSPeter Wemm  * and address are allowed to be NOT page aligned, though if
17254f42e4bSPeter Wemm  * the MAP_FIXED flag is set, both must have the same remainder
17354f42e4bSPeter Wemm  * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
17454f42e4bSPeter Wemm  * page-aligned, the actual mapping starts at trunc_page(addr)
17554f42e4bSPeter Wemm  * and the return value is adjusted up by the page offset.
176b4309055SMatthew Dillon  *
177b4309055SMatthew Dillon  * Generally speaking, only character devices which are themselves
178b4309055SMatthew Dillon  * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
179b4309055SMatthew Dillon  * there would be no cache coherency between a descriptor and a VM mapping
180b4309055SMatthew Dillon  * both to the same character device.
181b4309055SMatthew Dillon  *
182b4309055SMatthew Dillon  * Block devices can be mmap'd no matter what they represent.  Cache coherency
183b4309055SMatthew Dillon  * is maintained as long as you do not write directly to the underlying
184b4309055SMatthew Dillon  * character device.
18554f42e4bSPeter Wemm  */
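/*
 * A hedged userland sketch of the alignment rule described above (the file
 * descriptor fd, the offset and the 4 KB page size are hypothetical example
 * values, not taken from this file):
 *
 *	off_t pos = 0x1234;
 *	void *p = mmap(NULL, 100, PROT_READ, MAP_SHARED, fd, pos);
 *
 * The code below computes pageoff = pos & PAGE_MASK = 0x234, maps from
 * pos - pageoff = 0x1000, rounds the length up to a whole page, and returns
 * addr + pageoff, so the caller's pointer still refers to byte 0x1234 of the
 * file.  With MAP_FIXED, addr would additionally have to share that 0x234
 * remainder or the call fails with EINVAL.
 */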
186d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
187df8bae1dSRodney W. Grimes struct mmap_args {
188651bb817SAlexander Langer 	void *addr;
189df8bae1dSRodney W. Grimes 	size_t len;
190df8bae1dSRodney W. Grimes 	int prot;
191df8bae1dSRodney W. Grimes 	int flags;
192df8bae1dSRodney W. Grimes 	int fd;
193df8bae1dSRodney W. Grimes 	long pad;
194df8bae1dSRodney W. Grimes 	off_t pos;
195df8bae1dSRodney W. Grimes };
196d2d3e875SBruce Evans #endif
197df8bae1dSRodney W. Grimes 
198d2c60af8SMatthew Dillon /*
199d2c60af8SMatthew Dillon  * MPSAFE
200d2c60af8SMatthew Dillon  */
201df8bae1dSRodney W. Grimes int
202b40ce416SJulian Elischer mmap(td, uap)
203b40ce416SJulian Elischer 	struct thread *td;
20454d92145SMatthew Dillon 	struct mmap_args *uap;
205df8bae1dSRodney W. Grimes {
20654d92145SMatthew Dillon 	struct file *fp = NULL;
207df8bae1dSRodney W. Grimes 	struct vnode *vp;
208df8bae1dSRodney W. Grimes 	vm_offset_t addr;
2099154ee6aSPeter Wemm 	vm_size_t size, pageoff;
210df8bae1dSRodney W. Grimes 	vm_prot_t prot, maxprot;
211651bb817SAlexander Langer 	void *handle;
212df8bae1dSRodney W. Grimes 	int flags, error;
213c8bdd56bSGuido van Rooij 	int disablexworkaround;
21454f42e4bSPeter Wemm 	off_t pos;
215b40ce416SJulian Elischer 	struct vmspace *vms = td->td_proc->p_vmspace;
2169ff5ce6bSBoris Popov 	vm_object_t obj;
217df8bae1dSRodney W. Grimes 
21854f42e4bSPeter Wemm 	addr = (vm_offset_t) uap->addr;
21954f42e4bSPeter Wemm 	size = uap->len;
220df8bae1dSRodney W. Grimes 	prot = uap->prot & VM_PROT_ALL;
221df8bae1dSRodney W. Grimes 	flags = uap->flags;
22254f42e4bSPeter Wemm 	pos = uap->pos;
22354f42e4bSPeter Wemm 
224f6b5b182SJeff Roberson 	vp = NULL;
225426da3bcSAlfred Perlstein 	fp = NULL;
22654f42e4bSPeter Wemm 	/* make sure mapping fits into numeric range etc */
227fc565456SDmitrij Tejblum 	if ((ssize_t) uap->len < 0 ||
22854f42e4bSPeter Wemm 	    ((flags & MAP_ANON) && uap->fd != -1))
229df8bae1dSRodney W. Grimes 		return (EINVAL);
2309154ee6aSPeter Wemm 
2312267af78SJulian Elischer 	if (flags & MAP_STACK) {
2322267af78SJulian Elischer 		if ((uap->fd != -1) ||
2332267af78SJulian Elischer 		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
2342267af78SJulian Elischer 			return (EINVAL);
2352267af78SJulian Elischer 		flags |= MAP_ANON;
2362267af78SJulian Elischer 		pos = 0;
2372907af2aSJulian Elischer 	}
2382907af2aSJulian Elischer 
2399154ee6aSPeter Wemm 	/*
24054f42e4bSPeter Wemm 	 * Align the file position to a page boundary,
24154f42e4bSPeter Wemm 	 * and save its page offset component.
2429154ee6aSPeter Wemm 	 */
24354f42e4bSPeter Wemm 	pageoff = (pos & PAGE_MASK);
24454f42e4bSPeter Wemm 	pos -= pageoff;
24554f42e4bSPeter Wemm 
24654f42e4bSPeter Wemm 	/* Adjust size for rounding (on both ends). */
24754f42e4bSPeter Wemm 	size += pageoff;			/* low end... */
24854f42e4bSPeter Wemm 	size = (vm_size_t) round_page(size);	/* hi end */
2499154ee6aSPeter Wemm 
250df8bae1dSRodney W. Grimes 	/*
2510d94caffSDavid Greenman 	 * Check for illegal addresses.  Watch out for address wrap... Note
2520d94caffSDavid Greenman 	 * that VM_*_ADDRESS are not constants due to casts (argh).
253df8bae1dSRodney W. Grimes 	 */
254df8bae1dSRodney W. Grimes 	if (flags & MAP_FIXED) {
25554f42e4bSPeter Wemm 		/*
25654f42e4bSPeter Wemm 		 * The specified address must have the same remainder
25754f42e4bSPeter Wemm 		 * as the file offset taken modulo PAGE_SIZE, so it
25854f42e4bSPeter Wemm 		 * should be aligned after adjustment by pageoff.
25954f42e4bSPeter Wemm 		 */
26054f42e4bSPeter Wemm 		addr -= pageoff;
26154f42e4bSPeter Wemm 		if (addr & PAGE_MASK)
26254f42e4bSPeter Wemm 			return (EINVAL);
26354f42e4bSPeter Wemm 		/* Address range must be all in user VM space. */
26405ba50f5SJake Burkholder 		if (addr < vm_map_min(&vms->vm_map) ||
26505ba50f5SJake Burkholder 		    addr + size > vm_map_max(&vms->vm_map))
266df8bae1dSRodney W. Grimes 			return (EINVAL);
267bbc0ec52SDavid Greenman 		if (addr + size < addr)
268df8bae1dSRodney W. Grimes 			return (EINVAL);
269df8bae1dSRodney W. Grimes 	}
270df8bae1dSRodney W. Grimes 	/*
27154f42e4bSPeter Wemm 	 * XXX for non-fixed mappings where no hint is provided or
27254f42e4bSPeter Wemm 	 * the hint would fall in the potential heap space,
27354f42e4bSPeter Wemm 	 * place it after the end of the largest possible heap.
274df8bae1dSRodney W. Grimes 	 *
27554f42e4bSPeter Wemm 	 * There should really be a pmap call to determine a reasonable
27654f42e4bSPeter Wemm 	 * location.
277df8bae1dSRodney W. Grimes 	 */
278d28ab90fSLuoqi Chen 	else if (addr == 0 ||
2791f6889a1SMatthew Dillon 	    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
280cbc89bfbSPaul Saab 	     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
281cbc89bfbSPaul Saab 		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
28254f42e4bSPeter Wemm 
2830cddd8f0SMatthew Dillon 	mtx_lock(&Giant);	/* syscall marked mp-safe but isn't */
284a6af4ff1SPoul-Henning Kamp 	do {
285df8bae1dSRodney W. Grimes 		if (flags & MAP_ANON) {
286df8bae1dSRodney W. Grimes 			/*
287df8bae1dSRodney W. Grimes 			 * Mapping blank space is trivial.
288df8bae1dSRodney W. Grimes 			 */
289df8bae1dSRodney W. Grimes 			handle = NULL;
290df8bae1dSRodney W. Grimes 			maxprot = VM_PROT_ALL;
29154f42e4bSPeter Wemm 			pos = 0;
292a6af4ff1SPoul-Henning Kamp 			break;
293a6af4ff1SPoul-Henning Kamp 		}
294df8bae1dSRodney W. Grimes 		/*
2950d94caffSDavid Greenman 		 * Mapping file, get fp for validation. Obtain vnode and make
2960d94caffSDavid Greenman 		 * sure it is of appropriate type.
297426da3bcSAlfred Perlstein 		 * Don't let the descriptor disappear on us if we block.
298df8bae1dSRodney W. Grimes 		 */
299a4db4953SAlfred Perlstein 		if ((error = fget(td, uap->fd, &fp)) != 0)
300426da3bcSAlfred Perlstein 			goto done;
301e4ca250dSJohn Baldwin 		if (fp->f_type != DTYPE_VNODE) {
302d2c60af8SMatthew Dillon 			error = EINVAL;
303426da3bcSAlfred Perlstein 			goto done;
304e4ca250dSJohn Baldwin 		}
305279d7226SMatthew Dillon 
306279d7226SMatthew Dillon 		/*
307aa543039SGarrett Wollman 		 * POSIX shared-memory objects are defined to have
308aa543039SGarrett Wollman 		 * kernel persistence, and are not defined to support
309aa543039SGarrett Wollman 		 * read(2)/write(2) -- or even open(2).  Thus, we can
310aa543039SGarrett Wollman 		 * use MAP_ASYNC to trade on-disk coherence for speed.
311aa543039SGarrett Wollman 		 * The shm_open(3) library routine turns on the FPOSIXSHM
312aa543039SGarrett Wollman 		 * flag to request this behavior.
313aa543039SGarrett Wollman 		 */
314aa543039SGarrett Wollman 		if (fp->f_flag & FPOSIXSHM)
315aa543039SGarrett Wollman 			flags |= MAP_NOSYNC;
3163b6d9652SPoul-Henning Kamp 		vp = fp->f_vnode;
317f6b5b182SJeff Roberson 		error = vget(vp, LK_EXCLUSIVE, td);
318f6b5b182SJeff Roberson 		if (error)
319f6b5b182SJeff Roberson 			goto done;
320e4ca250dSJohn Baldwin 		if (vp->v_type != VREG && vp->v_type != VCHR) {
321e4ca250dSJohn Baldwin 			error = EINVAL;
322e4ca250dSJohn Baldwin 			goto done;
323e4ca250dSJohn Baldwin 		}
3249ff5ce6bSBoris Popov 		if (vp->v_type == VREG) {
3259ff5ce6bSBoris Popov 			/*
3269ff5ce6bSBoris Popov 			 * Get the proper underlying object
3279ff5ce6bSBoris Popov 			 */
3280cddd8f0SMatthew Dillon 			if (VOP_GETVOBJECT(vp, &obj) != 0) {
3290cddd8f0SMatthew Dillon 				error = EINVAL;
3300cddd8f0SMatthew Dillon 				goto done;
3310cddd8f0SMatthew Dillon 			}
332f6b5b182SJeff Roberson 			if (obj->handle != vp) {
333f6b5b182SJeff Roberson 				vput(vp);
3349ff5ce6bSBoris Popov 				vp = (struct vnode*)obj->handle;
335f6b5b182SJeff Roberson 				vget(vp, LK_EXCLUSIVE, td);
336f6b5b182SJeff Roberson 			}
3379ff5ce6bSBoris Popov 		}
338df8bae1dSRodney W. Grimes 		/*
3390d94caffSDavid Greenman 		 * XXX hack to handle use of /dev/zero to map anon memory (ala
3400d94caffSDavid Greenman 		 * SunOS).
341df8bae1dSRodney W. Grimes 		 */
3422589f249SMark Murray 		if ((vp->v_type == VCHR) &&
3432589f249SMark Murray 		    (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON)) {
344df8bae1dSRodney W. Grimes 			handle = NULL;
345df8bae1dSRodney W. Grimes 			maxprot = VM_PROT_ALL;
346df8bae1dSRodney W. Grimes 			flags |= MAP_ANON;
34754f42e4bSPeter Wemm 			pos = 0;
348a6af4ff1SPoul-Henning Kamp 			break;
349a6af4ff1SPoul-Henning Kamp 		}
350df8bae1dSRodney W. Grimes 		/*
351c8bdd56bSGuido van Rooij 		 * cdevs do not provide private mappings of any kind.
352c8bdd56bSGuido van Rooij 		 */
353c8bdd56bSGuido van Rooij 		/*
354c8bdd56bSGuido van Rooij 		 * However, for XIG X server to continue to work,
355c8bdd56bSGuido van Rooij 		 * we should allow the superuser to do it anyway.
356c8bdd56bSGuido van Rooij 		 * We only allow it at securelevel < 1.
357c8bdd56bSGuido van Rooij 		 * (Because the XIG X server writes directly to video
358c8bdd56bSGuido van Rooij 		 * memory via /dev/mem, it should never work at any
359c8bdd56bSGuido van Rooij 		 * other securelevel.)
360c8bdd56bSGuido van Rooij 		 * XXX this will have to go
361c8bdd56bSGuido van Rooij 		 */
362a854ed98SJohn Baldwin 		if (securelevel_ge(td->td_ucred, 1))
363c8bdd56bSGuido van Rooij 			disablexworkaround = 1;
364c8bdd56bSGuido van Rooij 		else
36544731cabSJohn Baldwin 			disablexworkaround = suser(td);
366c8bdd56bSGuido van Rooij 		if (vp->v_type == VCHR && disablexworkaround &&
367279d7226SMatthew Dillon 		    (flags & (MAP_PRIVATE|MAP_COPY))) {
368279d7226SMatthew Dillon 			error = EINVAL;
369279d7226SMatthew Dillon 			goto done;
370279d7226SMatthew Dillon 		}
371c8bdd56bSGuido van Rooij 		/*
372df8bae1dSRodney W. Grimes 		 * Ensure that file and memory protections are
373df8bae1dSRodney W. Grimes 		 * compatible.  Note that we only worry about
374df8bae1dSRodney W. Grimes 		 * writability if mapping is shared; in this case,
375df8bae1dSRodney W. Grimes 		 * current and max prot are dictated by the open file.
376df8bae1dSRodney W. Grimes 		 * XXX use the vnode instead?  Problem is: what
3770d94caffSDavid Greenman 		 * credentials do we use for determination? What if
3780d94caffSDavid Greenman 		 * proc does a setuid?
379df8bae1dSRodney W. Grimes 		 */
380df8bae1dSRodney W. Grimes 		maxprot = VM_PROT_EXECUTE;	/* ??? */
381279d7226SMatthew Dillon 		if (fp->f_flag & FREAD) {
382df8bae1dSRodney W. Grimes 			maxprot |= VM_PROT_READ;
383279d7226SMatthew Dillon 		} else if (prot & PROT_READ) {
384279d7226SMatthew Dillon 			error = EACCES;
385279d7226SMatthew Dillon 			goto done;
386279d7226SMatthew Dillon 		}
387c8bdd56bSGuido van Rooij 		/*
388c8bdd56bSGuido van Rooij 		 * If we are sharing potential changes (either via
389c8bdd56bSGuido van Rooij 		 * MAP_SHARED or via the implicit sharing of character
390c8bdd56bSGuido van Rooij 		 * device mappings), and we are trying to get write
391c8bdd56bSGuido van Rooij 		 * permission although we opened it without asking
392c8bdd56bSGuido van Rooij 		 * for it, bail out.  Check for superuser, only if
393c8bdd56bSGuido van Rooij 		 * we're at securelevel < 1, to allow the XIG X server
394c8bdd56bSGuido van Rooij 		 * to continue to work.
395c8bdd56bSGuido van Rooij 		 */
39605feb99fSGuido van Rooij 		if ((flags & MAP_SHARED) != 0 ||
39705feb99fSGuido van Rooij 		    (vp->v_type == VCHR && disablexworkaround)) {
39805feb99fSGuido van Rooij 			if ((fp->f_flag & FWRITE) != 0) {
3994183b6b6SPeter Wemm 				struct vattr va;
40005feb99fSGuido van Rooij 				if ((error =
40105feb99fSGuido van Rooij 				    VOP_GETATTR(vp, &va,
402a854ed98SJohn Baldwin 						td->td_ucred, td))) {
403279d7226SMatthew Dillon 					goto done;
404279d7226SMatthew Dillon 				}
40505feb99fSGuido van Rooij 				if ((va.va_flags &
406279d7226SMatthew Dillon 				   (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0) {
407df8bae1dSRodney W. Grimes 					maxprot |= VM_PROT_WRITE;
408279d7226SMatthew Dillon 				} else if (prot & PROT_WRITE) {
409279d7226SMatthew Dillon 					error = EPERM;
410279d7226SMatthew Dillon 					goto done;
411279d7226SMatthew Dillon 				}
412279d7226SMatthew Dillon 			} else if ((prot & PROT_WRITE) != 0) {
413279d7226SMatthew Dillon 				error = EACCES;
414279d7226SMatthew Dillon 				goto done;
415279d7226SMatthew Dillon 			}
416279d7226SMatthew Dillon 		} else {
41705feb99fSGuido van Rooij 			maxprot |= VM_PROT_WRITE;
418279d7226SMatthew Dillon 		}
41905feb99fSGuido van Rooij 
420651bb817SAlexander Langer 		handle = (void *)vp;
421a6af4ff1SPoul-Henning Kamp 	} while (0);
4221f6889a1SMatthew Dillon 
4231f6889a1SMatthew Dillon 	/*
4241f6889a1SMatthew Dillon 	 * Do not allow more than a certain number of vm_map_entry structures
4251f6889a1SMatthew Dillon 	 * per process.  Scale with the number of rforks sharing the map
4261f6889a1SMatthew Dillon 	 * to make the limit reasonable for threads.
4271f6889a1SMatthew Dillon 	 */
4281f6889a1SMatthew Dillon 	if (max_proc_mmap &&
4291f6889a1SMatthew Dillon 	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
430279d7226SMatthew Dillon 		error = ENOMEM;
431279d7226SMatthew Dillon 		goto done;
4321f6889a1SMatthew Dillon 	}
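	/*
	 * Worked example of the check above (illustrative numbers only):
	 * with a max_proc_mmap of, say, 6553 and three rfork()ed processes
	 * sharing one vmspace (vm_refcnt == 3), the map may grow to
	 * 3 * 6553 = 19659 vm_map_entry structures before new mappings
	 * start failing with ENOMEM.
	 */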
4331f6889a1SMatthew Dillon 
434e4ca250dSJohn Baldwin 	mtx_unlock(&Giant);
4353e732e7dSRobert Watson 	error = 0;
4363e732e7dSRobert Watson #ifdef MAC
4373e732e7dSRobert Watson 	if (handle != NULL && (flags & MAP_SHARED) != 0) {
4383e732e7dSRobert Watson 		error = mac_check_vnode_mmap(td->td_ucred,
4393e732e7dSRobert Watson 		    (struct vnode *)handle, prot);
4403e732e7dSRobert Watson 	}
4413e732e7dSRobert Watson #endif
4423e732e7dSRobert Watson 	if (error == 0)
4431f6889a1SMatthew Dillon 		error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
44454f42e4bSPeter Wemm 		    flags, handle, pos);
445f6b5b182SJeff Roberson 	mtx_lock(&Giant);
446df8bae1dSRodney W. Grimes 	if (error == 0)
447b40ce416SJulian Elischer 		td->td_retval[0] = (register_t) (addr + pageoff);
448279d7226SMatthew Dillon done:
449f6b5b182SJeff Roberson 	if (vp)
450f6b5b182SJeff Roberson 		vput(vp);
4512cd301d1SAlan Cox 	mtx_unlock(&Giant);
452279d7226SMatthew Dillon 	if (fp)
453b40ce416SJulian Elischer 		fdrop(fp, td);
454f6b5b182SJeff Roberson 
455df8bae1dSRodney W. Grimes 	return (error);
456df8bae1dSRodney W. Grimes }
457df8bae1dSRodney W. Grimes 
45805f0fdd2SPoul-Henning Kamp #ifdef COMPAT_43
459d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
46005f0fdd2SPoul-Henning Kamp struct ommap_args {
46105f0fdd2SPoul-Henning Kamp 	caddr_t addr;
46205f0fdd2SPoul-Henning Kamp 	int len;
46305f0fdd2SPoul-Henning Kamp 	int prot;
46405f0fdd2SPoul-Henning Kamp 	int flags;
46505f0fdd2SPoul-Henning Kamp 	int fd;
46605f0fdd2SPoul-Henning Kamp 	long pos;
46705f0fdd2SPoul-Henning Kamp };
468d2d3e875SBruce Evans #endif
46905f0fdd2SPoul-Henning Kamp int
470b40ce416SJulian Elischer ommap(td, uap)
471b40ce416SJulian Elischer 	struct thread *td;
47254d92145SMatthew Dillon 	struct ommap_args *uap;
47305f0fdd2SPoul-Henning Kamp {
47405f0fdd2SPoul-Henning Kamp 	struct mmap_args nargs;
47505f0fdd2SPoul-Henning Kamp 	static const char cvtbsdprot[8] = {
47605f0fdd2SPoul-Henning Kamp 		0,
47705f0fdd2SPoul-Henning Kamp 		PROT_EXEC,
47805f0fdd2SPoul-Henning Kamp 		PROT_WRITE,
47905f0fdd2SPoul-Henning Kamp 		PROT_EXEC | PROT_WRITE,
48005f0fdd2SPoul-Henning Kamp 		PROT_READ,
48105f0fdd2SPoul-Henning Kamp 		PROT_EXEC | PROT_READ,
48205f0fdd2SPoul-Henning Kamp 		PROT_WRITE | PROT_READ,
48305f0fdd2SPoul-Henning Kamp 		PROT_EXEC | PROT_WRITE | PROT_READ,
48405f0fdd2SPoul-Henning Kamp 	};
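	/*
	 * Example of the translation table above: the old protection value
	 * indexes the table directly, with bit 0 = execute, bit 1 = write
	 * and bit 2 = read, so an old prot of 5 (read + execute) becomes
	 * cvtbsdprot[5] == (PROT_EXEC | PROT_READ) for the native mmap()
	 * call below.
	 */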
4850d94caffSDavid Greenman 
48605f0fdd2SPoul-Henning Kamp #define	OMAP_ANON	0x0002
48705f0fdd2SPoul-Henning Kamp #define	OMAP_COPY	0x0020
48805f0fdd2SPoul-Henning Kamp #define	OMAP_SHARED	0x0010
48905f0fdd2SPoul-Henning Kamp #define	OMAP_FIXED	0x0100
49005f0fdd2SPoul-Henning Kamp 
49105f0fdd2SPoul-Henning Kamp 	nargs.addr = uap->addr;
49205f0fdd2SPoul-Henning Kamp 	nargs.len = uap->len;
49305f0fdd2SPoul-Henning Kamp 	nargs.prot = cvtbsdprot[uap->prot & 0x7];
49405f0fdd2SPoul-Henning Kamp 	nargs.flags = 0;
49505f0fdd2SPoul-Henning Kamp 	if (uap->flags & OMAP_ANON)
49605f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_ANON;
49705f0fdd2SPoul-Henning Kamp 	if (uap->flags & OMAP_COPY)
49805f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_COPY;
49905f0fdd2SPoul-Henning Kamp 	if (uap->flags & OMAP_SHARED)
50005f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_SHARED;
50105f0fdd2SPoul-Henning Kamp 	else
50205f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_PRIVATE;
50305f0fdd2SPoul-Henning Kamp 	if (uap->flags & OMAP_FIXED)
50405f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_FIXED;
50505f0fdd2SPoul-Henning Kamp 	nargs.fd = uap->fd;
50605f0fdd2SPoul-Henning Kamp 	nargs.pos = uap->pos;
507b40ce416SJulian Elischer 	return (mmap(td, &nargs));
50805f0fdd2SPoul-Henning Kamp }
50905f0fdd2SPoul-Henning Kamp #endif				/* COMPAT_43 */
51005f0fdd2SPoul-Henning Kamp 
51105f0fdd2SPoul-Henning Kamp 
512d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
513df8bae1dSRodney W. Grimes struct msync_args {
514651bb817SAlexander Langer 	void *addr;
515df8bae1dSRodney W. Grimes 	int len;
516e6c6af11SDavid Greenman 	int flags;
517df8bae1dSRodney W. Grimes };
518d2d3e875SBruce Evans #endif
519d2c60af8SMatthew Dillon /*
520d2c60af8SMatthew Dillon  * MPSAFE
521d2c60af8SMatthew Dillon  */
522df8bae1dSRodney W. Grimes int
523b40ce416SJulian Elischer msync(td, uap)
524b40ce416SJulian Elischer 	struct thread *td;
525df8bae1dSRodney W. Grimes 	struct msync_args *uap;
526df8bae1dSRodney W. Grimes {
527df8bae1dSRodney W. Grimes 	vm_offset_t addr;
528dabee6feSPeter Wemm 	vm_size_t size, pageoff;
529e6c6af11SDavid Greenman 	int flags;
530df8bae1dSRodney W. Grimes 	vm_map_t map;
531df8bae1dSRodney W. Grimes 	int rv;
532df8bae1dSRodney W. Grimes 
533df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
5349154ee6aSPeter Wemm 	size = uap->len;
535e6c6af11SDavid Greenman 	flags = uap->flags;
536e6c6af11SDavid Greenman 
537dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
538dabee6feSPeter Wemm 	addr -= pageoff;
539dabee6feSPeter Wemm 	size += pageoff;
540dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
5419154ee6aSPeter Wemm 	if (addr + size < addr)
542dabee6feSPeter Wemm 		return (EINVAL);
543dabee6feSPeter Wemm 
544dabee6feSPeter Wemm 	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
5451e62bc63SDavid Greenman 		return (EINVAL);
5461e62bc63SDavid Greenman 
5470cddd8f0SMatthew Dillon 	mtx_lock(&Giant);
5480cddd8f0SMatthew Dillon 
549b40ce416SJulian Elischer 	map = &td->td_proc->p_vmspace->vm_map;
5509154ee6aSPeter Wemm 
551df8bae1dSRodney W. Grimes 	/*
552df8bae1dSRodney W. Grimes 	 * XXX Gak!  If size is zero we are supposed to sync "all modified
5530d94caffSDavid Greenman 	 * pages within the region containing addr".  Unfortunately, we don't
5540d94caffSDavid Greenman 	 * really keep track of individual mmaps so we approximate by flushing
5550d94caffSDavid Greenman 	 * the range of the map entry containing addr. This can be incorrect
5560d94caffSDavid Greenman 	 * if the region splits or is coalesced with a neighbor.
557df8bae1dSRodney W. Grimes 	 */
558df8bae1dSRodney W. Grimes 	if (size == 0) {
559df8bae1dSRodney W. Grimes 		vm_map_entry_t entry;
560df8bae1dSRodney W. Grimes 
561df8bae1dSRodney W. Grimes 		vm_map_lock_read(map);
562df8bae1dSRodney W. Grimes 		rv = vm_map_lookup_entry(map, addr, &entry);
563df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map);
56423955314SAlfred Perlstein 		if (rv == FALSE) {
565d2c60af8SMatthew Dillon 			rv = -1;
566d2c60af8SMatthew Dillon 			goto done2;
56723955314SAlfred Perlstein 		}
568df8bae1dSRodney W. Grimes 		addr = entry->start;
569df8bae1dSRodney W. Grimes 		size = entry->end - entry->start;
570df8bae1dSRodney W. Grimes 	}
571e6c6af11SDavid Greenman 
572df8bae1dSRodney W. Grimes 	/*
573df8bae1dSRodney W. Grimes 	 * Clean the pages and interpret the return value.
574df8bae1dSRodney W. Grimes 	 */
5756c534ad8SDavid Greenman 	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
576e6c6af11SDavid Greenman 	    (flags & MS_INVALIDATE) != 0);
577e6c6af11SDavid Greenman 
578d2c60af8SMatthew Dillon done2:
579190609ddSJohn Baldwin 	mtx_unlock(&Giant);
5800cddd8f0SMatthew Dillon 
581df8bae1dSRodney W. Grimes 	switch (rv) {
582df8bae1dSRodney W. Grimes 	case KERN_SUCCESS:
583d2c60af8SMatthew Dillon 		return (0);
584df8bae1dSRodney W. Grimes 	case KERN_INVALID_ADDRESS:
585df8bae1dSRodney W. Grimes 		return (EINVAL);	/* Sun returns ENOMEM? */
586df8bae1dSRodney W. Grimes 	case KERN_FAILURE:
587df8bae1dSRodney W. Grimes 		return (EIO);
588df8bae1dSRodney W. Grimes 	default:
589df8bae1dSRodney W. Grimes 		return (EINVAL);
590df8bae1dSRodney W. Grimes 	}
591df8bae1dSRodney W. Grimes }
592df8bae1dSRodney W. Grimes 
593d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
594df8bae1dSRodney W. Grimes struct munmap_args {
595651bb817SAlexander Langer 	void *addr;
5969154ee6aSPeter Wemm 	size_t len;
597df8bae1dSRodney W. Grimes };
598d2d3e875SBruce Evans #endif
599d2c60af8SMatthew Dillon /*
600d2c60af8SMatthew Dillon  * MPSAFE
601d2c60af8SMatthew Dillon  */
602df8bae1dSRodney W. Grimes int
603b40ce416SJulian Elischer munmap(td, uap)
604b40ce416SJulian Elischer 	struct thread *td;
60554d92145SMatthew Dillon 	struct munmap_args *uap;
606df8bae1dSRodney W. Grimes {
607df8bae1dSRodney W. Grimes 	vm_offset_t addr;
608dabee6feSPeter Wemm 	vm_size_t size, pageoff;
609df8bae1dSRodney W. Grimes 	vm_map_t map;
610df8bae1dSRodney W. Grimes 
611df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
6129154ee6aSPeter Wemm 	size = uap->len;
613dabee6feSPeter Wemm 
614dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
615dabee6feSPeter Wemm 	addr -= pageoff;
616dabee6feSPeter Wemm 	size += pageoff;
617dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
6189154ee6aSPeter Wemm 	if (addr + size < addr)
619df8bae1dSRodney W. Grimes 		return (EINVAL);
6209154ee6aSPeter Wemm 
621df8bae1dSRodney W. Grimes 	if (size == 0)
622df8bae1dSRodney W. Grimes 		return (0);
623dabee6feSPeter Wemm 
624df8bae1dSRodney W. Grimes 	/*
62505ba50f5SJake Burkholder 	 * Check for illegal addresses.  Watch out for address wrap...
626df8bae1dSRodney W. Grimes 	 */
627b40ce416SJulian Elischer 	map = &td->td_proc->p_vmspace->vm_map;
62805ba50f5SJake Burkholder 	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
62905ba50f5SJake Burkholder 		return (EINVAL);
630df8bae1dSRodney W. Grimes 	/*
631df8bae1dSRodney W. Grimes 	 * Make sure entire range is allocated.
632df8bae1dSRodney W. Grimes 	 */
6338c5c5d04SAlan Cox 	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
634df8bae1dSRodney W. Grimes 		return (EINVAL);
6358c5c5d04SAlan Cox 
636df8bae1dSRodney W. Grimes 	/* returns nothing but KERN_SUCCESS anyway */
637df8bae1dSRodney W. Grimes 	(void) vm_map_remove(map, addr, addr + size);
638df8bae1dSRodney W. Grimes 	return (0);
639df8bae1dSRodney W. Grimes }
640df8bae1dSRodney W. Grimes 
641279d7226SMatthew Dillon #if 0
642df8bae1dSRodney W. Grimes void
643b40ce416SJulian Elischer munmapfd(td, fd)
644b40ce416SJulian Elischer 	struct thread *td;
645df8bae1dSRodney W. Grimes 	int fd;
646df8bae1dSRodney W. Grimes {
647df8bae1dSRodney W. Grimes 	/*
648c4ed5a07SDavid Greenman 	 * XXX should unmap any regions mapped to this file
649df8bae1dSRodney W. Grimes 	 */
650426da3bcSAlfred Perlstein 	FILEDESC_LOCK(td->td_proc->p_fd);
651b40ce416SJulian Elischer 	td->td_proc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
652426da3bcSAlfred Perlstein 	FILEDESC_UNLOCK(td->td_proc->p_fd);
653df8bae1dSRodney W. Grimes }
654279d7226SMatthew Dillon #endif
655df8bae1dSRodney W. Grimes 
656d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
657df8bae1dSRodney W. Grimes struct mprotect_args {
658651bb817SAlexander Langer 	const void *addr;
6599154ee6aSPeter Wemm 	size_t len;
660df8bae1dSRodney W. Grimes 	int prot;
661df8bae1dSRodney W. Grimes };
662d2d3e875SBruce Evans #endif
663d2c60af8SMatthew Dillon /*
664d2c60af8SMatthew Dillon  * MPSAFE
665d2c60af8SMatthew Dillon  */
666df8bae1dSRodney W. Grimes int
667b40ce416SJulian Elischer mprotect(td, uap)
668b40ce416SJulian Elischer 	struct thread *td;
669df8bae1dSRodney W. Grimes 	struct mprotect_args *uap;
670df8bae1dSRodney W. Grimes {
671df8bae1dSRodney W. Grimes 	vm_offset_t addr;
672dabee6feSPeter Wemm 	vm_size_t size, pageoff;
67354d92145SMatthew Dillon 	vm_prot_t prot;
674df8bae1dSRodney W. Grimes 
675df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
6769154ee6aSPeter Wemm 	size = uap->len;
677df8bae1dSRodney W. Grimes 	prot = uap->prot & VM_PROT_ALL;
678d0aea04fSJohn Dyson #if defined(VM_PROT_READ_IS_EXEC)
679d0aea04fSJohn Dyson 	if (prot & VM_PROT_READ)
680d0aea04fSJohn Dyson 		prot |= VM_PROT_EXECUTE;
681d0aea04fSJohn Dyson #endif
682df8bae1dSRodney W. Grimes 
683dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
684dabee6feSPeter Wemm 	addr -= pageoff;
685dabee6feSPeter Wemm 	size += pageoff;
686dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
6879154ee6aSPeter Wemm 	if (addr + size < addr)
688dabee6feSPeter Wemm 		return (EINVAL);
689dabee6feSPeter Wemm 
69043285049SAlan Cox 	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
69143285049SAlan Cox 	    addr + size, prot, FALSE)) {
692df8bae1dSRodney W. Grimes 	case KERN_SUCCESS:
693df8bae1dSRodney W. Grimes 		return (0);
694df8bae1dSRodney W. Grimes 	case KERN_PROTECTION_FAILURE:
695df8bae1dSRodney W. Grimes 		return (EACCES);
696df8bae1dSRodney W. Grimes 	}
697df8bae1dSRodney W. Grimes 	return (EINVAL);
698df8bae1dSRodney W. Grimes }
699df8bae1dSRodney W. Grimes 
700d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
701dabee6feSPeter Wemm struct minherit_args {
702651bb817SAlexander Langer 	void *addr;
7039154ee6aSPeter Wemm 	size_t len;
704dabee6feSPeter Wemm 	int inherit;
705dabee6feSPeter Wemm };
706dabee6feSPeter Wemm #endif
707d2c60af8SMatthew Dillon /*
708d2c60af8SMatthew Dillon  * MPSAFE
709d2c60af8SMatthew Dillon  */
710dabee6feSPeter Wemm int
711b40ce416SJulian Elischer minherit(td, uap)
712b40ce416SJulian Elischer 	struct thread *td;
713dabee6feSPeter Wemm 	struct minherit_args *uap;
714dabee6feSPeter Wemm {
715dabee6feSPeter Wemm 	vm_offset_t addr;
716dabee6feSPeter Wemm 	vm_size_t size, pageoff;
71754d92145SMatthew Dillon 	vm_inherit_t inherit;
718dabee6feSPeter Wemm 
719dabee6feSPeter Wemm 	addr = (vm_offset_t)uap->addr;
7209154ee6aSPeter Wemm 	size = uap->len;
721dabee6feSPeter Wemm 	inherit = uap->inherit;
722dabee6feSPeter Wemm 
723dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
724dabee6feSPeter Wemm 	addr -= pageoff;
725dabee6feSPeter Wemm 	size += pageoff;
726dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
7279154ee6aSPeter Wemm 	if (addr + size < addr)
728dabee6feSPeter Wemm 		return (EINVAL);
729dabee6feSPeter Wemm 
730e0be79afSAlan Cox 	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
731e0be79afSAlan Cox 	    addr + size, inherit)) {
732dabee6feSPeter Wemm 	case KERN_SUCCESS:
733dabee6feSPeter Wemm 		return (0);
734dabee6feSPeter Wemm 	case KERN_PROTECTION_FAILURE:
735dabee6feSPeter Wemm 		return (EACCES);
736dabee6feSPeter Wemm 	}
737dabee6feSPeter Wemm 	return (EINVAL);
738dabee6feSPeter Wemm }
739dabee6feSPeter Wemm 
740dabee6feSPeter Wemm #ifndef _SYS_SYSPROTO_H_
741df8bae1dSRodney W. Grimes struct madvise_args {
742651bb817SAlexander Langer 	void *addr;
7439154ee6aSPeter Wemm 	size_t len;
744df8bae1dSRodney W. Grimes 	int behav;
745df8bae1dSRodney W. Grimes };
746d2d3e875SBruce Evans #endif
7470d94caffSDavid Greenman 
748d2c60af8SMatthew Dillon /*
749d2c60af8SMatthew Dillon  * MPSAFE
750d2c60af8SMatthew Dillon  */
751df8bae1dSRodney W. Grimes /* ARGSUSED */
752df8bae1dSRodney W. Grimes int
753b40ce416SJulian Elischer madvise(td, uap)
754b40ce416SJulian Elischer 	struct thread *td;
755df8bae1dSRodney W. Grimes 	struct madvise_args *uap;
756df8bae1dSRodney W. Grimes {
757f35329acSJohn Dyson 	vm_offset_t start, end;
75805ba50f5SJake Burkholder 	vm_map_t map;
759f4cf2141SWes Peters 	struct proc *p;
760f4cf2141SWes Peters 	int error;
761b4309055SMatthew Dillon 
762b4309055SMatthew Dillon 	/*
763f4cf2141SWes Peters 	 * Check for our special case, advising the swap pager we are
764f4cf2141SWes Peters 	 * "immortal."
765f4cf2141SWes Peters 	 */
766f4cf2141SWes Peters 	if (uap->behav == MADV_PROTECT) {
76769297bf8SJohn Baldwin 		error = suser(td);
76869297bf8SJohn Baldwin 		if (error == 0) {
769f4cf2141SWes Peters 			p = td->td_proc;
770f4cf2141SWes Peters 			PROC_LOCK(p);
771f4cf2141SWes Peters 			p->p_flag |= P_PROTECTED;
772f4cf2141SWes Peters 			PROC_UNLOCK(p);
77369297bf8SJohn Baldwin 		}
774f4cf2141SWes Peters 		return (error);
775f4cf2141SWes Peters 	}
776f4cf2141SWes Peters 	/*
777b4309055SMatthew Dillon 	 * Check for illegal behavior
778b4309055SMatthew Dillon 	 */
7799730a5daSPaul Saab 	if (uap->behav < 0 || uap->behav > MADV_CORE)
780b4309055SMatthew Dillon 		return (EINVAL);
781867a482dSJohn Dyson 	/*
782867a482dSJohn Dyson 	 * Check for illegal addresses.  Watch out for address wrap... Note
783867a482dSJohn Dyson 	 * that VM_*_ADDRESS are not constants due to casts (argh).
784867a482dSJohn Dyson 	 */
78505ba50f5SJake Burkholder 	map = &td->td_proc->p_vmspace->vm_map;
78605ba50f5SJake Burkholder 	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
78705ba50f5SJake Burkholder 	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
788867a482dSJohn Dyson 		return (EINVAL);
789867a482dSJohn Dyson 	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
790867a482dSJohn Dyson 		return (EINVAL);
791867a482dSJohn Dyson 
792867a482dSJohn Dyson 	/*
793867a482dSJohn Dyson 	 * Since this routine is only advisory, we default to conservative
794867a482dSJohn Dyson 	 * behavior.
795867a482dSJohn Dyson 	 */
796cd6eea25SDavid Greenman 	start = trunc_page((vm_offset_t) uap->addr);
797cd6eea25SDavid Greenman 	end = round_page((vm_offset_t) uap->addr + uap->len);
798867a482dSJohn Dyson 
79905ba50f5SJake Burkholder 	if (vm_map_madvise(map, start, end, uap->behav))
800094f6d26SAlan Cox 		return (EINVAL);
801094f6d26SAlan Cox 	return (0);
802df8bae1dSRodney W. Grimes }
803df8bae1dSRodney W. Grimes 
804d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
805df8bae1dSRodney W. Grimes struct mincore_args {
806651bb817SAlexander Langer 	const void *addr;
8079154ee6aSPeter Wemm 	size_t len;
808df8bae1dSRodney W. Grimes 	char *vec;
809df8bae1dSRodney W. Grimes };
810d2d3e875SBruce Evans #endif
8110d94caffSDavid Greenman 
812d2c60af8SMatthew Dillon /*
813d2c60af8SMatthew Dillon  * MPSAFE
814d2c60af8SMatthew Dillon  */
815df8bae1dSRodney W. Grimes /* ARGSUSED */
816df8bae1dSRodney W. Grimes int
817b40ce416SJulian Elischer mincore(td, uap)
818b40ce416SJulian Elischer 	struct thread *td;
819df8bae1dSRodney W. Grimes 	struct mincore_args *uap;
820df8bae1dSRodney W. Grimes {
821867a482dSJohn Dyson 	vm_offset_t addr, first_addr;
822867a482dSJohn Dyson 	vm_offset_t end, cend;
823867a482dSJohn Dyson 	pmap_t pmap;
824867a482dSJohn Dyson 	vm_map_t map;
82502c04a2fSJohn Dyson 	char *vec;
826d2c60af8SMatthew Dillon 	int error = 0;
827867a482dSJohn Dyson 	int vecindex, lastvecindex;
82854d92145SMatthew Dillon 	vm_map_entry_t current;
829867a482dSJohn Dyson 	vm_map_entry_t entry;
830867a482dSJohn Dyson 	int mincoreinfo;
831dd2622a8SAlan Cox 	unsigned int timestamp;
832df8bae1dSRodney W. Grimes 
833867a482dSJohn Dyson 	/*
834867a482dSJohn Dyson 	 * Make sure that the addresses presented are valid for user
835867a482dSJohn Dyson 	 * mode.
836867a482dSJohn Dyson 	 */
837867a482dSJohn Dyson 	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
8389154ee6aSPeter Wemm 	end = addr + (vm_size_t)round_page(uap->len);
83905ba50f5SJake Burkholder 	map = &td->td_proc->p_vmspace->vm_map;
84005ba50f5SJake Burkholder 	if (end > vm_map_max(map) || end < addr)
84102c04a2fSJohn Dyson 		return (EINVAL);
84202c04a2fSJohn Dyson 
843867a482dSJohn Dyson 	/*
844867a482dSJohn Dyson 	 * Address of byte vector
845867a482dSJohn Dyson 	 */
84602c04a2fSJohn Dyson 	vec = uap->vec;
847867a482dSJohn Dyson 
848b40ce416SJulian Elischer 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
849867a482dSJohn Dyson 
850eff50fcdSAlan Cox 	vm_map_lock_read(map);
851dd2622a8SAlan Cox RestartScan:
852dd2622a8SAlan Cox 	timestamp = map->timestamp;
853867a482dSJohn Dyson 
854867a482dSJohn Dyson 	if (!vm_map_lookup_entry(map, addr, &entry))
855867a482dSJohn Dyson 		entry = entry->next;
856867a482dSJohn Dyson 
857867a482dSJohn Dyson 	/*
858867a482dSJohn Dyson 	 * Do this on a map entry basis so that if the pages are not
859867a482dSJohn Dyson 	 * in the current process's address space, we can easily look
860867a482dSJohn Dyson 	 * up the pages elsewhere.
861867a482dSJohn Dyson 	 */
862867a482dSJohn Dyson 	lastvecindex = -1;
863867a482dSJohn Dyson 	for (current = entry;
864867a482dSJohn Dyson 	    (current != &map->header) && (current->start < end);
865867a482dSJohn Dyson 	    current = current->next) {
866867a482dSJohn Dyson 
867867a482dSJohn Dyson 		/*
868867a482dSJohn Dyson 		 * ignore submaps (for now) or null objects
869867a482dSJohn Dyson 		 */
8709fdfe602SMatthew Dillon 		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
871867a482dSJohn Dyson 			current->object.vm_object == NULL)
872867a482dSJohn Dyson 			continue;
873867a482dSJohn Dyson 
874867a482dSJohn Dyson 		/*
875867a482dSJohn Dyson 		 * limit this scan to the current map entry and the
876867a482dSJohn Dyson 		 * limits for the mincore call
877867a482dSJohn Dyson 		 */
878867a482dSJohn Dyson 		if (addr < current->start)
879867a482dSJohn Dyson 			addr = current->start;
880867a482dSJohn Dyson 		cend = current->end;
881867a482dSJohn Dyson 		if (cend > end)
882867a482dSJohn Dyson 			cend = end;
883867a482dSJohn Dyson 
884867a482dSJohn Dyson 		/*
885867a482dSJohn Dyson 		 * scan this entry one page at a time
886867a482dSJohn Dyson 		 */
887867a482dSJohn Dyson 		while (addr < cend) {
888867a482dSJohn Dyson 			/*
889867a482dSJohn Dyson 			 * Check pmap first; it is likely faster, and it
890867a482dSJohn Dyson 			 * can also tell us whether we are the one
891867a482dSJohn Dyson 			 * referencing or modifying the page.
892867a482dSJohn Dyson 			 */
8937ebcee37SAlan Cox 			mtx_lock(&Giant);
894867a482dSJohn Dyson 			mincoreinfo = pmap_mincore(pmap, addr);
8957ebcee37SAlan Cox 			mtx_unlock(&Giant);
896867a482dSJohn Dyson 			if (!mincoreinfo) {
897867a482dSJohn Dyson 				vm_pindex_t pindex;
898867a482dSJohn Dyson 				vm_ooffset_t offset;
899867a482dSJohn Dyson 				vm_page_t m;
900867a482dSJohn Dyson 				/*
901867a482dSJohn Dyson 				 * calculate the page index into the object
902867a482dSJohn Dyson 				 */
903867a482dSJohn Dyson 				offset = current->offset + (addr - current->start);
904867a482dSJohn Dyson 				pindex = OFF_TO_IDX(offset);
905bc5b057fSAlan Cox 				VM_OBJECT_LOCK(current->object.vm_object);
906867a482dSJohn Dyson 				m = vm_page_lookup(current->object.vm_object,
907867a482dSJohn Dyson 					pindex);
908867a482dSJohn Dyson 				/*
909867a482dSJohn Dyson 				 * if the page is resident, then gather information about
910867a482dSJohn Dyson 				 * it.
911867a482dSJohn Dyson 				 */
912867a482dSJohn Dyson 				if (m) {
913867a482dSJohn Dyson 					mincoreinfo = MINCORE_INCORE;
9147ebcee37SAlan Cox 					vm_page_lock_queues();
915867a482dSJohn Dyson 					if (m->dirty ||
9160385347cSPeter Wemm 						pmap_is_modified(m))
917867a482dSJohn Dyson 						mincoreinfo |= MINCORE_MODIFIED_OTHER;
918867a482dSJohn Dyson 					if ((m->flags & PG_REFERENCED) ||
9190385347cSPeter Wemm 						pmap_ts_referenced(m)) {
920e69763a3SDoug Rabson 						vm_page_flag_set(m, PG_REFERENCED);
921867a482dSJohn Dyson 						mincoreinfo |= MINCORE_REFERENCED_OTHER;
92202c04a2fSJohn Dyson 					}
923e80b7b69SAlan Cox 					vm_page_unlock_queues();
9249b5a5d81SJohn Dyson 				}
9257ebcee37SAlan Cox 				VM_OBJECT_UNLOCK(current->object.vm_object);
9267ebcee37SAlan Cox 			}
927867a482dSJohn Dyson 
928867a482dSJohn Dyson 			/*
929dd2622a8SAlan Cox 			 * subyte may page fault.  In case it needs to modify
930dd2622a8SAlan Cox 			 * the map, we release the lock.
931dd2622a8SAlan Cox 			 */
932dd2622a8SAlan Cox 			vm_map_unlock_read(map);
933dd2622a8SAlan Cox 
934dd2622a8SAlan Cox 			/*
935867a482dSJohn Dyson 			 * calculate index into user supplied byte vector
936867a482dSJohn Dyson 			 */
937867a482dSJohn Dyson 			vecindex = OFF_TO_IDX(addr - first_addr);
938867a482dSJohn Dyson 
939867a482dSJohn Dyson 			/*
940867a482dSJohn Dyson 			 * If we have skipped map entries, we need to make sure that
941867a482dSJohn Dyson 			 * the byte vector is zeroed for those skipped entries.
942867a482dSJohn Dyson 			 */
943867a482dSJohn Dyson 			while ((lastvecindex + 1) < vecindex) {
944867a482dSJohn Dyson 				error = subyte(vec + lastvecindex, 0);
945867a482dSJohn Dyson 				if (error) {
946d2c60af8SMatthew Dillon 					error = EFAULT;
947d2c60af8SMatthew Dillon 					goto done2;
948867a482dSJohn Dyson 				}
949867a482dSJohn Dyson 				++lastvecindex;
950867a482dSJohn Dyson 			}
951867a482dSJohn Dyson 
952867a482dSJohn Dyson 			/*
953867a482dSJohn Dyson 			 * Pass the page information to the user
954867a482dSJohn Dyson 			 */
955867a482dSJohn Dyson 			error = subyte(vec + vecindex, mincoreinfo);
956867a482dSJohn Dyson 			if (error) {
957d2c60af8SMatthew Dillon 				error = EFAULT;
958d2c60af8SMatthew Dillon 				goto done2;
959867a482dSJohn Dyson 			}
960dd2622a8SAlan Cox 
961dd2622a8SAlan Cox 			/*
962dd2622a8SAlan Cox 			 * If the map has changed, due to the subyte, the previous
963dd2622a8SAlan Cox 			 * output may be invalid.
964dd2622a8SAlan Cox 			 */
965dd2622a8SAlan Cox 			vm_map_lock_read(map);
966dd2622a8SAlan Cox 			if (timestamp != map->timestamp)
967dd2622a8SAlan Cox 				goto RestartScan;
968dd2622a8SAlan Cox 
969867a482dSJohn Dyson 			lastvecindex = vecindex;
97002c04a2fSJohn Dyson 			addr += PAGE_SIZE;
97102c04a2fSJohn Dyson 		}
972867a482dSJohn Dyson 	}
973867a482dSJohn Dyson 
974867a482dSJohn Dyson 	/*
975dd2622a8SAlan Cox 	 * subyte may page fault.  In case it needs to modify
976dd2622a8SAlan Cox 	 * the map, we release the lock.
977dd2622a8SAlan Cox 	 */
978dd2622a8SAlan Cox 	vm_map_unlock_read(map);
979dd2622a8SAlan Cox 
980dd2622a8SAlan Cox 	/*
981867a482dSJohn Dyson 	 * Zero the last entries in the byte vector.
982867a482dSJohn Dyson 	 */
983867a482dSJohn Dyson 	vecindex = OFF_TO_IDX(end - first_addr);
984867a482dSJohn Dyson 	while ((lastvecindex + 1) < vecindex) {
985867a482dSJohn Dyson 		error = subyte(vec + lastvecindex, 0);
986867a482dSJohn Dyson 		if (error) {
987d2c60af8SMatthew Dillon 			error = EFAULT;
988d2c60af8SMatthew Dillon 			goto done2;
989867a482dSJohn Dyson 		}
990867a482dSJohn Dyson 		++lastvecindex;
991867a482dSJohn Dyson 	}
992867a482dSJohn Dyson 
993dd2622a8SAlan Cox 	/*
994dd2622a8SAlan Cox 	 * If the map has changed, due to the subyte, the previous
995dd2622a8SAlan Cox 	 * output may be invalid.
996dd2622a8SAlan Cox 	 */
997dd2622a8SAlan Cox 	vm_map_lock_read(map);
998dd2622a8SAlan Cox 	if (timestamp != map->timestamp)
999dd2622a8SAlan Cox 		goto RestartScan;
1000eff50fcdSAlan Cox 	vm_map_unlock_read(map);
1001d2c60af8SMatthew Dillon done2:
1002d2c60af8SMatthew Dillon 	return (error);
1003df8bae1dSRodney W. Grimes }
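
/*
 * Hedged userland sketch of the interface implemented above (buf and npages
 * are hypothetical): for a region of npages pages,
 *
 *	char vec[npages];
 *	mincore(buf, npages * PAGE_SIZE, vec);
 *
 * fills one byte per page: vec[i] has MINCORE_INCORE set when page i is
 * resident, plus MINCORE_MODIFIED_OTHER and MINCORE_REFERENCED_OTHER bits as
 * gathered from the pmap and the page queues in the loop above, and is zero
 * for pages that are not resident or that fall within skipped map entries.
 */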
1004df8bae1dSRodney W. Grimes 
1005d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
1006df8bae1dSRodney W. Grimes struct mlock_args {
1007651bb817SAlexander Langer 	const void *addr;
1008df8bae1dSRodney W. Grimes 	size_t len;
1009df8bae1dSRodney W. Grimes };
1010d2d3e875SBruce Evans #endif
1011d2c60af8SMatthew Dillon /*
1012d2c60af8SMatthew Dillon  * MPSAFE
1013d2c60af8SMatthew Dillon  */
1014df8bae1dSRodney W. Grimes int
1015b40ce416SJulian Elischer mlock(td, uap)
1016b40ce416SJulian Elischer 	struct thread *td;
1017df8bae1dSRodney W. Grimes 	struct mlock_args *uap;
1018df8bae1dSRodney W. Grimes {
1019df8bae1dSRodney W. Grimes 	vm_offset_t addr;
1020dabee6feSPeter Wemm 	vm_size_t size, pageoff;
1021df8bae1dSRodney W. Grimes 	int error;
1022df8bae1dSRodney W. Grimes 
1023df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
10249154ee6aSPeter Wemm 	size = uap->len;
10259154ee6aSPeter Wemm 
1026dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
1027dabee6feSPeter Wemm 	addr -= pageoff;
1028dabee6feSPeter Wemm 	size += pageoff;
1029dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
1030dabee6feSPeter Wemm 
1031dabee6feSPeter Wemm 	/* disable wrap around */
10329154ee6aSPeter Wemm 	if (addr + size < addr)
1033df8bae1dSRodney W. Grimes 		return (EINVAL);
1034dabee6feSPeter Wemm 
1035df8bae1dSRodney W. Grimes 	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
1036df8bae1dSRodney W. Grimes 		return (EAGAIN);
10379154ee6aSPeter Wemm 
1038df8bae1dSRodney W. Grimes #ifdef pmap_wired_count
1039b40ce416SJulian Elischer 	if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
1040b40ce416SJulian Elischer 	    td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
10414a40e3d4SJohn Dyson 		return (ENOMEM);
1042df8bae1dSRodney W. Grimes #else
104344731cabSJohn Baldwin 	error = suser(td);
104405f0fdd2SPoul-Henning Kamp 	if (error)
1045df8bae1dSRodney W. Grimes 		return (error);
1046df8bae1dSRodney W. Grimes #endif
1047df8bae1dSRodney W. Grimes 
10481d7cf06cSAlan Cox 	error = vm_map_wire(&td->td_proc->p_vmspace->vm_map, addr,
1049abd498aaSBruce M Simpson 		     addr + size, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
1050df8bae1dSRodney W. Grimes 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
1051df8bae1dSRodney W. Grimes }
1052df8bae1dSRodney W. Grimes 
1053d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
10544a40e3d4SJohn Dyson struct mlockall_args {
10554a40e3d4SJohn Dyson 	int	how;
10564a40e3d4SJohn Dyson };
10574a40e3d4SJohn Dyson #endif
10584a40e3d4SJohn Dyson 
1059d2c60af8SMatthew Dillon /*
1060d2c60af8SMatthew Dillon  * MPSAFE
1061d2c60af8SMatthew Dillon  */
10624a40e3d4SJohn Dyson int
1063b40ce416SJulian Elischer mlockall(td, uap)
1064b40ce416SJulian Elischer 	struct thread *td;
10654a40e3d4SJohn Dyson 	struct mlockall_args *uap;
10664a40e3d4SJohn Dyson {
1067abd498aaSBruce M Simpson 	vm_map_t map;
1068abd498aaSBruce M Simpson 	int error;
1069abd498aaSBruce M Simpson 
1070abd498aaSBruce M Simpson 	map = &td->td_proc->p_vmspace->vm_map;
1071abd498aaSBruce M Simpson 	error = 0;
1072abd498aaSBruce M Simpson 
1073abd498aaSBruce M Simpson 	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
1074abd498aaSBruce M Simpson 		return (EINVAL);
1075abd498aaSBruce M Simpson 
1076abd498aaSBruce M Simpson #ifdef pmap_wired_count
1077abd498aaSBruce M Simpson 	/*
1078abd498aaSBruce M Simpson 	 * If wiring all pages in the process would cause it to exceed
1079abd498aaSBruce M Simpson 	 * a hard resource limit, return ENOMEM.
1080abd498aaSBruce M Simpson 	 */
1081abd498aaSBruce M Simpson 	if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map))) >
1082abd498aaSBruce M Simpson 	    td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
1083abd498aaSBruce M Simpson 		return (ENOMEM);
1084abd498aaSBruce M Simpson #else
1085abd498aaSBruce M Simpson 	error = suser(td);
1086abd498aaSBruce M Simpson 	if (error)
1087abd498aaSBruce M Simpson 		return (error);
1088abd498aaSBruce M Simpson #endif
1089abd498aaSBruce M Simpson 
1090abd498aaSBruce M Simpson 	if (uap->how & MCL_FUTURE) {
1091abd498aaSBruce M Simpson 		vm_map_lock(map);
1092abd498aaSBruce M Simpson 		vm_map_modflags(map, MAP_WIREFUTURE, 0);
1093abd498aaSBruce M Simpson 		vm_map_unlock(map);
1094abd498aaSBruce M Simpson 		error = 0;
1095abd498aaSBruce M Simpson 	}
1096abd498aaSBruce M Simpson 
1097abd498aaSBruce M Simpson 	if (uap->how & MCL_CURRENT) {
1098abd498aaSBruce M Simpson 		/*
1099abd498aaSBruce M Simpson 		 * P1003.1-2001 mandates that all currently mapped pages
1100abd498aaSBruce M Simpson 		 * will be memory resident and locked (wired) upon return
1101abd498aaSBruce M Simpson 		 * from mlockall(). vm_map_wire() will wire pages, by
1102abd498aaSBruce M Simpson 		 * calling vm_fault_wire() for each page in the region.
1103abd498aaSBruce M Simpson 		 */
1104abd498aaSBruce M Simpson 		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
1105abd498aaSBruce M Simpson 		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1106abd498aaSBruce M Simpson 		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
1107abd498aaSBruce M Simpson 	}
1108abd498aaSBruce M Simpson 
1109abd498aaSBruce M Simpson 	return (error);
11104a40e3d4SJohn Dyson }
11114a40e3d4SJohn Dyson 
11124a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_
1113fa721254SAlfred Perlstein struct munlockall_args {
1114abd498aaSBruce M Simpson 	register_t dummy;
11154a40e3d4SJohn Dyson };
11164a40e3d4SJohn Dyson #endif
11174a40e3d4SJohn Dyson 
1118d2c60af8SMatthew Dillon /*
1119d2c60af8SMatthew Dillon  * MPSAFE
1120d2c60af8SMatthew Dillon  */
11214a40e3d4SJohn Dyson int
1122b40ce416SJulian Elischer munlockall(td, uap)
1123b40ce416SJulian Elischer 	struct thread *td;
11244a40e3d4SJohn Dyson 	struct munlockall_args *uap;
11254a40e3d4SJohn Dyson {
1126abd498aaSBruce M Simpson 	vm_map_t map;
1127abd498aaSBruce M Simpson 	int error;
1128abd498aaSBruce M Simpson 
1129abd498aaSBruce M Simpson 	map = &td->td_proc->p_vmspace->vm_map;
1130abd498aaSBruce M Simpson #ifndef pmap_wired_count
1131abd498aaSBruce M Simpson 	error = suser(td);
1132abd498aaSBruce M Simpson 	if (error)
1133abd498aaSBruce M Simpson 		return (error);
1134abd498aaSBruce M Simpson #endif
1135abd498aaSBruce M Simpson 
1136abd498aaSBruce M Simpson 	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
1137abd498aaSBruce M Simpson 	vm_map_lock(map);
1138abd498aaSBruce M Simpson 	vm_map_modflags(map, 0, MAP_WIREFUTURE);
1139abd498aaSBruce M Simpson 	vm_map_unlock(map);
1140abd498aaSBruce M Simpson 
1141abd498aaSBruce M Simpson 	/* Forcibly unwire all pages. */
1142abd498aaSBruce M Simpson 	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
1143abd498aaSBruce M Simpson 	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1144abd498aaSBruce M Simpson 
1145abd498aaSBruce M Simpson 	return (error);
11464a40e3d4SJohn Dyson }
11474a40e3d4SJohn Dyson 
11484a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_
1149df8bae1dSRodney W. Grimes struct munlock_args {
1150651bb817SAlexander Langer 	const void *addr;
1151df8bae1dSRodney W. Grimes 	size_t len;
1152df8bae1dSRodney W. Grimes };
1153d2d3e875SBruce Evans #endif
1154d2c60af8SMatthew Dillon /*
1155d2c60af8SMatthew Dillon  * MPSAFE
1156d2c60af8SMatthew Dillon  */
1157df8bae1dSRodney W. Grimes int
1158b40ce416SJulian Elischer munlock(td, uap)
1159b40ce416SJulian Elischer 	struct thread *td;
1160df8bae1dSRodney W. Grimes 	struct munlock_args *uap;
1161df8bae1dSRodney W. Grimes {
1162df8bae1dSRodney W. Grimes 	vm_offset_t addr;
1163dabee6feSPeter Wemm 	vm_size_t size, pageoff;
1164df8bae1dSRodney W. Grimes 	int error;
1165df8bae1dSRodney W. Grimes 
1166df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
11679154ee6aSPeter Wemm 	size = uap->len;
11689154ee6aSPeter Wemm 
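	/*
	 * Round the range out to page boundaries: truncate the start
	 * address down and round the length up, since unwiring works on
	 * whole pages.  E.g., with 4K pages, addr 0x1234 and len 0x100
	 * become the page range [0x1000, 0x2000).
	 */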
1169dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
1170dabee6feSPeter Wemm 	addr -= pageoff;
1171dabee6feSPeter Wemm 	size += pageoff;
1172dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
1173dabee6feSPeter Wemm 
1174dabee6feSPeter Wemm 	/* Disallow wrap-around (address overflow). */
11759154ee6aSPeter Wemm 	if (addr + size < addr)
1176df8bae1dSRodney W. Grimes 		return (EINVAL);
1177dabee6feSPeter Wemm 
1178df8bae1dSRodney W. Grimes #ifndef pmap_wired_count
117944731cabSJohn Baldwin 	error = suser(td);
118005f0fdd2SPoul-Henning Kamp 	if (error)
1181df8bae1dSRodney W. Grimes 		return (error);
1182df8bae1dSRodney W. Grimes #endif
1183df8bae1dSRodney W. Grimes 
11841d7cf06cSAlan Cox 	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, addr,
1185abd498aaSBruce M Simpson 		     addr + size, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
1186df8bae1dSRodney W. Grimes 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
1187df8bae1dSRodney W. Grimes }
1188df8bae1dSRodney W. Grimes 
1189df8bae1dSRodney W. Grimes /*
1190d2c60af8SMatthew Dillon  * vm_mmap()
1191d2c60af8SMatthew Dillon  *
1192d2c60af8SMatthew Dillon  * MPSAFE
1193d2c60af8SMatthew Dillon  *
1194d2c60af8SMatthew Dillon  * Internal version of mmap.  Currently used by mmap, exec, and System V
1195d2c60af8SMatthew Dillon  * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
1196df8bae1dSRodney W. Grimes  */
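/*
 * A minimal usage sketch (illustrative only, not lifted from an actual
 * caller): an anonymous, private, pageable mapping might be requested as
 *
 *	vm_offset_t addr = 0;
 *	error = vm_mmap(&p->p_vmspace->vm_map, &addr, PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL,
 *	    MAP_ANON | MAP_PRIVATE, NULL, 0);
 *
 * where "p" stands for the caller's struct proc.  With handle == NULL no
 * object is allocated up front; pages are supplied at first fault.
 */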
1197df8bae1dSRodney W. Grimes int
1198b9dcd593SBruce Evans vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1199b9dcd593SBruce Evans 	vm_prot_t maxprot, int flags, void *handle, vm_ooffset_t foff)
1202df8bae1dSRodney W. Grimes {
1203df8bae1dSRodney W. Grimes 	boolean_t fitit;
1204fcae040bSJohn Dyson 	vm_object_t object;
1205df8bae1dSRodney W. Grimes 	struct vnode *vp = NULL;
120624a1cce3SDavid Greenman 	objtype_t type;
1207df8bae1dSRodney W. Grimes 	int rv = KERN_SUCCESS;
1208bd7e5f99SJohn Dyson 	vm_ooffset_t objsize;
1209bd7e5f99SJohn Dyson 	int docow;
1210b40ce416SJulian Elischer 	struct thread *td = curthread;
1211df8bae1dSRodney W. Grimes 
1212df8bae1dSRodney W. Grimes 	if (size == 0)
1213df8bae1dSRodney W. Grimes 		return (0);
1214df8bae1dSRodney W. Grimes 
121506cb7259SDavid Greenman 	objsize = size = round_page(size);
1216df8bae1dSRodney W. Grimes 
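	/*
	 * Refuse the request if it would push the process past its
	 * RLIMIT_VMEM (total address space) limit.
	 */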
1217070f64feSMatthew Dillon 	if (td->td_proc->p_vmspace->vm_map.size + size >
1218070f64feSMatthew Dillon 	    td->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
1219070f64feSMatthew Dillon 		return (ENOMEM);
1220070f64feSMatthew Dillon 	}
1221070f64feSMatthew Dillon 
1222df8bae1dSRodney W. Grimes 	/*
1223bc9ad247SDavid Greenman 	 * We can currently only deal with page-aligned file offsets.
1224bc9ad247SDavid Greenman 	 * The check is here rather than in the syscall because the
1225bc9ad247SDavid Greenman 	 * kernel calls this function internally for other mmapping
1226bc9ad247SDavid Greenman 	 * operations (such as in exec), and non-aligned offsets would
1227bc9ad247SDavid Greenman 	 * cause pmap inconsistencies, so we want to be sure to
1228bc9ad247SDavid Greenman 	 * disallow them in all cases.
1229bc9ad247SDavid Greenman 	 */
1230bc9ad247SDavid Greenman 	if (foff & PAGE_MASK)
1231bc9ad247SDavid Greenman 		return (EINVAL);
1232bc9ad247SDavid Greenman 
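	/*
	 * Without MAP_FIXED we are free to place the mapping anywhere
	 * ("fitit"); with it, the caller's address must already be page
	 * aligned and any existing mapping in the range is replaced, as
	 * mmap(2) specifies.
	 */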
123306cb7259SDavid Greenman 	if ((flags & MAP_FIXED) == 0) {
123406cb7259SDavid Greenman 		fitit = TRUE;
123506cb7259SDavid Greenman 		*addr = round_page(*addr);
123606cb7259SDavid Greenman 	} else {
123706cb7259SDavid Greenman 		if (*addr != trunc_page(*addr))
123806cb7259SDavid Greenman 			return (EINVAL);
123906cb7259SDavid Greenman 		fitit = FALSE;
124006cb7259SDavid Greenman 		(void) vm_map_remove(map, *addr, *addr + size);
124106cb7259SDavid Greenman 	}
124206cb7259SDavid Greenman 
1243bc9ad247SDavid Greenman 	/*
124424a1cce3SDavid Greenman 	 * Lookup/allocate object.
1245df8bae1dSRodney W. Grimes 	 */
12465f55e841SDavid Greenman 	if (flags & MAP_ANON) {
1247851c12ffSJohn Dyson 		type = OBJT_DEFAULT;
12485f55e841SDavid Greenman 		/*
12495f55e841SDavid Greenman 		 * Unnamed anonymous regions always start at 0.
12505f55e841SDavid Greenman 		 */
125167bf6868SJohn Dyson 		if (handle == NULL)
12525f55e841SDavid Greenman 			foff = 0;
12535f55e841SDavid Greenman 	} else {
1254df8bae1dSRodney W. Grimes 		vp = (struct vnode *) handle;
1255c04c996bSAlan Cox 		mtx_lock(&Giant);
1256f6b5b182SJeff Roberson 		ASSERT_VOP_LOCKED(vp, "vm_mmap");
1257df8bae1dSRodney W. Grimes 		if (vp->v_type == VCHR) {
125824a1cce3SDavid Greenman 			type = OBJT_DEVICE;
1259a5d841d4SPoul-Henning Kamp 			handle = vp->v_rdev;
126006cb7259SDavid Greenman 		} else {
126106cb7259SDavid Greenman 			struct vattr vat;
126206cb7259SDavid Greenman 			int error;
126306cb7259SDavid Greenman 
1264a854ed98SJohn Baldwin 			error = VOP_GETATTR(vp, &vat, td->td_ucred, td);
1265e4ca250dSJohn Baldwin 			if (error) {
126623955314SAlfred Perlstein 				mtx_unlock(&Giant);
126706cb7259SDavid Greenman 				return (error);
1268e4ca250dSJohn Baldwin 			}
1269bd7e5f99SJohn Dyson 			objsize = round_page(vat.va_size);
127024a1cce3SDavid Greenman 			type = OBJT_VNODE;
127100d76afeSGuido van Rooij 			/*
127200d76afeSGuido van Rooij 			 * If it is a regular file with no remaining
127300d76afeSGuido van Rooij 			 * links (unlinked), we do not need to sync it.
127400d76afeSGuido van Rooij 			 */
127500d76afeSGuido van Rooij 			if (vp->v_type == VREG && vat.va_nlink == 0) {
127600d76afeSGuido van Rooij 				flags |= MAP_NOSYNC;
127700d76afeSGuido van Rooij 			}
1278df8bae1dSRodney W. Grimes 		}
1279c04c996bSAlan Cox 		mtx_unlock(&Giant);
128006cb7259SDavid Greenman 	}
128194328e90SJohn Dyson 
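	/*
	 * An anonymous mapping gets no object here; one is allocated
	 * lazily at first fault.  Otherwise ask the pager to find or
	 * create the backing object, and request prefaulting of its
	 * resident pages (MAP_PREFAULT_PARTIAL).
	 */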
128294328e90SJohn Dyson 	if (handle == NULL) {
128394328e90SJohn Dyson 		object = NULL;
12844738fa09SAlan Cox 		docow = 0;
128594328e90SJohn Dyson 	} else {
12860a0a85b3SJohn Dyson 		object = vm_pager_allocate(type, handle, objsize, prot, foff);
1288e4ca250dSJohn Baldwin 		if (object == NULL) {
128924a1cce3SDavid Greenman 			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
1290e4ca250dSJohn Baldwin 		}
12914738fa09SAlan Cox 		docow = MAP_PREFAULT_PARTIAL;
129294328e90SJohn Dyson 	}
1293df8bae1dSRodney W. Grimes 
12945850152dSJohn Dyson 	/*
12958f2ec877SDavid Greenman 	 * Force device mappings to be shared.
12965850152dSJohn Dyson 	 */
12976900a17cSMaxime Henrion 	if (type == OBJT_DEVICE) {
12988f2ec877SDavid Greenman 		flags &= ~(MAP_PRIVATE|MAP_COPY);
12995850152dSJohn Dyson 		flags |= MAP_SHARED;
13008f2ec877SDavid Greenman 	}
13015850152dSJohn Dyson 
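	/*
	 * Translate the remaining mmap(2) flags into copy-on-write
	 * control (docow) flags for vm_map_find()/vm_map_stack().
	 */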
13024f79d873SMatthew Dillon 	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
13034738fa09SAlan Cox 		docow |= MAP_COPY_ON_WRITE;
13044f79d873SMatthew Dillon 	if (flags & MAP_NOSYNC)
13054f79d873SMatthew Dillon 		docow |= MAP_DISABLE_SYNCER;
13069730a5daSPaul Saab 	if (flags & MAP_NOCORE)
13079730a5daSPaul Saab 		docow |= MAP_DISABLE_COREDUMP;
13085850152dSJohn Dyson 
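	/*
	 * On platforms where a readable mapping is implicitly
	 * executable, widen both the requested and maximum protections
	 * to match.
	 */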
1309d0aea04fSJohn Dyson #if defined(VM_PROT_READ_IS_EXEC)
1310d0aea04fSJohn Dyson 	if (prot & VM_PROT_READ)
1311d0aea04fSJohn Dyson 		prot |= VM_PROT_EXECUTE;
1312d0aea04fSJohn Dyson 
1313d0aea04fSJohn Dyson 	if (maxprot & VM_PROT_READ)
1314d0aea04fSJohn Dyson 		maxprot |= VM_PROT_EXECUTE;
1315d0aea04fSJohn Dyson #endif
1316d0aea04fSJohn Dyson 
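	/*
	 * For non-fixed mappings, let the pmap suggest a preferred
	 * starting address, e.g. to avoid virtual-address cache
	 * aliasing.
	 */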
1317e4ca250dSJohn Baldwin 	if (fitit)
13180a0a85b3SJohn Dyson 		*addr = pmap_addr_hint(object, *addr, size);
13190a0a85b3SJohn Dyson 
13202267af78SJulian Elischer 	if (flags & MAP_STACK)
13212267af78SJulian Elischer 		rv = vm_map_stack(map, *addr, size, prot, maxprot, docow);
13232267af78SJulian Elischer 	else
1324bd7e5f99SJohn Dyson 		rv = vm_map_find(map, object, foff, addr, size, fitit,
1325bd7e5f99SJohn Dyson 				 prot, maxprot, docow);
1326bd7e5f99SJohn Dyson 
1327d2c60af8SMatthew Dillon 	if (rv != KERN_SUCCESS) {
13287fb0c17eSDavid Greenman 		/*
132924a1cce3SDavid Greenman 		 * Drop the object reference.  This destroys the object
133024a1cce3SDavid Greenman 		 * if it is an unnamed anonymous mapping, or a named
133124a1cce3SDavid Greenman 		 * anonymous mapping with no other references.
13327fb0c17eSDavid Greenman 		 */
1333df8bae1dSRodney W. Grimes 		vm_object_deallocate(object);
1334d2c60af8SMatthew Dillon 	} else if (flags & MAP_SHARED) {
1335df8bae1dSRodney W. Grimes 		/*
1336df8bae1dSRodney W. Grimes 		 * Shared memory is also shared with children.
1337df8bae1dSRodney W. Grimes 		 */
1338df8bae1dSRodney W. Grimes 		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
1339e4ca250dSJohn Baldwin 		if (rv != KERN_SUCCESS)
13407fb0c17eSDavid Greenman 			(void) vm_map_remove(map, *addr, *addr + size);
1341df8bae1dSRodney W. Grimes 	}
1342abd498aaSBruce M Simpson 
1343abd498aaSBruce M Simpson 	/*
1344abd498aaSBruce M Simpson 	 * If the process has requested that all future mappings
1345abd498aaSBruce M Simpson 	 * be wired, then heed this.
1346abd498aaSBruce M Simpson 	 */
1347abd498aaSBruce M Simpson 	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
1348abd498aaSBruce M Simpson 		vm_map_wire(map, *addr, *addr + size,
1349abd498aaSBruce M Simpson 		    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
1350abd498aaSBruce M Simpson 
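	/*
	 * Finally, translate the Mach-style KERN_* status from the VM
	 * layer into an errno value for the caller.
	 */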
1351df8bae1dSRodney W. Grimes 	switch (rv) {
1352df8bae1dSRodney W. Grimes 	case KERN_SUCCESS:
1353df8bae1dSRodney W. Grimes 		return (0);
1354df8bae1dSRodney W. Grimes 	case KERN_INVALID_ADDRESS:
1355df8bae1dSRodney W. Grimes 	case KERN_NO_SPACE:
1356df8bae1dSRodney W. Grimes 		return (ENOMEM);
1357df8bae1dSRodney W. Grimes 	case KERN_PROTECTION_FAILURE:
1358df8bae1dSRodney W. Grimes 		return (EACCES);
1359df8bae1dSRodney W. Grimes 	default:
1360df8bae1dSRodney W. Grimes 		return (EINVAL);
1361df8bae1dSRodney W. Grimes 	}
1362df8bae1dSRodney W. Grimes }
1363