xref: /freebsd/sys/vm/vm_mmap.c (revision 11caded34f959dae72d4415f1e9f84d0dfa32e98)
1df8bae1dSRodney W. Grimes /*
2df8bae1dSRodney W. Grimes  * Copyright (c) 1988 University of Utah.
3df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
4df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
5df8bae1dSRodney W. Grimes  *
6df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
7df8bae1dSRodney W. Grimes  * the Systems Programming Group of the University of Utah Computer
8df8bae1dSRodney W. Grimes  * Science Department.
9df8bae1dSRodney W. Grimes  *
10df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
11df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
12df8bae1dSRodney W. Grimes  * are met:
13df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
15df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
17df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
18df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
195929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
20df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
21df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
22df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
23df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
24df8bae1dSRodney W. Grimes  *    without specific prior written permission.
25df8bae1dSRodney W. Grimes  *
26df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
37df8bae1dSRodney W. Grimes  *
38df8bae1dSRodney W. Grimes  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
39df8bae1dSRodney W. Grimes  *
40df8bae1dSRodney W. Grimes  *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
41c3aac50fSPeter Wemm  * $FreeBSD$
42df8bae1dSRodney W. Grimes  */
43df8bae1dSRodney W. Grimes 
44df8bae1dSRodney W. Grimes /*
45df8bae1dSRodney W. Grimes  * Mapped file (mmap) interface to VM
46df8bae1dSRodney W. Grimes  */
47df8bae1dSRodney W. Grimes 
485591b823SEivind Eklund #include "opt_compat.h"
49e9822d92SJoerg Wunsch 
50df8bae1dSRodney W. Grimes #include <sys/param.h>
51df8bae1dSRodney W. Grimes #include <sys/systm.h>
52fb919e4dSMark Murray #include <sys/kernel.h>
53fb919e4dSMark Murray #include <sys/lock.h>
5423955314SAlfred Perlstein #include <sys/mutex.h>
55d2d3e875SBruce Evans #include <sys/sysproto.h>
56df8bae1dSRodney W. Grimes #include <sys/filedesc.h>
57df8bae1dSRodney W. Grimes #include <sys/proc.h>
58df8bae1dSRodney W. Grimes #include <sys/vnode.h>
593ac4d1efSBruce Evans #include <sys/fcntl.h>
60df8bae1dSRodney W. Grimes #include <sys/file.h>
61df8bae1dSRodney W. Grimes #include <sys/mman.h>
62df8bae1dSRodney W. Grimes #include <sys/conf.h>
634183b6b6SPeter Wemm #include <sys/stat.h>
64efeaf95aSDavid Greenman #include <sys/vmmeter.h>
651f6889a1SMatthew Dillon #include <sys/sysctl.h>
66df8bae1dSRodney W. Grimes 
67df8bae1dSRodney W. Grimes #include <vm/vm.h>
68efeaf95aSDavid Greenman #include <vm/vm_param.h>
69efeaf95aSDavid Greenman #include <vm/pmap.h>
70efeaf95aSDavid Greenman #include <vm/vm_map.h>
71efeaf95aSDavid Greenman #include <vm/vm_object.h>
721c7c3c6aSMatthew Dillon #include <vm/vm_page.h>
73df8bae1dSRodney W. Grimes #include <vm/vm_pager.h>
74b5e8ce9fSBruce Evans #include <vm/vm_pageout.h>
75efeaf95aSDavid Greenman #include <vm/vm_extern.h>
76867a482dSJohn Dyson #include <vm/vm_page.h>
771f6889a1SMatthew Dillon #include <vm/vm_kern.h>
78df8bae1dSRodney W. Grimes 
79d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
80df8bae1dSRodney W. Grimes struct sbrk_args {
81df8bae1dSRodney W. Grimes 	int incr;
82df8bae1dSRodney W. Grimes };
83d2d3e875SBruce Evans #endif
840d94caffSDavid Greenman 
851f6889a1SMatthew Dillon static int max_proc_mmap;
861f6889a1SMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");
871f6889a1SMatthew Dillon 
881f6889a1SMatthew Dillon /*
891f6889a1SMatthew Dillon  * Set the maximum number of vm_map_entry structures per process.  Roughly
901f6889a1SMatthew Dillon  * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
911f6889a1SMatthew Dillon  * of our KVM malloc space still results in generous limits.  We want a
921f6889a1SMatthew Dillon  * default that is good enough to prevent the kernel running out of resources
931f6889a1SMatthew Dillon  * if attacked from compromised user account but generous enough such that
941f6889a1SMatthew Dillon  * multi-threaded processes are not unduly inconvenienced.
951f6889a1SMatthew Dillon  */
9611caded3SAlfred Perlstein static void vmmapentry_rsrc_init(void *);
971f6889a1SMatthew Dillon SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)
981f6889a1SMatthew Dillon 
991f6889a1SMatthew Dillon static void
1001f6889a1SMatthew Dillon vmmapentry_rsrc_init(dummy)
1011f6889a1SMatthew Dillon 	void *dummy;
1021f6889a1SMatthew Dillon {
1031f6889a1SMatthew Dillon 	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
1041f6889a1SMatthew Dillon 	max_proc_mmap /= 100;
1051f6889a1SMatthew Dillon }
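/*
 * Illustrative arithmetic only; the numbers below are assumptions for the
 * sake of a worked example and are not taken from this file.  With
 * vm_kmem_size at 200 MB and a vm_map_entry of roughly 64 bytes, the
 * default works out to (200*1024*1024 / 64) / 100 = 32768 map entries per
 * process, before the scaling by vm_refcnt applied in mmap() below.
 */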
1061f6889a1SMatthew Dillon 
107d2c60af8SMatthew Dillon /*
108d2c60af8SMatthew Dillon  * MPSAFE
109d2c60af8SMatthew Dillon  */
110df8bae1dSRodney W. Grimes /* ARGSUSED */
111df8bae1dSRodney W. Grimes int
112b40ce416SJulian Elischer sbrk(td, uap)
113b40ce416SJulian Elischer 	struct thread *td;
114df8bae1dSRodney W. Grimes 	struct sbrk_args *uap;
115df8bae1dSRodney W. Grimes {
116df8bae1dSRodney W. Grimes 	/* Not yet implemented */
1170cddd8f0SMatthew Dillon 	/* mtx_lock(&Giant); */
1180cddd8f0SMatthew Dillon 	/* mtx_unlock(&Giant); */
119df8bae1dSRodney W. Grimes 	return (EOPNOTSUPP);
120df8bae1dSRodney W. Grimes }
121df8bae1dSRodney W. Grimes 
122d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
123df8bae1dSRodney W. Grimes struct sstk_args {
124df8bae1dSRodney W. Grimes 	int incr;
125df8bae1dSRodney W. Grimes };
126d2d3e875SBruce Evans #endif
1270d94caffSDavid Greenman 
128d2c60af8SMatthew Dillon /*
129d2c60af8SMatthew Dillon  * MPSAFE
130d2c60af8SMatthew Dillon  */
131df8bae1dSRodney W. Grimes /* ARGSUSED */
132df8bae1dSRodney W. Grimes int
133b40ce416SJulian Elischer sstk(td, uap)
134b40ce416SJulian Elischer 	struct thread *td;
135df8bae1dSRodney W. Grimes 	struct sstk_args *uap;
136df8bae1dSRodney W. Grimes {
137df8bae1dSRodney W. Grimes 	/* Not yet implemented */
1380cddd8f0SMatthew Dillon 	/* mtx_lock(&Giant); */
1390cddd8f0SMatthew Dillon 	/* mtx_unlock(&Giant); */
140df8bae1dSRodney W. Grimes 	return (EOPNOTSUPP);
141df8bae1dSRodney W. Grimes }
142df8bae1dSRodney W. Grimes 
143df8bae1dSRodney W. Grimes #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
144d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
145df8bae1dSRodney W. Grimes struct getpagesize_args {
146df8bae1dSRodney W. Grimes 	int dummy;
147df8bae1dSRodney W. Grimes };
148d2d3e875SBruce Evans #endif
1490d94caffSDavid Greenman 
150df8bae1dSRodney W. Grimes /* ARGSUSED */
151df8bae1dSRodney W. Grimes int
152b40ce416SJulian Elischer ogetpagesize(td, uap)
153b40ce416SJulian Elischer 	struct thread *td;
154df8bae1dSRodney W. Grimes 	struct getpagesize_args *uap;
155df8bae1dSRodney W. Grimes {
1560cddd8f0SMatthew Dillon 	/* MP SAFE */
157b40ce416SJulian Elischer 	td->td_retval[0] = PAGE_SIZE;
158df8bae1dSRodney W. Grimes 	return (0);
159df8bae1dSRodney W. Grimes }
160df8bae1dSRodney W. Grimes #endif				/* COMPAT_43 || COMPAT_SUNOS */
161df8bae1dSRodney W. Grimes 
16254f42e4bSPeter Wemm 
16354f42e4bSPeter Wemm /*
16454f42e4bSPeter Wemm  * Memory Map (mmap) system call.  Note that the file offset
16554f42e4bSPeter Wemm  * and address are allowed to be NOT page aligned, though if
16654f42e4bSPeter Wemm  * the MAP_FIXED flag is set, both must have the same remainder
16754f42e4bSPeter Wemm  * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
16854f42e4bSPeter Wemm  * page-aligned, the actual mapping starts at trunc_page(addr)
16954f42e4bSPeter Wemm  * and the return value is adjusted up by the page offset.
170b4309055SMatthew Dillon  *
171b4309055SMatthew Dillon  * Generally speaking, only character devices which are themselves
172b4309055SMatthew Dillon  * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
173b4309055SMatthew Dillon  * there would be no cache coherency between a descriptor and a VM mapping
174b4309055SMatthew Dillon  * both to the same character device.
175b4309055SMatthew Dillon  *
176b4309055SMatthew Dillon  * Block devices can be mmap'd no matter what they represent.  Cache coherency
177b4309055SMatthew Dillon  * is maintained as long as you do not write directly to the underlying
178b4309055SMatthew Dillon  * character device.
17954f42e4bSPeter Wemm  */
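/*
 * A minimal userland sketch of the behaviour described above; it is
 * illustrative only and assumes a regular file descriptor "fd" opened
 * O_RDONLY and a PAGE_SIZE of 4096:
 *
 *	off_t pos = 4096 + 100;		(not page aligned)
 *	char *p = mmap(NULL, 200, PROT_READ, MAP_SHARED, fd, pos);
 *
 * The mapping actually starts at trunc_page(pos) == 4096, and the pointer
 * returned is advanced by the 100-byte page offset, so p[0] is byte 4196
 * of the file.  With MAP_FIXED, the hint passed in addr must share that
 * same 100-byte remainder modulo PAGE_SIZE or the call fails with EINVAL.
 */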
180d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
181df8bae1dSRodney W. Grimes struct mmap_args {
182651bb817SAlexander Langer 	void *addr;
183df8bae1dSRodney W. Grimes 	size_t len;
184df8bae1dSRodney W. Grimes 	int prot;
185df8bae1dSRodney W. Grimes 	int flags;
186df8bae1dSRodney W. Grimes 	int fd;
187df8bae1dSRodney W. Grimes 	long pad;
188df8bae1dSRodney W. Grimes 	off_t pos;
189df8bae1dSRodney W. Grimes };
190d2d3e875SBruce Evans #endif
191df8bae1dSRodney W. Grimes 
192d2c60af8SMatthew Dillon /*
193d2c60af8SMatthew Dillon  * MPSAFE
194d2c60af8SMatthew Dillon  */
195df8bae1dSRodney W. Grimes int
196b40ce416SJulian Elischer mmap(td, uap)
197b40ce416SJulian Elischer 	struct thread *td;
19854d92145SMatthew Dillon 	struct mmap_args *uap;
199df8bae1dSRodney W. Grimes {
20054d92145SMatthew Dillon 	struct file *fp = NULL;
201df8bae1dSRodney W. Grimes 	struct vnode *vp;
202df8bae1dSRodney W. Grimes 	vm_offset_t addr;
2039154ee6aSPeter Wemm 	vm_size_t size, pageoff;
204df8bae1dSRodney W. Grimes 	vm_prot_t prot, maxprot;
205651bb817SAlexander Langer 	void *handle;
206df8bae1dSRodney W. Grimes 	int flags, error;
207c8bdd56bSGuido van Rooij 	int disablexworkaround;
20854f42e4bSPeter Wemm 	off_t pos;
209b40ce416SJulian Elischer 	struct vmspace *vms = td->td_proc->p_vmspace;
2109ff5ce6bSBoris Popov 	vm_object_t obj;
211df8bae1dSRodney W. Grimes 
21254f42e4bSPeter Wemm 	addr = (vm_offset_t) uap->addr;
21354f42e4bSPeter Wemm 	size = uap->len;
214df8bae1dSRodney W. Grimes 	prot = uap->prot & VM_PROT_ALL;
215df8bae1dSRodney W. Grimes 	flags = uap->flags;
21654f42e4bSPeter Wemm 	pos = uap->pos;
21754f42e4bSPeter Wemm 
218426da3bcSAlfred Perlstein 	fp = NULL;
21954f42e4bSPeter Wemm 	/* make sure mapping fits into numeric range etc */
220fc565456SDmitrij Tejblum 	if ((ssize_t) uap->len < 0 ||
22154f42e4bSPeter Wemm 	    ((flags & MAP_ANON) && uap->fd != -1))
222df8bae1dSRodney W. Grimes 		return (EINVAL);
2239154ee6aSPeter Wemm 
2242267af78SJulian Elischer 	if (flags & MAP_STACK) {
2252267af78SJulian Elischer 		if ((uap->fd != -1) ||
2262267af78SJulian Elischer 		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
2272267af78SJulian Elischer 			return (EINVAL);
2282267af78SJulian Elischer 		flags |= MAP_ANON;
2292267af78SJulian Elischer 		pos = 0;
2302907af2aSJulian Elischer 	}
2312907af2aSJulian Elischer 
2329154ee6aSPeter Wemm 	/*
23354f42e4bSPeter Wemm 	 * Align the file position to a page boundary,
23454f42e4bSPeter Wemm 	 * and save its page offset component.
2359154ee6aSPeter Wemm 	 */
23654f42e4bSPeter Wemm 	pageoff = (pos & PAGE_MASK);
23754f42e4bSPeter Wemm 	pos -= pageoff;
23854f42e4bSPeter Wemm 
23954f42e4bSPeter Wemm 	/* Adjust size for rounding (on both ends). */
24054f42e4bSPeter Wemm 	size += pageoff;			/* low end... */
24154f42e4bSPeter Wemm 	size = (vm_size_t) round_page(size);	/* hi end */
2429154ee6aSPeter Wemm 
243df8bae1dSRodney W. Grimes 	/*
2440d94caffSDavid Greenman 	 * Check for illegal addresses.  Watch out for address wrap... Note
2450d94caffSDavid Greenman 	 * that VM_*_ADDRESS are not constants due to casts (argh).
246df8bae1dSRodney W. Grimes 	 */
247df8bae1dSRodney W. Grimes 	if (flags & MAP_FIXED) {
24854f42e4bSPeter Wemm 		/*
24954f42e4bSPeter Wemm 		 * The specified address must have the same remainder
25054f42e4bSPeter Wemm 		 * as the file offset taken modulo PAGE_SIZE, so it
25154f42e4bSPeter Wemm 		 * should be aligned after adjustment by pageoff.
25254f42e4bSPeter Wemm 		 */
25354f42e4bSPeter Wemm 		addr -= pageoff;
25454f42e4bSPeter Wemm 		if (addr & PAGE_MASK)
25554f42e4bSPeter Wemm 			return (EINVAL);
25654f42e4bSPeter Wemm 		/* Address range must be all in user VM space. */
257bbc0ec52SDavid Greenman 		if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
258df8bae1dSRodney W. Grimes 			return (EINVAL);
25926f9a767SRodney W. Grimes #ifndef i386
260df8bae1dSRodney W. Grimes 		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
261df8bae1dSRodney W. Grimes 			return (EINVAL);
26226f9a767SRodney W. Grimes #endif
263bbc0ec52SDavid Greenman 		if (addr + size < addr)
264df8bae1dSRodney W. Grimes 			return (EINVAL);
265df8bae1dSRodney W. Grimes 	}
266df8bae1dSRodney W. Grimes 	/*
26754f42e4bSPeter Wemm 	 * XXX for non-fixed mappings where no hint is provided or
26854f42e4bSPeter Wemm 	 * the hint would fall in the potential heap space,
26954f42e4bSPeter Wemm 	 * place it after the end of the largest possible heap.
270df8bae1dSRodney W. Grimes 	 *
27154f42e4bSPeter Wemm 	 * There should really be a pmap call to determine a reasonable
27254f42e4bSPeter Wemm 	 * location.
273df8bae1dSRodney W. Grimes 	 */
274d28ab90fSLuoqi Chen 	else if (addr == 0 ||
2751f6889a1SMatthew Dillon 	    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
276cbc89bfbSPaul Saab 	     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
277cbc89bfbSPaul Saab 		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
27854f42e4bSPeter Wemm 
2790cddd8f0SMatthew Dillon 	mtx_lock(&Giant);	/* syscall marked mp-safe but isn't */
280df8bae1dSRodney W. Grimes 	if (flags & MAP_ANON) {
281df8bae1dSRodney W. Grimes 		/*
282df8bae1dSRodney W. Grimes 		 * Mapping blank space is trivial.
283df8bae1dSRodney W. Grimes 		 */
284df8bae1dSRodney W. Grimes 		handle = NULL;
285df8bae1dSRodney W. Grimes 		maxprot = VM_PROT_ALL;
28654f42e4bSPeter Wemm 		pos = 0;
287df8bae1dSRodney W. Grimes 	} else {
288df8bae1dSRodney W. Grimes 		/*
2890d94caffSDavid Greenman 		 * Mapping file, get fp for validation. Obtain vnode and make
2900d94caffSDavid Greenman 		 * sure it is of appropriate type.
291426da3bcSAlfred Perlstein 		 * Don't let the descriptor disappear on us if we block.
292df8bae1dSRodney W. Grimes 		 */
293a4db4953SAlfred Perlstein 		if ((error = fget(td, uap->fd, &fp)) != 0)
294426da3bcSAlfred Perlstein 			goto done;
295e4ca250dSJohn Baldwin 		if (fp->f_type != DTYPE_VNODE) {
296d2c60af8SMatthew Dillon 			error = EINVAL;
297426da3bcSAlfred Perlstein 			goto done;
298e4ca250dSJohn Baldwin 		}
299279d7226SMatthew Dillon 
300279d7226SMatthew Dillon 		/*
301aa543039SGarrett Wollman 		 * POSIX shared-memory objects are defined to have
302aa543039SGarrett Wollman 		 * kernel persistence, and are not defined to support
303aa543039SGarrett Wollman 		 * read(2)/write(2) -- or even open(2).  Thus, we can
304aa543039SGarrett Wollman 		 * use MAP_ASYNC to trade on-disk coherence for speed.
305aa543039SGarrett Wollman 		 * The shm_open(3) library routine turns on the FPOSIXSHM
306aa543039SGarrett Wollman 		 * flag to request this behavior.
307aa543039SGarrett Wollman 		 */
308aa543039SGarrett Wollman 		if (fp->f_flag & FPOSIXSHM)
309aa543039SGarrett Wollman 			flags |= MAP_NOSYNC;
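		/*
		 * For illustration only (not part of this file's logic): the
		 * usual userland sequence that reaches this path is roughly
		 *
		 *	fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
		 *	ftruncate(fd, len);
		 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 *	    MAP_SHARED, fd, 0);
		 *
		 * where "/myshm" and "len" are made-up names.  shm_open(3)
		 * sets FPOSIXSHM on the descriptor, so the mapping above is
		 * silently promoted to MAP_NOSYNC here.
		 */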
310df8bae1dSRodney W. Grimes 		vp = (struct vnode *) fp->f_data;
311e4ca250dSJohn Baldwin 		if (vp->v_type != VREG && vp->v_type != VCHR) {
312e4ca250dSJohn Baldwin 			error = EINVAL;
313e4ca250dSJohn Baldwin 			goto done;
314e4ca250dSJohn Baldwin 		}
3159ff5ce6bSBoris Popov 		if (vp->v_type == VREG) {
3169ff5ce6bSBoris Popov 			/*
3179ff5ce6bSBoris Popov 			 * Get the proper underlying object
3189ff5ce6bSBoris Popov 			 */
3190cddd8f0SMatthew Dillon 			if (VOP_GETVOBJECT(vp, &obj) != 0) {
3200cddd8f0SMatthew Dillon 				error = EINVAL;
3210cddd8f0SMatthew Dillon 				goto done;
3220cddd8f0SMatthew Dillon 			}
3239ff5ce6bSBoris Popov 			vp = (struct vnode*)obj->handle;
3249ff5ce6bSBoris Popov 		}
325df8bae1dSRodney W. Grimes 		/*
3260d94caffSDavid Greenman 		 * XXX hack to handle use of /dev/zero to map anon memory (ala
3270d94caffSDavid Greenman 		 * SunOS).
328df8bae1dSRodney W. Grimes 		 */
3292589f249SMark Murray 		if ((vp->v_type == VCHR) &&
3302589f249SMark Murray 		    (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON)) {
331df8bae1dSRodney W. Grimes 			handle = NULL;
332df8bae1dSRodney W. Grimes 			maxprot = VM_PROT_ALL;
333df8bae1dSRodney W. Grimes 			flags |= MAP_ANON;
33454f42e4bSPeter Wemm 			pos = 0;
335df8bae1dSRodney W. Grimes 		} else {
336df8bae1dSRodney W. Grimes 			/*
337c8bdd56bSGuido van Rooij 			 * cdevs do not provide private mappings of any kind.
338c8bdd56bSGuido van Rooij 			 */
339c8bdd56bSGuido van Rooij 			/*
340c8bdd56bSGuido van Rooij 			 * However, for XIG X server to continue to work,
341c8bdd56bSGuido van Rooij 			 * we should allow the superuser to do it anyway.
342c8bdd56bSGuido van Rooij 			 * We only allow it at securelevel < 1.
343c8bdd56bSGuido van Rooij 			 * (Because the XIG X server writes directly to video
344c8bdd56bSGuido van Rooij 			 * memory via /dev/mem, it should never work at any
345c8bdd56bSGuido van Rooij 			 * other securelevel.)
346c8bdd56bSGuido van Rooij 			 * XXX this will have to go
347c8bdd56bSGuido van Rooij 			 */
348a854ed98SJohn Baldwin 			if (securelevel_ge(td->td_ucred, 1))
349c8bdd56bSGuido van Rooij 				disablexworkaround = 1;
350c8bdd56bSGuido van Rooij 			else
351b40ce416SJulian Elischer 				disablexworkaround = suser_td(td);
352c8bdd56bSGuido van Rooij 			if (vp->v_type == VCHR && disablexworkaround &&
353279d7226SMatthew Dillon 			    (flags & (MAP_PRIVATE|MAP_COPY))) {
354279d7226SMatthew Dillon 				error = EINVAL;
355279d7226SMatthew Dillon 				goto done;
356279d7226SMatthew Dillon 			}
357c8bdd56bSGuido van Rooij 			/*
358df8bae1dSRodney W. Grimes 			 * Ensure that file and memory protections are
359df8bae1dSRodney W. Grimes 			 * compatible.  Note that we only worry about
360df8bae1dSRodney W. Grimes 			 * writability if mapping is shared; in this case,
361df8bae1dSRodney W. Grimes 			 * current and max prot are dictated by the open file.
362df8bae1dSRodney W. Grimes 			 * XXX use the vnode instead?  Problem is: what
3630d94caffSDavid Greenman 			 * credentials do we use for determination? What if
3640d94caffSDavid Greenman 			 * proc does a setuid?
365df8bae1dSRodney W. Grimes 			 */
366df8bae1dSRodney W. Grimes 			maxprot = VM_PROT_EXECUTE;	/* ??? */
367279d7226SMatthew Dillon 			if (fp->f_flag & FREAD) {
368df8bae1dSRodney W. Grimes 				maxprot |= VM_PROT_READ;
369279d7226SMatthew Dillon 			} else if (prot & PROT_READ) {
370279d7226SMatthew Dillon 				error = EACCES;
371279d7226SMatthew Dillon 				goto done;
372279d7226SMatthew Dillon 			}
373c8bdd56bSGuido van Rooij 			/*
374c8bdd56bSGuido van Rooij 			 * If we are sharing potential changes (either via
375c8bdd56bSGuido van Rooij 			 * MAP_SHARED or via the implicit sharing of character
376c8bdd56bSGuido van Rooij 			 * device mappings), and we are trying to get write
377c8bdd56bSGuido van Rooij 			 * permission although we opened it without asking
378c8bdd56bSGuido van Rooij 			 * for it, bail out.  Check for superuser, only if
379c8bdd56bSGuido van Rooij 			 * we're at securelevel < 1, to allow the XIG X server
380c8bdd56bSGuido van Rooij 			 * to continue to work.
381c8bdd56bSGuido van Rooij 			 */
38205feb99fSGuido van Rooij 			if ((flags & MAP_SHARED) != 0 ||
38305feb99fSGuido van Rooij 			    (vp->v_type == VCHR && disablexworkaround)) {
38405feb99fSGuido van Rooij 				if ((fp->f_flag & FWRITE) != 0) {
3854183b6b6SPeter Wemm 					struct vattr va;
38605feb99fSGuido van Rooij 					if ((error =
38705feb99fSGuido van Rooij 					    VOP_GETATTR(vp, &va,
388a854ed98SJohn Baldwin 						        td->td_ucred, td))) {
389279d7226SMatthew Dillon 						goto done;
390279d7226SMatthew Dillon 					}
39105feb99fSGuido van Rooij 					if ((va.va_flags &
392279d7226SMatthew Dillon 					   (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0) {
393df8bae1dSRodney W. Grimes 						maxprot |= VM_PROT_WRITE;
394279d7226SMatthew Dillon 					} else if (prot & PROT_WRITE) {
395279d7226SMatthew Dillon 						error = EPERM;
396279d7226SMatthew Dillon 						goto done;
397279d7226SMatthew Dillon 					}
398279d7226SMatthew Dillon 				} else if ((prot & PROT_WRITE) != 0) {
399279d7226SMatthew Dillon 					error = EACCES;
400279d7226SMatthew Dillon 					goto done;
401279d7226SMatthew Dillon 				}
402279d7226SMatthew Dillon 			} else {
40305feb99fSGuido van Rooij 				maxprot |= VM_PROT_WRITE;
404279d7226SMatthew Dillon 			}
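			/*
			 * Illustrative consequence of the checks above (not
			 * new logic): mmap(..., PROT_WRITE, MAP_SHARED, fd, 0)
			 * on a descriptor opened O_RDONLY fails with EACCES,
			 * while the same request with MAP_PRIVATE succeeds,
			 * because private copy-on-write changes never reach
			 * the file.
			 */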
40505feb99fSGuido van Rooij 
406651bb817SAlexander Langer 			handle = (void *)vp;
407df8bae1dSRodney W. Grimes 		}
408df8bae1dSRodney W. Grimes 	}
4091f6889a1SMatthew Dillon 
4101f6889a1SMatthew Dillon 	/*
4111f6889a1SMatthew Dillon 	 * Do not allow more than a certain number of vm_map_entry structures
4121f6889a1SMatthew Dillon 	 * per process.  Scale with the number of rforks sharing the map
4131f6889a1SMatthew Dillon 	 * to make the limit reasonable for threads.
4141f6889a1SMatthew Dillon 	 */
4151f6889a1SMatthew Dillon 	if (max_proc_mmap &&
4161f6889a1SMatthew Dillon 	    vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt) {
417279d7226SMatthew Dillon 		error = ENOMEM;
418279d7226SMatthew Dillon 		goto done;
4191f6889a1SMatthew Dillon 	}
4201f6889a1SMatthew Dillon 
421e4ca250dSJohn Baldwin 	mtx_unlock(&Giant);
4221f6889a1SMatthew Dillon 	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
42354f42e4bSPeter Wemm 	    flags, handle, pos);
424df8bae1dSRodney W. Grimes 	if (error == 0)
425b40ce416SJulian Elischer 		td->td_retval[0] = (register_t) (addr + pageoff);
426e4ca250dSJohn Baldwin 	mtx_lock(&Giant);
427279d7226SMatthew Dillon done:
428279d7226SMatthew Dillon 	if (fp)
429b40ce416SJulian Elischer 		fdrop(fp, td);
430e4ca250dSJohn Baldwin 	mtx_unlock(&Giant);
431df8bae1dSRodney W. Grimes 	return (error);
432df8bae1dSRodney W. Grimes }
433df8bae1dSRodney W. Grimes 
43405f0fdd2SPoul-Henning Kamp #ifdef COMPAT_43
435d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
43605f0fdd2SPoul-Henning Kamp struct ommap_args {
43705f0fdd2SPoul-Henning Kamp 	caddr_t addr;
43805f0fdd2SPoul-Henning Kamp 	int len;
43905f0fdd2SPoul-Henning Kamp 	int prot;
44005f0fdd2SPoul-Henning Kamp 	int flags;
44105f0fdd2SPoul-Henning Kamp 	int fd;
44205f0fdd2SPoul-Henning Kamp 	long pos;
44305f0fdd2SPoul-Henning Kamp };
444d2d3e875SBruce Evans #endif
44505f0fdd2SPoul-Henning Kamp int
446b40ce416SJulian Elischer ommap(td, uap)
447b40ce416SJulian Elischer 	struct thread *td;
44854d92145SMatthew Dillon 	struct ommap_args *uap;
44905f0fdd2SPoul-Henning Kamp {
45005f0fdd2SPoul-Henning Kamp 	struct mmap_args nargs;
45105f0fdd2SPoul-Henning Kamp 	static const char cvtbsdprot[8] = {
45205f0fdd2SPoul-Henning Kamp 		0,
45305f0fdd2SPoul-Henning Kamp 		PROT_EXEC,
45405f0fdd2SPoul-Henning Kamp 		PROT_WRITE,
45505f0fdd2SPoul-Henning Kamp 		PROT_EXEC | PROT_WRITE,
45605f0fdd2SPoul-Henning Kamp 		PROT_READ,
45705f0fdd2SPoul-Henning Kamp 		PROT_EXEC | PROT_READ,
45805f0fdd2SPoul-Henning Kamp 		PROT_WRITE | PROT_READ,
45905f0fdd2SPoul-Henning Kamp 		PROT_EXEC | PROT_WRITE | PROT_READ,
46005f0fdd2SPoul-Henning Kamp 	};
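	/*
	 * cvtbsdprot[] is indexed by the old 4.3BSD protection value, in
	 * which (as the table rows show) the low three bits meant
	 * exec = 0x1, write = 0x2 and read = 0x4; each entry is the
	 * equivalent combination of the modern PROT_* flags.
	 */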
4610d94caffSDavid Greenman 
46205f0fdd2SPoul-Henning Kamp #define	OMAP_ANON	0x0002
46305f0fdd2SPoul-Henning Kamp #define	OMAP_COPY	0x0020
46405f0fdd2SPoul-Henning Kamp #define	OMAP_SHARED	0x0010
46505f0fdd2SPoul-Henning Kamp #define	OMAP_FIXED	0x0100
46605f0fdd2SPoul-Henning Kamp 
46705f0fdd2SPoul-Henning Kamp 	nargs.addr = uap->addr;
46805f0fdd2SPoul-Henning Kamp 	nargs.len = uap->len;
46905f0fdd2SPoul-Henning Kamp 	nargs.prot = cvtbsdprot[uap->prot & 0x7];
47005f0fdd2SPoul-Henning Kamp 	nargs.flags = 0;
47105f0fdd2SPoul-Henning Kamp 	if (uap->flags & OMAP_ANON)
47205f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_ANON;
47305f0fdd2SPoul-Henning Kamp 	if (uap->flags & OMAP_COPY)
47405f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_COPY;
47505f0fdd2SPoul-Henning Kamp 	if (uap->flags & OMAP_SHARED)
47605f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_SHARED;
47705f0fdd2SPoul-Henning Kamp 	else
47805f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_PRIVATE;
47905f0fdd2SPoul-Henning Kamp 	if (uap->flags & OMAP_FIXED)
48005f0fdd2SPoul-Henning Kamp 		nargs.flags |= MAP_FIXED;
48105f0fdd2SPoul-Henning Kamp 	nargs.fd = uap->fd;
48205f0fdd2SPoul-Henning Kamp 	nargs.pos = uap->pos;
483b40ce416SJulian Elischer 	return (mmap(td, &nargs));
48405f0fdd2SPoul-Henning Kamp }
48505f0fdd2SPoul-Henning Kamp #endif				/* COMPAT_43 */
48605f0fdd2SPoul-Henning Kamp 
48705f0fdd2SPoul-Henning Kamp 
488d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
489df8bae1dSRodney W. Grimes struct msync_args {
490651bb817SAlexander Langer 	void *addr;
491df8bae1dSRodney W. Grimes 	int len;
492e6c6af11SDavid Greenman 	int flags;
493df8bae1dSRodney W. Grimes };
494d2d3e875SBruce Evans #endif
495d2c60af8SMatthew Dillon /*
496d2c60af8SMatthew Dillon  * MPSAFE
497d2c60af8SMatthew Dillon  */
498df8bae1dSRodney W. Grimes int
499b40ce416SJulian Elischer msync(td, uap)
500b40ce416SJulian Elischer 	struct thread *td;
501df8bae1dSRodney W. Grimes 	struct msync_args *uap;
502df8bae1dSRodney W. Grimes {
503df8bae1dSRodney W. Grimes 	vm_offset_t addr;
504dabee6feSPeter Wemm 	vm_size_t size, pageoff;
505e6c6af11SDavid Greenman 	int flags;
506df8bae1dSRodney W. Grimes 	vm_map_t map;
507df8bae1dSRodney W. Grimes 	int rv;
508df8bae1dSRodney W. Grimes 
509df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
5109154ee6aSPeter Wemm 	size = uap->len;
511e6c6af11SDavid Greenman 	flags = uap->flags;
512e6c6af11SDavid Greenman 
513dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
514dabee6feSPeter Wemm 	addr -= pageoff;
515dabee6feSPeter Wemm 	size += pageoff;
516dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
5179154ee6aSPeter Wemm 	if (addr + size < addr)
518dabee6feSPeter Wemm 		return (EINVAL);
519dabee6feSPeter Wemm 
520dabee6feSPeter Wemm 	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
5211e62bc63SDavid Greenman 		return (EINVAL);
5221e62bc63SDavid Greenman 
5230cddd8f0SMatthew Dillon 	mtx_lock(&Giant);
5240cddd8f0SMatthew Dillon 
525b40ce416SJulian Elischer 	map = &td->td_proc->p_vmspace->vm_map;
5269154ee6aSPeter Wemm 
527df8bae1dSRodney W. Grimes 	/*
528df8bae1dSRodney W. Grimes 	 * XXX Gak!  If size is zero we are supposed to sync "all modified
5290d94caffSDavid Greenman 	 * pages within the region containing addr".  Unfortunately, we don't
5300d94caffSDavid Greenman 	 * really keep track of individual mmaps so we approximate by flushing
5310d94caffSDavid Greenman 	 * the range of the map entry containing addr. This can be incorrect
5320d94caffSDavid Greenman 	 * if the region splits or is coalesced with a neighbor.
533df8bae1dSRodney W. Grimes 	 */
534df8bae1dSRodney W. Grimes 	if (size == 0) {
535df8bae1dSRodney W. Grimes 		vm_map_entry_t entry;
536df8bae1dSRodney W. Grimes 
537df8bae1dSRodney W. Grimes 		vm_map_lock_read(map);
538df8bae1dSRodney W. Grimes 		rv = vm_map_lookup_entry(map, addr, &entry);
539df8bae1dSRodney W. Grimes 		vm_map_unlock_read(map);
54023955314SAlfred Perlstein 		if (rv == FALSE) {
541d2c60af8SMatthew Dillon 			rv = -1;
542d2c60af8SMatthew Dillon 			goto done2;
54323955314SAlfred Perlstein 		}
544df8bae1dSRodney W. Grimes 		addr = entry->start;
545df8bae1dSRodney W. Grimes 		size = entry->end - entry->start;
546df8bae1dSRodney W. Grimes 	}
547e6c6af11SDavid Greenman 
548df8bae1dSRodney W. Grimes 	/*
549df8bae1dSRodney W. Grimes 	 * Clean the pages and interpret the return value.
550df8bae1dSRodney W. Grimes 	 */
5516c534ad8SDavid Greenman 	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
552e6c6af11SDavid Greenman 	    (flags & MS_INVALIDATE) != 0);
553e6c6af11SDavid Greenman 
554d2c60af8SMatthew Dillon done2:
555190609ddSJohn Baldwin 	mtx_unlock(&Giant);
5560cddd8f0SMatthew Dillon 
557df8bae1dSRodney W. Grimes 	switch (rv) {
558df8bae1dSRodney W. Grimes 	case KERN_SUCCESS:
559d2c60af8SMatthew Dillon 		return (0);
560df8bae1dSRodney W. Grimes 	case KERN_INVALID_ADDRESS:
561df8bae1dSRodney W. Grimes 		return (EINVAL);	/* Sun returns ENOMEM? */
562df8bae1dSRodney W. Grimes 	case KERN_FAILURE:
563df8bae1dSRodney W. Grimes 		return (EIO);
564df8bae1dSRodney W. Grimes 	default:
565df8bae1dSRodney W. Grimes 		return (EINVAL);
566df8bae1dSRodney W. Grimes 	}
567df8bae1dSRodney W. Grimes }
568df8bae1dSRodney W. Grimes 
569d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
570df8bae1dSRodney W. Grimes struct munmap_args {
571651bb817SAlexander Langer 	void *addr;
5729154ee6aSPeter Wemm 	size_t len;
573df8bae1dSRodney W. Grimes };
574d2d3e875SBruce Evans #endif
575d2c60af8SMatthew Dillon /*
576d2c60af8SMatthew Dillon  * MPSAFE
577d2c60af8SMatthew Dillon  */
578df8bae1dSRodney W. Grimes int
579b40ce416SJulian Elischer munmap(td, uap)
580b40ce416SJulian Elischer 	struct thread *td;
58154d92145SMatthew Dillon 	struct munmap_args *uap;
582df8bae1dSRodney W. Grimes {
583df8bae1dSRodney W. Grimes 	vm_offset_t addr;
584dabee6feSPeter Wemm 	vm_size_t size, pageoff;
585df8bae1dSRodney W. Grimes 	vm_map_t map;
586df8bae1dSRodney W. Grimes 
587df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
5889154ee6aSPeter Wemm 	size = uap->len;
589dabee6feSPeter Wemm 
590dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
591dabee6feSPeter Wemm 	addr -= pageoff;
592dabee6feSPeter Wemm 	size += pageoff;
593dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
5949154ee6aSPeter Wemm 	if (addr + size < addr)
595df8bae1dSRodney W. Grimes 		return (EINVAL);
5969154ee6aSPeter Wemm 
597df8bae1dSRodney W. Grimes 	if (size == 0)
598df8bae1dSRodney W. Grimes 		return (0);
599dabee6feSPeter Wemm 
600df8bae1dSRodney W. Grimes 	/*
6010d94caffSDavid Greenman 	 * Check for illegal addresses.  Watch out for address wrap... Note
6020d94caffSDavid Greenman 	 * that VM_*_ADDRESS are not constants due to casts (argh).
603df8bae1dSRodney W. Grimes 	 */
604bbc0ec52SDavid Greenman 	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
605df8bae1dSRodney W. Grimes 		return (EINVAL);
60626f9a767SRodney W. Grimes #ifndef i386
607df8bae1dSRodney W. Grimes 	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
608df8bae1dSRodney W. Grimes 		return (EINVAL);
60926f9a767SRodney W. Grimes #endif
6100cddd8f0SMatthew Dillon 	mtx_lock(&Giant);
611b40ce416SJulian Elischer 	map = &td->td_proc->p_vmspace->vm_map;
612df8bae1dSRodney W. Grimes 	/*
613df8bae1dSRodney W. Grimes 	 * Make sure entire range is allocated.
614df8bae1dSRodney W. Grimes 	 */
61523955314SAlfred Perlstein 	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE)) {
616e4ca250dSJohn Baldwin 		mtx_unlock(&Giant);
617df8bae1dSRodney W. Grimes 		return (EINVAL);
61823955314SAlfred Perlstein 	}
619df8bae1dSRodney W. Grimes 	/* returns nothing but KERN_SUCCESS anyway */
620df8bae1dSRodney W. Grimes 	(void) vm_map_remove(map, addr, addr + size);
621e4ca250dSJohn Baldwin 	mtx_unlock(&Giant);
622df8bae1dSRodney W. Grimes 	return (0);
623df8bae1dSRodney W. Grimes }
624df8bae1dSRodney W. Grimes 
625279d7226SMatthew Dillon #if 0
626df8bae1dSRodney W. Grimes void
627b40ce416SJulian Elischer munmapfd(td, fd)
628b40ce416SJulian Elischer 	struct thread *td;
629df8bae1dSRodney W. Grimes 	int fd;
630df8bae1dSRodney W. Grimes {
631df8bae1dSRodney W. Grimes 	/*
632c4ed5a07SDavid Greenman 	 * XXX should unmap any regions mapped to this file
633df8bae1dSRodney W. Grimes 	 */
634426da3bcSAlfred Perlstein 	FILEDESC_LOCK(td->td_proc->p_fd);
635b40ce416SJulian Elischer 	td->td_proc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
636426da3bcSAlfred Perlstein 	FILEDESC_UNLOCK(td->td_proc->p_fd);
637df8bae1dSRodney W. Grimes }
638279d7226SMatthew Dillon #endif
639df8bae1dSRodney W. Grimes 
640d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
641df8bae1dSRodney W. Grimes struct mprotect_args {
642651bb817SAlexander Langer 	const void *addr;
6439154ee6aSPeter Wemm 	size_t len;
644df8bae1dSRodney W. Grimes 	int prot;
645df8bae1dSRodney W. Grimes };
646d2d3e875SBruce Evans #endif
647d2c60af8SMatthew Dillon /*
648d2c60af8SMatthew Dillon  * MPSAFE
649d2c60af8SMatthew Dillon  */
650df8bae1dSRodney W. Grimes int
651b40ce416SJulian Elischer mprotect(td, uap)
652b40ce416SJulian Elischer 	struct thread *td;
653df8bae1dSRodney W. Grimes 	struct mprotect_args *uap;
654df8bae1dSRodney W. Grimes {
655df8bae1dSRodney W. Grimes 	vm_offset_t addr;
656dabee6feSPeter Wemm 	vm_size_t size, pageoff;
65754d92145SMatthew Dillon 	vm_prot_t prot;
65823955314SAlfred Perlstein 	int ret;
659df8bae1dSRodney W. Grimes 
660df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
6619154ee6aSPeter Wemm 	size = uap->len;
662df8bae1dSRodney W. Grimes 	prot = uap->prot & VM_PROT_ALL;
663d0aea04fSJohn Dyson #if defined(VM_PROT_READ_IS_EXEC)
664d0aea04fSJohn Dyson 	if (prot & VM_PROT_READ)
665d0aea04fSJohn Dyson 		prot |= VM_PROT_EXECUTE;
666d0aea04fSJohn Dyson #endif
667df8bae1dSRodney W. Grimes 
668dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
669dabee6feSPeter Wemm 	addr -= pageoff;
670dabee6feSPeter Wemm 	size += pageoff;
671dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
6729154ee6aSPeter Wemm 	if (addr + size < addr)
673dabee6feSPeter Wemm 		return (EINVAL);
674dabee6feSPeter Wemm 
675e4ca250dSJohn Baldwin 	mtx_lock(&Giant);
676b40ce416SJulian Elischer 	ret = vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
67723955314SAlfred Perlstein 		     addr + size, prot, FALSE);
678e4ca250dSJohn Baldwin 	mtx_unlock(&Giant);
67923955314SAlfred Perlstein 	switch (ret) {
680df8bae1dSRodney W. Grimes 	case KERN_SUCCESS:
681df8bae1dSRodney W. Grimes 		return (0);
682df8bae1dSRodney W. Grimes 	case KERN_PROTECTION_FAILURE:
683df8bae1dSRodney W. Grimes 		return (EACCES);
684df8bae1dSRodney W. Grimes 	}
685df8bae1dSRodney W. Grimes 	return (EINVAL);
686df8bae1dSRodney W. Grimes }
687df8bae1dSRodney W. Grimes 
688d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
689dabee6feSPeter Wemm struct minherit_args {
690651bb817SAlexander Langer 	void *addr;
6919154ee6aSPeter Wemm 	size_t len;
692dabee6feSPeter Wemm 	int inherit;
693dabee6feSPeter Wemm };
694dabee6feSPeter Wemm #endif
695d2c60af8SMatthew Dillon /*
696d2c60af8SMatthew Dillon  * MPSAFE
697d2c60af8SMatthew Dillon  */
698dabee6feSPeter Wemm int
699b40ce416SJulian Elischer minherit(td, uap)
700b40ce416SJulian Elischer 	struct thread *td;
701dabee6feSPeter Wemm 	struct minherit_args *uap;
702dabee6feSPeter Wemm {
703dabee6feSPeter Wemm 	vm_offset_t addr;
704dabee6feSPeter Wemm 	vm_size_t size, pageoff;
70554d92145SMatthew Dillon 	vm_inherit_t inherit;
70623955314SAlfred Perlstein 	int ret;
707dabee6feSPeter Wemm 
708dabee6feSPeter Wemm 	addr = (vm_offset_t)uap->addr;
7099154ee6aSPeter Wemm 	size = uap->len;
710dabee6feSPeter Wemm 	inherit = uap->inherit;
711dabee6feSPeter Wemm 
712dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
713dabee6feSPeter Wemm 	addr -= pageoff;
714dabee6feSPeter Wemm 	size += pageoff;
715dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
7169154ee6aSPeter Wemm 	if (addr + size < addr)
717dabee6feSPeter Wemm 		return (EINVAL);
718dabee6feSPeter Wemm 
719190609ddSJohn Baldwin 	mtx_lock(&Giant);
720b40ce416SJulian Elischer 	ret = vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr, addr+size,
72123955314SAlfred Perlstein 		    inherit);
722190609ddSJohn Baldwin 	mtx_unlock(&Giant);
72323955314SAlfred Perlstein 
72423955314SAlfred Perlstein 	switch (ret) {
725dabee6feSPeter Wemm 	case KERN_SUCCESS:
726dabee6feSPeter Wemm 		return (0);
727dabee6feSPeter Wemm 	case KERN_PROTECTION_FAILURE:
728dabee6feSPeter Wemm 		return (EACCES);
729dabee6feSPeter Wemm 	}
730dabee6feSPeter Wemm 	return (EINVAL);
731dabee6feSPeter Wemm }
732dabee6feSPeter Wemm 
733dabee6feSPeter Wemm #ifndef _SYS_SYSPROTO_H_
734df8bae1dSRodney W. Grimes struct madvise_args {
735651bb817SAlexander Langer 	void *addr;
7369154ee6aSPeter Wemm 	size_t len;
737df8bae1dSRodney W. Grimes 	int behav;
738df8bae1dSRodney W. Grimes };
739d2d3e875SBruce Evans #endif
7400d94caffSDavid Greenman 
741d2c60af8SMatthew Dillon /*
742d2c60af8SMatthew Dillon  * MPSAFE
743d2c60af8SMatthew Dillon  */
744df8bae1dSRodney W. Grimes /* ARGSUSED */
745df8bae1dSRodney W. Grimes int
746b40ce416SJulian Elischer madvise(td, uap)
747b40ce416SJulian Elischer 	struct thread *td;
748df8bae1dSRodney W. Grimes 	struct madvise_args *uap;
749df8bae1dSRodney W. Grimes {
750f35329acSJohn Dyson 	vm_offset_t start, end;
75123955314SAlfred Perlstein 	int ret;
752b4309055SMatthew Dillon 
753b4309055SMatthew Dillon 	/*
754b4309055SMatthew Dillon 	 * Check for illegal behavior
755b4309055SMatthew Dillon 	 */
7569730a5daSPaul Saab 	if (uap->behav < 0 || uap->behav > MADV_CORE)
757b4309055SMatthew Dillon 		return (EINVAL);
758867a482dSJohn Dyson 	/*
759867a482dSJohn Dyson 	 * Check for illegal addresses.  Watch out for address wrap... Note
760867a482dSJohn Dyson 	 * that VM_*_ADDRESS are not constants due to casts (argh).
761867a482dSJohn Dyson 	 */
762867a482dSJohn Dyson 	if (VM_MAXUSER_ADDRESS > 0 &&
763867a482dSJohn Dyson 		((vm_offset_t) uap->addr + uap->len) > VM_MAXUSER_ADDRESS)
764867a482dSJohn Dyson 		return (EINVAL);
765867a482dSJohn Dyson #ifndef i386
766867a482dSJohn Dyson 	if (VM_MIN_ADDRESS > 0 && uap->addr < VM_MIN_ADDRESS)
767867a482dSJohn Dyson 		return (EINVAL);
768867a482dSJohn Dyson #endif
769867a482dSJohn Dyson 	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
770867a482dSJohn Dyson 		return (EINVAL);
771867a482dSJohn Dyson 
772867a482dSJohn Dyson 	/*
773867a482dSJohn Dyson 	 * Since this routine is only advisory, we default to conservative
774867a482dSJohn Dyson 	 * behavior.
775867a482dSJohn Dyson 	 */
776cd6eea25SDavid Greenman 	start = trunc_page((vm_offset_t) uap->addr);
777cd6eea25SDavid Greenman 	end = round_page((vm_offset_t) uap->addr + uap->len);
778867a482dSJohn Dyson 
779190609ddSJohn Baldwin 	mtx_lock(&Giant);
780b40ce416SJulian Elischer 	ret = vm_map_madvise(&td->td_proc->p_vmspace->vm_map, start, end, uap->behav);
781190609ddSJohn Baldwin 	mtx_unlock(&Giant);
78223955314SAlfred Perlstein 	return (ret ? EINVAL : 0);
783df8bae1dSRodney W. Grimes }
784df8bae1dSRodney W. Grimes 
785d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
786df8bae1dSRodney W. Grimes struct mincore_args {
787651bb817SAlexander Langer 	const void *addr;
7889154ee6aSPeter Wemm 	size_t len;
789df8bae1dSRodney W. Grimes 	char *vec;
790df8bae1dSRodney W. Grimes };
791d2d3e875SBruce Evans #endif
7920d94caffSDavid Greenman 
793d2c60af8SMatthew Dillon /*
794d2c60af8SMatthew Dillon  * MPSAFE
795d2c60af8SMatthew Dillon  */
796df8bae1dSRodney W. Grimes /* ARGSUSED */
797df8bae1dSRodney W. Grimes int
798b40ce416SJulian Elischer mincore(td, uap)
799b40ce416SJulian Elischer 	struct thread *td;
800df8bae1dSRodney W. Grimes 	struct mincore_args *uap;
801df8bae1dSRodney W. Grimes {
802867a482dSJohn Dyson 	vm_offset_t addr, first_addr;
803867a482dSJohn Dyson 	vm_offset_t end, cend;
804867a482dSJohn Dyson 	pmap_t pmap;
805867a482dSJohn Dyson 	vm_map_t map;
80602c04a2fSJohn Dyson 	char *vec;
807d2c60af8SMatthew Dillon 	int error = 0;
808867a482dSJohn Dyson 	int vecindex, lastvecindex;
80954d92145SMatthew Dillon 	vm_map_entry_t current;
810867a482dSJohn Dyson 	vm_map_entry_t entry;
811867a482dSJohn Dyson 	int mincoreinfo;
812dd2622a8SAlan Cox 	unsigned int timestamp;
813df8bae1dSRodney W. Grimes 
814867a482dSJohn Dyson 	/*
815867a482dSJohn Dyson 	 * Make sure that the addresses presented are valid for user
816867a482dSJohn Dyson 	 * mode.
817867a482dSJohn Dyson 	 */
818867a482dSJohn Dyson 	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
8199154ee6aSPeter Wemm 	end = addr + (vm_size_t)round_page(uap->len);
82002c04a2fSJohn Dyson 	if (VM_MAXUSER_ADDRESS > 0 && end > VM_MAXUSER_ADDRESS)
82102c04a2fSJohn Dyson 		return (EINVAL);
82202c04a2fSJohn Dyson 	if (end < addr)
82302c04a2fSJohn Dyson 		return (EINVAL);
82402c04a2fSJohn Dyson 
825867a482dSJohn Dyson 	/*
826867a482dSJohn Dyson 	 * Address of byte vector
827867a482dSJohn Dyson 	 */
82802c04a2fSJohn Dyson 	vec = uap->vec;
829867a482dSJohn Dyson 
830190609ddSJohn Baldwin 	mtx_lock(&Giant);
831b40ce416SJulian Elischer 	map = &td->td_proc->p_vmspace->vm_map;
832b40ce416SJulian Elischer 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
833867a482dSJohn Dyson 
834eff50fcdSAlan Cox 	vm_map_lock_read(map);
835dd2622a8SAlan Cox RestartScan:
836dd2622a8SAlan Cox 	timestamp = map->timestamp;
837867a482dSJohn Dyson 
838867a482dSJohn Dyson 	if (!vm_map_lookup_entry(map, addr, &entry))
839867a482dSJohn Dyson 		entry = entry->next;
840867a482dSJohn Dyson 
841867a482dSJohn Dyson 	/*
842867a482dSJohn Dyson 	 * Do this on a map entry basis so that if the pages are not
843867a482dSJohn Dyson 	 * in the current processes address space, we can easily look
844867a482dSJohn Dyson 	 * up the pages elsewhere.
845867a482dSJohn Dyson 	 */
846867a482dSJohn Dyson 	lastvecindex = -1;
847867a482dSJohn Dyson 	for (current = entry;
848867a482dSJohn Dyson 	    (current != &map->header) && (current->start < end);
849867a482dSJohn Dyson 	    current = current->next) {
850867a482dSJohn Dyson 
851867a482dSJohn Dyson 		/*
852867a482dSJohn Dyson 		 * ignore submaps (for now) or null objects
853867a482dSJohn Dyson 		 */
8549fdfe602SMatthew Dillon 		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
855867a482dSJohn Dyson 			current->object.vm_object == NULL)
856867a482dSJohn Dyson 			continue;
857867a482dSJohn Dyson 
858867a482dSJohn Dyson 		/*
859867a482dSJohn Dyson 		 * limit this scan to the current map entry and the
860867a482dSJohn Dyson 		 * limits for the mincore call
861867a482dSJohn Dyson 		 */
862867a482dSJohn Dyson 		if (addr < current->start)
863867a482dSJohn Dyson 			addr = current->start;
864867a482dSJohn Dyson 		cend = current->end;
865867a482dSJohn Dyson 		if (cend > end)
866867a482dSJohn Dyson 			cend = end;
867867a482dSJohn Dyson 
868867a482dSJohn Dyson 		/*
869867a482dSJohn Dyson 		 * scan this entry one page at a time
870867a482dSJohn Dyson 		 */
871867a482dSJohn Dyson 		while (addr < cend) {
872867a482dSJohn Dyson 			/*
873867a482dSJohn Dyson 			 * Check pmap first, it is likely faster, also
874867a482dSJohn Dyson 			 * it can provide info as to whether we are the
875867a482dSJohn Dyson 			 * one referencing or modifying the page.
876867a482dSJohn Dyson 			 */
877867a482dSJohn Dyson 			mincoreinfo = pmap_mincore(pmap, addr);
878867a482dSJohn Dyson 			if (!mincoreinfo) {
879867a482dSJohn Dyson 				vm_pindex_t pindex;
880867a482dSJohn Dyson 				vm_ooffset_t offset;
881867a482dSJohn Dyson 				vm_page_t m;
882867a482dSJohn Dyson 				/*
883867a482dSJohn Dyson 				 * calculate the page index into the object
884867a482dSJohn Dyson 				 */
885867a482dSJohn Dyson 				offset = current->offset + (addr - current->start);
886867a482dSJohn Dyson 				pindex = OFF_TO_IDX(offset);
887867a482dSJohn Dyson 				m = vm_page_lookup(current->object.vm_object,
888867a482dSJohn Dyson 					pindex);
889867a482dSJohn Dyson 				/*
890867a482dSJohn Dyson 				 * if the page is resident, then gather information about
891867a482dSJohn Dyson 				 * it.
892867a482dSJohn Dyson 				 */
893867a482dSJohn Dyson 				if (m) {
894867a482dSJohn Dyson 					mincoreinfo = MINCORE_INCORE;
895867a482dSJohn Dyson 					if (m->dirty ||
8960385347cSPeter Wemm 						pmap_is_modified(m))
897867a482dSJohn Dyson 						mincoreinfo |= MINCORE_MODIFIED_OTHER;
898867a482dSJohn Dyson 					if ((m->flags & PG_REFERENCED) ||
8990385347cSPeter Wemm 						pmap_ts_referenced(m)) {
900e69763a3SDoug Rabson 						vm_page_flag_set(m, PG_REFERENCED);
901867a482dSJohn Dyson 						mincoreinfo |= MINCORE_REFERENCED_OTHER;
90202c04a2fSJohn Dyson 					}
903867a482dSJohn Dyson 				}
9049b5a5d81SJohn Dyson 			}
905867a482dSJohn Dyson 
906867a482dSJohn Dyson 			/*
907dd2622a8SAlan Cox 			 * subyte may page fault.  In case it needs to modify
908dd2622a8SAlan Cox 			 * the map, we release the lock.
909dd2622a8SAlan Cox 			 */
910dd2622a8SAlan Cox 			vm_map_unlock_read(map);
911dd2622a8SAlan Cox 
912dd2622a8SAlan Cox 			/*
913867a482dSJohn Dyson 			 * calculate index into user supplied byte vector
914867a482dSJohn Dyson 			 */
915867a482dSJohn Dyson 			vecindex = OFF_TO_IDX(addr - first_addr);
916867a482dSJohn Dyson 
917867a482dSJohn Dyson 			/*
918867a482dSJohn Dyson 			 * If we have skipped map entries, we need to make sure that
919867a482dSJohn Dyson 			 * the byte vector is zeroed for those skipped entries.
920867a482dSJohn Dyson 			 */
921867a482dSJohn Dyson 			while ((lastvecindex + 1) < vecindex) {
922867a482dSJohn Dyson 				error = subyte(vec + lastvecindex, 0);
923867a482dSJohn Dyson 				if (error) {
924d2c60af8SMatthew Dillon 					error = EFAULT;
925d2c60af8SMatthew Dillon 					goto done2;
926867a482dSJohn Dyson 				}
927867a482dSJohn Dyson 				++lastvecindex;
928867a482dSJohn Dyson 			}
929867a482dSJohn Dyson 
930867a482dSJohn Dyson 			/*
931867a482dSJohn Dyson 			 * Pass the page information to the user
932867a482dSJohn Dyson 			 */
933867a482dSJohn Dyson 			error = subyte(vec + vecindex, mincoreinfo);
934867a482dSJohn Dyson 			if (error) {
935d2c60af8SMatthew Dillon 				error = EFAULT;
936d2c60af8SMatthew Dillon 				goto done2;
937867a482dSJohn Dyson 			}
938dd2622a8SAlan Cox 
939dd2622a8SAlan Cox 			/*
940dd2622a8SAlan Cox 			 * If the map has changed, due to the subyte, the previous
941dd2622a8SAlan Cox 			 * output may be invalid.
942dd2622a8SAlan Cox 			 */
943dd2622a8SAlan Cox 			vm_map_lock_read(map);
944dd2622a8SAlan Cox 			if (timestamp != map->timestamp)
945dd2622a8SAlan Cox 				goto RestartScan;
946dd2622a8SAlan Cox 
947867a482dSJohn Dyson 			lastvecindex = vecindex;
94802c04a2fSJohn Dyson 			addr += PAGE_SIZE;
94902c04a2fSJohn Dyson 		}
950867a482dSJohn Dyson 	}
951867a482dSJohn Dyson 
952867a482dSJohn Dyson 	/*
953dd2622a8SAlan Cox 	 * subyte may page fault.  In case it needs to modify
954dd2622a8SAlan Cox 	 * the map, we release the lock.
955dd2622a8SAlan Cox 	 */
956dd2622a8SAlan Cox 	vm_map_unlock_read(map);
957dd2622a8SAlan Cox 
958dd2622a8SAlan Cox 	/*
959867a482dSJohn Dyson 	 * Zero the last entries in the byte vector.
960867a482dSJohn Dyson 	 */
961867a482dSJohn Dyson 	vecindex = OFF_TO_IDX(end - first_addr);
962867a482dSJohn Dyson 	while ((lastvecindex + 1) < vecindex) {
963867a482dSJohn Dyson 		error = subyte(vec + lastvecindex, 0);
964867a482dSJohn Dyson 		if (error) {
965d2c60af8SMatthew Dillon 			error = EFAULT;
966d2c60af8SMatthew Dillon 			goto done2;
967867a482dSJohn Dyson 		}
968867a482dSJohn Dyson 		++lastvecindex;
969867a482dSJohn Dyson 	}
970867a482dSJohn Dyson 
971dd2622a8SAlan Cox 	/*
972dd2622a8SAlan Cox 	 * If the map has changed, due to the subyte, the previous
973dd2622a8SAlan Cox 	 * output may be invalid.
974dd2622a8SAlan Cox 	 */
975dd2622a8SAlan Cox 	vm_map_lock_read(map);
976dd2622a8SAlan Cox 	if (timestamp != map->timestamp)
977dd2622a8SAlan Cox 		goto RestartScan;
978eff50fcdSAlan Cox 	vm_map_unlock_read(map);
979d2c60af8SMatthew Dillon done2:
980190609ddSJohn Baldwin 	mtx_unlock(&Giant);
981d2c60af8SMatthew Dillon 	return (error);
982df8bae1dSRodney W. Grimes }
983df8bae1dSRodney W. Grimes 
984d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
985df8bae1dSRodney W. Grimes struct mlock_args {
986651bb817SAlexander Langer 	const void *addr;
987df8bae1dSRodney W. Grimes 	size_t len;
988df8bae1dSRodney W. Grimes };
989d2d3e875SBruce Evans #endif
990d2c60af8SMatthew Dillon /*
991d2c60af8SMatthew Dillon  * MPSAFE
992d2c60af8SMatthew Dillon  */
993df8bae1dSRodney W. Grimes int
994b40ce416SJulian Elischer mlock(td, uap)
995b40ce416SJulian Elischer 	struct thread *td;
996df8bae1dSRodney W. Grimes 	struct mlock_args *uap;
997df8bae1dSRodney W. Grimes {
998df8bae1dSRodney W. Grimes 	vm_offset_t addr;
999dabee6feSPeter Wemm 	vm_size_t size, pageoff;
1000df8bae1dSRodney W. Grimes 	int error;
1001df8bae1dSRodney W. Grimes 
1002df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
10039154ee6aSPeter Wemm 	size = uap->len;
10049154ee6aSPeter Wemm 
1005dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
1006dabee6feSPeter Wemm 	addr -= pageoff;
1007dabee6feSPeter Wemm 	size += pageoff;
1008dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
1009dabee6feSPeter Wemm 
1010dabee6feSPeter Wemm 	/* disable wrap around */
10119154ee6aSPeter Wemm 	if (addr + size < addr)
1012df8bae1dSRodney W. Grimes 		return (EINVAL);
1013dabee6feSPeter Wemm 
1014df8bae1dSRodney W. Grimes 	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
1015df8bae1dSRodney W. Grimes 		return (EAGAIN);
10169154ee6aSPeter Wemm 
1017df8bae1dSRodney W. Grimes #ifdef pmap_wired_count
1018b40ce416SJulian Elischer 	if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
1019b40ce416SJulian Elischer 	    td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
10204a40e3d4SJohn Dyson 		return (ENOMEM);
1021df8bae1dSRodney W. Grimes #else
1022b40ce416SJulian Elischer 	error = suser_td(td);
102305f0fdd2SPoul-Henning Kamp 	if (error)
1024df8bae1dSRodney W. Grimes 		return (error);
1025df8bae1dSRodney W. Grimes #endif
1026df8bae1dSRodney W. Grimes 
1027190609ddSJohn Baldwin 	mtx_lock(&Giant);
1028b40ce416SJulian Elischer 	error = vm_map_user_pageable(&td->td_proc->p_vmspace->vm_map, addr,
102923955314SAlfred Perlstein 		     addr + size, FALSE);
1030190609ddSJohn Baldwin 	mtx_unlock(&Giant);
1031df8bae1dSRodney W. Grimes 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
1032df8bae1dSRodney W. Grimes }
1033df8bae1dSRodney W. Grimes 
1034d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
10354a40e3d4SJohn Dyson struct mlockall_args {
10364a40e3d4SJohn Dyson 	int	how;
10374a40e3d4SJohn Dyson };
10384a40e3d4SJohn Dyson #endif
10394a40e3d4SJohn Dyson 
1040d2c60af8SMatthew Dillon /*
1041d2c60af8SMatthew Dillon  * MPSAFE
1042d2c60af8SMatthew Dillon  */
10434a40e3d4SJohn Dyson int
1044b40ce416SJulian Elischer mlockall(td, uap)
1045b40ce416SJulian Elischer 	struct thread *td;
10464a40e3d4SJohn Dyson 	struct mlockall_args *uap;
10474a40e3d4SJohn Dyson {
10480cddd8f0SMatthew Dillon 	/* mtx_lock(&Giant); */
10490cddd8f0SMatthew Dillon 	/* mtx_unlock(&Giant); */
10504a40e3d4SJohn Dyson 	return 0;
10514a40e3d4SJohn Dyson }
10524a40e3d4SJohn Dyson 
10534a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_
10544a40e3d4SJohn Dyson struct munlockall_args {
10554a40e3d4SJohn Dyson 	int	how;
10564a40e3d4SJohn Dyson };
10574a40e3d4SJohn Dyson #endif
10584a40e3d4SJohn Dyson 
1059d2c60af8SMatthew Dillon /*
1060d2c60af8SMatthew Dillon  * MPSAFE
1061d2c60af8SMatthew Dillon  */
10624a40e3d4SJohn Dyson int
1063b40ce416SJulian Elischer munlockall(td, uap)
1064b40ce416SJulian Elischer 	struct thread *td;
10654a40e3d4SJohn Dyson 	struct munlockall_args *uap;
10664a40e3d4SJohn Dyson {
10670cddd8f0SMatthew Dillon 	/* mtx_lock(&Giant); */
10680cddd8f0SMatthew Dillon 	/* mtx_unlock(&Giant); */
10694a40e3d4SJohn Dyson 	return 0;
10704a40e3d4SJohn Dyson }
10714a40e3d4SJohn Dyson 
10724a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_
1073df8bae1dSRodney W. Grimes struct munlock_args {
1074651bb817SAlexander Langer 	const void *addr;
1075df8bae1dSRodney W. Grimes 	size_t len;
1076df8bae1dSRodney W. Grimes };
1077d2d3e875SBruce Evans #endif
1078d2c60af8SMatthew Dillon /*
1079d2c60af8SMatthew Dillon  * MPSAFE
1080d2c60af8SMatthew Dillon  */
1081df8bae1dSRodney W. Grimes int
1082b40ce416SJulian Elischer munlock(td, uap)
1083b40ce416SJulian Elischer 	struct thread *td;
1084df8bae1dSRodney W. Grimes 	struct munlock_args *uap;
1085df8bae1dSRodney W. Grimes {
1086df8bae1dSRodney W. Grimes 	vm_offset_t addr;
1087dabee6feSPeter Wemm 	vm_size_t size, pageoff;
1088df8bae1dSRodney W. Grimes 	int error;
1089df8bae1dSRodney W. Grimes 
1090df8bae1dSRodney W. Grimes 	addr = (vm_offset_t) uap->addr;
10919154ee6aSPeter Wemm 	size = uap->len;
10929154ee6aSPeter Wemm 
1093dabee6feSPeter Wemm 	pageoff = (addr & PAGE_MASK);
1094dabee6feSPeter Wemm 	addr -= pageoff;
1095dabee6feSPeter Wemm 	size += pageoff;
1096dabee6feSPeter Wemm 	size = (vm_size_t) round_page(size);
1097dabee6feSPeter Wemm 
1098dabee6feSPeter Wemm 	/* disable wrap around */
10999154ee6aSPeter Wemm 	if (addr + size < addr)
1100df8bae1dSRodney W. Grimes 		return (EINVAL);
1101dabee6feSPeter Wemm 
1102df8bae1dSRodney W. Grimes #ifndef pmap_wired_count
1103b40ce416SJulian Elischer 	error = suser_td(td);
110405f0fdd2SPoul-Henning Kamp 	if (error)
1105df8bae1dSRodney W. Grimes 		return (error);
1106df8bae1dSRodney W. Grimes #endif
1107df8bae1dSRodney W. Grimes 
1108190609ddSJohn Baldwin 	mtx_lock(&Giant);
1109b40ce416SJulian Elischer 	error = vm_map_user_pageable(&td->td_proc->p_vmspace->vm_map, addr,
111023955314SAlfred Perlstein 		     addr + size, TRUE);
1111190609ddSJohn Baldwin 	mtx_unlock(&Giant);
1112df8bae1dSRodney W. Grimes 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
1113df8bae1dSRodney W. Grimes }
1114df8bae1dSRodney W. Grimes 
1115df8bae1dSRodney W. Grimes /*
1116d2c60af8SMatthew Dillon  * vm_mmap()
1117d2c60af8SMatthew Dillon  *
1118d2c60af8SMatthew Dillon  * MPSAFE
1119d2c60af8SMatthew Dillon  *
1120d2c60af8SMatthew Dillon  * Internal version of mmap.  Currently used by mmap, exec, and sys5
1121d2c60af8SMatthew Dillon  * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
1122df8bae1dSRodney W. Grimes  */
1123df8bae1dSRodney W. Grimes int
1124b9dcd593SBruce Evans vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1125b9dcd593SBruce Evans 	vm_prot_t maxprot, int flags,
1126651bb817SAlexander Langer 	void *handle,
1127b9dcd593SBruce Evans 	vm_ooffset_t foff)
1128df8bae1dSRodney W. Grimes {
1129df8bae1dSRodney W. Grimes 	boolean_t fitit;
1130fcae040bSJohn Dyson 	vm_object_t object;
1131df8bae1dSRodney W. Grimes 	struct vnode *vp = NULL;
113224a1cce3SDavid Greenman 	objtype_t type;
1133df8bae1dSRodney W. Grimes 	int rv = KERN_SUCCESS;
1134bd7e5f99SJohn Dyson 	vm_ooffset_t objsize;
1135bd7e5f99SJohn Dyson 	int docow;
1136b40ce416SJulian Elischer 	struct thread *td = curthread;
1137df8bae1dSRodney W. Grimes 
1138df8bae1dSRodney W. Grimes 	if (size == 0)
1139df8bae1dSRodney W. Grimes 		return (0);
1140df8bae1dSRodney W. Grimes 
114106cb7259SDavid Greenman 	objsize = size = round_page(size);
1142df8bae1dSRodney W. Grimes 
1143df8bae1dSRodney W. Grimes 	/*
1144bc9ad247SDavid Greenman 	 * We currently can only deal with page aligned file offsets.
1145bc9ad247SDavid Greenman 	 * The check is here rather than in the syscall because the
1146bc9ad247SDavid Greenman 	 * kernel calls this function internally for other mmaping
1147bc9ad247SDavid Greenman 	 * kernel calls this function internally for other mmapping
1148bc9ad247SDavid Greenman 	 * cause pmap inconsistencies...so we want to be sure to
1149bc9ad247SDavid Greenman 	 * disallow this in all cases.
1150bc9ad247SDavid Greenman 	 */
1151bc9ad247SDavid Greenman 	if (foff & PAGE_MASK)
1152bc9ad247SDavid Greenman 		return (EINVAL);
1153bc9ad247SDavid Greenman 
115406cb7259SDavid Greenman 	if ((flags & MAP_FIXED) == 0) {
115506cb7259SDavid Greenman 		fitit = TRUE;
115606cb7259SDavid Greenman 		*addr = round_page(*addr);
1157e4ca250dSJohn Baldwin 		mtx_lock(&Giant);
115806cb7259SDavid Greenman 	} else {
115906cb7259SDavid Greenman 		if (*addr != trunc_page(*addr))
116006cb7259SDavid Greenman 			return (EINVAL);
116106cb7259SDavid Greenman 		fitit = FALSE;
1162e4ca250dSJohn Baldwin 		mtx_lock(&Giant);
116306cb7259SDavid Greenman 		(void) vm_map_remove(map, *addr, *addr + size);
116406cb7259SDavid Greenman 	}
116506cb7259SDavid Greenman 
1166bc9ad247SDavid Greenman 	/*
116724a1cce3SDavid Greenman 	 * Lookup/allocate object.
1168df8bae1dSRodney W. Grimes 	 */
11695f55e841SDavid Greenman 	if (flags & MAP_ANON) {
1170851c12ffSJohn Dyson 		type = OBJT_DEFAULT;
11715f55e841SDavid Greenman 		/*
11725f55e841SDavid Greenman 		 * Unnamed anonymous regions always start at 0.
11735f55e841SDavid Greenman 		 */
117467bf6868SJohn Dyson 		if (handle == 0)
11755f55e841SDavid Greenman 			foff = 0;
11765f55e841SDavid Greenman 	} else {
1177df8bae1dSRodney W. Grimes 		vp = (struct vnode *) handle;
1178df8bae1dSRodney W. Grimes 		if (vp->v_type == VCHR) {
117924a1cce3SDavid Greenman 			type = OBJT_DEVICE;
1180a23d65bfSBruce Evans 			handle = (void *)(intptr_t)vp->v_rdev;
118106cb7259SDavid Greenman 		} else {
118206cb7259SDavid Greenman 			struct vattr vat;
118306cb7259SDavid Greenman 			int error;
118406cb7259SDavid Greenman 
1185a854ed98SJohn Baldwin 			error = VOP_GETATTR(vp, &vat, td->td_ucred, td);
1186e4ca250dSJohn Baldwin 			if (error) {
118723955314SAlfred Perlstein 				mtx_unlock(&Giant);
118806cb7259SDavid Greenman 				return (error);
1189e4ca250dSJohn Baldwin 			}
1190bd7e5f99SJohn Dyson 			objsize = round_page(vat.va_size);
119124a1cce3SDavid Greenman 			type = OBJT_VNODE;
119200d76afeSGuido van Rooij 			/*
119300d76afeSGuido van Rooij 			 * if it is a regular file without any references
119400d76afeSGuido van Rooij 			 * we do not need to sync it.
119500d76afeSGuido van Rooij 			 */
119600d76afeSGuido van Rooij 			if (vp->v_type == VREG && vat.va_nlink == 0) {
119700d76afeSGuido van Rooij 				flags |= MAP_NOSYNC;
119800d76afeSGuido van Rooij 			}
1199df8bae1dSRodney W. Grimes 		}
120006cb7259SDavid Greenman 	}
120194328e90SJohn Dyson 
120294328e90SJohn Dyson 	if (handle == NULL) {
120394328e90SJohn Dyson 		object = NULL;
12044738fa09SAlan Cox 		docow = 0;
120594328e90SJohn Dyson 	} else {
12060a0a85b3SJohn Dyson 		object = vm_pager_allocate(type,
12076cde7a16SDavid Greenman 			handle, objsize, prot, foff);
1208e4ca250dSJohn Baldwin 		if (object == NULL) {
1209e4ca250dSJohn Baldwin 			mtx_unlock(&Giant);
121024a1cce3SDavid Greenman 			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
1211e4ca250dSJohn Baldwin 		}
12124738fa09SAlan Cox 		docow = MAP_PREFAULT_PARTIAL;
121394328e90SJohn Dyson 	}
1214df8bae1dSRodney W. Grimes 
12155850152dSJohn Dyson 	/*
12168f2ec877SDavid Greenman 	 * Force device mappings to be shared.
12175850152dSJohn Dyson 	 */
121824964514SPeter Wemm 	if (type == OBJT_DEVICE || type == OBJT_PHYS) {
12198f2ec877SDavid Greenman 		flags &= ~(MAP_PRIVATE|MAP_COPY);
12205850152dSJohn Dyson 		flags |= MAP_SHARED;
12218f2ec877SDavid Greenman 	}
12225850152dSJohn Dyson 
12234f79d873SMatthew Dillon 	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
12244738fa09SAlan Cox 		docow |= MAP_COPY_ON_WRITE;
12254f79d873SMatthew Dillon 	if (flags & MAP_NOSYNC)
12264f79d873SMatthew Dillon 		docow |= MAP_DISABLE_SYNCER;
12279730a5daSPaul Saab 	if (flags & MAP_NOCORE)
12289730a5daSPaul Saab 		docow |= MAP_DISABLE_COREDUMP;
12295850152dSJohn Dyson 
1230d0aea04fSJohn Dyson #if defined(VM_PROT_READ_IS_EXEC)
1231d0aea04fSJohn Dyson 	if (prot & VM_PROT_READ)
1232d0aea04fSJohn Dyson 		prot |= VM_PROT_EXECUTE;
1233d0aea04fSJohn Dyson 
1234d0aea04fSJohn Dyson 	if (maxprot & VM_PROT_READ)
1235d0aea04fSJohn Dyson 		maxprot |= VM_PROT_EXECUTE;
1236d0aea04fSJohn Dyson #endif
1237d0aea04fSJohn Dyson 
1238e4ca250dSJohn Baldwin 	if (fitit)
12390a0a85b3SJohn Dyson 		*addr = pmap_addr_hint(object, *addr, size);
12400a0a85b3SJohn Dyson 
12412267af78SJulian Elischer 	if (flags & MAP_STACK)
12422267af78SJulian Elischer 		rv = vm_map_stack (map, *addr, size, prot,
12432267af78SJulian Elischer 				   maxprot, docow);
12442267af78SJulian Elischer 	else
1245bd7e5f99SJohn Dyson 		rv = vm_map_find(map, object, foff, addr, size, fitit,
1246bd7e5f99SJohn Dyson 				 prot, maxprot, docow);
1247bd7e5f99SJohn Dyson 
1248d2c60af8SMatthew Dillon 	if (rv != KERN_SUCCESS) {
12497fb0c17eSDavid Greenman 		/*
125024a1cce3SDavid Greenman 		 * Lose the object reference. Will destroy the
125124a1cce3SDavid Greenman 		 * object if it's an unnamed anonymous mapping
125224a1cce3SDavid Greenman 		 * or named anonymous without other references.
12537fb0c17eSDavid Greenman 		 */
1254df8bae1dSRodney W. Grimes 		vm_object_deallocate(object);
1255d2c60af8SMatthew Dillon 	} else if (flags & MAP_SHARED) {
1256df8bae1dSRodney W. Grimes 		/*
1257df8bae1dSRodney W. Grimes 		 * Shared memory is also shared with children.
1258df8bae1dSRodney W. Grimes 		 */
1259df8bae1dSRodney W. Grimes 		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
1260e4ca250dSJohn Baldwin 		if (rv != KERN_SUCCESS)
12617fb0c17eSDavid Greenman 			(void) vm_map_remove(map, *addr, *addr + size);
1262df8bae1dSRodney W. Grimes 	}
1263e4ca250dSJohn Baldwin 	mtx_unlock(&Giant);
1264df8bae1dSRodney W. Grimes 	switch (rv) {
1265df8bae1dSRodney W. Grimes 	case KERN_SUCCESS:
1266df8bae1dSRodney W. Grimes 		return (0);
1267df8bae1dSRodney W. Grimes 	case KERN_INVALID_ADDRESS:
1268df8bae1dSRodney W. Grimes 	case KERN_NO_SPACE:
1269df8bae1dSRodney W. Grimes 		return (ENOMEM);
1270df8bae1dSRodney W. Grimes 	case KERN_PROTECTION_FAILURE:
1271df8bae1dSRodney W. Grimes 		return (EACCES);
1272df8bae1dSRodney W. Grimes 	default:
1273df8bae1dSRodney W. Grimes 		return (EINVAL);
1274df8bae1dSRodney W. Grimes 	}
1275df8bae1dSRodney W. Grimes }
1276