xref: /freebsd/sys/vm/vm_mmap.c (revision 19bd0d9c85ccd7bfe34a26d8b386112c6746d795)
/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_hwpmc_hooks.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

int old_mlock = 0;
SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
    "Do not apply RLIMIT_MEMLOCK on mlockall");

#ifdef MAP_32BIT
#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

int
sys_sbrk(struct thread *td, struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

int
sys_sstk(struct thread *td, struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

int
ogetpagesize(struct thread *td, struct getpagesize_args *uap)
{

	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */

/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
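
/*
 * Illustrative userland sketch (not part of this file, file path made
 * up): mapping a file at an offset that is not page-aligned.  The
 * kernel maps from the truncated offset and returns an address
 * advanced by the page offset, as described above.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/data", O_RDONLY);
 *	// With PAGE_SIZE 4096, offset 5000 has page offset 904; the
 *	// kernel maps from offset 4096 and returns base + 904.
 *	char *p = mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 5000);
 */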
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

int
sys_mmap(struct thread *td, struct mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}

int
kern_mmap(struct thread *td, uintptr_t addr0, size_t size, int prot, int flags,
    int fd, off_t pos)
{
	struct vmspace *vms;
	struct file *fp;
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_prot_t cap_maxprot;
	int align, error;
	cap_rights_t rights;

	vms = td->td_proc->p_vmspace;
	fp = NULL;
	AUDIT_ARG_FD(fd);
	addr = addr0;

	/*
	 * Ignore old flags that used to be defined but did not do anything.
	 */
	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);

	/*
	 * Enforce the constraints.
	 * A mapping of length 0 is only allowed for old binaries.
	 * An anonymous mapping shall specify -1 as the file descriptor
	 * and zero position for new code.  Be nice to ancient a.out
	 * binaries and correct pos for anonymous mappings, since old
	 * ld.so sometimes issues anonymous map requests with non-zero
	 * pos.
	 */
	if (!SV_CURPROC_FLAG(SV_AOUT)) {
		if ((size == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
			return (EINVAL);
	} else {
		if ((flags & MAP_ANON) != 0)
			pos = 0;
	}

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}
	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
	    MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
	    MAP_PREFAULT_READ | MAP_GUARD |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);
	if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
		return (EINVAL);
	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
		return (EINVAL);
	if (prot != PROT_NONE &&
	    (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
		return (EINVAL);
	if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
	    pos != 0 || (flags & (MAP_SHARED | MAP_PRIVATE | MAP_PREFAULT |
	    MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0))
		return (EINVAL);
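
	/*
	 * Illustrative userland sketch (not part of this file): given the
	 * checks above, the only valid way to request a guard region is
	 * PROT_NONE with no descriptor, no offset, and no other mapping
	 * type flags:
	 *
	 *	void *g = mmap(NULL, 4096, PROT_NONE, MAP_GUARD, -1, 0);
	 */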

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
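
	/*
	 * Worked example (assuming PAGE_SIZE is 4096): pos = 5000 and
	 * size = 100 give pageoff = 904; pos becomes 4096 and size
	 * becomes round_page(100 + 904) = 4096, covering the whole page
	 * that holds the requested bytes.
	 */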

	/* Ensure alignment is at least a page and fits in a pointer. */
	align = flags & MAP_ALIGNMENT_MASK;
	if (align != 0 && align != MAP_ALIGNED_SUPER &&
	    (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
	    align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
		return (EINVAL);
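
	/*
	 * Illustrative userland sketch (not part of this file): the
	 * alignment field encodes log2 of the requested alignment, so
	 * MAP_ALIGNED(21) asks for a 2MB-aligned anonymous mapping:
	 *
	 *	void *p = mmap(NULL, 2 * 1024 * 1024,
	 *	    PROT_READ | PROT_WRITE, MAP_ANON | MAP_ALIGNED(21),
	 *	    -1, 0);
	 */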

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
#ifdef MAP_32BIT
		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
			return (EINVAL);
	} else if (flags & MAP_32BIT) {
		/*
		 * For MAP_32BIT, override the hint if it is too high and
		 * do not bother moving the mapping past the heap (since
		 * the heap is usually above 2GB).
		 */
		if (addr + size > MAP_32BIT_MAX_ADDR)
			addr = 0;
#endif
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA));
	}
	if (size == 0) {
		/*
		 * Return success without mapping anything for old
		 * binaries that request a page-aligned mapping of
		 * length 0.  For modern binaries, this function
		 * returns an error earlier.
		 */
		error = 0;
	} else if ((flags & MAP_GUARD) != 0) {
		error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
		    VM_PROT_NONE, flags, NULL, pos, FALSE, td);
	} else if ((flags & MAP_ANON) != 0) {
		/*
		 * Mapping blank space is trivial.
		 *
		 * This relies on VM_PROT_* matching PROT_*.
		 */
		error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
		    VM_PROT_ALL, flags, NULL, pos, FALSE, td);
	} else {
		/*
		 * Mapping a file, so get fp for validation and don't let
		 * the descriptor disappear on us if we block.  Check
		 * capability rights, but also return the maximum rights
		 * to be combined with maxprot later.
		 */
		cap_rights_init(&rights, CAP_MMAP);
		if (prot & PROT_READ)
			cap_rights_set(&rights, CAP_MMAP_R);
		if ((flags & MAP_SHARED) != 0) {
			if (prot & PROT_WRITE)
				cap_rights_set(&rights, CAP_MMAP_W);
		}
		if (prot & PROT_EXEC)
			cap_rights_set(&rights, CAP_MMAP_X);
		error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
		if (error != 0)
			goto done;
		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
		    td->td_proc->p_osrel >= P_OSREL_MAP_FSTRICT) {
			error = EINVAL;
			goto done;
		}

		/* This relies on VM_PROT_* matching PROT_*. */
		error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
		    cap_maxprot, flags, pos, td);
	}

	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}
#endif

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(struct thread *td, struct ommap_args *uap)
{
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};
	int flags, prot;

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	prot = cvtbsdprot[uap->prot & 0x7];
#ifdef COMPAT_FREEBSD32
#if defined(__amd64__)
	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
	    prot != 0)
		prot |= PROT_EXEC;
#endif
#endif
	flags = 0;
	if (uap->flags & OMAP_ANON)
		flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		flags |= MAP_SHARED;
	else
		flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		flags |= MAP_FIXED;
	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
	    uap->fd, uap->pos));
}
#endif				/* COMPAT_43 */
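
/*
 * Illustrative sketch (hypothetical values): an old 4.3BSD-style call
 * with prot = 0x3 and flags = OMAP_FIXED | OMAP_SHARED converts, via
 * cvtbsdprot[3] and the tests above, to prot = PROT_EXEC | PROT_WRITE
 * and flags = MAP_FIXED | MAP_SHARED before reaching kern_mmap().
 */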

#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
int
sys_msync(struct thread *td, struct msync_args *uap)
{

	return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
}

int
kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
{
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;
	int rv;

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}
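
/*
 * Illustrative userland sketch (not part of this file): synchronously
 * flushing a dirty shared file mapping; note that the check above
 * rejects MS_ASYNC combined with MS_INVALIDATE.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	if (msync(p, len, MS_SYNC) == -1)
 *		perror("msync");
 */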

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
int
sys_munmap(struct thread *td, struct munmap_args *uap)
{

	return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
	bool pmc_handled;
#endif
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;

	if (size == 0)
		return (EINVAL);

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	pmc_handled = false;
	if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
		pmc_handled = true;
		/*
		 * Inform hwpmc if the address range being unmapped contains
		 * an executable region.
		 */
		pkm.pm_address = (uintptr_t) NULL;
		if (vm_map_lookup_entry(map, addr, &entry)) {
			for (;
			    entry != &map->header && entry->start < addr + size;
			    entry = entry->next) {
				if (vm_map_check_protection(map, entry->start,
					entry->end, VM_PROT_EXECUTE) == TRUE) {
					pkm.pm_address = (uintptr_t) addr;
					pkm.pm_size = (size_t) size;
					break;
				}
			}
		}
	}
#endif
	vm_map_delete(map, addr, addr + size);

#ifdef HWPMC_HOOKS
	if (__predict_false(pmc_handled)) {
		/* downgrade the lock to prevent a LOR with the pmc-sx lock */
		vm_map_lock_downgrade(map);
		if (pkm.pm_address != (uintptr_t) NULL)
			PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
		vm_map_unlock_read(map);
	} else
#endif
		vm_map_unlock(map);

	/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
	return (0);
}
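
/*
 * Worked example (assuming PAGE_SIZE is 4096 and a page-aligned base):
 * munmap(base + 100, 1) yields pageoff = 100, so addr is truncated
 * back to base and size rounds up to 4096; the entire containing page
 * is unmapped.
 */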

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
int
sys_mprotect(struct thread *td, struct mprotect_args *uap)
{

	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
}

int
kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
{
	vm_offset_t addr;
	vm_size_t pageoff;

	addr = addr0;
	prot = (prot & VM_PROT_ALL);
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}
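
/*
 * Illustrative userland sketch (not part of this file): upgrading a
 * read-only mapping; EACCES corresponds to KERN_PROTECTION_FAILURE
 * above, e.g. when the entry's maximum protection forbids the request.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	if (mprotect(p, len, PROT_READ | PROT_WRITE) == -1)
 *		perror("mprotect");
 */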

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
int
sys_minherit(struct thread *td, struct minherit_args *uap)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
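
/*
 * Illustrative userland sketch (not part of this file): keeping a
 * region out of child address spaces across fork().
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	if (minherit(p, len, INHERIT_NONE) == -1)
 *		perror("minherit");
 */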

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

int
sys_madvise(struct thread *td, struct madvise_args *uap)
{

	return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
}

int
kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
{
	vm_map_t map;
	vm_offset_t addr, end, start;
	int flags;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (behav == MADV_PROTECT) {
		flags = PPROT_SET;
		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
		    PROC_SPROTECT, &flags));
	}

	/*
	 * Check for illegal behavior
	 */
	if (behav < 0 || behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	addr = addr0;
	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
		return (EINVAL);
	if ((addr + len) < addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page(addr);
	end = round_page(addr + len);

	if (vm_map_madvise(map, start, end, behav))
		return (EINVAL);
	return (0);
}
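
/*
 * Illustrative userland sketch (not part of this file): advising
 * sequential access, then marking the pages freeable.
 *
 *	#include <sys/mman.h>
 *
 *	(void)madvise(p, len, MADV_SEQUENTIAL);
 *	...
 *	(void)madvise(p, len, MADV_FREE);
 */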

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

int
sys_mincore(struct thread *td, struct mincore_args *uap)
{

	return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
}

int
kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_paddr_t locked_pa;
	vm_page_t m;
	vm_pindex_t pindex;
	int mincoreinfo;
	unsigned int timestamp;
	boolean_t locked;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page(addr0);
	end = addr + (vm_size_t)round_page(len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * check for contiguity
		 */
		if (current->end < end &&
		    (entry->next == &map->header ||
		     current->next->start > current->end)) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
			current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			object = NULL;
			locked_pa = 0;
		retry:
			m = NULL;
			mincoreinfo = pmap_mincore(pmap, addr, &locked_pa);
			if (locked_pa != 0) {
				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
				 * other mappings might be examined.
				 */
				m = PHYS_TO_VM_PAGE(locked_pa);
				if (m->object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = m->object;
					locked = VM_OBJECT_TRYWLOCK(object);
					vm_page_unlock(m);
					if (!locked) {
						VM_OBJECT_WLOCK(object);
						vm_page_lock(m);
						goto retry;
					}
				} else
					vm_page_unlock(m);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("mincore: page %p is mapped but invalid",
				    m));
			} else if (mincoreinfo == 0) {
				/*
				 * The page is not mapped by this process.  If
				 * the object implements managed pages, then
				 * determine if the page is resident so that
				 * the mappings might be examined.
				 */
				if (current->object.vm_object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = current->object.vm_object;
					VM_OBJECT_WLOCK(object);
				}
				if (object->type == OBJT_DEFAULT ||
				    object->type == OBJT_SWAP ||
				    object->type == OBJT_VNODE) {
					pindex = OFF_TO_IDX(current->offset +
					    (addr - current->start));
					m = vm_page_lookup(object, pindex);
					if (m != NULL && m->valid == 0)
						m = NULL;
					if (m != NULL)
						mincoreinfo = MINCORE_INCORE;
				}
			}
			if (m != NULL) {
				/* Examine other mappings to the page. */
				if (m->dirty == 0 && pmap_is_modified(m))
					vm_page_dirty(m);
				if (m->dirty != 0)
					mincoreinfo |= MINCORE_MODIFIED_OTHER;
				/*
				 * The first test for PGA_REFERENCED is an
				 * optimization.  The second test is
				 * required because a concurrent pmap
				 * operation could clear the last reference
				 * and set PGA_REFERENCED before the call to
				 * pmap_is_referenced().
				 */
				if ((m->aflags & PGA_REFERENCED) != 0 ||
				    pmap_is_referenced(m) ||
				    (m->aflags & PGA_REFERENCED) != 0)
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
			}
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate the index into the user-supplied byte
			 * vector
			 */
			vecindex = atop(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure
			 * that the byte vector is zeroed for those skipped
			 * entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				++lastvecindex;
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = atop(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}
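
/*
 * Illustrative userland sketch (not part of this file): querying
 * residency, with one byte of the vector per page of the range.
 *
 *	#include <sys/mman.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	size_t pgsz = (size_t)getpagesize();
 *	char *vec = malloc((len + pgsz - 1) / pgsz);
 *	if (mincore(p, len, vec) == 0 && (vec[0] & MINCORE_INCORE) != 0)
 *		;	// first page is resident
 */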

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
int
sys_mlock(struct thread *td, struct mlock_args *uap)
{

	return (kern_mlock(td->td_proc, td->td_ucred,
	    __DECONST(uintptr_t, uap->addr), uap->len));
}

int
kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
{
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	vm_map_t map;
	unsigned long nsize;
	int error;

	error = priv_check_cred(cred, PRIV_VM_MLOCK, 0);
	if (error)
		return (error);
	addr = addr0;
	size = len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	map = &proc->p_vmspace->vm_map;
	PROC_LOCK(proc);
	nsize = ptoa(npages + pmap_wired_count(map->pmap));
	if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
	if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(proc);
		error = racct_set(proc, RACCT_MEMLOCK, nsize);
		PROC_UNLOCK(proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif
	error = vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(proc);
		racct_set(proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
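
/*
 * Illustrative userland sketch (not part of this file): wiring a
 * buffer so it cannot be paged out; ENOMEM can mean that the
 * RLIMIT_MEMLOCK or vm_page_max_wired checks above fired.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	if (mlock(buf, buflen) == -1)
 *		perror("mlock");
 */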

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int	how;
};
#endif

int
sys_mlockall(struct thread *td, struct mlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	if (!old_mlock && uap->how & MCL_CURRENT) {
		PROC_LOCK(td->td_proc);
		if (map->size > lim_cur(td, RLIMIT_MEMLOCK)) {
			PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		PROC_UNLOCK(td->td_proc);
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
		PROC_UNLOCK(td->td_proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif

	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall(). vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
	}
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}
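
/*
 * Illustrative userland sketch (not part of this file): wiring all
 * current mappings and asking that future mappings be wired as they
 * are created (MAP_WIREFUTURE above).
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		perror("mlockall");
 */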
11034a40e3d4SJohn Dyson 
11044a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_
1105fa721254SAlfred Perlstein struct munlockall_args {
1106abd498aaSBruce M Simpson 	register_t dummy;
11074a40e3d4SJohn Dyson };
11084a40e3d4SJohn Dyson #endif
11094a40e3d4SJohn Dyson 
11104a40e3d4SJohn Dyson int
111104e89ffbSKonstantin Belousov sys_munlockall(struct thread *td, struct munlockall_args *uap)
11124a40e3d4SJohn Dyson {
1113abd498aaSBruce M Simpson 	vm_map_t map;
1114abd498aaSBruce M Simpson 	int error;
1115abd498aaSBruce M Simpson 
1116abd498aaSBruce M Simpson 	map = &td->td_proc->p_vmspace->vm_map;
1117acd3428bSRobert Watson 	error = priv_check(td, PRIV_VM_MUNLOCK);
1118abd498aaSBruce M Simpson 	if (error)
1119abd498aaSBruce M Simpson 		return (error);
1120abd498aaSBruce M Simpson 
1121abd498aaSBruce M Simpson 	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
1122abd498aaSBruce M Simpson 	vm_map_lock(map);
1123abd498aaSBruce M Simpson 	vm_map_modflags(map, 0, MAP_WIREFUTURE);
1124abd498aaSBruce M Simpson 	vm_map_unlock(map);
1125abd498aaSBruce M Simpson 
1126abd498aaSBruce M Simpson 	/* Forcibly unwire all pages. */
1127abd498aaSBruce M Simpson 	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
1128abd498aaSBruce M Simpson 	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
1129afcc55f3SEdward Tomasz Napierala #ifdef RACCT
11304b5c9cf6SEdward Tomasz Napierala 	if (racct_enable && error == KERN_SUCCESS) {
11311ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(td->td_proc);
11321ba5ad42SEdward Tomasz Napierala 		racct_set(td->td_proc, RACCT_MEMLOCK, 0);
11331ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(td->td_proc);
11341ba5ad42SEdward Tomasz Napierala 	}
1135afcc55f3SEdward Tomasz Napierala #endif
1136abd498aaSBruce M Simpson 
1137abd498aaSBruce M Simpson 	return (error);
11384a40e3d4SJohn Dyson }
11394a40e3d4SJohn Dyson 
11404a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_
1141df8bae1dSRodney W. Grimes struct munlock_args {
1142651bb817SAlexander Langer 	const void *addr;
1143df8bae1dSRodney W. Grimes 	size_t len;
1144df8bae1dSRodney W. Grimes };
1145d2d3e875SBruce Evans #endif
1146df8bae1dSRodney W. Grimes int
114769cdfcefSEdward Tomasz Napierala sys_munlock(struct thread *td, struct munlock_args *uap)
1148df8bae1dSRodney W. Grimes {
114969cdfcefSEdward Tomasz Napierala 
1150496ab053SKonstantin Belousov 	return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
115169cdfcefSEdward Tomasz Napierala }
115269cdfcefSEdward Tomasz Napierala 
115369cdfcefSEdward Tomasz Napierala int
1154496ab053SKonstantin Belousov kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
115569cdfcefSEdward Tomasz Napierala {
1156496ab053SKonstantin Belousov 	vm_offset_t addr, end, last, start;
1157fc2b1679SJeremie Le Hen #ifdef RACCT
1158c92b5069SJeremie Le Hen 	vm_map_t map;
1159fc2b1679SJeremie Le Hen #endif
1160df8bae1dSRodney W. Grimes 	int error;
1161df8bae1dSRodney W. Grimes 
1162acd3428bSRobert Watson 	error = priv_check(td, PRIV_VM_MUNLOCK);
116347934cefSDon Lewis 	if (error)
116447934cefSDon Lewis 		return (error);
1165496ab053SKonstantin Belousov 	addr = addr0;
1166bb734798SDon Lewis 	last = addr + size;
116716929939SDon Lewis 	start = trunc_page(addr);
1168bb734798SDon Lewis 	end = round_page(last);
1169bb734798SDon Lewis 	if (last < addr || end < addr)
1170df8bae1dSRodney W. Grimes 		return (EINVAL);
117116929939SDon Lewis 	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
117216929939SDon Lewis 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1173afcc55f3SEdward Tomasz Napierala #ifdef RACCT
11744b5c9cf6SEdward Tomasz Napierala 	if (racct_enable && error == KERN_SUCCESS) {
11751ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(td->td_proc);
1176c92b5069SJeremie Le Hen 		map = &td->td_proc->p_vmspace->vm_map;
1177c92b5069SJeremie Le Hen 		racct_set(td->td_proc, RACCT_MEMLOCK,
1178c92b5069SJeremie Le Hen 		    ptoa(pmap_wired_count(map->pmap)));
11791ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(td->td_proc);
11801ba5ad42SEdward Tomasz Napierala 	}
1181afcc55f3SEdward Tomasz Napierala #endif
1182df8bae1dSRodney W. Grimes 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
1183df8bae1dSRodney W. Grimes }
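
/*
 * Editor's note, a worked example of the rounding in kern_munlock():
 * with 4 KB pages, addr = 0x1234 and size = 0x2000 give last = 0x3234,
 * start = trunc_page(addr) = 0x1000 and end = round_page(last) = 0x4000,
 * so the unwire covers every page the byte range touches.  The
 * "last < addr || end < addr" test rejects requests whose end, before
 * or after rounding, wraps around the top of the address space.
 */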
1184df8bae1dSRodney W. Grimes 
1185df8bae1dSRodney W. Grimes /*
1186c8daea13SAlexander Kabaev  * vm_mmap_vnode()
1187c8daea13SAlexander Kabaev  *
1188c8daea13SAlexander Kabaev  * Helper function for vm_mmap.  Perform sanity checks specific to mmap
1189c8daea13SAlexander Kabaev  * operations on vnodes.
1190c8daea13SAlexander Kabaev  */
1191c8daea13SAlexander Kabaev int
1192c8daea13SAlexander Kabaev vm_mmap_vnode(struct thread *td, vm_size_t objsize,
1193c8daea13SAlexander Kabaev     vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
119484110e7eSKonstantin Belousov     struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
119584110e7eSKonstantin Belousov     boolean_t *writecounted)
1196c8daea13SAlexander Kabaev {
1197c8daea13SAlexander Kabaev 	struct vattr va;
1198c8daea13SAlexander Kabaev 	vm_object_t obj;
119964345f0bSJohn Baldwin 	vm_offset_t foff;
12000359a12eSAttilio Rao 	struct ucred *cred;
12015050aa86SKonstantin Belousov 	int error, flags, locktype;
1202c8daea13SAlexander Kabaev 
12030359a12eSAttilio Rao 	cred = td->td_ucred;
120484110e7eSKonstantin Belousov 	if ((*maxprotp & VM_PROT_WRITE) && (*flagsp & MAP_SHARED))
120584110e7eSKonstantin Belousov 		locktype = LK_EXCLUSIVE;
120684110e7eSKonstantin Belousov 	else
120784110e7eSKonstantin Belousov 		locktype = LK_SHARED;
12085050aa86SKonstantin Belousov 	if ((error = vget(vp, locktype, td)) != 0)
1209c8daea13SAlexander Kabaev 		return (error);
12100df42647SRobert Watson 	AUDIT_ARG_VNODE1(vp);
121164345f0bSJohn Baldwin 	foff = *foffp;
1212c8daea13SAlexander Kabaev 	flags = *flagsp;
12138516dd18SPoul-Henning Kamp 	obj = vp->v_object;
1214c8daea13SAlexander Kabaev 	if (vp->v_type == VREG) {
1215c8daea13SAlexander Kabaev 		/*
1216c8daea13SAlexander Kabaev 		 * Get the proper underlying object
1217c8daea13SAlexander Kabaev 		 */
12188516dd18SPoul-Henning Kamp 		if (obj == NULL) {
1219c8daea13SAlexander Kabaev 			error = EINVAL;
1220c8daea13SAlexander Kabaev 			goto done;
1221c8daea13SAlexander Kabaev 		}
1222e5f299ffSKonstantin Belousov 		if (obj->type == OBJT_VNODE && obj->handle != vp) {
1223c8daea13SAlexander Kabaev 			vput(vp);
1224c8daea13SAlexander Kabaev 			vp = (struct vnode *)obj->handle;
122584110e7eSKonstantin Belousov 			/*
122684110e7eSKonstantin Belousov 			 * Bypass filesystems obey the mpsafety of the
122753f5f8a0SKonstantin Belousov 			 * underlying fs.  Tmpfs never bypasses.
122884110e7eSKonstantin Belousov 			 */
122984110e7eSKonstantin Belousov 			error = vget(vp, locktype, td);
12305050aa86SKonstantin Belousov 			if (error != 0)
123184110e7eSKonstantin Belousov 				return (error);
123284110e7eSKonstantin Belousov 		}
123384110e7eSKonstantin Belousov 		if (locktype == LK_EXCLUSIVE) {
123484110e7eSKonstantin Belousov 			*writecounted = TRUE;
123584110e7eSKonstantin Belousov 			vnode_pager_update_writecount(obj, 0, objsize);
123684110e7eSKonstantin Belousov 		}
1237c8daea13SAlexander Kabaev 	} else {
1238c8daea13SAlexander Kabaev 		error = EINVAL;
1239c8daea13SAlexander Kabaev 		goto done;
1240c8daea13SAlexander Kabaev 	}
12410359a12eSAttilio Rao 	if ((error = VOP_GETATTR(vp, &va, cred)))
1242c8daea13SAlexander Kabaev 		goto done;
1243c92163dcSChristian S.J. Peron #ifdef MAC
12447077c426SJohn Baldwin 	/* This relies on VM_PROT_* matching PROT_*. */
12457077c426SJohn Baldwin 	error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
1246c92163dcSChristian S.J. Peron 	if (error != 0)
1247c92163dcSChristian S.J. Peron 		goto done;
1248c92163dcSChristian S.J. Peron #endif
1249c8daea13SAlexander Kabaev 	if ((flags & MAP_SHARED) != 0) {
1250c8daea13SAlexander Kabaev 		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
12517077c426SJohn Baldwin 			if (prot & VM_PROT_WRITE) {
1252c8daea13SAlexander Kabaev 				error = EPERM;
1253c8daea13SAlexander Kabaev 				goto done;
1254c8daea13SAlexander Kabaev 			}
1255c8daea13SAlexander Kabaev 			*maxprotp &= ~VM_PROT_WRITE;
1256c8daea13SAlexander Kabaev 		}
1257c8daea13SAlexander Kabaev 	}
1258c8daea13SAlexander Kabaev 	/*
1259c8daea13SAlexander Kabaev 	 * If it is a regular file without any references,
1260c8daea13SAlexander Kabaev 	 * we do not need to sync it.
1261c8daea13SAlexander Kabaev 	 * Adjust the object size to be the size of the actual file.
1262c8daea13SAlexander Kabaev 	 */
1263c8daea13SAlexander Kabaev 	objsize = round_page(va.va_size);
1264c8daea13SAlexander Kabaev 	if (va.va_nlink == 0)
1265c8daea13SAlexander Kabaev 		flags |= MAP_NOSYNC;
12663d653db0SAlan Cox 	if (obj->type == OBJT_VNODE) {
1267e5f299ffSKonstantin Belousov 		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
1268e5f299ffSKonstantin Belousov 		    cred);
1269c8daea13SAlexander Kabaev 		if (obj == NULL) {
127064345f0bSJohn Baldwin 			error = ENOMEM;
1271c8daea13SAlexander Kabaev 			goto done;
1272c8daea13SAlexander Kabaev 		}
12733d653db0SAlan Cox 	} else {
12743d653db0SAlan Cox 		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
12753d653db0SAlan Cox 		    ("wrong object type"));
12763d653db0SAlan Cox 		VM_OBJECT_WLOCK(obj);
12773d653db0SAlan Cox 		vm_object_reference_locked(obj);
12783d653db0SAlan Cox #if VM_NRESERVLEVEL > 0
12793d653db0SAlan Cox 		vm_object_color(obj, 0);
12803d653db0SAlan Cox #endif
12813d653db0SAlan Cox 		VM_OBJECT_WUNLOCK(obj);
12823d653db0SAlan Cox 	}
1283c8daea13SAlexander Kabaev 	*objp = obj;
1284c8daea13SAlexander Kabaev 	*flagsp = flags;
128564345f0bSJohn Baldwin 
12860359a12eSAttilio Rao 	vfs_mark_atime(vp, cred);
12871e309003SDiomidis Spinellis 
1288c8daea13SAlexander Kabaev done:
1289bafa6cfcSKonstantin Belousov 	if (error != 0 && *writecounted) {
1290bafa6cfcSKonstantin Belousov 		*writecounted = FALSE;
1291bafa6cfcSKonstantin Belousov 		vnode_pager_update_writecount(obj, objsize, 0);
1292bafa6cfcSKonstantin Belousov 	}
1293c8daea13SAlexander Kabaev 	vput(vp);
1294c8daea13SAlexander Kabaev 	return (error);
1295c8daea13SAlexander Kabaev }
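
/*
 * Editor's note on the writecount protocol: when vm_mmap_vnode() returns
 * with *writecounted set, vnode_pager_update_writecount() has recorded
 * the range [0, objsize) as write-mapped on the vnode's object.  A caller
 * that subsequently fails to establish the mapping must undo this, e.g.
 * as vm_mmap() does below:
 *
 *	if (writecounted)
 *		vnode_pager_release_writecount(object, 0, size);
 */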
1296c8daea13SAlexander Kabaev 
1297c8daea13SAlexander Kabaev /*
129898df9218SJohn Baldwin  * vm_mmap_cdev()
129998df9218SJohn Baldwin  *
130098df9218SJohn Baldwin  * Helper function for vm_mmap.  Perform sanity checks specific to mmap
130198df9218SJohn Baldwin  * operations on cdevs.
130298df9218SJohn Baldwin  */
130398df9218SJohn Baldwin int
13047077c426SJohn Baldwin vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
13057077c426SJohn Baldwin     vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
13067077c426SJohn Baldwin     vm_ooffset_t *foff, vm_object_t *objp)
130798df9218SJohn Baldwin {
130898df9218SJohn Baldwin 	vm_object_t obj;
13097077c426SJohn Baldwin 	int error, flags;
131098df9218SJohn Baldwin 
131198df9218SJohn Baldwin 	flags = *flagsp;
131298df9218SJohn Baldwin 
131391a35e78SKonstantin Belousov 	if (dsw->d_flags & D_MMAP_ANON) {
13147077c426SJohn Baldwin 		*objp = NULL;
13157077c426SJohn Baldwin 		*foff = 0;
131698df9218SJohn Baldwin 		*maxprotp = VM_PROT_ALL;
131798df9218SJohn Baldwin 		*flagsp |= MAP_ANON;
131898df9218SJohn Baldwin 		return (0);
131998df9218SJohn Baldwin 	}
132098df9218SJohn Baldwin 	/*
132164345f0bSJohn Baldwin 	 * cdevs do not provide private mappings of any kind.
132298df9218SJohn Baldwin 	 */
132398df9218SJohn Baldwin 	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
13247077c426SJohn Baldwin 	    (prot & VM_PROT_WRITE) != 0)
132598df9218SJohn Baldwin 		return (EACCES);
13267077c426SJohn Baldwin 	if (flags & (MAP_PRIVATE|MAP_COPY))
132798df9218SJohn Baldwin 		return (EINVAL);
132898df9218SJohn Baldwin 	/*
132998df9218SJohn Baldwin 	 * Force device mappings to be shared.
133098df9218SJohn Baldwin 	 */
133198df9218SJohn Baldwin 	flags |= MAP_SHARED;
133298df9218SJohn Baldwin #ifdef MAC_XXX
13337077c426SJohn Baldwin 	error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
13347077c426SJohn Baldwin 	if (error != 0)
133598df9218SJohn Baldwin 		return (error);
133698df9218SJohn Baldwin #endif
133764345f0bSJohn Baldwin 	/*
133864345f0bSJohn Baldwin 	 * First, try d_mmap_single().  If that is not implemented
133964345f0bSJohn Baldwin 	 * (returns ENODEV), fall back to using the device pager.
134064345f0bSJohn Baldwin 	 * Note that d_mmap_single() must return a reference to the
134164345f0bSJohn Baldwin 	 * object (it must bump the reference count of the object
134264345f0bSJohn Baldwin 	 * it returns).
134364345f0bSJohn Baldwin 	 *
134464345f0bSJohn Baldwin 	 * XXX assumes VM_PROT_* == PROT_*
134564345f0bSJohn Baldwin 	 */
134664345f0bSJohn Baldwin 	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
134764345f0bSJohn Baldwin 	if (error != ENODEV)
134864345f0bSJohn Baldwin 		return (error);
13493364c323SKonstantin Belousov 	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
13503364c323SKonstantin Belousov 	    td->td_ucred);
135198df9218SJohn Baldwin 	if (obj == NULL)
135298df9218SJohn Baldwin 		return (EINVAL);
135398df9218SJohn Baldwin 	*objp = obj;
135498df9218SJohn Baldwin 	*flagsp = flags;
135598df9218SJohn Baldwin 	return (0);
135698df9218SJohn Baldwin }
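
/*
 * Editor's sketch of the driver side (assumed names, illustration only):
 * the simplest d_mmap_single() a driver can provide declines the request
 * with ENODEV, which makes vm_mmap_cdev() above fall back to the device
 * pager.  A real implementation would look up or create a VM object,
 * take a reference on it, and store it in *object.
 */
#if 0	/* illustration only; never compiled */
static int
example_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{

	/* No private object to hand out; use the OBJT_DEVICE fallback. */
	return (ENODEV);
}
#endif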
135798df9218SJohn Baldwin 
135898df9218SJohn Baldwin /*
1359d2c60af8SMatthew Dillon  * vm_mmap()
1360d2c60af8SMatthew Dillon  *
13617077c426SJohn Baldwin  * Internal version of mmap used by exec, System V shared memory, and
13627077c426SJohn Baldwin  * various device drivers.  Handle is either a vnode pointer, a
13637077c426SJohn Baldwin  * character device, or NULL for MAP_ANON.
1364df8bae1dSRodney W. Grimes  */
1365df8bae1dSRodney W. Grimes int
1366b9dcd593SBruce Evans vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1367b9dcd593SBruce Evans 	vm_prot_t maxprot, int flags,
136898df9218SJohn Baldwin 	objtype_t handle_type, void *handle,
1369b9dcd593SBruce Evans 	vm_ooffset_t foff)
1370df8bae1dSRodney W. Grimes {
13717077c426SJohn Baldwin 	vm_object_t object;
1372b40ce416SJulian Elischer 	struct thread *td = curthread;
13737077c426SJohn Baldwin 	int error;
137484110e7eSKonstantin Belousov 	boolean_t writecounted;
1375df8bae1dSRodney W. Grimes 
1376df8bae1dSRodney W. Grimes 	if (size == 0)
13777077c426SJohn Baldwin 		return (EINVAL);
1378df8bae1dSRodney W. Grimes 
1379749474f2SPeter Wemm 	size = round_page(size);
1380010ba384SMark Johnston 	object = NULL;
13817077c426SJohn Baldwin 	writecounted = FALSE;
13827077c426SJohn Baldwin 
13837077c426SJohn Baldwin 	/*
13847077c426SJohn Baldwin 	 * Lookup/allocate object.
13857077c426SJohn Baldwin 	 */
13867077c426SJohn Baldwin 	switch (handle_type) {
13877077c426SJohn Baldwin 	case OBJT_DEVICE: {
13887077c426SJohn Baldwin 		struct cdevsw *dsw;
13897077c426SJohn Baldwin 		struct cdev *cdev;
13907077c426SJohn Baldwin 		int ref;
13917077c426SJohn Baldwin 
13927077c426SJohn Baldwin 		cdev = handle;
13937077c426SJohn Baldwin 		dsw = dev_refthread(cdev, &ref);
13947077c426SJohn Baldwin 		if (dsw == NULL)
13957077c426SJohn Baldwin 			return (ENXIO);
13967077c426SJohn Baldwin 		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
13977077c426SJohn Baldwin 		    dsw, &foff, &object);
13987077c426SJohn Baldwin 		dev_relthread(cdev, ref);
13997077c426SJohn Baldwin 		break;
14007077c426SJohn Baldwin 	}
14017077c426SJohn Baldwin 	case OBJT_VNODE:
14027077c426SJohn Baldwin 		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
14037077c426SJohn Baldwin 		    handle, &foff, &object, &writecounted);
14047077c426SJohn Baldwin 		break;
14057077c426SJohn Baldwin 	case OBJT_DEFAULT:
14067077c426SJohn Baldwin 		if (handle == NULL) {
14077077c426SJohn Baldwin 			error = 0;
14087077c426SJohn Baldwin 			break;
14097077c426SJohn Baldwin 		}
14107077c426SJohn Baldwin 		/* FALLTHROUGH */
14117077c426SJohn Baldwin 	default:
14127077c426SJohn Baldwin 		error = EINVAL;
14137077c426SJohn Baldwin 		break;
14147077c426SJohn Baldwin 	}
14157077c426SJohn Baldwin 	if (error)
14167077c426SJohn Baldwin 		return (error);
14177077c426SJohn Baldwin 
14187077c426SJohn Baldwin 	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
14197077c426SJohn Baldwin 	    foff, writecounted, td);
14207077c426SJohn Baldwin 	if (error != 0 && object != NULL) {
14217077c426SJohn Baldwin 		/*
14227077c426SJohn Baldwin 		 * If this mapping was accounted for in the vnode's
14237077c426SJohn Baldwin 		 * writecount, then undo that now.
14247077c426SJohn Baldwin 		 */
14257077c426SJohn Baldwin 		if (writecounted)
14267077c426SJohn Baldwin 			vnode_pager_release_writecount(object, 0, size);
14277077c426SJohn Baldwin 		vm_object_deallocate(object);
14287077c426SJohn Baldwin 	}
14297077c426SJohn Baldwin 	return (error);
14307077c426SJohn Baldwin }
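
/*
 * Editor's sketch (assumed helper name, illustration only): a kernel
 * consumer mapping the first page of a character device read-only and
 * shared into the current process, letting the kernel pick the address.
 * The cdev is assumed to be held by the caller.
 */
#if 0	/* illustration only; never compiled */
static int
example_map_cdev_page(struct cdev *cdev, vm_offset_t *addrp)
{

	*addrp = 0;	/* no MAP_FIXED; vm_map_find() chooses the address */
	return (vm_mmap(&curproc->p_vmspace->vm_map, addrp, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, MAP_SHARED, OBJT_DEVICE, cdev, 0));
}
#endif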
14317077c426SJohn Baldwin 
14327077c426SJohn Baldwin /*
14337077c426SJohn Baldwin  * Internal version of mmap that maps a specific VM object into a
14347077c426SJohn Baldwin  * map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
14357077c426SJohn Baldwin  */
14367077c426SJohn Baldwin int
14377077c426SJohn Baldwin vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
14387077c426SJohn Baldwin     vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
14397077c426SJohn Baldwin     boolean_t writecounted, struct thread *td)
14407077c426SJohn Baldwin {
14417077c426SJohn Baldwin 	boolean_t fitit;
14427077c426SJohn Baldwin 	int docow, error, findspace, rv;
1443df8bae1dSRodney W. Grimes 
1444a6492969SAlan Cox 	if (map == &td->td_proc->p_vmspace->vm_map) {
144591d5354aSJohn Baldwin 		PROC_LOCK(td->td_proc);
1446f6f6d240SMateusz Guzik 		if (map->size + size > lim_cur_proc(td->td_proc, RLIMIT_VMEM)) {
144791d5354aSJohn Baldwin 			PROC_UNLOCK(td->td_proc);
1448070f64feSMatthew Dillon 			return (ENOMEM);
1449070f64feSMatthew Dillon 		}
1450a6492969SAlan Cox 		if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
14511ba5ad42SEdward Tomasz Napierala 			PROC_UNLOCK(td->td_proc);
14521ba5ad42SEdward Tomasz Napierala 			return (ENOMEM);
14531ba5ad42SEdward Tomasz Napierala 		}
14547e19eda4SAndrey Zonov 		if (!old_mlock && map->flags & MAP_WIREFUTURE) {
14553ac7d297SAndrey Zonov 			if (ptoa(pmap_wired_count(map->pmap)) + size >
1456f6f6d240SMateusz Guzik 			    lim_cur_proc(td->td_proc, RLIMIT_MEMLOCK)) {
14577e19eda4SAndrey Zonov 				racct_set_force(td->td_proc, RACCT_VMEM,
14587e19eda4SAndrey Zonov 				    map->size);
14597e19eda4SAndrey Zonov 				PROC_UNLOCK(td->td_proc);
14607e19eda4SAndrey Zonov 				return (ENOMEM);
14617e19eda4SAndrey Zonov 			}
14627e19eda4SAndrey Zonov 			error = racct_set(td->td_proc, RACCT_MEMLOCK,
14633ac7d297SAndrey Zonov 			    ptoa(pmap_wired_count(map->pmap)) + size);
14647e19eda4SAndrey Zonov 			if (error != 0) {
14657e19eda4SAndrey Zonov 				racct_set_force(td->td_proc, RACCT_VMEM,
14667e19eda4SAndrey Zonov 				    map->size);
14677e19eda4SAndrey Zonov 				PROC_UNLOCK(td->td_proc);
14687e19eda4SAndrey Zonov 				return (error);
14697e19eda4SAndrey Zonov 			}
14707e19eda4SAndrey Zonov 		}
147191d5354aSJohn Baldwin 		PROC_UNLOCK(td->td_proc);
1472a6492969SAlan Cox 	}
1473070f64feSMatthew Dillon 
1474df8bae1dSRodney W. Grimes 	/*
1475bc9ad247SDavid Greenman 	 * We currently can only deal with page-aligned file offsets.
14767077c426SJohn Baldwin 	 * The mmap() system call already enforces this by subtracting
14777077c426SJohn Baldwin 	 * the page offset from the file offset, but checking here
14787077c426SJohn Baldwin 	 * catches errors in device drivers (e.g. d_mmap_single()
14797077c426SJohn Baldwin 	 * callbacks) and other internal mapping requests (such as in
14807077c426SJohn Baldwin 	 * exec).
1481bc9ad247SDavid Greenman 	 */
1482bc9ad247SDavid Greenman 	if (foff & PAGE_MASK)
1483bc9ad247SDavid Greenman 		return (EINVAL);
1484bc9ad247SDavid Greenman 
148506cb7259SDavid Greenman 	if ((flags & MAP_FIXED) == 0) {
148606cb7259SDavid Greenman 		fitit = TRUE;
148706cb7259SDavid Greenman 		*addr = round_page(*addr);
148806cb7259SDavid Greenman 	} else {
148906cb7259SDavid Greenman 		if (*addr != trunc_page(*addr))
149006cb7259SDavid Greenman 			return (EINVAL);
149106cb7259SDavid Greenman 		fitit = FALSE;
149206cb7259SDavid Greenman 	}
149384110e7eSKonstantin Belousov 
14945f55e841SDavid Greenman 	if (flags & MAP_ANON) {
14957077c426SJohn Baldwin 		if (object != NULL || foff != 0)
14967077c426SJohn Baldwin 			return (EINVAL);
1497c8daea13SAlexander Kabaev 		docow = 0;
149874ffb9afSAlan Cox 	} else if (flags & MAP_PREFAULT_READ)
149974ffb9afSAlan Cox 		docow = MAP_PREFAULT;
150074ffb9afSAlan Cox 	else
15014738fa09SAlan Cox 		docow = MAP_PREFAULT_PARTIAL;
1502df8bae1dSRodney W. Grimes 
15034f79d873SMatthew Dillon 	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
15044738fa09SAlan Cox 		docow |= MAP_COPY_ON_WRITE;
15054f79d873SMatthew Dillon 	if (flags & MAP_NOSYNC)
15064f79d873SMatthew Dillon 		docow |= MAP_DISABLE_SYNCER;
15079730a5daSPaul Saab 	if (flags & MAP_NOCORE)
15089730a5daSPaul Saab 		docow |= MAP_DISABLE_COREDUMP;
15098211bd45SKonstantin Belousov 	/* Shared memory is also shared with children. */
15108211bd45SKonstantin Belousov 	if (flags & MAP_SHARED)
15118211bd45SKonstantin Belousov 		docow |= MAP_INHERIT_SHARE;
151284110e7eSKonstantin Belousov 	if (writecounted)
151384110e7eSKonstantin Belousov 		docow |= MAP_VN_WRITECOUNT;
15144648ba0aSKonstantin Belousov 	if (flags & MAP_STACK) {
15154648ba0aSKonstantin Belousov 		if (object != NULL)
15164648ba0aSKonstantin Belousov 			return (EINVAL);
15174648ba0aSKonstantin Belousov 		docow |= MAP_STACK_GROWS_DOWN;
15184648ba0aSKonstantin Belousov 	}
151911c42bccSKonstantin Belousov 	if ((flags & MAP_EXCL) != 0)
152011c42bccSKonstantin Belousov 		docow |= MAP_CHECK_EXCL;
1521*19bd0d9cSKonstantin Belousov 	if ((flags & MAP_GUARD) != 0)
1522*19bd0d9cSKonstantin Belousov 		docow |= MAP_CREATE_GUARD;
15235850152dSJohn Dyson 
15244648ba0aSKonstantin Belousov 	if (fitit) {
15255aa60b6fSJohn Baldwin 		if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
15265aa60b6fSJohn Baldwin 			findspace = VMFS_SUPER_SPACE;
15275aa60b6fSJohn Baldwin 		else if ((flags & MAP_ALIGNMENT_MASK) != 0)
15285aa60b6fSJohn Baldwin 			findspace = VMFS_ALIGNED_SPACE(flags >>
15295aa60b6fSJohn Baldwin 			    MAP_ALIGNMENT_SHIFT);
15302267af78SJulian Elischer 		else
15315aa60b6fSJohn Baldwin 			findspace = VMFS_OPTIMAL_SPACE;
1532edb572a3SJohn Baldwin 		rv = vm_map_find(map, object, foff, addr, size,
1533edb572a3SJohn Baldwin #ifdef MAP_32BIT
1534edb572a3SJohn Baldwin 		    flags & MAP_32BIT ? MAP_32BIT_MAX_ADDR :
1535edb572a3SJohn Baldwin #endif
1536edb572a3SJohn Baldwin 		    0, findspace, prot, maxprot, docow);
15374648ba0aSKonstantin Belousov 	} else {
1538b8ca4ef2SAlan Cox 		rv = vm_map_fixed(map, object, foff, *addr, size,
1539bd7e5f99SJohn Dyson 		    prot, maxprot, docow);
15404648ba0aSKonstantin Belousov 	}
1541bd7e5f99SJohn Dyson 
1542f9230ad6SAlan Cox 	if (rv == KERN_SUCCESS) {
15437fb0c17eSDavid Greenman 		/*
1544f9230ad6SAlan Cox 		 * If the process has requested that all future mappings
1545f9230ad6SAlan Cox 		 * be wired, then heed this.
1546f9230ad6SAlan Cox 		 */
15471472f4f4SKonstantin Belousov 		if (map->flags & MAP_WIREFUTURE) {
1548f9230ad6SAlan Cox 			vm_map_wire(map, *addr, *addr + size,
15491472f4f4SKonstantin Belousov 			    VM_MAP_WIRE_USER | ((flags & MAP_STACK) ?
15501472f4f4SKonstantin Belousov 			    VM_MAP_WIRE_HOLESOK : VM_MAP_WIRE_NOHOLES));
15511472f4f4SKonstantin Belousov 		}
1552df8bae1dSRodney W. Grimes 	}
15532e32165cSKonstantin Belousov 	return (vm_mmap_to_errno(rv));
15542e32165cSKonstantin Belousov }
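
/*
 * Editor's note, a worked example of the docow translation above: a
 * MAP_SHARED | MAP_NOSYNC file mapping yields docow = MAP_PREFAULT_PARTIAL |
 * MAP_DISABLE_SYNCER | MAP_INHERIT_SHARE, while a private file mapping
 * (neither MAP_ANON nor MAP_SHARED) instead picks up MAP_COPY_ON_WRITE.
 */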
15552e32165cSKonstantin Belousov 
1556f9230ad6SAlan Cox /*
1557f9230ad6SAlan Cox  * Translate a Mach VM return code to zero on success or the appropriate errno
1558f9230ad6SAlan Cox  * on failure.
1559f9230ad6SAlan Cox  */
15602e32165cSKonstantin Belousov int
15612e32165cSKonstantin Belousov vm_mmap_to_errno(int rv)
15622e32165cSKonstantin Belousov {
15632e32165cSKonstantin Belousov 
1564df8bae1dSRodney W. Grimes 	switch (rv) {
1565df8bae1dSRodney W. Grimes 	case KERN_SUCCESS:
1566df8bae1dSRodney W. Grimes 		return (0);
1567df8bae1dSRodney W. Grimes 	case KERN_INVALID_ADDRESS:
1568df8bae1dSRodney W. Grimes 	case KERN_NO_SPACE:
1569df8bae1dSRodney W. Grimes 		return (ENOMEM);
1570df8bae1dSRodney W. Grimes 	case KERN_PROTECTION_FAILURE:
1571df8bae1dSRodney W. Grimes 		return (EACCES);
1572df8bae1dSRodney W. Grimes 	default:
1573df8bae1dSRodney W. Grimes 		return (EINVAL);
1574df8bae1dSRodney W. Grimes 	}
1575df8bae1dSRodney W. Grimes }
1576