/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/elf.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#if defined(__amd64__) || defined(__i386__) /* for i386_read_exec */
#include <machine/md_var.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

int old_mlock = 0;
SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
    "Do not apply RLIMIT_MEMLOCK on mlockall");
static int mincore_mapped = 1;
SYSCTL_INT(_vm, OID_AUTO, mincore_mapped, CTLFLAG_RWTUN, &mincore_mapped, 0,
    "mincore reports mappings, not residency");
static int imply_prot_max = 0;
SYSCTL_INT(_vm, OID_AUTO, imply_prot_max, CTLFLAG_RWTUN, &imply_prot_max, 0,
    "Imply maximum page protections in mmap() when none are specified");

#ifdef MAP_32BIT
#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

int
sys_sbrk(struct thread *td, struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

int
sys_sstk(struct thread *td, struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
int
ogetpagesize(struct thread *td, struct ogetpagesize_args *uap)
{

	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */

/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
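
/*
 * A minimal userland sketch of the alignment rule above (illustrative
 * only, not part of the kernel; fd is assumed to be an open file
 * descriptor): mapping at a file offset that is not page aligned
 * succeeds, and the returned pointer carries the same offset within
 * its page, so dereferences see the file contents at the requested
 * offset.
 *
 *	off_t off = 100;
 *	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, off);
 *	assert(((uintptr_t)p & PAGE_MASK) == (off & PAGE_MASK));
 */
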
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

int
sys_mmap(struct thread *td, struct mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}

int
kern_mmap_maxprot(struct proc *p, int prot)
{

	if ((p->p_flag2 & P2_PROTMAX_DISABLE) != 0 ||
	    (p->p_fctl0 & NT_FREEBSD_FCTL_PROTMAX_DISABLE) != 0)
		return (_PROT_ALL);
	if (((p->p_flag2 & P2_PROTMAX_ENABLE) != 0 || imply_prot_max) &&
	    prot != PROT_NONE)
		return (prot);
	return (_PROT_ALL);
}

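/*
 * Userland sketch of the PROT_MAX handling above (illustrative only):
 * a caller may cap the protections a mapping can ever be raised to,
 * independently of its initial protection.
 *
 *	void *p = mmap(NULL, len,
 *	    PROT_READ | PROT_MAX(PROT_READ | PROT_WRITE),
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	mprotect(p, len, PROT_READ | PROT_WRITE);  succeeds, within the cap
 *	mprotect(p, len, PROT_EXEC);               fails with EACCES
 */
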
int
kern_mmap(struct thread *td, uintptr_t addr0, size_t len, int prot, int flags,
    int fd, off_t pos)
{
	struct mmap_req mr = {
		.mr_hint = addr0,
		.mr_len = len,
		.mr_prot = prot,
		.mr_flags = flags,
		.mr_fd = fd,
		.mr_pos = pos
	};

	return (kern_mmap_req(td, &mr));
}

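/*
 * In-kernel callers that must vet the file object before it is mapped
 * can populate mr_check_fp_fn, which kern_mmap_req() below invokes
 * right after fget_mmap().  A sketch of such a consumer ("my_check_fp"
 * is a hypothetical name, not an existing kernel function):
 *
 *	static int
 *	my_check_fp(struct file *fp, int prot, int maxprot, int flags)
 *	{
 *		return (fp->f_type == DTYPE_VNODE ? 0 : ENODEV);
 *	}
 *
 *	struct mmap_req mr = {
 *		.mr_len = len,
 *		.mr_prot = PROT_READ,
 *		.mr_flags = MAP_SHARED,
 *		.mr_fd = fd,
 *		.mr_check_fp_fn = my_check_fp,
 *	};
 *	error = kern_mmap_req(td, &mr);
 */
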
int
kern_mmap_req(struct thread *td, const struct mmap_req *mrp)
{
	struct vmspace *vms;
	struct file *fp;
	struct proc *p;
	off_t pos;
	vm_offset_t addr;
	vm_size_t len, pageoff, size;
	vm_prot_t cap_maxprot;
	int align, error, fd, flags, max_prot, prot;
	cap_rights_t rights;
	mmap_check_fp_fn check_fp_fn;

	addr = mrp->mr_hint;
	len = mrp->mr_len;
	prot = mrp->mr_prot;
	flags = mrp->mr_flags;
	fd = mrp->mr_fd;
	pos = mrp->mr_pos;
	check_fp_fn = mrp->mr_check_fp_fn;

	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
		return (EINVAL);
	max_prot = PROT_MAX_EXTRACT(prot);
	prot = PROT_EXTRACT(prot);
	if (max_prot != 0 && (max_prot & prot) != prot)
		return (ENOTSUP);

	p = td->td_proc;

	/*
	 * Always honor PROT_MAX if set.  If not, default to all
	 * permissions unless we're implying maximum permissions.
	 */
	if (max_prot == 0)
		max_prot = kern_mmap_maxprot(p, prot);

	vms = p->p_vmspace;
	fp = NULL;
	AUDIT_ARG_FD(fd);

	/*
	 * Ignore old flags that used to be defined but did not do anything.
	 */
	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);

	/*
	 * Enforce the constraints.
	 * Mapping of length 0 is only allowed for old binaries.
	 * Anonymous mappings shall specify -1 as the file descriptor
	 * and zero position for new code.  Be nice to ancient a.out
	 * binaries and correct pos for anonymous mappings, since old
	 * ld.so sometimes issues anonymous map requests with non-zero
	 * pos.
	 */
	if (!SV_CURPROC_FLAG(SV_AOUT)) {
		if ((len == 0 && p->p_osrel >= P_OSREL_MAP_ANON) ||
		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
			return (EINVAL);
	} else {
		if ((flags & MAP_ANON) != 0)
			pos = 0;
	}

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}
	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
	    MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
	    MAP_PREFAULT_READ | MAP_GUARD |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);
	if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
		return (EINVAL);
	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
		return (EINVAL);
	if (prot != PROT_NONE &&
	    (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
		return (EINVAL);
	if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
	    pos != 0 || (flags & ~(MAP_FIXED | MAP_GUARD | MAP_EXCL |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0))
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Compute size from len by rounding (on both ends). */
	size = len + pageoff;			/* low end... */
	size = round_page(size);		/* hi end */
	/* Check for rounding up to zero. */
	if (len > size)
		return (ENOMEM);

	/* Ensure alignment is at least a page and fits in a pointer. */
	align = flags & MAP_ALIGNMENT_MASK;
	if (align != 0 && align != MAP_ALIGNED_SUPER &&
	    (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
	    align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/* Address range must be all in user VM space. */
		if (!vm_map_range_valid(&vms->vm_map, addr, addr + size))
			return (EINVAL);
#ifdef MAP_32BIT
		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
			return (EINVAL);
	} else if (flags & MAP_32BIT) {
		/*
		 * For MAP_32BIT, override the hint if it is too high and
		 * do not bother moving the mapping past the heap (since
		 * the heap is usually above 2GB).
		 */
		if (addr + size > MAP_32BIT_MAX_ADDR)
			addr = 0;
#endif
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA));
	}
	if (len == 0) {
		/*
		 * Return success without mapping anything for old
		 * binaries that request a page-aligned mapping of
		 * length 0.  For modern binaries, this function
		 * returns an error earlier.
		 */
		error = 0;
	} else if ((flags & MAP_GUARD) != 0) {
		error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
		    VM_PROT_NONE, flags, NULL, pos, FALSE, td);
	} else if ((flags & MAP_ANON) != 0) {
		/*
		 * Mapping blank space is trivial.
		 *
		 * This relies on VM_PROT_* matching PROT_*.
		 */
		error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
		    max_prot, flags, NULL, pos, FALSE, td);
	} else {
		/*
		 * Mapping a file: get fp for validation and don't let the
		 * descriptor disappear on us if we block.  Check capability
		 * rights, but also return the maximum rights to be combined
		 * with maxprot later.
		 */
		cap_rights_init_one(&rights, CAP_MMAP);
		if (prot & PROT_READ)
			cap_rights_set_one(&rights, CAP_MMAP_R);
		if ((flags & MAP_SHARED) != 0) {
			if (prot & PROT_WRITE)
				cap_rights_set_one(&rights, CAP_MMAP_W);
		}
		if (prot & PROT_EXEC)
			cap_rights_set_one(&rights, CAP_MMAP_X);
		error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
		if (error != 0)
			goto done;
		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
		    p->p_osrel >= P_OSREL_MAP_FSTRICT) {
			error = EINVAL;
			goto done;
		}
		if (check_fp_fn != NULL) {
			error = check_fp_fn(fp, prot, max_prot & cap_maxprot,
			    flags);
			if (error != 0)
				goto done;
		}
		/* This relies on VM_PROT_* matching PROT_*. */
		error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
		    max_prot & cap_maxprot, flags, pos, td);
	}

	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}

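/*
 * Userland sketch of the MAP_GUARD path validated above (illustrative
 * only): a guard takes PROT_NONE, fd == -1 and pos == 0, and reserves
 * address space without mapping anything; accesses fault with SIGSEGV
 * until the guard is deliberately replaced by a MAP_FIXED mapping or
 * removed with munmap().
 *
 *	void *g = mmap(NULL, 16 * PAGE_SIZE, PROT_NONE, MAP_GUARD, -1, 0);
 */
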
#if defined(COMPAT_FREEBSD6)
int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}
#endif

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(struct thread *td, struct ommap_args *uap)
{
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};
	int flags, prot;

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	prot = cvtbsdprot[uap->prot & 0x7];
#if (defined(COMPAT_FREEBSD32) && defined(__amd64__)) || defined(__i386__)
	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
	    prot != 0)
		prot |= PROT_EXEC;
#endif
	flags = 0;
	if (uap->flags & OMAP_ANON)
		flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		flags |= MAP_SHARED;
	else
		flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		flags |= MAP_FIXED;
	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
	    uap->fd, uap->pos));
}
#endif				/* COMPAT_43 */
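
/*
 * The cvtbsdprot[] table in ommap() above decodes the old 4.3BSD
 * protection encoding (0x1 = exec, 0x2 = write, 0x4 = read) into the
 * modern PROT_* bits; for example, an old prot of 0x6 (write | read)
 * converts to PROT_WRITE | PROT_READ.
 */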

#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
int
sys_msync(struct thread *td, struct msync_args *uap)
{

	return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
}

int
kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
{
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;
	int rv;

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}

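/*
 * Userland sketch of the flag handling above (illustrative only):
 * MS_SYNC waits for the writeback while MS_ASYNC merely starts it, and
 * MS_ASYNC may not be combined with MS_INVALIDATE:
 *
 *	msync(p, len, MS_SYNC);				succeeds, synchronous
 *	msync(p, len, MS_ASYNC | MS_INVALIDATE);	fails with EINVAL
 */
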
#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
int
sys_munmap(struct thread *td, struct munmap_args *uap)
{

	return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
	bool pmc_handled;
#endif
	vm_offset_t addr, end;
	vm_size_t pageoff;
	vm_map_t map;

	if (size == 0)
		return (EINVAL);

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	end = addr + size;
	map = &td->td_proc->p_vmspace->vm_map;
	if (!vm_map_range_valid(map, addr, end))
		return (EINVAL);

	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	pmc_handled = false;
	if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
		pmc_handled = true;
		/*
		 * Inform hwpmc if the address range being unmapped contains
		 * an executable region.
		 */
		pkm.pm_address = (uintptr_t) NULL;
		if (vm_map_lookup_entry(map, addr, &entry)) {
			for (; entry->start < end;
			    entry = vm_map_entry_succ(entry)) {
				if (vm_map_check_protection(map, entry->start,
					entry->end, VM_PROT_EXECUTE) == TRUE) {
					pkm.pm_address = (uintptr_t) addr;
					pkm.pm_size = (size_t) size;
					break;
				}
			}
		}
	}
#endif
	vm_map_delete(map, addr, end);

#ifdef HWPMC_HOOKS
	if (__predict_false(pmc_handled)) {
		/* downgrade the lock to prevent a LOR with the pmc-sx lock */
		vm_map_lock_downgrade(map);
		if (pkm.pm_address != (uintptr_t) NULL)
			PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
		vm_map_unlock_read(map);
	} else
#endif
		vm_map_unlock(map);

	/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
int
sys_mprotect(struct thread *td, struct mprotect_args *uap)
{

	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
}

int
kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
{
	vm_offset_t addr;
	vm_size_t pageoff;
	int vm_error, max_prot;

	addr = addr0;
	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
		return (EINVAL);
	max_prot = PROT_MAX_EXTRACT(prot);
	prot = PROT_EXTRACT(prot);
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		if (((addr + size) & 0xffffffff) < addr)
			return (EINVAL);
	} else
#endif
	if (addr + size < addr)
		return (EINVAL);

	vm_error = KERN_SUCCESS;
	if (max_prot != 0) {
		if ((max_prot & prot) != prot)
			return (ENOTSUP);
		vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
		    addr, addr + size, max_prot, TRUE);
	}
	if (vm_error == KERN_SUCCESS)
		vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
		    addr, addr + size, prot, FALSE);

	switch (vm_error) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}

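/*
 * Userland sketch of the two vm_map_protect() calls above (illustrative
 * only): passing PROT_MAX() to mprotect() first lowers the maximum
 * protection and then sets the current one, so a later attempt to raise
 * the protection past the new maximum fails:
 *
 *	mprotect(p, len, PROT_READ | PROT_MAX(PROT_READ));
 *	mprotect(p, len, PROT_READ | PROT_WRITE);	now fails with EACCES
 */
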
#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
int
sys_minherit(struct thread *td, struct minherit_args *uap)
{

	return (kern_minherit(td, (uintptr_t)uap->addr, uap->len,
	    uap->inherit));
}

int
kern_minherit(struct thread *td, uintptr_t addr0, size_t len, int inherit0)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)addr0;
	size = len;
	inherit = inherit0;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}

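/*
 * Userland sketch (illustrative only): minherit() selects what a child
 * sees across fork(); e.g. INHERIT_SHARE makes a private anonymous
 * region behave like shared memory with children forked afterwards:
 *
 *	minherit(p, len, INHERIT_SHARE);
 *	if (fork() == 0)
 *		p[0] = 1;	now visible to the parent as well
 */
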
#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

int
sys_madvise(struct thread *td, struct madvise_args *uap)
{

	return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
}

int
kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
{
	vm_map_t map;
	vm_offset_t addr, end, start;
	int flags;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (behav == MADV_PROTECT) {
		flags = PPROT_SET;
		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
		    PROC_SPROTECT, &flags));
	}

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	addr = addr0;
	if (!vm_map_range_valid(map, addr, addr + len))
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page(addr);
	end = round_page(addr + len);

	/*
	 * vm_map_madvise() checks for illegal values of behav.
	 */
	return (vm_map_madvise(map, start, end, behav));
}

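/*
 * Userland sketch (illustrative only): ordinary advice values flow to
 * vm_map_madvise() for the given range, while MADV_PROTECT is rerouted
 * above to procctl(PROC_SPROTECT), marking the whole process exempt
 * from being killed when swap is exhausted; it ignores addr and len
 * and requires privilege:
 *
 *	madvise(p, len, MADV_FREE);		per-range advice
 *	madvise(NULL, 0, MADV_PROTECT);		process-wide
 */
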
#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

int
sys_mincore(struct thread *td, struct mincore_args *uap)
{

	return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
}

int
kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
{
	pmap_t pmap;
	vm_map_t map;
	vm_map_entry_t current, entry;
	vm_object_t object;
	vm_offset_t addr, cend, end, first_addr;
	vm_paddr_t pa;
	vm_page_t m;
	vm_pindex_t pindex;
	int error, lastvecindex, mincoreinfo, vecindex;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page(addr0);
	end = round_page(addr0 + len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	/*
	 * Preset error so that a degenerate (e.g. zero-length) request
	 * that never calls subyte() returns success.
	 */
	error = 0;
	lastvecindex = -1;
	while (entry->start < end) {

		/*
		 * check for contiguity
		 */
		current = entry;
		entry = vm_map_entry_succ(current);
		if (current->end < end &&
		    entry->start > current->end) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		for (; addr < cend; addr += PAGE_SIZE) {
			/*
			 * Check the pmap first; it is likely faster, and it
			 * can also tell us whether we are the one
			 * referencing or modifying the page.
			 */
			m = NULL;
			object = NULL;
retry:
			pa = 0;
			mincoreinfo = pmap_mincore(pmap, addr, &pa);
			if (mincore_mapped) {
				/*
				 * We only care about this pmap's
				 * mapping of the page, if any.
				 */
				;
			} else if (pa != 0) {
				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
				 * other mappings might be examined.  The page's
				 * identity may change at any point before its
				 * object lock is acquired, so re-validate if
				 * necessary.
				 */
				m = PHYS_TO_VM_PAGE(pa);
				while (object == NULL || m->object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = atomic_load_ptr(&m->object);
					if (object == NULL)
						goto retry;
					VM_OBJECT_WLOCK(object);
				}
				if (pa != pmap_extract(pmap, addr))
					goto retry;
				KASSERT(vm_page_all_valid(m),
				    ("mincore: page %p is mapped but invalid",
				    m));
			} else if (mincoreinfo == 0) {
				/*
				 * The page is not mapped by this process.  If
				 * the object implements managed pages, then
				 * determine if the page is resident so that
				 * the mappings might be examined.
				 */
				if (current->object.vm_object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = current->object.vm_object;
					VM_OBJECT_WLOCK(object);
				}
				if (object->type == OBJT_DEFAULT ||
				    object->type == OBJT_SWAP ||
				    object->type == OBJT_VNODE) {
					pindex = OFF_TO_IDX(current->offset +
					    (addr - current->start));
					m = vm_page_lookup(object, pindex);
					if (m != NULL && vm_page_none_valid(m))
						m = NULL;
					if (m != NULL)
						mincoreinfo = MINCORE_INCORE;
				}
			}
			if (m != NULL) {
				VM_OBJECT_ASSERT_WLOCKED(m->object);

				/* Examine other mappings of the page. */
				if (m->dirty == 0 && pmap_is_modified(m))
					vm_page_dirty(m);
				if (m->dirty != 0)
					mincoreinfo |= MINCORE_MODIFIED_OTHER;

				/*
				 * The first test for PGA_REFERENCED is an
				 * optimization.  The second test is
				 * required because a concurrent pmap
				 * operation could clear the last reference
				 * and set PGA_REFERENCED before the call to
				 * pmap_is_referenced().
				 */
				if ((m->a.flags & PGA_REFERENCED) != 0 ||
				    pmap_is_referenced(m) ||
				    (m->a.flags & PGA_REFERENCED) != 0)
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
			}
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate the index into the user-supplied byte
			 * vector
			 */
			vecindex = atop(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make
			 * sure that the byte vector is zeroed for those
			 * skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				++lastvecindex;
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed due to the subyte, the
			 * previous output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = atop(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
	}

	/*
	 * If the map has changed due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}

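/*
 * Userland sketch (illustrative only): mincore() fills one status byte
 * per page of the request, so the vector is sized from the page-rounded
 * length:
 *
 *	size_t npages = (len + PAGE_SIZE - 1) / PAGE_SIZE;
 *	char *vec = malloc(npages);
 *	if (mincore(p, len, vec) == 0 && (vec[0] & MINCORE_INCORE) != 0)
 *		...		first page is resident
 */
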
1045d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
1046df8bae1dSRodney W. Grimes struct mlock_args {
1047651bb817SAlexander Langer 	const void *addr;
1048df8bae1dSRodney W. Grimes 	size_t len;
1049df8bae1dSRodney W. Grimes };
1050d2d3e875SBruce Evans #endif
1051df8bae1dSRodney W. Grimes int
105204e89ffbSKonstantin Belousov sys_mlock(struct thread *td, struct mlock_args *uap)
1053df8bae1dSRodney W. Grimes {
1054995d7069SGleb Smirnoff 
1055496ab053SKonstantin Belousov 	return (kern_mlock(td->td_proc, td->td_ucred,
1056496ab053SKonstantin Belousov 	    __DECONST(uintptr_t, uap->addr), uap->len));
1057995d7069SGleb Smirnoff }
1058995d7069SGleb Smirnoff 
1059995d7069SGleb Smirnoff int
1060496ab053SKonstantin Belousov kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
1061995d7069SGleb Smirnoff {
1062bb734798SDon Lewis 	vm_offset_t addr, end, last, start;
1063bb734798SDon Lewis 	vm_size_t npages, size;
10643ac7d297SAndrey Zonov 	vm_map_t map;
10651ba5ad42SEdward Tomasz Napierala 	unsigned long nsize;
1066bb734798SDon Lewis 	int error;
1067df8bae1dSRodney W. Grimes 
1068cc426dd3SMateusz Guzik 	error = priv_check_cred(cred, PRIV_VM_MLOCK);
106947934cefSDon Lewis 	if (error)
107047934cefSDon Lewis 		return (error);
1071496ab053SKonstantin Belousov 	addr = addr0;
1072995d7069SGleb Smirnoff 	size = len;
1073bb734798SDon Lewis 	last = addr + size;
107416929939SDon Lewis 	start = trunc_page(addr);
1075bb734798SDon Lewis 	end = round_page(last);
1076bb734798SDon Lewis 	if (last < addr || end < addr)
1077df8bae1dSRodney W. Grimes 		return (EINVAL);
107816929939SDon Lewis 	npages = atop(end - start);
107954a3a114SMark Johnston 	if (npages > vm_page_max_user_wired)
108016929939SDon Lewis 		return (ENOMEM);
10813ac7d297SAndrey Zonov 	map = &proc->p_vmspace->vm_map;
108247934cefSDon Lewis 	PROC_LOCK(proc);
10833ac7d297SAndrey Zonov 	nsize = ptoa(npages + pmap_wired_count(map->pmap));
1084f6f6d240SMateusz Guzik 	if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
108547934cefSDon Lewis 		PROC_UNLOCK(proc);
10864a40e3d4SJohn Dyson 		return (ENOMEM);
108791d5354aSJohn Baldwin 	}
108847934cefSDon Lewis 	PROC_UNLOCK(proc);
1089afcc55f3SEdward Tomasz Napierala #ifdef RACCT
10904b5c9cf6SEdward Tomasz Napierala 	if (racct_enable) {
10911ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(proc);
10921ba5ad42SEdward Tomasz Napierala 		error = racct_set(proc, RACCT_MEMLOCK, nsize);
10931ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(proc);
10941ba5ad42SEdward Tomasz Napierala 		if (error != 0)
10951ba5ad42SEdward Tomasz Napierala 			return (ENOMEM);
10964b5c9cf6SEdward Tomasz Napierala 	}
1097afcc55f3SEdward Tomasz Napierala #endif
10983ac7d297SAndrey Zonov 	error = vm_map_wire(map, start, end,
109916929939SDon Lewis 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1100afcc55f3SEdward Tomasz Napierala #ifdef RACCT
11014b5c9cf6SEdward Tomasz Napierala 	if (racct_enable && error != KERN_SUCCESS) {
11021ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(proc);
11031ba5ad42SEdward Tomasz Napierala 		racct_set(proc, RACCT_MEMLOCK,
11043ac7d297SAndrey Zonov 		    ptoa(pmap_wired_count(map->pmap)));
11051ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(proc);
11061ba5ad42SEdward Tomasz Napierala 	}
1107afcc55f3SEdward Tomasz Napierala #endif
1108df8bae1dSRodney W. Grimes 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
1109df8bae1dSRodney W. Grimes }
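
/*
 * Illustrative userland sketch (not part of this file): kern_mlock() is
 * reached through the mlock(2) system call.  The buffer and its size are
 * hypothetical; ENOMEM can mean that RLIMIT_MEMLOCK or the
 * vm_page_max_user_wired cap checked above was exceeded.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *
 *	char *buf = malloc(8192);
 *	if (buf == NULL)
 *		err(1, "malloc");
 *	if (mlock(buf, 8192) == -1)
 *		err(1, "mlock");
 *	// ... use the wired buffer ...
 *	if (munlock(buf, 8192) == -1)
 *		err(1, "munlock");
 */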
1110df8bae1dSRodney W. Grimes 
1111d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
11124a40e3d4SJohn Dyson struct mlockall_args {
11134a40e3d4SJohn Dyson 	int	how;
11144a40e3d4SJohn Dyson };
11154a40e3d4SJohn Dyson #endif
11164a40e3d4SJohn Dyson 
11174a40e3d4SJohn Dyson int
111804e89ffbSKonstantin Belousov sys_mlockall(struct thread *td, struct mlockall_args *uap)
11194a40e3d4SJohn Dyson {
1120abd498aaSBruce M Simpson 	vm_map_t map;
1121abd498aaSBruce M Simpson 	int error;
1122abd498aaSBruce M Simpson 
1123abd498aaSBruce M Simpson 	map = &td->td_proc->p_vmspace->vm_map;
11247e19eda4SAndrey Zonov 	error = priv_check(td, PRIV_VM_MLOCK);
11257e19eda4SAndrey Zonov 	if (error)
11267e19eda4SAndrey Zonov 		return (error);
1127abd498aaSBruce M Simpson 
1128abd498aaSBruce M Simpson 	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
1129abd498aaSBruce M Simpson 		return (EINVAL);
1130abd498aaSBruce M Simpson 
1131abd498aaSBruce M Simpson 	/*
1132abd498aaSBruce M Simpson 	 * If wiring all pages in the process would cause it to exceed
1133abd498aaSBruce M Simpson 	 * a hard resource limit, return ENOMEM.
1134abd498aaSBruce M Simpson 	 */
11357e19eda4SAndrey Zonov 	if (!old_mlock && uap->how & MCL_CURRENT) {
11362554f86aSMateusz Guzik 		if (map->size > lim_cur(td, RLIMIT_MEMLOCK))
1137abd498aaSBruce M Simpson 			return (ENOMEM);
113891d5354aSJohn Baldwin 	}
1139afcc55f3SEdward Tomasz Napierala #ifdef RACCT
11404b5c9cf6SEdward Tomasz Napierala 	if (racct_enable) {
11411ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(td->td_proc);
11421ba5ad42SEdward Tomasz Napierala 		error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
11431ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(td->td_proc);
11441ba5ad42SEdward Tomasz Napierala 		if (error != 0)
11451ba5ad42SEdward Tomasz Napierala 			return (ENOMEM);
11464b5c9cf6SEdward Tomasz Napierala 	}
1147afcc55f3SEdward Tomasz Napierala #endif
1148abd498aaSBruce M Simpson 
1149abd498aaSBruce M Simpson 	if (uap->how & MCL_FUTURE) {
1150abd498aaSBruce M Simpson 		vm_map_lock(map);
1151abd498aaSBruce M Simpson 		vm_map_modflags(map, MAP_WIREFUTURE, 0);
1152abd498aaSBruce M Simpson 		vm_map_unlock(map);
1153abd498aaSBruce M Simpson 		error = 0;
1154abd498aaSBruce M Simpson 	}
1155abd498aaSBruce M Simpson 
1156abd498aaSBruce M Simpson 	if (uap->how & MCL_CURRENT) {
1157abd498aaSBruce M Simpson 		/*
1158abd498aaSBruce M Simpson 		 * P1003.1-2001 mandates that all currently mapped pages
1159abd498aaSBruce M Simpson 		 * will be memory resident and locked (wired) upon return
1160abd498aaSBruce M Simpson 		 * from mlockall().  vm_map_wire() wires the pages by
1161abd498aaSBruce M Simpson 		 * calling vm_fault_wire() for each page in the region.
1162abd498aaSBruce M Simpson 		 */
1163abd498aaSBruce M Simpson 		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
1164abd498aaSBruce M Simpson 		    VM_MAP_WIRE_USER | VM_MAP_WIRE_HOLESOK);
116554a3a114SMark Johnston 		if (error == KERN_SUCCESS)
116654a3a114SMark Johnston 			error = 0;
116754a3a114SMark Johnston 		else if (error == KERN_RESOURCE_SHORTAGE)
116854a3a114SMark Johnston 			error = ENOMEM;
116954a3a114SMark Johnston 		else
117054a3a114SMark Johnston 			error = EAGAIN;
1171abd498aaSBruce M Simpson 	}
1172afcc55f3SEdward Tomasz Napierala #ifdef RACCT
11734b5c9cf6SEdward Tomasz Napierala 	if (racct_enable && error != KERN_SUCCESS) {
11741ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(td->td_proc);
11751ba5ad42SEdward Tomasz Napierala 		racct_set(td->td_proc, RACCT_MEMLOCK,
11763ac7d297SAndrey Zonov 		    ptoa(pmap_wired_count(map->pmap)));
11771ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(td->td_proc);
11781ba5ad42SEdward Tomasz Napierala 	}
1179afcc55f3SEdward Tomasz Napierala #endif
1180abd498aaSBruce M Simpson 
1181abd498aaSBruce M Simpson 	return (error);
11824a40e3d4SJohn Dyson }
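
/*
 * Illustrative userland sketch (not part of this file): wiring both the
 * current address space and all future mappings through mlockall(2),
 * which lands in the handler above, then undoing it with munlockall(2).
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");	// ENOMEM or EAGAIN, as mapped above
 *	// ... run with a wired address space ...
 *	if (munlockall() == -1)
 *		err(1, "munlockall");
 */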
11834a40e3d4SJohn Dyson 
11844a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_
1185fa721254SAlfred Perlstein struct munlockall_args {
1186abd498aaSBruce M Simpson 	register_t dummy;
11874a40e3d4SJohn Dyson };
11884a40e3d4SJohn Dyson #endif
11894a40e3d4SJohn Dyson 
11904a40e3d4SJohn Dyson int
119104e89ffbSKonstantin Belousov sys_munlockall(struct thread *td, struct munlockall_args *uap)
11924a40e3d4SJohn Dyson {
1193abd498aaSBruce M Simpson 	vm_map_t map;
1194abd498aaSBruce M Simpson 	int error;
1195abd498aaSBruce M Simpson 
1196abd498aaSBruce M Simpson 	map = &td->td_proc->p_vmspace->vm_map;
1197acd3428bSRobert Watson 	error = priv_check(td, PRIV_VM_MUNLOCK);
1198abd498aaSBruce M Simpson 	if (error)
1199abd498aaSBruce M Simpson 		return (error);
1200abd498aaSBruce M Simpson 
1201abd498aaSBruce M Simpson 	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
1202abd498aaSBruce M Simpson 	vm_map_lock(map);
1203abd498aaSBruce M Simpson 	vm_map_modflags(map, 0, MAP_WIREFUTURE);
1204abd498aaSBruce M Simpson 	vm_map_unlock(map);
1205abd498aaSBruce M Simpson 
1206abd498aaSBruce M Simpson 	/* Forcibly unwire all pages. */
1207abd498aaSBruce M Simpson 	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
1208abd498aaSBruce M Simpson 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_HOLESOK);
1209afcc55f3SEdward Tomasz Napierala #ifdef RACCT
12104b5c9cf6SEdward Tomasz Napierala 	if (racct_enable && error == KERN_SUCCESS) {
12111ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(td->td_proc);
12121ba5ad42SEdward Tomasz Napierala 		racct_set(td->td_proc, RACCT_MEMLOCK, 0);
12131ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(td->td_proc);
12141ba5ad42SEdward Tomasz Napierala 	}
1215afcc55f3SEdward Tomasz Napierala #endif
1216abd498aaSBruce M Simpson 
1217abd498aaSBruce M Simpson 	return (error);
12184a40e3d4SJohn Dyson }
12194a40e3d4SJohn Dyson 
12204a40e3d4SJohn Dyson #ifndef _SYS_SYSPROTO_H_
1221df8bae1dSRodney W. Grimes struct munlock_args {
1222651bb817SAlexander Langer 	const void *addr;
1223df8bae1dSRodney W. Grimes 	size_t len;
1224df8bae1dSRodney W. Grimes };
1225d2d3e875SBruce Evans #endif
1226df8bae1dSRodney W. Grimes int
122769cdfcefSEdward Tomasz Napierala sys_munlock(struct thread *td, struct munlock_args *uap)
1228df8bae1dSRodney W. Grimes {
122969cdfcefSEdward Tomasz Napierala 
1230496ab053SKonstantin Belousov 	return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
123169cdfcefSEdward Tomasz Napierala }
123269cdfcefSEdward Tomasz Napierala 
123369cdfcefSEdward Tomasz Napierala int
1234496ab053SKonstantin Belousov kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
123569cdfcefSEdward Tomasz Napierala {
1236496ab053SKonstantin Belousov 	vm_offset_t addr, end, last, start;
1237fc2b1679SJeremie Le Hen #ifdef RACCT
1238c92b5069SJeremie Le Hen 	vm_map_t map;
1239fc2b1679SJeremie Le Hen #endif
1240df8bae1dSRodney W. Grimes 	int error;
1241df8bae1dSRodney W. Grimes 
1242acd3428bSRobert Watson 	error = priv_check(td, PRIV_VM_MUNLOCK);
124347934cefSDon Lewis 	if (error)
124447934cefSDon Lewis 		return (error);
1245496ab053SKonstantin Belousov 	addr = addr0;
1246bb734798SDon Lewis 	last = addr + size;
124716929939SDon Lewis 	start = trunc_page(addr);
1248bb734798SDon Lewis 	end = round_page(last);
1249bb734798SDon Lewis 	if (last < addr || end < addr)
1250df8bae1dSRodney W. Grimes 		return (EINVAL);
125116929939SDon Lewis 	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
125216929939SDon Lewis 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
1253afcc55f3SEdward Tomasz Napierala #ifdef RACCT
12544b5c9cf6SEdward Tomasz Napierala 	if (racct_enable && error == KERN_SUCCESS) {
12551ba5ad42SEdward Tomasz Napierala 		PROC_LOCK(td->td_proc);
1256c92b5069SJeremie Le Hen 		map = &td->td_proc->p_vmspace->vm_map;
1257c92b5069SJeremie Le Hen 		racct_set(td->td_proc, RACCT_MEMLOCK,
1258c92b5069SJeremie Le Hen 		    ptoa(pmap_wired_count(map->pmap)));
12591ba5ad42SEdward Tomasz Napierala 		PROC_UNLOCK(td->td_proc);
12601ba5ad42SEdward Tomasz Napierala 	}
1261afcc55f3SEdward Tomasz Napierala #endif
1262df8bae1dSRodney W. Grimes 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
1263df8bae1dSRodney W. Grimes }
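
/*
 * Note that kern_mlock() and kern_munlock() both round the request to
 * page boundaries before acting on it; for a hypothetical addr/len pair:
 *
 *	start = trunc_page(addr);		// rounded down
 *	end = round_page(addr + len);		// rounded up
 *	npages = atop(end - start);		// pages actually affected
 *
 * so locking or unlocking a single byte wires or unwires every page that
 * the range [addr, addr + len) touches.
 */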
1264df8bae1dSRodney W. Grimes 
1265df8bae1dSRodney W. Grimes /*
1266c8daea13SAlexander Kabaev  * vm_mmap_vnode()
1267c8daea13SAlexander Kabaev  *
1268c8daea13SAlexander Kabaev  * Helper function for vm_mmap.  Performs the sanity checks specific to
1269c8daea13SAlexander Kabaev  * mmap operations on vnodes.
1270c8daea13SAlexander Kabaev  */
1271c8daea13SAlexander Kabaev int
1272c8daea13SAlexander Kabaev vm_mmap_vnode(struct thread *td, vm_size_t objsize,
1273c8daea13SAlexander Kabaev     vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
127484110e7eSKonstantin Belousov     struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
127584110e7eSKonstantin Belousov     boolean_t *writecounted)
1276c8daea13SAlexander Kabaev {
1277c8daea13SAlexander Kabaev 	struct vattr va;
1278c8daea13SAlexander Kabaev 	vm_object_t obj;
1279bd0e1bebSMark Johnston 	vm_ooffset_t foff;
12800359a12eSAttilio Rao 	struct ucred *cred;
128178022527SKonstantin Belousov 	int error, flags;
128278022527SKonstantin Belousov 	bool writex;
1283c8daea13SAlexander Kabaev 
12840359a12eSAttilio Rao 	cred = td->td_ucred;
128578022527SKonstantin Belousov 	writex = (*maxprotp & VM_PROT_WRITE) != 0 &&
128678022527SKonstantin Belousov 	    (*flagsp & MAP_SHARED) != 0;
1287*a92a971bSMateusz Guzik 	if ((error = vget(vp, LK_SHARED)) != 0)
1288c8daea13SAlexander Kabaev 		return (error);
12890df42647SRobert Watson 	AUDIT_ARG_VNODE1(vp);
129064345f0bSJohn Baldwin 	foff = *foffp;
1291c8daea13SAlexander Kabaev 	flags = *flagsp;
12928516dd18SPoul-Henning Kamp 	obj = vp->v_object;
1293c8daea13SAlexander Kabaev 	if (vp->v_type == VREG) {
1294c8daea13SAlexander Kabaev 		/*
1295c8daea13SAlexander Kabaev 		 * Get the proper underlying object
1296c8daea13SAlexander Kabaev 		 */
12978516dd18SPoul-Henning Kamp 		if (obj == NULL) {
1298c8daea13SAlexander Kabaev 			error = EINVAL;
1299c8daea13SAlexander Kabaev 			goto done;
1300c8daea13SAlexander Kabaev 		}
1301e5f299ffSKonstantin Belousov 		if (obj->type == OBJT_VNODE && obj->handle != vp) {
1302c8daea13SAlexander Kabaev 			vput(vp);
1303c8daea13SAlexander Kabaev 			vp = (struct vnode *)obj->handle;
130484110e7eSKonstantin Belousov 			/*
130584110e7eSKonstantin Belousov 			 * Bypass filesystems obey the MP-safety of the
130653f5f8a0SKonstantin Belousov 			 * underlying filesystem.  Tmpfs never bypasses.
130784110e7eSKonstantin Belousov 			 */
1308*a92a971bSMateusz Guzik 			error = vget(vp, LK_SHARED);
13095050aa86SKonstantin Belousov 			if (error != 0)
131084110e7eSKonstantin Belousov 				return (error);
131184110e7eSKonstantin Belousov 		}
131278022527SKonstantin Belousov 		if (writex) {
131384110e7eSKonstantin Belousov 			*writecounted = TRUE;
1314fe7bcbafSKyle Evans 			vm_pager_update_writecount(obj, 0, objsize);
131584110e7eSKonstantin Belousov 		}
1316c8daea13SAlexander Kabaev 	} else {
1317c8daea13SAlexander Kabaev 		error = EINVAL;
1318c8daea13SAlexander Kabaev 		goto done;
1319c8daea13SAlexander Kabaev 	}
13200359a12eSAttilio Rao 	if ((error = VOP_GETATTR(vp, &va, cred)))
1321c8daea13SAlexander Kabaev 		goto done;
1322c92163dcSChristian S.J. Peron #ifdef MAC
13237077c426SJohn Baldwin 	/* This relies on VM_PROT_* matching PROT_*. */
13247077c426SJohn Baldwin 	error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
1325c92163dcSChristian S.J. Peron 	if (error != 0)
1326c92163dcSChristian S.J. Peron 		goto done;
1327c92163dcSChristian S.J. Peron #endif
1328c8daea13SAlexander Kabaev 	if ((flags & MAP_SHARED) != 0) {
1329c8daea13SAlexander Kabaev 		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
13307077c426SJohn Baldwin 			if (prot & VM_PROT_WRITE) {
1331c8daea13SAlexander Kabaev 				error = EPERM;
1332c8daea13SAlexander Kabaev 				goto done;
1333c8daea13SAlexander Kabaev 			}
1334c8daea13SAlexander Kabaev 			*maxprotp &= ~VM_PROT_WRITE;
1335c8daea13SAlexander Kabaev 		}
1336c8daea13SAlexander Kabaev 	}
1337c8daea13SAlexander Kabaev 	/*
1338c8daea13SAlexander Kabaev 	 * If it is a regular file without any references,
1339c8daea13SAlexander Kabaev 	 * we do not need to sync it.
1340c8daea13SAlexander Kabaev 	 * Adjust the object size to be the size of the actual file.
1341c8daea13SAlexander Kabaev 	 */
1342c8daea13SAlexander Kabaev 	objsize = round_page(va.va_size);
1343c8daea13SAlexander Kabaev 	if (va.va_nlink == 0)
1344c8daea13SAlexander Kabaev 		flags |= MAP_NOSYNC;
13453d653db0SAlan Cox 	if (obj->type == OBJT_VNODE) {
1346e5f299ffSKonstantin Belousov 		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
1347e5f299ffSKonstantin Belousov 		    cred);
1348c8daea13SAlexander Kabaev 		if (obj == NULL) {
134964345f0bSJohn Baldwin 			error = ENOMEM;
1350c8daea13SAlexander Kabaev 			goto done;
1351c8daea13SAlexander Kabaev 		}
13523d653db0SAlan Cox 	} else {
13533d653db0SAlan Cox 		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
13543d653db0SAlan Cox 		    ("wrong object type"));
1355f2410510SJeff Roberson 		vm_object_reference(obj);
13563d653db0SAlan Cox #if VM_NRESERVLEVEL > 0
1357f2410510SJeff Roberson 		if ((obj->flags & OBJ_COLORED) == 0) {
1358f2410510SJeff Roberson 			VM_OBJECT_WLOCK(obj);
13593d653db0SAlan Cox 			vm_object_color(obj, 0);
13603d653db0SAlan Cox 			VM_OBJECT_WUNLOCK(obj);
13613d653db0SAlan Cox 		}
1362f2410510SJeff Roberson #endif
1363f2410510SJeff Roberson 	}
1364c8daea13SAlexander Kabaev 	*objp = obj;
1365c8daea13SAlexander Kabaev 	*flagsp = flags;
136664345f0bSJohn Baldwin 
1367643656cfSMateusz Guzik 	VOP_MMAPPED(vp);
13681e309003SDiomidis Spinellis 
1369c8daea13SAlexander Kabaev done:
1370bafa6cfcSKonstantin Belousov 	if (error != 0 && *writecounted) {
1371bafa6cfcSKonstantin Belousov 		*writecounted = FALSE;
1372fe7bcbafSKyle Evans 		vm_pager_update_writecount(obj, objsize, 0);
1373bafa6cfcSKonstantin Belousov 	}
1374c8daea13SAlexander Kabaev 	vput(vp);
1375c8daea13SAlexander Kabaev 	return (error);
1376c8daea13SAlexander Kabaev }
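
/*
 * Sketch of the writecount protocol applied above to shared, writable
 * vnode mappings (both calls appear in this file):
 *
 *	vm_pager_update_writecount(obj, 0, objsize);	// mapping created
 *	...
 *	vm_pager_update_writecount(obj, objsize, 0);	// accounting undone
 *
 * vm_mmap() below performs the matching release via
 * vm_pager_release_writecount() when vm_mmap_object() fails.
 */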
1377c8daea13SAlexander Kabaev 
1378c8daea13SAlexander Kabaev /*
137998df9218SJohn Baldwin  * vm_mmap_cdev()
138098df9218SJohn Baldwin  *
138198df9218SJohn Baldwin  * Helper function for vm_mmap.  Performs the sanity checks specific to
138298df9218SJohn Baldwin  * mmap operations on cdevs.
138398df9218SJohn Baldwin  */
138498df9218SJohn Baldwin int
13857077c426SJohn Baldwin vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
13867077c426SJohn Baldwin     vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
13877077c426SJohn Baldwin     vm_ooffset_t *foff, vm_object_t *objp)
138898df9218SJohn Baldwin {
138998df9218SJohn Baldwin 	vm_object_t obj;
13907077c426SJohn Baldwin 	int error, flags;
139198df9218SJohn Baldwin 
139298df9218SJohn Baldwin 	flags = *flagsp;
139398df9218SJohn Baldwin 
139491a35e78SKonstantin Belousov 	if (dsw->d_flags & D_MMAP_ANON) {
13957077c426SJohn Baldwin 		*objp = NULL;
13967077c426SJohn Baldwin 		*foff = 0;
139798df9218SJohn Baldwin 		*maxprotp = VM_PROT_ALL;
139898df9218SJohn Baldwin 		*flagsp |= MAP_ANON;
139998df9218SJohn Baldwin 		return (0);
140098df9218SJohn Baldwin 	}
140198df9218SJohn Baldwin 	/*
140264345f0bSJohn Baldwin 	 * cdevs do not provide private mappings of any kind.
140398df9218SJohn Baldwin 	 */
140498df9218SJohn Baldwin 	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
14057077c426SJohn Baldwin 	    (prot & VM_PROT_WRITE) != 0)
140698df9218SJohn Baldwin 		return (EACCES);
14077077c426SJohn Baldwin 	if (flags & (MAP_PRIVATE|MAP_COPY))
140898df9218SJohn Baldwin 		return (EINVAL);
140998df9218SJohn Baldwin 	/*
141098df9218SJohn Baldwin 	 * Force device mappings to be shared.
141198df9218SJohn Baldwin 	 */
141298df9218SJohn Baldwin 	flags |= MAP_SHARED;
141398df9218SJohn Baldwin #ifdef MAC_XXX
14147077c426SJohn Baldwin 	error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
14157077c426SJohn Baldwin 	if (error != 0)
141698df9218SJohn Baldwin 		return (error);
141798df9218SJohn Baldwin #endif
141864345f0bSJohn Baldwin 	/*
141964345f0bSJohn Baldwin 	 * First, try d_mmap_single().  If that is not implemented
142064345f0bSJohn Baldwin 	 * (returns ENODEV), fall back to using the device pager.
142164345f0bSJohn Baldwin 	 * Note that d_mmap_single() must return a reference to the
142264345f0bSJohn Baldwin 	 * object (it must bump the reference count of the object
142364345f0bSJohn Baldwin 	 * before returning it).
142464345f0bSJohn Baldwin 	 *
142564345f0bSJohn Baldwin 	 * XXX assumes VM_PROT_* == PROT_*
142664345f0bSJohn Baldwin 	 */
142764345f0bSJohn Baldwin 	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
142864345f0bSJohn Baldwin 	if (error != ENODEV)
142964345f0bSJohn Baldwin 		return (error);
14303364c323SKonstantin Belousov 	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
14313364c323SKonstantin Belousov 	    td->td_ucred);
143298df9218SJohn Baldwin 	if (obj == NULL)
143398df9218SJohn Baldwin 		return (EINVAL);
143498df9218SJohn Baldwin 	*objp = obj;
143598df9218SJohn Baldwin 	*flagsp = flags;
143698df9218SJohn Baldwin 	return (0);
143798df9218SJohn Baldwin }
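
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file)
 * of the d_mmap_single() contract relied upon above: either return a
 * referenced VM object, or ENODEV to fall back to the device pager.
 *
 *	static int
 *	foo_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
 *	    vm_size_t size, vm_object_t *object, int nprot)
 *	{
 *		struct foo_softc *sc = cdev->si_drv1;
 *
 *		if (*offset + size > sc->foo_memsize)
 *			return (EINVAL);
 *		vm_object_reference(sc->foo_memobj);	// ref for the caller
 *		*object = sc->foo_memobj;
 *		return (0);
 *	}
 */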
143898df9218SJohn Baldwin 
143998df9218SJohn Baldwin /*
1440d2c60af8SMatthew Dillon  * vm_mmap()
1441d2c60af8SMatthew Dillon  *
14427077c426SJohn Baldwin  * Internal version of mmap used by exec, System V shared memory, and
14437077c426SJohn Baldwin  * various device drivers.  Handle is either a vnode pointer, a
14447077c426SJohn Baldwin  * character device, or NULL for MAP_ANON.
1445df8bae1dSRodney W. Grimes  */
1446df8bae1dSRodney W. Grimes int
1447b9dcd593SBruce Evans vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
1448b9dcd593SBruce Evans     vm_prot_t maxprot, int flags,
144998df9218SJohn Baldwin     objtype_t handle_type, void *handle,
1450b9dcd593SBruce Evans     vm_ooffset_t foff)
1451df8bae1dSRodney W. Grimes {
14527077c426SJohn Baldwin 	vm_object_t object;
1453b40ce416SJulian Elischer 	struct thread *td = curthread;
14547077c426SJohn Baldwin 	int error;
145584110e7eSKonstantin Belousov 	boolean_t writecounted;
1456df8bae1dSRodney W. Grimes 
1457df8bae1dSRodney W. Grimes 	if (size == 0)
14587077c426SJohn Baldwin 		return (EINVAL);
1459df8bae1dSRodney W. Grimes 
1460749474f2SPeter Wemm 	size = round_page(size);
1461010ba384SMark Johnston 	object = NULL;
14627077c426SJohn Baldwin 	writecounted = FALSE;
14637077c426SJohn Baldwin 
14647077c426SJohn Baldwin 	/*
14657077c426SJohn Baldwin 	 * Lookup/allocate object.
14667077c426SJohn Baldwin 	 */
14677077c426SJohn Baldwin 	switch (handle_type) {
14687077c426SJohn Baldwin 	case OBJT_DEVICE: {
14697077c426SJohn Baldwin 		struct cdevsw *dsw;
14707077c426SJohn Baldwin 		struct cdev *cdev;
14717077c426SJohn Baldwin 		int ref;
14727077c426SJohn Baldwin 
14737077c426SJohn Baldwin 		cdev = handle;
14747077c426SJohn Baldwin 		dsw = dev_refthread(cdev, &ref);
14757077c426SJohn Baldwin 		if (dsw == NULL)
14767077c426SJohn Baldwin 			return (ENXIO);
14777077c426SJohn Baldwin 		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
14787077c426SJohn Baldwin 		    dsw, &foff, &object);
14797077c426SJohn Baldwin 		dev_relthread(cdev, ref);
14807077c426SJohn Baldwin 		break;
14817077c426SJohn Baldwin 	}
14827077c426SJohn Baldwin 	case OBJT_VNODE:
14837077c426SJohn Baldwin 		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
14847077c426SJohn Baldwin 		    handle, &foff, &object, &writecounted);
14857077c426SJohn Baldwin 		break;
14867077c426SJohn Baldwin 	case OBJT_DEFAULT:
14877077c426SJohn Baldwin 		if (handle == NULL) {
14887077c426SJohn Baldwin 			error = 0;
14897077c426SJohn Baldwin 			break;
14907077c426SJohn Baldwin 		}
14917077c426SJohn Baldwin 		/* FALLTHROUGH */
14927077c426SJohn Baldwin 	default:
14937077c426SJohn Baldwin 		error = EINVAL;
14947077c426SJohn Baldwin 		break;
14957077c426SJohn Baldwin 	}
14967077c426SJohn Baldwin 	if (error)
14977077c426SJohn Baldwin 		return (error);
14987077c426SJohn Baldwin 
14997077c426SJohn Baldwin 	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
15007077c426SJohn Baldwin 	    foff, writecounted, td);
15017077c426SJohn Baldwin 	if (error != 0 && object != NULL) {
15027077c426SJohn Baldwin 		/*
15037077c426SJohn Baldwin 		 * If this mapping was accounted for in the vnode's
15047077c426SJohn Baldwin 		 * writecount, then undo that now.
15057077c426SJohn Baldwin 		 */
15067077c426SJohn Baldwin 		if (writecounted)
1507fe7bcbafSKyle Evans 			vm_pager_release_writecount(object, 0, size);
15087077c426SJohn Baldwin 		vm_object_deallocate(object);
15097077c426SJohn Baldwin 	}
15107077c426SJohn Baldwin 	return (error);
15117077c426SJohn Baldwin }
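
/*
 * Illustrative sketch (hypothetical caller, not part of this file): an
 * anonymous mapping requested from inside the kernel takes the
 * OBJT_DEFAULT/NULL-handle path above; vms and len stand in for the
 * caller's vmspace and mapping length.
 *
 *	vm_offset_t addr;
 *	int error;
 *
 *	addr = 0;
 *	error = vm_mmap(&vms->vm_map, &addr, round_page(len),
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, MAP_ANON,
 *	    OBJT_DEFAULT, NULL, 0);
 *	if (error != 0)
 *		return (error);
 */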
15127077c426SJohn Baldwin 
15137077c426SJohn Baldwin /*
15147077c426SJohn Baldwin  * Internal version of mmap that maps a specific VM object into a
15157077c426SJohn Baldwin  * map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
15167077c426SJohn Baldwin  */
15177077c426SJohn Baldwin int
15187077c426SJohn Baldwin vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
15197077c426SJohn Baldwin     vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
15207077c426SJohn Baldwin     boolean_t writecounted, struct thread *td)
15217077c426SJohn Baldwin {
15226a97a3f7SKonstantin Belousov 	boolean_t curmap, fitit;
15236a97a3f7SKonstantin Belousov 	vm_offset_t max_addr;
15247077c426SJohn Baldwin 	int docow, error, findspace, rv;
1525df8bae1dSRodney W. Grimes 
15266a97a3f7SKonstantin Belousov 	curmap = map == &td->td_proc->p_vmspace->vm_map;
15276a97a3f7SKonstantin Belousov 	if (curmap) {
15282554f86aSMateusz Guzik 		RACCT_PROC_LOCK(td->td_proc);
15292554f86aSMateusz Guzik 		if (map->size + size > lim_cur(td, RLIMIT_VMEM)) {
15302554f86aSMateusz Guzik 			RACCT_PROC_UNLOCK(td->td_proc);
1531070f64feSMatthew Dillon 			return (ENOMEM);
1532070f64feSMatthew Dillon 		}
1533a6492969SAlan Cox 		if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
15342554f86aSMateusz Guzik 			RACCT_PROC_UNLOCK(td->td_proc);
15351ba5ad42SEdward Tomasz Napierala 			return (ENOMEM);
15361ba5ad42SEdward Tomasz Napierala 		}
15377e19eda4SAndrey Zonov 		if (!old_mlock && map->flags & MAP_WIREFUTURE) {
15383ac7d297SAndrey Zonov 			if (ptoa(pmap_wired_count(map->pmap)) + size >
15392554f86aSMateusz Guzik 			    lim_cur(td, RLIMIT_MEMLOCK)) {
15407e19eda4SAndrey Zonov 				racct_set_force(td->td_proc, RACCT_VMEM,
15417e19eda4SAndrey Zonov 				    map->size);
15422554f86aSMateusz Guzik 				RACCT_PROC_UNLOCK(td->td_proc);
15437e19eda4SAndrey Zonov 				return (ENOMEM);
15447e19eda4SAndrey Zonov 			}
15457e19eda4SAndrey Zonov 			error = racct_set(td->td_proc, RACCT_MEMLOCK,
15463ac7d297SAndrey Zonov 			    ptoa(pmap_wired_count(map->pmap)) + size);
15477e19eda4SAndrey Zonov 			if (error != 0) {
15487e19eda4SAndrey Zonov 				racct_set_force(td->td_proc, RACCT_VMEM,
15497e19eda4SAndrey Zonov 				    map->size);
15502554f86aSMateusz Guzik 				RACCT_PROC_UNLOCK(td->td_proc);
15517e19eda4SAndrey Zonov 				return (error);
15527e19eda4SAndrey Zonov 			}
15537e19eda4SAndrey Zonov 		}
15542554f86aSMateusz Guzik 		RACCT_PROC_UNLOCK(td->td_proc);
1555a6492969SAlan Cox 	}
1556070f64feSMatthew Dillon 
1557df8bae1dSRodney W. Grimes 	/*
1558bc9ad247SDavid Greenman 	 * We currently can only deal with page-aligned file offsets.
15597077c426SJohn Baldwin 	 * The mmap() system call already enforces this by subtracting
15607077c426SJohn Baldwin 	 * the page offset from the file offset, but checking here
15617077c426SJohn Baldwin 	 * catches errors in device drivers (e.g. d_mmap_single()
15627077c426SJohn Baldwin 	 * callbacks) and other internal mapping requests (such as in
15637077c426SJohn Baldwin 	 * exec).
1564bc9ad247SDavid Greenman 	 */
1565bc9ad247SDavid Greenman 	if (foff & PAGE_MASK)
1566bc9ad247SDavid Greenman 		return (EINVAL);
1567bc9ad247SDavid Greenman 
156806cb7259SDavid Greenman 	if ((flags & MAP_FIXED) == 0) {
156906cb7259SDavid Greenman 		fitit = TRUE;
157006cb7259SDavid Greenman 		*addr = round_page(*addr);
157106cb7259SDavid Greenman 	} else {
157206cb7259SDavid Greenman 		if (*addr != trunc_page(*addr))
157306cb7259SDavid Greenman 			return (EINVAL);
157406cb7259SDavid Greenman 		fitit = FALSE;
157506cb7259SDavid Greenman 	}
157684110e7eSKonstantin Belousov 
15775f55e841SDavid Greenman 	if (flags & MAP_ANON) {
15787077c426SJohn Baldwin 		if (object != NULL || foff != 0)
15797077c426SJohn Baldwin 			return (EINVAL);
1580c8daea13SAlexander Kabaev 		docow = 0;
158174ffb9afSAlan Cox 	} else if (flags & MAP_PREFAULT_READ)
158274ffb9afSAlan Cox 		docow = MAP_PREFAULT;
158374ffb9afSAlan Cox 	else
15844738fa09SAlan Cox 		docow = MAP_PREFAULT_PARTIAL;
1585df8bae1dSRodney W. Grimes 
15864f79d873SMatthew Dillon 	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
15874738fa09SAlan Cox 		docow |= MAP_COPY_ON_WRITE;
15884f79d873SMatthew Dillon 	if (flags & MAP_NOSYNC)
15894f79d873SMatthew Dillon 		docow |= MAP_DISABLE_SYNCER;
15909730a5daSPaul Saab 	if (flags & MAP_NOCORE)
15919730a5daSPaul Saab 		docow |= MAP_DISABLE_COREDUMP;
15928211bd45SKonstantin Belousov 	/* Shared mappings are also inherited by child processes. */
15938211bd45SKonstantin Belousov 	if (flags & MAP_SHARED)
15948211bd45SKonstantin Belousov 		docow |= MAP_INHERIT_SHARE;
159584110e7eSKonstantin Belousov 	if (writecounted)
1596fe7bcbafSKyle Evans 		docow |= MAP_WRITECOUNT;
15974648ba0aSKonstantin Belousov 	if (flags & MAP_STACK) {
15984648ba0aSKonstantin Belousov 		if (object != NULL)
15994648ba0aSKonstantin Belousov 			return (EINVAL);
16004648ba0aSKonstantin Belousov 		docow |= MAP_STACK_GROWS_DOWN;
16014648ba0aSKonstantin Belousov 	}
160211c42bccSKonstantin Belousov 	if ((flags & MAP_EXCL) != 0)
160311c42bccSKonstantin Belousov 		docow |= MAP_CHECK_EXCL;
160419bd0d9cSKonstantin Belousov 	if ((flags & MAP_GUARD) != 0)
160519bd0d9cSKonstantin Belousov 		docow |= MAP_CREATE_GUARD;
16065850152dSJohn Dyson 
16074648ba0aSKonstantin Belousov 	if (fitit) {
16085aa60b6fSJohn Baldwin 		if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
16095aa60b6fSJohn Baldwin 			findspace = VMFS_SUPER_SPACE;
16105aa60b6fSJohn Baldwin 		else if ((flags & MAP_ALIGNMENT_MASK) != 0)
16115aa60b6fSJohn Baldwin 			findspace = VMFS_ALIGNED_SPACE(flags >>
16125aa60b6fSJohn Baldwin 			    MAP_ALIGNMENT_SHIFT);
16132267af78SJulian Elischer 		else
16145aa60b6fSJohn Baldwin 			findspace = VMFS_OPTIMAL_SPACE;
16156a97a3f7SKonstantin Belousov 		max_addr = 0;
1616edb572a3SJohn Baldwin #ifdef MAP_32BIT
16176a97a3f7SKonstantin Belousov 		if ((flags & MAP_32BIT) != 0)
16186a97a3f7SKonstantin Belousov 			max_addr = MAP_32BIT_MAX_ADDR;
1619edb572a3SJohn Baldwin #endif
16206a97a3f7SKonstantin Belousov 		if (curmap) {
16216a97a3f7SKonstantin Belousov 			rv = vm_map_find_min(map, object, foff, addr, size,
16226a97a3f7SKonstantin Belousov 			    round_page((vm_offset_t)td->td_proc->p_vmspace->
16236a97a3f7SKonstantin Belousov 			    vm_daddr + lim_max(td, RLIMIT_DATA)), max_addr,
16246a97a3f7SKonstantin Belousov 			    findspace, prot, maxprot, docow);
16256a97a3f7SKonstantin Belousov 		} else {
16266a97a3f7SKonstantin Belousov 			rv = vm_map_find(map, object, foff, addr, size,
16276a97a3f7SKonstantin Belousov 			    max_addr, findspace, prot, maxprot, docow);
16286a97a3f7SKonstantin Belousov 		}
16294648ba0aSKonstantin Belousov 	} else {
1630b8ca4ef2SAlan Cox 		rv = vm_map_fixed(map, object, foff, *addr, size,
1631bd7e5f99SJohn Dyson 		    prot, maxprot, docow);
16324648ba0aSKonstantin Belousov 	}
1633bd7e5f99SJohn Dyson 
1634f9230ad6SAlan Cox 	if (rv == KERN_SUCCESS) {
16357fb0c17eSDavid Greenman 		/*
1636f9230ad6SAlan Cox 		 * If the process has requested that all future mappings
1637f9230ad6SAlan Cox 		 * be wired, then heed this.
1638f9230ad6SAlan Cox 		 */
163954a3a114SMark Johnston 		if ((map->flags & MAP_WIREFUTURE) != 0) {
164054a3a114SMark Johnston 			vm_map_lock(map);
164154a3a114SMark Johnston 			if ((map->flags & MAP_WIREFUTURE) != 0)
16428cd6a80dSMark Johnston 				(void)vm_map_wire_locked(map, *addr,
164354a3a114SMark Johnston 				    *addr + size, VM_MAP_WIRE_USER |
164454a3a114SMark Johnston 				    ((flags & MAP_STACK) ? VM_MAP_WIRE_HOLESOK :
164554a3a114SMark Johnston 				    VM_MAP_WIRE_NOHOLES));
164654a3a114SMark Johnston 			vm_map_unlock(map);
16471472f4f4SKonstantin Belousov 		}
1648df8bae1dSRodney W. Grimes 	}
16492e32165cSKonstantin Belousov 	return (vm_mmap_to_errno(rv));
16502e32165cSKonstantin Belousov }
16512e32165cSKonstantin Belousov 
1652f9230ad6SAlan Cox /*
1653f9230ad6SAlan Cox  * Translate a Mach VM return code to zero on success or the appropriate errno
1654f9230ad6SAlan Cox  * on failure.
1655f9230ad6SAlan Cox  */
16562e32165cSKonstantin Belousov int
16572e32165cSKonstantin Belousov vm_mmap_to_errno(int rv)
16582e32165cSKonstantin Belousov {
16592e32165cSKonstantin Belousov 
1660df8bae1dSRodney W. Grimes 	switch (rv) {
1661df8bae1dSRodney W. Grimes 	case KERN_SUCCESS:
1662df8bae1dSRodney W. Grimes 		return (0);
1663df8bae1dSRodney W. Grimes 	case KERN_INVALID_ADDRESS:
1664df8bae1dSRodney W. Grimes 	case KERN_NO_SPACE:
1665df8bae1dSRodney W. Grimes 		return (ENOMEM);
1666df8bae1dSRodney W. Grimes 	case KERN_PROTECTION_FAILURE:
1667df8bae1dSRodney W. Grimes 		return (EACCES);
1668df8bae1dSRodney W. Grimes 	default:
1669df8bae1dSRodney W. Grimes 		return (EINVAL);
1670df8bae1dSRodney W. Grimes 	}
1671df8bae1dSRodney W. Grimes }