/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */

/*
 * Mapped file (mmap) interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/elf.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#if defined(__amd64__) || defined(__i386__) /* for i386_read_exec */
#include <machine/md_var.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

int old_mlock = 0;
SYSCTL_INT(_vm, OID_AUTO, old_mlock, CTLFLAG_RWTUN, &old_mlock, 0,
    "Do not apply RLIMIT_MEMLOCK on mlockall");
static int mincore_mapped = 1;
SYSCTL_INT(_vm, OID_AUTO, mincore_mapped, CTLFLAG_RWTUN, &mincore_mapped, 0,
    "mincore reports mappings, not residency");
static int imply_prot_max = 0;
SYSCTL_INT(_vm, OID_AUTO, imply_prot_max, CTLFLAG_RWTUN, &imply_prot_max, 0,
    "Imply maximum page permissions in mmap() when none are specified");

#ifdef MAP_32BIT
#define	MAP_32BIT_MAX_ADDR	((vm_offset_t)1 << 31)
#endif

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif

int
sys_sbrk(struct thread *td, struct sbrk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

int
sys_sstk(struct thread *td, struct sstk_args *uap)
{
	/* Not yet implemented */
	return (EOPNOTSUPP);
}

#if defined(COMPAT_43)
int
ogetpagesize(struct thread *td, struct ogetpagesize_args *uap)
{

	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 */

/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
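
/*
 * Illustrative userspace sketch (not part of this file): per the comment
 * above, mmap() accepts a non-page-aligned file offset and adjusts the
 * returned pointer up by the page offset.  The path and offset here are
 * hypothetical:
 *
 *	int fd = open("/tmp/data", O_RDONLY);
 *	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 100);
 *	if (p == MAP_FAILED)
 *		err(1, "mmap");
 *	// *p is byte 100 of the file; the mapping begins at trunc_page(100).
 */
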
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

int
sys_mmap(struct thread *td, struct mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}

int
kern_mmap_maxprot(struct proc *p, int prot)
{

	if ((p->p_flag2 & P2_PROTMAX_DISABLE) != 0 ||
	    (p->p_fctl0 & NT_FREEBSD_FCTL_PROTMAX_DISABLE) != 0)
		return (_PROT_ALL);
	if (((p->p_flag2 & P2_PROTMAX_ENABLE) != 0 || imply_prot_max) &&
	    prot != PROT_NONE)
		return (prot);
	return (_PROT_ALL);
}
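
/*
 * Illustrative userspace sketch (not part of this file): a mapping
 * created with an explicit PROT_MAX cap can only have its permissions
 * raised within that cap by later mprotect() calls:
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_MAX(PROT_READ | PROT_WRITE),
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	mprotect(p, len, PROT_READ | PROT_WRITE);	// within the cap
 *	mprotect(p, len, PROT_EXEC);			// should fail, EACCES
 */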

int
kern_mmap(struct thread *td, uintptr_t addr0, size_t len, int prot, int flags,
    int fd, off_t pos)
{

	return (kern_mmap_fpcheck(td, addr0, len, prot, flags, fd, pos, NULL));
}

/*
 * When mmap'ing a file, check_fp_fn may be used for the caller to do any
 * last-minute validation based on the referenced file in a non-racy way.
 */
int
kern_mmap_fpcheck(struct thread *td, uintptr_t addr0, size_t len, int prot,
    int flags, int fd, off_t pos, mmap_check_fp_fn check_fp_fn)
{
	struct vmspace *vms;
	struct file *fp;
	struct proc *p;
	vm_offset_t addr;
	vm_size_t pageoff, size;
	vm_prot_t cap_maxprot;
	int align, error, max_prot;
	cap_rights_t rights;

	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
		return (EINVAL);
	max_prot = PROT_MAX_EXTRACT(prot);
	prot = PROT_EXTRACT(prot);
	if (max_prot != 0 && (max_prot & prot) != prot)
		return (EINVAL);

	p = td->td_proc;

	/*
	 * Always honor PROT_MAX if set.  If not, default to all
	 * permissions unless we're implying maximum permissions.
	 */
	if (max_prot == 0)
		max_prot = kern_mmap_maxprot(p, prot);

	vms = p->p_vmspace;
	fp = NULL;
	AUDIT_ARG_FD(fd);
	addr = addr0;

	/*
	 * Ignore old flags that used to be defined but did not do anything.
	 */
	flags &= ~(MAP_RESERVED0020 | MAP_RESERVED0040);

	/*
	 * Enforce the constraints.
	 * Mapping of length 0 is only allowed for old binaries.
	 * Anonymous mapping shall specify -1 as file descriptor and
	 * zero position for new code.  Be nice to ancient a.out
	 * binaries and correct pos for anonymous mapping, since old
	 * ld.so sometimes issues anonymous map requests with non-zero
	 * pos.
	 */
	if (!SV_CURPROC_FLAG(SV_AOUT)) {
		if ((len == 0 && p->p_osrel >= P_OSREL_MAP_ANON) ||
		    ((flags & MAP_ANON) != 0 && (fd != -1 || pos != 0)))
			return (EINVAL);
	} else {
		if ((flags & MAP_ANON) != 0)
			pos = 0;
	}

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}
	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | MAP_HASSEMAPHORE |
	    MAP_STACK | MAP_NOSYNC | MAP_ANON | MAP_EXCL | MAP_NOCORE |
	    MAP_PREFAULT_READ | MAP_GUARD |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);
	if ((flags & (MAP_EXCL | MAP_FIXED)) == MAP_EXCL)
		return (EINVAL);
	if ((flags & (MAP_SHARED | MAP_PRIVATE)) == (MAP_SHARED | MAP_PRIVATE))
		return (EINVAL);
	if (prot != PROT_NONE &&
	    (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
		return (EINVAL);
	if ((flags & MAP_GUARD) != 0 && (prot != PROT_NONE || fd != -1 ||
	    pos != 0 || (flags & ~(MAP_FIXED | MAP_GUARD | MAP_EXCL |
#ifdef MAP_32BIT
	    MAP_32BIT |
#endif
	    MAP_ALIGNMENT_MASK)) != 0))
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Compute size from len by rounding (on both ends). */
	size = len + pageoff;			/* low end... */
	size = round_page(size);		/* hi end */
	/* Check for rounding up to zero. */
	if (len > size)
		return (ENOMEM);

	/* Ensure alignment is at least a page and fits in a pointer. */
	align = flags & MAP_ALIGNMENT_MASK;
	if (align != 0 && align != MAP_ALIGNED_SUPER &&
	    (align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
	    align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT))
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
#ifdef MAP_32BIT
		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
			return (EINVAL);
	} else if (flags & MAP_32BIT) {
		/*
		 * For MAP_32BIT, override the hint if it is too high and
		 * do not bother moving the mapping past the heap (since
		 * the heap is usually above 2GB).
		 */
		if (addr + size > MAP_32BIT_MAX_ADDR)
			addr = 0;
#endif
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA));
	}
	if (len == 0) {
		/*
		 * Return success without mapping anything for old
		 * binaries that request a page-aligned mapping of
		 * length 0.  For modern binaries, this function
		 * returns an error earlier.
		 */
		error = 0;
	} else if ((flags & MAP_GUARD) != 0) {
		error = vm_mmap_object(&vms->vm_map, &addr, size, VM_PROT_NONE,
		    VM_PROT_NONE, flags, NULL, pos, FALSE, td);
	} else if ((flags & MAP_ANON) != 0) {
		/*
		 * Mapping blank space is trivial.
		 *
		 * This relies on VM_PROT_* matching PROT_*.
		 */
		error = vm_mmap_object(&vms->vm_map, &addr, size, prot,
		    max_prot, flags, NULL, pos, FALSE, td);
	} else {
		/*
		 * Mapping file, get fp for validation and don't let the
		 * descriptor disappear on us if we block.  Check capability
		 * rights, but also return the maximum rights to be combined
		 * with maxprot later.
		 */
		cap_rights_init(&rights, CAP_MMAP);
		if (prot & PROT_READ)
			cap_rights_set(&rights, CAP_MMAP_R);
		if ((flags & MAP_SHARED) != 0) {
			if (prot & PROT_WRITE)
				cap_rights_set(&rights, CAP_MMAP_W);
		}
		if (prot & PROT_EXEC)
			cap_rights_set(&rights, CAP_MMAP_X);
		error = fget_mmap(td, fd, &rights, &cap_maxprot, &fp);
		if (error != 0)
			goto done;
		if ((flags & (MAP_SHARED | MAP_PRIVATE)) == 0 &&
		    p->p_osrel >= P_OSREL_MAP_FSTRICT) {
			error = EINVAL;
			goto done;
		}
		if (check_fp_fn != NULL) {
			error = check_fp_fn(fp, prot, max_prot & cap_maxprot,
			    flags);
			if (error != 0)
				goto done;
		}
		/* This relies on VM_PROT_* matching PROT_*. */
		error = fo_mmap(fp, &vms->vm_map, &addr, size, prot,
		    max_prot & cap_maxprot, flags, pos, td);
	}

	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (fp)
		fdrop(fp, td);

	return (error);
}
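
/*
 * Illustrative userspace sketch (not part of this file): the capability
 * rights assembled above mean that a descriptor limited with
 * cap_rights_limit(2) supports only matching mmap() requests:
 *
 *	cap_rights_t rights;
 *	cap_rights_limit(fd, cap_rights_init(&rights, CAP_MMAP_R));
 *	mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);	// permitted
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *						// should fail, ENOTCAPABLE
 */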

#if defined(COMPAT_FREEBSD6)
int
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
{

	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pos));
}
#endif

#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(struct thread *td, struct ommap_args *uap)
{
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};
	int flags, prot;

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	prot = cvtbsdprot[uap->prot & 0x7];
#if (defined(COMPAT_FREEBSD32) && defined(__amd64__)) || defined(__i386__)
	if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32) &&
	    prot != 0)
		prot |= PROT_EXEC;
#endif
	flags = 0;
	if (uap->flags & OMAP_ANON)
		flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		flags |= MAP_SHARED;
	else
		flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		flags |= MAP_FIXED;
	return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
	    uap->fd, uap->pos));
}
#endif				/* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	size_t len;
	int flags;
};
#endif
int
sys_msync(struct thread *td, struct msync_args *uap)
{

	return (kern_msync(td, (uintptr_t)uap->addr, uap->len, uap->flags));
}

int
kern_msync(struct thread *td, uintptr_t addr0, size_t size, int flags)
{
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;
	int rv;

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}
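
/*
 * Illustrative userspace sketch (not part of this file): flushing a
 * dirty shared mapping.  As checked above, MS_ASYNC may not be combined
 * with MS_INVALIDATE:
 *
 *	p[0] = 'x';					// dirty a page
 *	msync(p, len, MS_SYNC);				// synchronous writeback
 *	msync(p, len, MS_ASYNC | MS_INVALIDATE);	// EINVAL
 */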

#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
int
sys_munmap(struct thread *td, struct munmap_args *uap)
{

	return (kern_munmap(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
	bool pmc_handled;
#endif
	vm_offset_t addr;
	vm_size_t pageoff;
	vm_map_t map;

	if (size == 0)
		return (EINVAL);

	addr = addr0;
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
		return (EINVAL);
	vm_map_lock(map);
#ifdef HWPMC_HOOKS
	pmc_handled = false;
	if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
		pmc_handled = true;
		/*
		 * Inform hwpmc if the address range being unmapped contains
		 * an executable region.
		 */
		pkm.pm_address = (uintptr_t) NULL;
		if (vm_map_lookup_entry(map, addr, &entry)) {
			for (; entry->start < addr + size;
			    entry = vm_map_entry_succ(entry)) {
				if (vm_map_check_protection(map, entry->start,
					entry->end, VM_PROT_EXECUTE) == TRUE) {
					pkm.pm_address = (uintptr_t) addr;
					pkm.pm_size = (size_t) size;
					break;
				}
			}
		}
	}
#endif
	vm_map_delete(map, addr, addr + size);

#ifdef HWPMC_HOOKS
	if (__predict_false(pmc_handled)) {
		/* downgrade the lock to prevent a LOR with the pmc-sx lock */
		vm_map_lock_downgrade(map);
		if (pkm.pm_address != (uintptr_t) NULL)
			PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
		vm_map_unlock_read(map);
	} else
#endif
		vm_map_unlock(map);

	/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
int
sys_mprotect(struct thread *td, struct mprotect_args *uap)
{

	return (kern_mprotect(td, (uintptr_t)uap->addr, uap->len, uap->prot));
}

int
kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
{
	vm_offset_t addr;
	vm_size_t pageoff;
	int vm_error, max_prot;

	addr = addr0;
	if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
		return (EINVAL);
	max_prot = PROT_MAX_EXTRACT(prot);
	prot = PROT_EXTRACT(prot);
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
#ifdef COMPAT_FREEBSD32
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		if (((addr + size) & 0xffffffff) < addr)
			return (EINVAL);
	} else
#endif
	if (addr + size < addr)
		return (EINVAL);

	vm_error = KERN_SUCCESS;
	if (max_prot != 0) {
		if ((max_prot & prot) != prot)
			return (EINVAL);
		vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
		    addr, addr + size, max_prot, TRUE);
	}
	if (vm_error == KERN_SUCCESS)
		vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
		    addr, addr + size, prot, FALSE);

	switch (vm_error) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_RESOURCE_SHORTAGE:
		return (ENOMEM);
	}
	return (EINVAL);
}
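
/*
 * Illustrative userspace sketch (not part of this file): as implemented
 * above, mprotect() may carry a PROT_MAX component, which is applied
 * first as the new maximum before the current protection is set:
 *
 *	mprotect(p, len, PROT_READ | PROT_MAX(PROT_READ));
 *	mprotect(p, len, PROT_READ | PROT_WRITE);	// should now fail
 */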

#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
int
sys_minherit(struct thread *td, struct minherit_args *uap)
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
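
/*
 * Illustrative userspace sketch (not part of this file): controlling
 * what a child observes after fork(2).  INHERIT_SHARE keeps the pages
 * shared instead of the default copy-on-write:
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
 *	minherit(p, len, INHERIT_SHARE);
 *	// After fork(), child writes to p are visible to the parent.
 */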

#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

int
sys_madvise(struct thread *td, struct madvise_args *uap)
{

	return (kern_madvise(td, (uintptr_t)uap->addr, uap->len, uap->behav));
}

int
kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
{
	vm_map_t map;
	vm_offset_t addr, end, start;
	int flags;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (behav == MADV_PROTECT) {
		flags = PPROT_SET;
		return (kern_procctl(td, P_PID, td->td_proc->p_pid,
		    PROC_SPROTECT, &flags));
	}

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	addr = addr0;
	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
		return (EINVAL);
	if ((addr + len) < addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page(addr);
	end = round_page(addr + len);

	/*
	 * vm_map_madvise() checks for illegal values of behav.
	 */
	return (vm_map_madvise(map, start, end, behav));
}
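
/*
 * Illustrative userspace sketch (not part of this file): since the call
 * is advisory, the range is conservatively rounded to whole pages above.
 * Typical hints:
 *
 *	madvise(p, len, MADV_DONTNEED);	// pages may be reclaimed
 *	madvise(p, len, MADV_FREE);	// contents may be discarded
 */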

#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

int
sys_mincore(struct thread *td, struct mincore_args *uap)
{

	return (kern_mincore(td, (uintptr_t)uap->addr, uap->len, uap->vec));
}

int
kern_mincore(struct thread *td, uintptr_t addr0, size_t len, char *vec)
{
	pmap_t pmap;
	vm_map_t map;
	vm_map_entry_t current, entry;
	vm_object_t object;
	vm_offset_t addr, cend, end, first_addr;
	vm_paddr_t pa;
	vm_page_t m;
	vm_pindex_t pindex;
	int error, lastvecindex, mincoreinfo, vecindex;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = trunc_page(addr0);
	end = round_page(addr0 + len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)
		return (ENOMEM);

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		return (ENOMEM);
	}

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	while (entry->start < end) {

		/*
		 * check for contiguity
		 */
		current = entry;
		entry = vm_map_entry_succ(current);
		if (current->end < end &&
		    entry->start > current->end) {
			vm_map_unlock_read(map);
			return (ENOMEM);
		}

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		for (; addr < cend; addr += PAGE_SIZE) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			m = NULL;
			object = NULL;
retry:
			pa = 0;
			mincoreinfo = pmap_mincore(pmap, addr, &pa);
			if (mincore_mapped) {
				/*
				 * We only care about this pmap's
				 * mapping of the page, if any.
				 */
				;
			} else if (pa != 0) {
				/*
				 * The page is mapped by this process but not
				 * both accessed and modified.  It is also
				 * managed.  Acquire the object lock so that
				 * other mappings might be examined.  The page's
				 * identity may change at any point before its
				 * object lock is acquired, so re-validate if
				 * necessary.
				 */
				m = PHYS_TO_VM_PAGE(pa);
				while (object == NULL || m->object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = (vm_object_t)atomic_load_ptr(
					    &m->object);
					if (object == NULL)
						goto retry;
					VM_OBJECT_WLOCK(object);
				}
				if (pa != pmap_extract(pmap, addr))
					goto retry;
				KASSERT(vm_page_all_valid(m),
				    ("mincore: page %p is mapped but invalid",
				    m));
			} else if (mincoreinfo == 0) {
				/*
				 * The page is not mapped by this process.  If
				 * the object implements managed pages, then
				 * determine if the page is resident so that
				 * the mappings might be examined.
				 */
				if (current->object.vm_object != object) {
					if (object != NULL)
						VM_OBJECT_WUNLOCK(object);
					object = current->object.vm_object;
					VM_OBJECT_WLOCK(object);
				}
				if (object->type == OBJT_DEFAULT ||
				    object->type == OBJT_SWAP ||
				    object->type == OBJT_VNODE) {
					pindex = OFF_TO_IDX(current->offset +
					    (addr - current->start));
					m = vm_page_lookup(object, pindex);
					if (m != NULL && vm_page_none_valid(m))
						m = NULL;
					if (m != NULL)
						mincoreinfo = MINCORE_INCORE;
				}
			}
			if (m != NULL) {
				VM_OBJECT_ASSERT_WLOCKED(m->object);

				/* Examine other mappings of the page. */
				if (m->dirty == 0 && pmap_is_modified(m))
					vm_page_dirty(m);
				if (m->dirty != 0)
					mincoreinfo |= MINCORE_MODIFIED_OTHER;

				/*
				 * The first test for PGA_REFERENCED is an
				 * optimization.  The second test is
				 * required because a concurrent pmap
				 * operation could clear the last reference
				 * and set PGA_REFERENCED before the call to
				 * pmap_is_referenced().
				 */
				if ((m->a.flags & PGA_REFERENCED) != 0 ||
				    pmap_is_referenced(m) ||
				    (m->a.flags & PGA_REFERENCED) != 0)
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
			}
			if (object != NULL)
				VM_OBJECT_WUNLOCK(object);

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = atop(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				++lastvecindex;
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = atop(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		++lastvecindex;
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	return (error);
}
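
/*
 * Illustrative userspace sketch (not part of this file): one status byte
 * is reported per page.  With vm.mincore_mapped=1 (the default set
 * above), the bits describe this process's own mappings:
 *
 *	char vec[npages];	// npages = len / PAGE_SIZE (hypothetical)
 *	if (mincore(p, len, vec) == 0 && (vec[0] & MINCORE_INCORE) != 0)
 *		printf("first page is resident\n");
 */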

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
int
sys_mlock(struct thread *td, struct mlock_args *uap)
{

	return (kern_mlock(td->td_proc, td->td_ucred,
	    __DECONST(uintptr_t, uap->addr), uap->len));
}

int
kern_mlock(struct proc *proc, struct ucred *cred, uintptr_t addr0, size_t len)
{
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;
	vm_map_t map;
	unsigned long nsize;
	int error;

	error = priv_check_cred(cred, PRIV_VM_MLOCK);
	if (error)
		return (error);
	addr = addr0;
	size = len;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_user_wired)
		return (ENOMEM);
	map = &proc->p_vmspace->vm_map;
	PROC_LOCK(proc);
	nsize = ptoa(npages + pmap_wired_count(map->pmap));
	if (nsize > lim_cur_proc(proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(proc);
		return (ENOMEM);
	}
	PROC_UNLOCK(proc);
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(proc);
		error = racct_set(proc, RACCT_MEMLOCK, nsize);
		PROC_UNLOCK(proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif
	error = vm_map_wire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(proc);
		racct_set(proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
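
/*
 * Illustrative userspace sketch (not part of this file): wiring a buffer
 * so it cannot be paged out, subject to RLIMIT_MEMLOCK and the
 * vm.max_user_wired limit checked above:
 *
 *	if (mlock(buf, buflen) != 0)
 *		err(1, "mlock");	// e.g. ENOMEM past the limits
 *	munlock(buf, buflen);
 */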
1100df8bae1dSRodney W. Grimes 
1101d2d3e875SBruce Evans #ifndef _SYS_SYSPROTO_H_
11024a40e3d4SJohn Dyson struct mlockall_args {
11034a40e3d4SJohn Dyson 	int	how;
11044a40e3d4SJohn Dyson };
11054a40e3d4SJohn Dyson #endif
11064a40e3d4SJohn Dyson 
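/*
 * mlockall(2): wire every currently mapped page (MCL_CURRENT) and/or
 * arrange for every future mapping to be wired as it is created
 * (MCL_FUTURE, recorded as MAP_WIREFUTURE on the vm_map).
 */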
int
sys_mlockall(struct thread *td, struct mlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MLOCK);
	if (error)
		return (error);

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))
		return (EINVAL);

	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	if (!old_mlock && uap->how & MCL_CURRENT) {
		if (map->size > lim_cur(td, RLIMIT_MEMLOCK))
			return (ENOMEM);
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_set(td->td_proc, RACCT_MEMLOCK, map->size);
		PROC_UNLOCK(td->td_proc);
		if (error != 0)
			return (ENOMEM);
	}
#endif

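	/*
	 * MCL_FUTURE only marks the map; each future mapping is wired
	 * at creation time when MAP_WIREFUTURE is seen (see
	 * vm_mmap_object() below).
	 */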
	if (uap->how & MCL_FUTURE) {
		vm_map_lock(map);
		vm_map_modflags(map, MAP_WIREFUTURE, 0);
		vm_map_unlock(map);
		error = 0;
	}

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall().  vm_map_wire() wires the pages by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
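		/*
		 * Map the Mach return code onto the errno values POSIX
		 * allows for mlockall(): ENOMEM for a resource
		 * shortage, EAGAIN for any other failure to lock.
		 */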
		if (error == KERN_SUCCESS)
			error = 0;
		else if (error == KERN_RESOURCE_SHORTAGE)
			error = ENOMEM;
		else
			error = EAGAIN;
	}
#ifdef RACCT
	if (racct_enable && error != KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

int
sys_munlockall(struct thread *td, struct munlockall_args *uap)
{
	vm_map_t map;
	int error;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_lock(map);
	vm_map_modflags(map, 0, MAP_WIREFUTURE);
	vm_map_unlock(map);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		racct_set(td->td_proc, RACCT_MEMLOCK, 0);
		PROC_UNLOCK(td->td_proc);
	}
#endif

	return (error);
}

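/*
 * Example (userland, illustrative only): a real-time process that
 * wants all of its present and future memory resident would pair the
 * two calls above as
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");
 *	...
 *	munlockall();
 */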
#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
int
sys_munlock(struct thread *td, struct munlock_args *uap)
{

	return (kern_munlock(td, (uintptr_t)uap->addr, uap->len));
}

int
kern_munlock(struct thread *td, uintptr_t addr0, size_t size)
{
	vm_offset_t addr, end, last, start;
#ifdef RACCT
	vm_map_t map;
#endif
	int error;

	error = priv_check(td, PRIV_VM_MUNLOCK);
	if (error)
		return (error);
	addr = addr0;
	last = addr + size;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
		return (EINVAL);
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
	if (racct_enable && error == KERN_SUCCESS) {
		PROC_LOCK(td->td_proc);
		map = &td->td_proc->p_vmspace->vm_map;
		racct_set(td->td_proc, RACCT_MEMLOCK,
		    ptoa(pmap_wired_count(map->pmap)));
		PROC_UNLOCK(td->td_proc);
	}
#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * vm_mmap_vnode()
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on vnodes.
 */
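/*
 * On success, *objp receives a referenced VM object backing the vnode
 * and, for shared writable mappings, *writecounted is set to TRUE
 * after the vnode's writecount has been bumped, so that the filesystem
 * knows writable pages may exist (see vm_pager_update_writecount()).
 */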
int
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp,
    boolean_t *writecounted)
{
	struct vattr va;
	vm_object_t obj;
	vm_ooffset_t foff;
	struct ucred *cred;
	int error, flags;
	bool writex;

	cred = td->td_ucred;
	writex = (*maxprotp & VM_PROT_WRITE) != 0 &&
	    (*flagsp & MAP_SHARED) != 0;
	if ((error = vget(vp, LK_SHARED, td)) != 0)
		return (error);
	AUDIT_ARG_VNODE1(vp);
	foff = *foffp;
	flags = *flagsp;
	obj = vp->v_object;
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object.
		 */
		if (obj == NULL) {
			error = EINVAL;
			goto done;
		}
		if (obj->type == OBJT_VNODE && obj->handle != vp) {
			vput(vp);
			vp = (struct vnode *)obj->handle;
			/*
			 * Filesystems that bypass to an underlying
			 * vnode obey the MP-safety of the underlying
			 * filesystem.  Tmpfs never bypasses.
			 */
			error = vget(vp, LK_SHARED, td);
			if (error != 0)
				return (error);
		}
		if (writex) {
			*writecounted = TRUE;
			vm_pager_update_writecount(obj, 0, objsize);
		}
	} else {
		error = EINVAL;
		goto done;
	}
	if ((error = VOP_GETATTR(vp, &va, cred)))
		goto done;
#ifdef MAC
	/* This relies on VM_PROT_* matching PROT_*. */
	error = mac_vnode_check_mmap(cred, vp, (int)prot, flags);
	if (error != 0)
		goto done;
#endif
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & VM_PROT_WRITE) {
				error = EPERM;
				goto done;
			}
			*maxprotp &= ~VM_PROT_WRITE;
		}
	}
	/*
	 * If it is a regular file without any links, we do not need to
	 * sync it.  Adjust the object size to the size of the actual
	 * file.
	 */
	objsize = round_page(va.va_size);
	if (va.va_nlink == 0)
		flags |= MAP_NOSYNC;
	if (obj->type == OBJT_VNODE) {
		obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
		    cred);
		if (obj == NULL) {
			error = ENOMEM;
			goto done;
		}
	} else {
		KASSERT(obj->type == OBJT_DEFAULT || obj->type == OBJT_SWAP,
		    ("wrong object type"));
		vm_object_reference(obj);
#if VM_NRESERVLEVEL > 0
		if ((obj->flags & OBJ_COLORED) == 0) {
			VM_OBJECT_WLOCK(obj);
			vm_object_color(obj, 0);
			VM_OBJECT_WUNLOCK(obj);
		}
#endif
	}
	*objp = obj;
	*flagsp = flags;

	VOP_MMAPPED(vp);

done:
	if (error != 0 && *writecounted) {
		*writecounted = FALSE;
		vm_pager_update_writecount(obj, objsize, 0);
	}
	vput(vp);
	return (error);
}

/*
 * vm_mmap_cdev()
 *
 * Helper function for vm_mmap.  Performs the sanity checks specific to
 * mmap operations on cdevs.
 */
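/*
 * Devices marked D_MMAP_ANON (e.g. /dev/zero) are turned into plain
 * anonymous mappings; everything else is forced to be a shared
 * mapping, backed either by d_mmap_single() or by the device pager.
 */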
int
vm_mmap_cdev(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct cdev *cdev, struct cdevsw *dsw,
    vm_ooffset_t *foff, vm_object_t *objp)
{
	vm_object_t obj;
	int error, flags;

	flags = *flagsp;

	if (dsw->d_flags & D_MMAP_ANON) {
		*objp = NULL;
		*foff = 0;
		*maxprotp = VM_PROT_ALL;
		*flagsp |= MAP_ANON;
		return (0);
	}
	/*
	 * cdevs do not provide private mappings of any kind.
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if (flags & (MAP_PRIVATE|MAP_COPY))
		return (EINVAL);
	/*
	 * Force device mappings to be shared.
	 */
	flags |= MAP_SHARED;
#ifdef MAC_XXX
	error = mac_cdev_check_mmap(td->td_ucred, cdev, (int)prot);
	if (error != 0)
		return (error);
#endif
	/*
	 * First, try d_mmap_single().  If that is not implemented
	 * (returns ENODEV), fall back to using the device pager.
	 * Note that d_mmap_single() must return a reference to the
	 * object (it must bump the reference count of the object
	 * it returns).
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
	if (error != ENODEV)
		return (error);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    td->td_ucred);
	if (obj == NULL)
		return (EINVAL);
	*objp = obj;
	*flagsp = flags;
	return (0);
}

/*
 * vm_mmap()
 *
 * Internal version of mmap used by exec, System V shared memory, and
 * various device drivers.  Handle is either a vnode pointer, a
 * character device, or NULL for MAP_ANON.
 */
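/*
 * Illustrative call (hypothetical driver code): map "size" bytes of a
 * character device read/write at a kernel-chosen address:
 *
 *	vm_offset_t va = 0;
 *
 *	error = vm_mmap(&p->p_vmspace->vm_map, &va, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, MAP_SHARED,
 *	    OBJT_DEVICE, cdev, 0);
 */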
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	objtype_t handle_type, void *handle,
	vm_ooffset_t foff)
{
	vm_object_t object;
	struct thread *td = curthread;
	int error;
	boolean_t writecounted;

	if (size == 0)
		return (EINVAL);

	size = round_page(size);
	object = NULL;
	writecounted = FALSE;

	/*
	 * Lookup/allocate object.
	 */
	switch (handle_type) {
	case OBJT_DEVICE: {
		struct cdevsw *dsw;
		struct cdev *cdev;
		int ref;

		cdev = handle;
		dsw = dev_refthread(cdev, &ref);
		if (dsw == NULL)
			return (ENXIO);
		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, cdev,
		    dsw, &foff, &object);
		dev_relthread(cdev, ref);
		break;
	}
	case OBJT_VNODE:
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object, &writecounted);
		break;
	case OBJT_DEFAULT:
		if (handle == NULL) {
			error = 0;
			break;
		}
		/* FALLTHROUGH */
	default:
		error = EINVAL;
		break;
	}
	if (error)
		return (error);

	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, writecounted, td);
	if (error != 0 && object != NULL) {
		/*
		 * If this mapping was accounted for in the vnode's
		 * writecount, then undo that now.
		 */
		if (writecounted)
			vm_pager_release_writecount(object, 0, size);
		vm_object_deallocate(object);
	}
	return (error);
}

/*
 * Internal version of mmap that maps a specific VM object into a
 * map.  Called by mmap for MAP_ANON, vm_mmap, shm_mmap, and vn_mmap.
 */
int
vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags, vm_object_t object, vm_ooffset_t foff,
    boolean_t writecounted, struct thread *td)
{
	boolean_t curmap, fitit;
	vm_offset_t max_addr;
	int docow, error, findspace, rv;

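	/*
	 * Resource limits are only enforced when the caller maps into
	 * its own address space: RLIMIT_VMEM always, and also
	 * RLIMIT_MEMLOCK when the map wires all future mappings.
	 */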
	curmap = map == &td->td_proc->p_vmspace->vm_map;
	if (curmap) {
		RACCT_PROC_LOCK(td->td_proc);
		if (map->size + size > lim_cur(td, RLIMIT_VMEM)) {
			RACCT_PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (racct_set(td->td_proc, RACCT_VMEM, map->size + size)) {
			RACCT_PROC_UNLOCK(td->td_proc);
			return (ENOMEM);
		}
		if (!old_mlock && map->flags & MAP_WIREFUTURE) {
			if (ptoa(pmap_wired_count(map->pmap)) + size >
			    lim_cur(td, RLIMIT_MEMLOCK)) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				RACCT_PROC_UNLOCK(td->td_proc);
				return (ENOMEM);
			}
			error = racct_set(td->td_proc, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)) + size);
			if (error != 0) {
				racct_set_force(td->td_proc, RACCT_VMEM,
				    map->size);
				RACCT_PROC_UNLOCK(td->td_proc);
				return (error);
			}
		}
		RACCT_PROC_UNLOCK(td->td_proc);
	}

	/*
	 * We currently can only deal with page-aligned file offsets.
	 * The mmap() system call already enforces this by subtracting
	 * the page offset from the file offset, but checking here
	 * catches errors in device drivers (e.g. d_mmap_single()
	 * callbacks) and other internal mapping requests (such as in
	 * exec).
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
	}

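	/*
	 * Translate the caller's mmap(2)-style MAP_* flags into the
	 * vm_map "copy-on-write" flags passed down in docow; despite
	 * the shared MAP_ prefix the two namespaces are unrelated.
	 */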
	if (flags & MAP_ANON) {
		if (object != NULL || foff != 0)
			return (EINVAL);
		docow = 0;
	} else if (flags & MAP_PREFAULT_READ)
		docow = MAP_PREFAULT;
	else
		docow = MAP_PREFAULT_PARTIAL;

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;
	/* Shared memory is also shared with children. */
	if (flags & MAP_SHARED)
		docow |= MAP_INHERIT_SHARE;
	if (writecounted)
		docow |= MAP_WRITECOUNT;
	if (flags & MAP_STACK) {
		if (object != NULL)
			return (EINVAL);
		docow |= MAP_STACK_GROWS_DOWN;
	}
	if ((flags & MAP_EXCL) != 0)
		docow |= MAP_CHECK_EXCL;
	if ((flags & MAP_GUARD) != 0)
		docow |= MAP_CREATE_GUARD;

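	/*
	 * For non-MAP_FIXED requests, pick a placement policy:
	 * superpage alignment, an explicit alignment encoded by
	 * MAP_ALIGNED(n), or the default heuristic; MAP_32BIT further
	 * bounds the search below MAP_32BIT_MAX_ADDR.  Mapping into
	 * the current map also starts the search above the data
	 * segment's maximum extent.
	 */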
	if (fitit) {
		if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER)
			findspace = VMFS_SUPER_SPACE;
		else if ((flags & MAP_ALIGNMENT_MASK) != 0)
			findspace = VMFS_ALIGNED_SPACE(flags >>
			    MAP_ALIGNMENT_SHIFT);
		else
			findspace = VMFS_OPTIMAL_SPACE;
		max_addr = 0;
#ifdef MAP_32BIT
		if ((flags & MAP_32BIT) != 0)
			max_addr = MAP_32BIT_MAX_ADDR;
#endif
		if (curmap) {
			rv = vm_map_find_min(map, object, foff, addr, size,
			    round_page((vm_offset_t)td->td_proc->p_vmspace->
			    vm_daddr + lim_max(td, RLIMIT_DATA)), max_addr,
			    findspace, prot, maxprot, docow);
		} else {
			rv = vm_map_find(map, object, foff, addr, size,
			    max_addr, findspace, prot, maxprot, docow);
		}
	} else {
		rv = vm_map_fixed(map, object, foff, *addr, size,
		    prot, maxprot, docow);
	}

	if (rv == KERN_SUCCESS) {
		/*
		 * If the process has requested that all future mappings
		 * be wired, then heed this.
		 */
		if ((map->flags & MAP_WIREFUTURE) != 0) {
			vm_map_lock(map);
			if ((map->flags & MAP_WIREFUTURE) != 0)
				(void)vm_map_wire_locked(map, *addr,
				    *addr + size, VM_MAP_WIRE_USER |
				    ((flags & MAP_STACK) ? VM_MAP_WIRE_HOLESOK :
				    VM_MAP_WIRE_NOHOLES));
			vm_map_unlock(map);
		}
	}
	return (vm_mmap_to_errno(rv));
}

/*
 * Translate a Mach VM return code to zero on success or the appropriate
 * errno on failure.
 */
int
vm_mmap_to_errno(int rv)
{

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}