xref: /freebsd/sys/kern/uipc_shm.c (revision 3f07b9d9f87e28e577453e784ae369e18d35157b)
18e38aeffSJohn Baldwin /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
415bcf785SRobert Watson  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
58e38aeffSJohn Baldwin  * All rights reserved.
68e38aeffSJohn Baldwin  *
715bcf785SRobert Watson  * Portions of this software were developed by BAE Systems, the University of
815bcf785SRobert Watson  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
915bcf785SRobert Watson  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
1015bcf785SRobert Watson  * Computing (TC) research program.
1115bcf785SRobert Watson  *
128e38aeffSJohn Baldwin  * Redistribution and use in source and binary forms, with or without
138e38aeffSJohn Baldwin  * modification, are permitted provided that the following conditions
148e38aeffSJohn Baldwin  * are met:
158e38aeffSJohn Baldwin  * 1. Redistributions of source code must retain the above copyright
168e38aeffSJohn Baldwin  *    notice, this list of conditions and the following disclaimer.
178e38aeffSJohn Baldwin  * 2. Redistributions in binary form must reproduce the above copyright
188e38aeffSJohn Baldwin  *    notice, this list of conditions and the following disclaimer in the
198e38aeffSJohn Baldwin  *    documentation and/or other materials provided with the distribution.
208e38aeffSJohn Baldwin  *
218e38aeffSJohn Baldwin  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
228e38aeffSJohn Baldwin  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
238e38aeffSJohn Baldwin  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
248e38aeffSJohn Baldwin  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
258e38aeffSJohn Baldwin  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
268e38aeffSJohn Baldwin  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
278e38aeffSJohn Baldwin  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
288e38aeffSJohn Baldwin  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
298e38aeffSJohn Baldwin  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
308e38aeffSJohn Baldwin  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
318e38aeffSJohn Baldwin  * SUCH DAMAGE.
328e38aeffSJohn Baldwin  */
338e38aeffSJohn Baldwin 
348e38aeffSJohn Baldwin /*
358e38aeffSJohn Baldwin  * Support for shared swap-backed anonymous memory objects via
369afb12baSDavid Bright  * shm_open(2), shm_rename(2), and shm_unlink(2).
379afb12baSDavid Bright  * While most of the implementation is here, the mmap(2) mapping
389afb12baSDavid Bright  * logic lives in vm_mmap.c.
398e38aeffSJohn Baldwin  *
405c066cd2SKonstantin Belousov  * posixshmcontrol(1) allows users to inspect the state of the memory
415c066cd2SKonstantin Belousov  * objects.  The per-uid swap resource limit controls the total
425c066cd2SKonstantin Belousov  * amount of memory that a user can consume for anonymous objects,
435c066cd2SKonstantin Belousov  * including shared ones.
448e38aeffSJohn Baldwin  */
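/*
 * Illustrative userland sketch (an editorial example, not part of this
 * file): a typical consumer creates a named object with shm_open(2),
 * sizes it with ftruncate(2) (which reaches shm_dotruncate() below),
 * and maps it with mmap(2).  The name and length are arbitrary.
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0) {
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *		if (p != MAP_FAILED)
 *			p[0] = 1;
 *	}
 *	close(fd);
 *	shm_unlink("/example");
 */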
458e38aeffSJohn Baldwin 
468e38aeffSJohn Baldwin #include <sys/cdefs.h>
478e38aeffSJohn Baldwin __FBSDID("$FreeBSD$");
488e38aeffSJohn Baldwin 
4912bc222eSJonathan Anderson #include "opt_capsicum.h"
50551a7895SRui Paulo #include "opt_ktrace.h"
5112bc222eSJonathan Anderson 
528e38aeffSJohn Baldwin #include <sys/param.h>
534a144410SRobert Watson #include <sys/capsicum.h>
54610a2b3cSJohn Baldwin #include <sys/conf.h>
558e38aeffSJohn Baldwin #include <sys/fcntl.h>
568e38aeffSJohn Baldwin #include <sys/file.h>
578e38aeffSJohn Baldwin #include <sys/filedesc.h>
582b64ab22SMark Johnston #include <sys/filio.h>
598e38aeffSJohn Baldwin #include <sys/fnv_hash.h>
608e38aeffSJohn Baldwin #include <sys/kernel.h>
6191898857SMark Johnston #include <sys/limits.h>
62551a7895SRui Paulo #include <sys/uio.h>
63551a7895SRui Paulo #include <sys/signal.h>
64cc7b259aSJamie Gritton #include <sys/jail.h>
65551a7895SRui Paulo #include <sys/ktrace.h>
668e38aeffSJohn Baldwin #include <sys/lock.h>
678e38aeffSJohn Baldwin #include <sys/malloc.h>
688e38aeffSJohn Baldwin #include <sys/mman.h>
698e38aeffSJohn Baldwin #include <sys/mutex.h>
709c00bb91SKonstantin Belousov #include <sys/priv.h>
718e38aeffSJohn Baldwin #include <sys/proc.h>
728e38aeffSJohn Baldwin #include <sys/refcount.h>
738e38aeffSJohn Baldwin #include <sys/resourcevar.h>
7489f6b863SAttilio Rao #include <sys/rwlock.h>
7556d0e33eSKonstantin Belousov #include <sys/sbuf.h>
768e38aeffSJohn Baldwin #include <sys/stat.h>
777ee1b208SEd Schouten #include <sys/syscallsubr.h>
788e38aeffSJohn Baldwin #include <sys/sysctl.h>
798e38aeffSJohn Baldwin #include <sys/sysproto.h>
808e38aeffSJohn Baldwin #include <sys/systm.h>
818e38aeffSJohn Baldwin #include <sys/sx.h>
828e38aeffSJohn Baldwin #include <sys/time.h>
838e38aeffSJohn Baldwin #include <sys/vnode.h>
84940cb0e2SKonstantin Belousov #include <sys/unistd.h>
859696feebSJohn Baldwin #include <sys/user.h>
868e38aeffSJohn Baldwin 
8715bcf785SRobert Watson #include <security/audit/audit.h>
888e38aeffSJohn Baldwin #include <security/mac/mac_framework.h>
898e38aeffSJohn Baldwin 
908e38aeffSJohn Baldwin #include <vm/vm.h>
918e38aeffSJohn Baldwin #include <vm/vm_param.h>
928e38aeffSJohn Baldwin #include <vm/pmap.h>
93338e7cf2SJohn Baldwin #include <vm/vm_extern.h>
948e38aeffSJohn Baldwin #include <vm/vm_map.h>
95fb680e16SJohn Baldwin #include <vm/vm_kern.h>
968e38aeffSJohn Baldwin #include <vm/vm_object.h>
978e38aeffSJohn Baldwin #include <vm/vm_page.h>
982971897dSAlan Cox #include <vm/vm_pageout.h>
998e38aeffSJohn Baldwin #include <vm/vm_pager.h>
1008e38aeffSJohn Baldwin #include <vm/swap_pager.h>
1018e38aeffSJohn Baldwin 
1028e38aeffSJohn Baldwin struct shm_mapping {
1038e38aeffSJohn Baldwin 	char		*sm_path;
1048e38aeffSJohn Baldwin 	Fnv32_t		sm_fnv;
1058e38aeffSJohn Baldwin 	struct shmfd	*sm_shmfd;
1068e38aeffSJohn Baldwin 	LIST_ENTRY(shm_mapping) sm_link;
1078e38aeffSJohn Baldwin };
1088e38aeffSJohn Baldwin 
1098e38aeffSJohn Baldwin static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
1108e38aeffSJohn Baldwin static LIST_HEAD(, shm_mapping) *shm_dictionary;
1118e38aeffSJohn Baldwin static struct sx shm_dict_lock;
1128e38aeffSJohn Baldwin static struct mtx shm_timestamp_lock;
1138e38aeffSJohn Baldwin static u_long shm_hash;
1147883ce1fSMateusz Guzik static struct unrhdr64 shm_ino_unr;
115610a2b3cSJohn Baldwin static dev_t shm_dev_ino;
1168e38aeffSJohn Baldwin 
1178e38aeffSJohn Baldwin #define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])
1188e38aeffSJohn Baldwin 
1195be725d7SAndreas Tobler static void	shm_init(void *arg);
1208e38aeffSJohn Baldwin static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
1218e38aeffSJohn Baldwin static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
1228e38aeffSJohn Baldwin static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
123af755d3eSKyle Evans static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
124af755d3eSKyle Evans     void *rl_cookie);
1252d5603feSDavid Bright static int	shm_copyin_path(struct thread *td, const char *userpath_in,
1262d5603feSDavid Bright     char **path_out);
1278e38aeffSJohn Baldwin 
1288e38aeffSJohn Baldwin static fo_rdwr_t	shm_read;
1298e38aeffSJohn Baldwin static fo_rdwr_t	shm_write;
1308e38aeffSJohn Baldwin static fo_truncate_t	shm_truncate;
1312b64ab22SMark Johnston static fo_ioctl_t	shm_ioctl;
1328e38aeffSJohn Baldwin static fo_stat_t	shm_stat;
1338e38aeffSJohn Baldwin static fo_close_t	shm_close;
1349c00bb91SKonstantin Belousov static fo_chmod_t	shm_chmod;
1359c00bb91SKonstantin Belousov static fo_chown_t	shm_chown;
136940cb0e2SKonstantin Belousov static fo_seek_t	shm_seek;
1379696feebSJohn Baldwin static fo_fill_kinfo_t	shm_fill_kinfo;
1387077c426SJohn Baldwin static fo_mmap_t	shm_mmap;
139af755d3eSKyle Evans static fo_get_seals_t	shm_get_seals;
140af755d3eSKyle Evans static fo_add_seals_t	shm_add_seals;
141f1040532SKyle Evans static fo_fallocate_t	shm_fallocate;
1428e38aeffSJohn Baldwin 
1438e38aeffSJohn Baldwin /* File descriptor operations. */
1441bdbd705SKonstantin Belousov struct fileops shm_ops = {
1458e38aeffSJohn Baldwin 	.fo_read = shm_read,
1468e38aeffSJohn Baldwin 	.fo_write = shm_write,
1478e38aeffSJohn Baldwin 	.fo_truncate = shm_truncate,
1482b64ab22SMark Johnston 	.fo_ioctl = shm_ioctl,
1492d69d0dcSJohn Baldwin 	.fo_poll = invfo_poll,
1502d69d0dcSJohn Baldwin 	.fo_kqfilter = invfo_kqfilter,
1518e38aeffSJohn Baldwin 	.fo_stat = shm_stat,
1528e38aeffSJohn Baldwin 	.fo_close = shm_close,
1539c00bb91SKonstantin Belousov 	.fo_chmod = shm_chmod,
1549c00bb91SKonstantin Belousov 	.fo_chown = shm_chown,
155227aaa86SKonstantin Belousov 	.fo_sendfile = vn_sendfile,
156940cb0e2SKonstantin Belousov 	.fo_seek = shm_seek,
1579696feebSJohn Baldwin 	.fo_fill_kinfo = shm_fill_kinfo,
1587077c426SJohn Baldwin 	.fo_mmap = shm_mmap,
159af755d3eSKyle Evans 	.fo_get_seals = shm_get_seals,
160af755d3eSKyle Evans 	.fo_add_seals = shm_add_seals,
161f1040532SKyle Evans 	.fo_fallocate = shm_fallocate,
162940cb0e2SKonstantin Belousov 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
1638e38aeffSJohn Baldwin };
1648e38aeffSJohn Baldwin 
1658e38aeffSJohn Baldwin FEATURE(posix_shm, "POSIX shared memory");
1668e38aeffSJohn Baldwin 
1678e38aeffSJohn Baldwin static int
16841cf41fdSKonstantin Belousov uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
16941cf41fdSKonstantin Belousov {
17041cf41fdSKonstantin Belousov 	vm_page_t m;
17141cf41fdSKonstantin Belousov 	vm_pindex_t idx;
17241cf41fdSKonstantin Belousov 	size_t tlen;
17341cf41fdSKonstantin Belousov 	int error, offset, rv;
17441cf41fdSKonstantin Belousov 
17541cf41fdSKonstantin Belousov 	idx = OFF_TO_IDX(uio->uio_offset);
17641cf41fdSKonstantin Belousov 	offset = uio->uio_offset & PAGE_MASK;
17741cf41fdSKonstantin Belousov 	tlen = MIN(PAGE_SIZE - offset, len);
17841cf41fdSKonstantin Belousov 
179f72eaaebSJeff Roberson 	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
180f72eaaebSJeff Roberson 	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
181f72eaaebSJeff Roberson 	if (rv == VM_PAGER_OK)
182f72eaaebSJeff Roberson 		goto found;
18341cf41fdSKonstantin Belousov 
18441cf41fdSKonstantin Belousov 	/*
1856311d7aaSWill Andrews 	 * Read I/O without either a corresponding resident page or swap
1866311d7aaSWill Andrews 	 * page: use zero_region.  This is intended to avoid instantiating
1876311d7aaSWill Andrews 	 * pages on read from a sparse region.
1886311d7aaSWill Andrews 	 */
189f72eaaebSJeff Roberson 	VM_OBJECT_WLOCK(obj);
190f72eaaebSJeff Roberson 	m = vm_page_lookup(obj, idx);
191f72eaaebSJeff Roberson 	if (uio->uio_rw == UIO_READ && m == NULL &&
1926311d7aaSWill Andrews 	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
1936311d7aaSWill Andrews 		VM_OBJECT_WUNLOCK(obj);
194b9062c93SKonstantin Belousov 		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
1956311d7aaSWill Andrews 	}
1966311d7aaSWill Andrews 
1976311d7aaSWill Andrews 	/*
19841cf41fdSKonstantin Belousov 	 * Although the tmpfs vnode lock is held here, it is
19941cf41fdSKonstantin Belousov 	 * nonetheless safe to sleep waiting for a free page.  The
20041cf41fdSKonstantin Belousov 	 * pageout daemon does not need to acquire the tmpfs vnode
20141cf41fdSKonstantin Belousov 	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
20141cf41fdSKonstantin Belousov 	 * lock to page out tobj's pages because tobj is an OBJT_SWAP
20341cf41fdSKonstantin Belousov 	 */
204c7575748SJeff Roberson 	rv = vm_page_grab_valid(&m, obj, idx,
205a8081778SJeff Roberson 	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
20641cf41fdSKonstantin Belousov 	if (rv != VM_PAGER_OK) {
20741cf41fdSKonstantin Belousov 		VM_OBJECT_WUNLOCK(obj);
208c7575748SJeff Roberson 		printf("uiomove_object: vm_obj %p idx %ju pager error %d\n",
209c7575748SJeff Roberson 		    obj, (uintmax_t)idx, rv);
21041cf41fdSKonstantin Belousov 		return (EIO);
21141cf41fdSKonstantin Belousov 	}
21241cf41fdSKonstantin Belousov 	VM_OBJECT_WUNLOCK(obj);
213f72eaaebSJeff Roberson 
214f72eaaebSJeff Roberson found:
21541cf41fdSKonstantin Belousov 	error = uiomove_fromphys(&m, offset, tlen, uio);
216a8081778SJeff Roberson 	if (uio->uio_rw == UIO_WRITE && error == 0)
217a8081778SJeff Roberson 		vm_page_set_dirty(m);
218d29f674fSJeff Roberson 	vm_page_activate(m);
219a8081778SJeff Roberson 	vm_page_sunbusy(m);
22041cf41fdSKonstantin Belousov 
22141cf41fdSKonstantin Belousov 	return (error);
22241cf41fdSKonstantin Belousov }
22341cf41fdSKonstantin Belousov 
22441cf41fdSKonstantin Belousov int
22541cf41fdSKonstantin Belousov uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
22641cf41fdSKonstantin Belousov {
22741cf41fdSKonstantin Belousov 	ssize_t resid;
22841cf41fdSKonstantin Belousov 	size_t len;
22941cf41fdSKonstantin Belousov 	int error;
23041cf41fdSKonstantin Belousov 
23141cf41fdSKonstantin Belousov 	error = 0;
23241cf41fdSKonstantin Belousov 	while ((resid = uio->uio_resid) > 0) {
23341cf41fdSKonstantin Belousov 		if (obj_size <= uio->uio_offset)
23441cf41fdSKonstantin Belousov 			break;
23541cf41fdSKonstantin Belousov 		len = MIN(obj_size - uio->uio_offset, resid);
23641cf41fdSKonstantin Belousov 		if (len == 0)
23741cf41fdSKonstantin Belousov 			break;
23841cf41fdSKonstantin Belousov 		error = uiomove_object_page(obj, len, uio);
23941cf41fdSKonstantin Belousov 		if (error != 0 || resid == uio->uio_resid)
24041cf41fdSKonstantin Belousov 			break;
24141cf41fdSKonstantin Belousov 	}
24241cf41fdSKonstantin Belousov 	return (error);
24341cf41fdSKonstantin Belousov }
24441cf41fdSKonstantin Belousov 
24541cf41fdSKonstantin Belousov static int
246940cb0e2SKonstantin Belousov shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
247940cb0e2SKonstantin Belousov {
248940cb0e2SKonstantin Belousov 	struct shmfd *shmfd;
249940cb0e2SKonstantin Belousov 	off_t foffset;
250940cb0e2SKonstantin Belousov 	int error;
251940cb0e2SKonstantin Belousov 
252940cb0e2SKonstantin Belousov 	shmfd = fp->f_data;
253940cb0e2SKonstantin Belousov 	foffset = foffset_lock(fp, 0);
254940cb0e2SKonstantin Belousov 	error = 0;
255940cb0e2SKonstantin Belousov 	switch (whence) {
256940cb0e2SKonstantin Belousov 	case L_INCR:
257940cb0e2SKonstantin Belousov 		if (foffset < 0 ||
258940cb0e2SKonstantin Belousov 		    (offset > 0 && foffset > OFF_MAX - offset)) {
259940cb0e2SKonstantin Belousov 			error = EOVERFLOW;
260940cb0e2SKonstantin Belousov 			break;
261940cb0e2SKonstantin Belousov 		}
262940cb0e2SKonstantin Belousov 		offset += foffset;
263940cb0e2SKonstantin Belousov 		break;
264940cb0e2SKonstantin Belousov 	case L_XTND:
265940cb0e2SKonstantin Belousov 		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
266940cb0e2SKonstantin Belousov 			error = EOVERFLOW;
267940cb0e2SKonstantin Belousov 			break;
268940cb0e2SKonstantin Belousov 		}
269940cb0e2SKonstantin Belousov 		offset += shmfd->shm_size;
270940cb0e2SKonstantin Belousov 		break;
271940cb0e2SKonstantin Belousov 	case L_SET:
272940cb0e2SKonstantin Belousov 		break;
273940cb0e2SKonstantin Belousov 	default:
274940cb0e2SKonstantin Belousov 		error = EINVAL;
275940cb0e2SKonstantin Belousov 	}
276940cb0e2SKonstantin Belousov 	if (error == 0) {
277940cb0e2SKonstantin Belousov 		if (offset < 0 || offset > shmfd->shm_size)
278940cb0e2SKonstantin Belousov 			error = EINVAL;
279940cb0e2SKonstantin Belousov 		else
2806f2b769cSJohn-Mark Gurney 			td->td_uretoff.tdu_off = offset;
281940cb0e2SKonstantin Belousov 	}
282940cb0e2SKonstantin Belousov 	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
283940cb0e2SKonstantin Belousov 	return (error);
284940cb0e2SKonstantin Belousov }
285940cb0e2SKonstantin Belousov 
286940cb0e2SKonstantin Belousov static int
2878e38aeffSJohn Baldwin shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
2888e38aeffSJohn Baldwin     int flags, struct thread *td)
2898e38aeffSJohn Baldwin {
290940cb0e2SKonstantin Belousov 	struct shmfd *shmfd;
291940cb0e2SKonstantin Belousov 	void *rl_cookie;
292940cb0e2SKonstantin Belousov 	int error;
2938e38aeffSJohn Baldwin 
294940cb0e2SKonstantin Belousov 	shmfd = fp->f_data;
295940cb0e2SKonstantin Belousov #ifdef MAC
296940cb0e2SKonstantin Belousov 	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
297940cb0e2SKonstantin Belousov 	if (error)
298940cb0e2SKonstantin Belousov 		return (error);
299940cb0e2SKonstantin Belousov #endif
3006ea906eeSJilles Tjoelker 	foffset_lock_uio(fp, uio, flags);
3016ea906eeSJilles Tjoelker 	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
3026ea906eeSJilles Tjoelker 	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
303940cb0e2SKonstantin Belousov 	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
304940cb0e2SKonstantin Belousov 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
305940cb0e2SKonstantin Belousov 	foffset_unlock_uio(fp, uio, flags);
306940cb0e2SKonstantin Belousov 	return (error);
3078e38aeffSJohn Baldwin }
3088e38aeffSJohn Baldwin 
3098e38aeffSJohn Baldwin static int
3108e38aeffSJohn Baldwin shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
3118e38aeffSJohn Baldwin     int flags, struct thread *td)
3128e38aeffSJohn Baldwin {
313940cb0e2SKonstantin Belousov 	struct shmfd *shmfd;
314940cb0e2SKonstantin Belousov 	void *rl_cookie;
315940cb0e2SKonstantin Belousov 	int error;
316*3f07b9d9SKyle Evans 	off_t size;
3178e38aeffSJohn Baldwin 
318940cb0e2SKonstantin Belousov 	shmfd = fp->f_data;
319940cb0e2SKonstantin Belousov #ifdef MAC
320940cb0e2SKonstantin Belousov 	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
321940cb0e2SKonstantin Belousov 	if (error)
322940cb0e2SKonstantin Belousov 		return (error);
323940cb0e2SKonstantin Belousov #endif
324940cb0e2SKonstantin Belousov 	foffset_lock_uio(fp, uio, flags);
325*3f07b9d9SKyle Evans 	if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
326*3f07b9d9SKyle Evans 		/*
327*3f07b9d9SKyle Evans 		 * Overflow is only an error if we're supposed to expand on
328*3f07b9d9SKyle Evans 		 * write.  Otherwise, we'll just truncate the write to the
329*3f07b9d9SKyle Evans 		 * size of the file, which can only grow up to OFF_MAX.
330*3f07b9d9SKyle Evans 		 */
331*3f07b9d9SKyle Evans 		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
332*3f07b9d9SKyle Evans 			foffset_unlock_uio(fp, uio, flags);
333*3f07b9d9SKyle Evans 			return (EFBIG);
334*3f07b9d9SKyle Evans 		}
335*3f07b9d9SKyle Evans 
336*3f07b9d9SKyle Evans 		size = shmfd->shm_size;
337*3f07b9d9SKyle Evans 	} else {
338*3f07b9d9SKyle Evans 		size = uio->uio_offset + uio->uio_resid;
339*3f07b9d9SKyle Evans 	}
340940cb0e2SKonstantin Belousov 	if ((flags & FOF_OFFSET) == 0) {
341940cb0e2SKonstantin Belousov 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
342940cb0e2SKonstantin Belousov 		    &shmfd->shm_mtx);
343940cb0e2SKonstantin Belousov 	} else {
344940cb0e2SKonstantin Belousov 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
345*3f07b9d9SKyle Evans 		    size, &shmfd->shm_mtx);
346940cb0e2SKonstantin Belousov 	}
347*3f07b9d9SKyle Evans 	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
348af755d3eSKyle Evans 		error = EPERM;
349*3f07b9d9SKyle Evans 	} else {
350*3f07b9d9SKyle Evans 		error = 0;
351*3f07b9d9SKyle Evans 		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
352*3f07b9d9SKyle Evans 		    size > shmfd->shm_size) {
353*3f07b9d9SKyle Evans 			VM_OBJECT_WLOCK(shmfd->shm_object);
354*3f07b9d9SKyle Evans 			error = shm_dotruncate_locked(shmfd, size, rl_cookie);
355*3f07b9d9SKyle Evans 			VM_OBJECT_WUNLOCK(shmfd->shm_object);
356*3f07b9d9SKyle Evans 		}
357*3f07b9d9SKyle Evans 		if (error == 0)
358*3f07b9d9SKyle Evans 			error = uiomove_object(shmfd->shm_object,
359*3f07b9d9SKyle Evans 			    shmfd->shm_size, uio);
360*3f07b9d9SKyle Evans 	}
361940cb0e2SKonstantin Belousov 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
362940cb0e2SKonstantin Belousov 	foffset_unlock_uio(fp, uio, flags);
363940cb0e2SKonstantin Belousov 	return (error);
3648e38aeffSJohn Baldwin }
3658e38aeffSJohn Baldwin 
3668e38aeffSJohn Baldwin static int
3678e38aeffSJohn Baldwin shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
3688e38aeffSJohn Baldwin     struct thread *td)
3698e38aeffSJohn Baldwin {
3708e38aeffSJohn Baldwin 	struct shmfd *shmfd;
3718e38aeffSJohn Baldwin #ifdef MAC
3728e38aeffSJohn Baldwin 	int error;
3738e38aeffSJohn Baldwin #endif
3748e38aeffSJohn Baldwin 
3758e38aeffSJohn Baldwin 	shmfd = fp->f_data;
3768e38aeffSJohn Baldwin #ifdef MAC
3778e38aeffSJohn Baldwin 	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
3788e38aeffSJohn Baldwin 	if (error)
3798e38aeffSJohn Baldwin 		return (error);
3808e38aeffSJohn Baldwin #endif
3813364c323SKonstantin Belousov 	return (shm_dotruncate(shmfd, length));
3828e38aeffSJohn Baldwin }
3838e38aeffSJohn Baldwin 
3842b64ab22SMark Johnston int
3852b64ab22SMark Johnston shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
3862b64ab22SMark Johnston     struct thread *td)
3872b64ab22SMark Johnston {
3882b64ab22SMark Johnston 
3892b64ab22SMark Johnston 	switch (com) {
3902b64ab22SMark Johnston 	case FIONBIO:
3912b64ab22SMark Johnston 	case FIOASYNC:
3922b64ab22SMark Johnston 		/*
3932b64ab22SMark Johnston 		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
3942b64ab22SMark Johnston 		 * just like it would on an unlinked regular file
3952b64ab22SMark Johnston 		 * just like it would on an unlinked regular file.
3962b64ab22SMark Johnston 		return (0);
3972b64ab22SMark Johnston 	default:
3982b64ab22SMark Johnston 		return (ENOTTY);
3992b64ab22SMark Johnston 	}
4002b64ab22SMark Johnston }
4012b64ab22SMark Johnston 
4028e38aeffSJohn Baldwin static int
4038e38aeffSJohn Baldwin shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
4048e38aeffSJohn Baldwin     struct thread *td)
4058e38aeffSJohn Baldwin {
4068e38aeffSJohn Baldwin 	struct shmfd *shmfd;
4078e38aeffSJohn Baldwin #ifdef MAC
4088e38aeffSJohn Baldwin 	int error;
4098e38aeffSJohn Baldwin #endif
4108e38aeffSJohn Baldwin 
4118e38aeffSJohn Baldwin 	shmfd = fp->f_data;
4128e38aeffSJohn Baldwin 
4138e38aeffSJohn Baldwin #ifdef MAC
4148e38aeffSJohn Baldwin 	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
4158e38aeffSJohn Baldwin 	if (error)
4168e38aeffSJohn Baldwin 		return (error);
4178e38aeffSJohn Baldwin #endif
4188e38aeffSJohn Baldwin 
4198e38aeffSJohn Baldwin 	/*
4208e38aeffSJohn Baldwin 	 * Attempt to return sane-ish values for fstat() on a memory file
4218e38aeffSJohn Baldwin 	 * descriptor.
4228e38aeffSJohn Baldwin 	 */
4238e38aeffSJohn Baldwin 	bzero(sb, sizeof(*sb));
4248e38aeffSJohn Baldwin 	sb->st_blksize = PAGE_SIZE;
4258e38aeffSJohn Baldwin 	sb->st_size = shmfd->shm_size;
42655e0987aSPedro F. Giffuni 	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
4279c00bb91SKonstantin Belousov 	mtx_lock(&shm_timestamp_lock);
428510ea843SEd Schouten 	sb->st_atim = shmfd->shm_atime;
429510ea843SEd Schouten 	sb->st_ctim = shmfd->shm_ctime;
430510ea843SEd Schouten 	sb->st_mtim = shmfd->shm_mtime;
431510ea843SEd Schouten 	sb->st_birthtim = shmfd->shm_birthtime;
4329c00bb91SKonstantin Belousov 	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
4338e38aeffSJohn Baldwin 	sb->st_uid = shmfd->shm_uid;
4348e38aeffSJohn Baldwin 	sb->st_gid = shmfd->shm_gid;
4359c00bb91SKonstantin Belousov 	mtx_unlock(&shm_timestamp_lock);
436610a2b3cSJohn Baldwin 	sb->st_dev = shm_dev_ino;
437610a2b3cSJohn Baldwin 	sb->st_ino = shmfd->shm_ino;
438e4b77548SKonstantin Belousov 	sb->st_nlink = shmfd->shm_object->ref_count;
4398e38aeffSJohn Baldwin 
4408e38aeffSJohn Baldwin 	return (0);
4418e38aeffSJohn Baldwin }
4428e38aeffSJohn Baldwin 
4438e38aeffSJohn Baldwin static int
4448e38aeffSJohn Baldwin shm_close(struct file *fp, struct thread *td)
4458e38aeffSJohn Baldwin {
4468e38aeffSJohn Baldwin 	struct shmfd *shmfd;
4478e38aeffSJohn Baldwin 
4488e38aeffSJohn Baldwin 	shmfd = fp->f_data;
4498e38aeffSJohn Baldwin 	fp->f_data = NULL;
4508e38aeffSJohn Baldwin 	shm_drop(shmfd);
4518e38aeffSJohn Baldwin 
4528e38aeffSJohn Baldwin 	return (0);
4538e38aeffSJohn Baldwin }
4548e38aeffSJohn Baldwin 
455af755d3eSKyle Evans static int
4562d5603feSDavid Bright shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
4562d5603feSDavid Bright {
4572d5603feSDavid Bright 	int error;
4582d5603feSDavid Bright 	char *path;
4592d5603feSDavid Bright 	const char *pr_path;
4602d5603feSDavid Bright 	size_t pr_pathlen;
4612d5603feSDavid Bright 
4622d5603feSDavid Bright 	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
4632d5603feSDavid Bright 	pr_path = td->td_ucred->cr_prison->pr_path;
4642d5603feSDavid Bright 
4652d5603feSDavid Bright 	/* Construct a full pathname for jailed callers. */
4662d5603feSDavid Bright 	pr_pathlen = strcmp(pr_path, "/") ==
4672d5603feSDavid Bright 	    0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
4682d5603feSDavid Bright 	error = copyinstr(userpath_in, path + pr_pathlen,
4692d5603feSDavid Bright 	    MAXPATHLEN - pr_pathlen, NULL);
4702d5603feSDavid Bright 	if (error != 0)
4712d5603feSDavid Bright 		goto out;
4722d5603feSDavid Bright 
4732d5603feSDavid Bright #ifdef KTRACE
4742d5603feSDavid Bright 	if (KTRPOINT(curthread, KTR_NAMEI))
4752d5603feSDavid Bright 		ktrnamei(path);
4762d5603feSDavid Bright #endif
4772d5603feSDavid Bright 
4782d5603feSDavid Bright 	/* Require paths to start with a '/' character. */
4792d5603feSDavid Bright 	if (path[pr_pathlen] != '/') {
4802d5603feSDavid Bright 		error = EINVAL;
4812d5603feSDavid Bright 		goto out;
4822d5603feSDavid Bright 	}
4832d5603feSDavid Bright 
4842d5603feSDavid Bright 	*path_out = path;
4852d5603feSDavid Bright 
4862d5603feSDavid Bright out:
4872d5603feSDavid Bright 	if (error != 0)
4882d5603feSDavid Bright 		free(path, M_SHMFD);
4892d5603feSDavid Bright 
4902d5603feSDavid Bright 	return (error);
4912d5603feSDavid Bright }
4922d5603feSDavid Bright 
4932d5603feSDavid Bright static int
494af755d3eSKyle Evans shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
4958e38aeffSJohn Baldwin {
4968e38aeffSJohn Baldwin 	vm_object_t object;
497093c7f39SGleb Smirnoff 	vm_page_t m;
4982971897dSAlan Cox 	vm_pindex_t idx, nobjsize;
4993364c323SKonstantin Belousov 	vm_ooffset_t delta;
5002971897dSAlan Cox 	int base, rv;
5018e38aeffSJohn Baldwin 
5022a016de1SAlan Cox 	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
5038e38aeffSJohn Baldwin 	object = shmfd->shm_object;
504af755d3eSKyle Evans 	VM_OBJECT_ASSERT_WLOCKED(object);
505af755d3eSKyle Evans 	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
506af755d3eSKyle Evans 	if (length == shmfd->shm_size)
5073364c323SKonstantin Belousov 		return (0);
5088e38aeffSJohn Baldwin 	nobjsize = OFF_TO_IDX(length + PAGE_MASK);
5098e38aeffSJohn Baldwin 
5108e38aeffSJohn Baldwin 	/* Are we shrinking?  If so, trim the end. */
5118e38aeffSJohn Baldwin 	if (length < shmfd->shm_size) {
512af755d3eSKyle Evans 		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
513af755d3eSKyle Evans 			return (EPERM);
514af755d3eSKyle Evans 
515fb680e16SJohn Baldwin 		/*
516fb680e16SJohn Baldwin 		 * Disallow any requests to shrink the size if this
517fb680e16SJohn Baldwin 		 * object is mapped into the kernel.
518fb680e16SJohn Baldwin 		 */
519af755d3eSKyle Evans 		if (shmfd->shm_kmappings > 0)
520fb680e16SJohn Baldwin 			return (EBUSY);
5212971897dSAlan Cox 
5222971897dSAlan Cox 		/*
5232971897dSAlan Cox 		 * Zero the truncated part of the last page.
5242971897dSAlan Cox 		 */
5252971897dSAlan Cox 		base = length & PAGE_MASK;
5262971897dSAlan Cox 		if (base != 0) {
5272971897dSAlan Cox 			idx = OFF_TO_IDX(length);
5282971897dSAlan Cox retry:
52963e97555SJeff Roberson 			m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
5302971897dSAlan Cox 			if (m != NULL) {
5310012f373SJeff Roberson 				MPASS(vm_page_all_valid(m));
5322971897dSAlan Cox 			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
5338d6fbbb8SJeff Roberson 				m = vm_page_alloc(object, idx,
5348d6fbbb8SJeff Roberson 				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
5358d6fbbb8SJeff Roberson 				if (m == NULL)
5362971897dSAlan Cox 					goto retry;
537d6e13f3bSJeff Roberson 				vm_object_pip_add(object, 1);
538d6e13f3bSJeff Roberson 				VM_OBJECT_WUNLOCK(object);
5397667839aSAlan Cox 				rv = vm_pager_get_pages(object, &m, 1, NULL,
5407667839aSAlan Cox 				    NULL);
541d6e13f3bSJeff Roberson 				VM_OBJECT_WLOCK(object);
542d6e13f3bSJeff Roberson 				vm_object_pip_wakeup(object);
5432971897dSAlan Cox 				if (rv == VM_PAGER_OK) {
5442d612d2dSAlan Cox 					/*
5452d612d2dSAlan Cox 					 * Since the page was not resident,
5462d612d2dSAlan Cox 					 * and therefore not recently
5472d612d2dSAlan Cox 					 * accessed, immediately enqueue it
5482d612d2dSAlan Cox 					 * for asynchronous laundering.  The
5492d612d2dSAlan Cox 					 * current operation is not regarded
5502d612d2dSAlan Cox 					 * as an access.
5512d612d2dSAlan Cox 					 */
5522d612d2dSAlan Cox 					vm_page_launder(m);
5532971897dSAlan Cox 				} else {
5542971897dSAlan Cox 					vm_page_free(m);
55589f6b863SAttilio Rao 					VM_OBJECT_WUNLOCK(object);
5562971897dSAlan Cox 					return (EIO);
5572971897dSAlan Cox 				}
5582971897dSAlan Cox 			}
5592971897dSAlan Cox 			if (m != NULL) {
5602971897dSAlan Cox 				pmap_zero_page_area(m, base, PAGE_SIZE - base);
5610012f373SJeff Roberson 				KASSERT(vm_page_all_valid(m),
5622971897dSAlan Cox 				    ("shm_dotruncate: page %p is invalid", m));
563a8081778SJeff Roberson 				vm_page_set_dirty(m);
56463e97555SJeff Roberson 				vm_page_xunbusy(m);
5652971897dSAlan Cox 			}
5662971897dSAlan Cox 		}
5672a016de1SAlan Cox 		delta = IDX_TO_OFF(object->size - nobjsize);
5683364c323SKonstantin Belousov 
5698e38aeffSJohn Baldwin 		if (nobjsize < object->size)
5708e38aeffSJohn Baldwin 			vm_object_page_remove(object, nobjsize, object->size,
5716bbee8e2SAlan Cox 			    0);
5728e38aeffSJohn Baldwin 
5733364c323SKonstantin Belousov 		/* Free the swap accounted for shm */
574ef694c1aSEdward Tomasz Napierala 		swap_release_by_cred(delta, object->cred);
5753364c323SKonstantin Belousov 		object->charge -= delta;
5763364c323SKonstantin Belousov 	} else {
577af755d3eSKyle Evans 		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
578af755d3eSKyle Evans 			return (EPERM);
579af755d3eSKyle Evans 
5802a016de1SAlan Cox 		/* Try to reserve additional swap space. */
5812a016de1SAlan Cox 		delta = IDX_TO_OFF(nobjsize - object->size);
582af755d3eSKyle Evans 		if (!swap_reserve_by_cred(delta, object->cred))
5833364c323SKonstantin Belousov 			return (ENOMEM);
5843364c323SKonstantin Belousov 		object->charge += delta;
5858e38aeffSJohn Baldwin 	}
5868e38aeffSJohn Baldwin 	shmfd->shm_size = length;
5878e38aeffSJohn Baldwin 	mtx_lock(&shm_timestamp_lock);
5888e38aeffSJohn Baldwin 	vfs_timestamp(&shmfd->shm_ctime);
5898e38aeffSJohn Baldwin 	shmfd->shm_mtime = shmfd->shm_ctime;
5908e38aeffSJohn Baldwin 	mtx_unlock(&shm_timestamp_lock);
5918e38aeffSJohn Baldwin 	object->size = nobjsize;
5923364c323SKonstantin Belousov 	return (0);
5938e38aeffSJohn Baldwin }
5948e38aeffSJohn Baldwin 
595af755d3eSKyle Evans int
596af755d3eSKyle Evans shm_dotruncate(struct shmfd *shmfd, off_t length)
597af755d3eSKyle Evans {
598af755d3eSKyle Evans 	void *rl_cookie;
599af755d3eSKyle Evans 	int error;
600af755d3eSKyle Evans 
601af755d3eSKyle Evans 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
602af755d3eSKyle Evans 	    &shmfd->shm_mtx);
603af755d3eSKyle Evans 	VM_OBJECT_WLOCK(shmfd->shm_object);
604af755d3eSKyle Evans 	error = shm_dotruncate_locked(shmfd, length, rl_cookie);
605af755d3eSKyle Evans 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
606af755d3eSKyle Evans 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
607af755d3eSKyle Evans 	return (error);
608af755d3eSKyle Evans }
609af755d3eSKyle Evans 
6108e38aeffSJohn Baldwin /*
6118e38aeffSJohn Baldwin  * shmfd object management including creation and reference counting
6128e38aeffSJohn Baldwin  * routines.
6138e38aeffSJohn Baldwin  */
6141bdbd705SKonstantin Belousov struct shmfd *
6158e38aeffSJohn Baldwin shm_alloc(struct ucred *ucred, mode_t mode)
6168e38aeffSJohn Baldwin {
6178e38aeffSJohn Baldwin 	struct shmfd *shmfd;
6188e38aeffSJohn Baldwin 
6198e38aeffSJohn Baldwin 	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
6208e38aeffSJohn Baldwin 	shmfd->shm_size = 0;
6218e38aeffSJohn Baldwin 	shmfd->shm_uid = ucred->cr_uid;
6228e38aeffSJohn Baldwin 	shmfd->shm_gid = ucred->cr_gid;
6238e38aeffSJohn Baldwin 	shmfd->shm_mode = mode;
62432287ea7SKyle Evans 	shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
6253364c323SKonstantin Belousov 	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
6268e38aeffSJohn Baldwin 	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
6278e38aeffSJohn Baldwin 	vfs_timestamp(&shmfd->shm_birthtime);
6288e38aeffSJohn Baldwin 	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
6298e38aeffSJohn Baldwin 	    shmfd->shm_birthtime;
6307883ce1fSMateusz Guzik 	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
6318e38aeffSJohn Baldwin 	refcount_init(&shmfd->shm_refs, 1);
632940cb0e2SKonstantin Belousov 	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
633940cb0e2SKonstantin Belousov 	rangelock_init(&shmfd->shm_rl);
6348e38aeffSJohn Baldwin #ifdef MAC
6358e38aeffSJohn Baldwin 	mac_posixshm_init(shmfd);
6368e38aeffSJohn Baldwin 	mac_posixshm_create(ucred, shmfd);
6378e38aeffSJohn Baldwin #endif
6388e38aeffSJohn Baldwin 
6398e38aeffSJohn Baldwin 	return (shmfd);
6408e38aeffSJohn Baldwin }
6418e38aeffSJohn Baldwin 
6421bdbd705SKonstantin Belousov struct shmfd *
6438e38aeffSJohn Baldwin shm_hold(struct shmfd *shmfd)
6448e38aeffSJohn Baldwin {
6458e38aeffSJohn Baldwin 
6468e38aeffSJohn Baldwin 	refcount_acquire(&shmfd->shm_refs);
6478e38aeffSJohn Baldwin 	return (shmfd);
6488e38aeffSJohn Baldwin }
6498e38aeffSJohn Baldwin 
6501bdbd705SKonstantin Belousov void
6518e38aeffSJohn Baldwin shm_drop(struct shmfd *shmfd)
6528e38aeffSJohn Baldwin {
6538e38aeffSJohn Baldwin 
6548e38aeffSJohn Baldwin 	if (refcount_release(&shmfd->shm_refs)) {
6558e38aeffSJohn Baldwin #ifdef MAC
6568e38aeffSJohn Baldwin 		mac_posixshm_destroy(shmfd);
6578e38aeffSJohn Baldwin #endif
658940cb0e2SKonstantin Belousov 		rangelock_destroy(&shmfd->shm_rl);
659940cb0e2SKonstantin Belousov 		mtx_destroy(&shmfd->shm_mtx);
6608e38aeffSJohn Baldwin 		vm_object_deallocate(shmfd->shm_object);
6618e38aeffSJohn Baldwin 		free(shmfd, M_SHMFD);
6628e38aeffSJohn Baldwin 	}
6638e38aeffSJohn Baldwin }
6648e38aeffSJohn Baldwin 
6658e38aeffSJohn Baldwin /*
6668e38aeffSJohn Baldwin  * Determine if the credentials have sufficient permissions for a
6678e38aeffSJohn Baldwin  * specified combination of FREAD and FWRITE.
6688e38aeffSJohn Baldwin  */
6691bdbd705SKonstantin Belousov int
6708e38aeffSJohn Baldwin shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
6718e38aeffSJohn Baldwin {
67215bc6b2bSEdward Tomasz Napierala 	accmode_t accmode;
6739c00bb91SKonstantin Belousov 	int error;
6748e38aeffSJohn Baldwin 
67515bc6b2bSEdward Tomasz Napierala 	accmode = 0;
6768e38aeffSJohn Baldwin 	if (flags & FREAD)
67715bc6b2bSEdward Tomasz Napierala 		accmode |= VREAD;
6788e38aeffSJohn Baldwin 	if (flags & FWRITE)
67915bc6b2bSEdward Tomasz Napierala 		accmode |= VWRITE;
6809c00bb91SKonstantin Belousov 	mtx_lock(&shm_timestamp_lock);
6819c00bb91SKonstantin Belousov 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
6829c00bb91SKonstantin Belousov 	    accmode, ucred, NULL);
6839c00bb91SKonstantin Belousov 	mtx_unlock(&shm_timestamp_lock);
6849c00bb91SKonstantin Belousov 	return (error);
6858e38aeffSJohn Baldwin }
6868e38aeffSJohn Baldwin 
6878e38aeffSJohn Baldwin /*
6888e38aeffSJohn Baldwin  * Dictionary management.  We maintain an in-kernel dictionary to map
6898e38aeffSJohn Baldwin  * paths to shmfd objects.  We use the FNV hash on the path to store
6908e38aeffSJohn Baldwin  * the mappings in a hash table.
6918e38aeffSJohn Baldwin  */
6928e38aeffSJohn Baldwin static void
693610a2b3cSJohn Baldwin shm_init(void *arg)
6948e38aeffSJohn Baldwin {
6958e38aeffSJohn Baldwin 
6968e38aeffSJohn Baldwin 	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
6978e38aeffSJohn Baldwin 	sx_init(&shm_dict_lock, "shm dictionary");
6988e38aeffSJohn Baldwin 	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
6997883ce1fSMateusz Guzik 	new_unrhdr64(&shm_ino_unr, 1);
700610a2b3cSJohn Baldwin 	shm_dev_ino = devfs_alloc_cdp_inode();
701610a2b3cSJohn Baldwin 	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
7028e38aeffSJohn Baldwin }
703610a2b3cSJohn Baldwin SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
7048e38aeffSJohn Baldwin 
7058e38aeffSJohn Baldwin static struct shmfd *
7068e38aeffSJohn Baldwin shm_lookup(char *path, Fnv32_t fnv)
7078e38aeffSJohn Baldwin {
7088e38aeffSJohn Baldwin 	struct shm_mapping *map;
7098e38aeffSJohn Baldwin 
7108e38aeffSJohn Baldwin 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
7118e38aeffSJohn Baldwin 		if (map->sm_fnv != fnv)
7128e38aeffSJohn Baldwin 			continue;
7138e38aeffSJohn Baldwin 		if (strcmp(map->sm_path, path) == 0)
7148e38aeffSJohn Baldwin 			return (map->sm_shmfd);
7158e38aeffSJohn Baldwin 	}
7168e38aeffSJohn Baldwin 
7178e38aeffSJohn Baldwin 	return (NULL);
7188e38aeffSJohn Baldwin }
7198e38aeffSJohn Baldwin 
7208e38aeffSJohn Baldwin static void
7218e38aeffSJohn Baldwin shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
7228e38aeffSJohn Baldwin {
7238e38aeffSJohn Baldwin 	struct shm_mapping *map;
7248e38aeffSJohn Baldwin 
7258e38aeffSJohn Baldwin 	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
7268e38aeffSJohn Baldwin 	map->sm_path = path;
7278e38aeffSJohn Baldwin 	map->sm_fnv = fnv;
7288e38aeffSJohn Baldwin 	map->sm_shmfd = shm_hold(shmfd);
729e506e182SJohn Baldwin 	shmfd->shm_path = path;
7308e38aeffSJohn Baldwin 	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
7318e38aeffSJohn Baldwin }
7328e38aeffSJohn Baldwin 
7338e38aeffSJohn Baldwin static int
7348e38aeffSJohn Baldwin shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
7358e38aeffSJohn Baldwin {
7368e38aeffSJohn Baldwin 	struct shm_mapping *map;
7378e38aeffSJohn Baldwin 	int error;
7388e38aeffSJohn Baldwin 
7398e38aeffSJohn Baldwin 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
7408e38aeffSJohn Baldwin 		if (map->sm_fnv != fnv)
7418e38aeffSJohn Baldwin 			continue;
7428e38aeffSJohn Baldwin 		if (strcmp(map->sm_path, path) == 0) {
7438e38aeffSJohn Baldwin #ifdef MAC
7448e38aeffSJohn Baldwin 			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
7458e38aeffSJohn Baldwin 			if (error)
7468e38aeffSJohn Baldwin 				return (error);
7478e38aeffSJohn Baldwin #endif
7488e38aeffSJohn Baldwin 			error = shm_access(map->sm_shmfd, ucred,
7498e38aeffSJohn Baldwin 			    FREAD | FWRITE);
7508e38aeffSJohn Baldwin 			if (error)
7518e38aeffSJohn Baldwin 				return (error);
752e506e182SJohn Baldwin 			map->sm_shmfd->shm_path = NULL;
7538e38aeffSJohn Baldwin 			LIST_REMOVE(map, sm_link);
7548e38aeffSJohn Baldwin 			shm_drop(map->sm_shmfd);
7558e38aeffSJohn Baldwin 			free(map->sm_path, M_SHMFD);
7568e38aeffSJohn Baldwin 			free(map, M_SHMFD);
7578e38aeffSJohn Baldwin 			return (0);
7588e38aeffSJohn Baldwin 		}
7598e38aeffSJohn Baldwin 	}
7608e38aeffSJohn Baldwin 
7618e38aeffSJohn Baldwin 	return (ENOENT);
7628e38aeffSJohn Baldwin }
7638e38aeffSJohn Baldwin 
7648e38aeffSJohn Baldwin int
765535b1df9SKyle Evans kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
766535b1df9SKyle Evans     int shmflags, struct filecaps *fcaps, const char *name __unused)
7678e38aeffSJohn Baldwin {
7688e38aeffSJohn Baldwin 	struct filedesc *fdp;
7698e38aeffSJohn Baldwin 	struct shmfd *shmfd;
7708e38aeffSJohn Baldwin 	struct file *fp;
7718e38aeffSJohn Baldwin 	char *path;
7720cd95859SKyle Evans 	void *rl_cookie;
7738e38aeffSJohn Baldwin 	Fnv32_t fnv;
7748e38aeffSJohn Baldwin 	mode_t cmode;
775535b1df9SKyle Evans 	int error, fd, initial_seals;
776535b1df9SKyle Evans 
777*3f07b9d9SKyle Evans 	if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE)) != 0)
778535b1df9SKyle Evans 		return (EINVAL);
779535b1df9SKyle Evans 
780535b1df9SKyle Evans 	initial_seals = F_SEAL_SEAL;
781535b1df9SKyle Evans 	if ((shmflags & SHM_ALLOW_SEALING) != 0)
782535b1df9SKyle Evans 		initial_seals &= ~F_SEAL_SEAL;
7838e38aeffSJohn Baldwin 
78412bc222eSJonathan Anderson #ifdef CAPABILITY_MODE
78512bc222eSJonathan Anderson 	/*
78612bc222eSJonathan Anderson 	 * shm_open(2) is only allowed for anonymous objects.
78712bc222eSJonathan Anderson 	 */
7887ee1b208SEd Schouten 	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
78912bc222eSJonathan Anderson 		return (ECAPMODE);
79012bc222eSJonathan Anderson #endif
79112bc222eSJonathan Anderson 
79215bcf785SRobert Watson 	AUDIT_ARG_FFLAGS(flags);
79315bcf785SRobert Watson 	AUDIT_ARG_MODE(mode);
79415bcf785SRobert Watson 
7957ee1b208SEd Schouten 	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
7968e38aeffSJohn Baldwin 		return (EINVAL);
7978e38aeffSJohn Baldwin 
7987ee1b208SEd Schouten 	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
7998e38aeffSJohn Baldwin 		return (EINVAL);
8008e38aeffSJohn Baldwin 
8010cd95859SKyle Evans 	/*
8020cd95859SKyle Evans 	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
8030cd95859SKyle Evans 	 * If the decision is made later to allow additional seals, care must be
8040cd95859SKyle Evans 	 * taken below to ensure that the seals are properly set if the shmfd
8050cd95859SKyle Evans 	 * already existed -- this currently assumes that only F_SEAL_SEAL can
8060cd95859SKyle Evans 	 * be set and doesn't take further precautions to ensure the validity of
8070cd95859SKyle Evans 	 * the seals being added with respect to current mappings.
8080cd95859SKyle Evans 	 */
8090cd95859SKyle Evans 	if ((initial_seals & ~F_SEAL_SEAL) != 0)
8100cd95859SKyle Evans 		return (EINVAL);
8110cd95859SKyle Evans 
8128e38aeffSJohn Baldwin 	fdp = td->td_proc->p_fd;
8137ee1b208SEd Schouten 	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;
8148e38aeffSJohn Baldwin 
815b5a7ac99SKyle Evans 	/*
816b5a7ac99SKyle Evans 	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
817b5a7ac99SKyle Evans 	 * by POSIX.  We allow it to be unset here so that an in-kernel
818b5a7ac99SKyle Evans 	 * interface may be written as a thin layer around shm, optionally not
819b5a7ac99SKyle Evans 	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
820b5a7ac99SKyle Evans 	 * in sys_shm_open() to keep this implementation compliant.
821b5a7ac99SKyle Evans 	 */
822b5a7ac99SKyle Evans 	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
8238e38aeffSJohn Baldwin 	if (error)
8248e38aeffSJohn Baldwin 		return (error);
8258e38aeffSJohn Baldwin 
8268e38aeffSJohn Baldwin 	/* A SHM_ANON path pointer creates an anonymous object. */
8277ee1b208SEd Schouten 	if (userpath == SHM_ANON) {
8288e38aeffSJohn Baldwin 		/* A read-only anonymous object is pointless. */
8297ee1b208SEd Schouten 		if ((flags & O_ACCMODE) == O_RDONLY) {
83090f54cbfSMateusz Guzik 			fdclose(td, fp, fd);
8318e38aeffSJohn Baldwin 			fdrop(fp, td);
8328e38aeffSJohn Baldwin 			return (EINVAL);
8338e38aeffSJohn Baldwin 		}
8348e38aeffSJohn Baldwin 		shmfd = shm_alloc(td->td_ucred, cmode);
8350cd95859SKyle Evans 		shmfd->shm_seals = initial_seals;
8368e38aeffSJohn Baldwin 	} else {
8372d5603feSDavid Bright 		error = shm_copyin_path(td, userpath, &path);
8382d5603feSDavid Bright 		if (error != 0) {
83990f54cbfSMateusz Guzik 			fdclose(td, fp, fd);
8408e38aeffSJohn Baldwin 			fdrop(fp, td);
8418e38aeffSJohn Baldwin 			return (error);
8428e38aeffSJohn Baldwin 		}
8438e38aeffSJohn Baldwin 
84415bcf785SRobert Watson 		AUDIT_ARG_UPATH1_CANON(path);
8458e38aeffSJohn Baldwin 		fnv = fnv_32_str(path, FNV1_32_INIT);
8468e38aeffSJohn Baldwin 		sx_xlock(&shm_dict_lock);
8478e38aeffSJohn Baldwin 		shmfd = shm_lookup(path, fnv);
8488e38aeffSJohn Baldwin 		if (shmfd == NULL) {
8498e38aeffSJohn Baldwin 			/* Object does not yet exist, create it if requested. */
8507ee1b208SEd Schouten 			if (flags & O_CREAT) {
8519b6dd12eSRobert Watson #ifdef MAC
8529b6dd12eSRobert Watson 				error = mac_posixshm_check_create(td->td_ucred,
8539b6dd12eSRobert Watson 				    path);
8549b6dd12eSRobert Watson 				if (error == 0) {
8559b6dd12eSRobert Watson #endif
8568e38aeffSJohn Baldwin 					shmfd = shm_alloc(td->td_ucred, cmode);
8570cd95859SKyle Evans 					shmfd->shm_seals = initial_seals;
8588e38aeffSJohn Baldwin 					shm_insert(path, fnv, shmfd);
8599b6dd12eSRobert Watson #ifdef MAC
8609b6dd12eSRobert Watson 				}
8619b6dd12eSRobert Watson #endif
8628e38aeffSJohn Baldwin 			} else {
8638e38aeffSJohn Baldwin 				free(path, M_SHMFD);
8648e38aeffSJohn Baldwin 				error = ENOENT;
8658e38aeffSJohn Baldwin 			}
8668e38aeffSJohn Baldwin 		} else {
8670cd95859SKyle Evans 			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
8680cd95859SKyle Evans 			    &shmfd->shm_mtx);
8690cd95859SKyle Evans 
8700cd95859SKyle Evans 			/*
8710cd95859SKyle Evans 			 * kern_shm_open() likely shouldn't ever error out on
8720cd95859SKyle Evans 			 * trying to set a seal that already exists, unlike
8730cd95859SKyle Evans 			 * F_ADD_SEALS.  This would break terribly as
8740cd95859SKyle Evans 			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
8750cd95859SKyle Evans 			 * historical behavior where the underlying file could
8760cd95859SKyle Evans 			 * not be sealed.
8770cd95859SKyle Evans 			 */
8780cd95859SKyle Evans 			initial_seals &= ~shmfd->shm_seals;
8790cd95859SKyle Evans 
8808e38aeffSJohn Baldwin 			/*
8818e38aeffSJohn Baldwin 			 * Object already exists, obtain a new
8828e38aeffSJohn Baldwin 			 * reference if requested and permitted.
8838e38aeffSJohn Baldwin 			 */
8848e38aeffSJohn Baldwin 			free(path, M_SHMFD);
8850cd95859SKyle Evans 
8860cd95859SKyle Evans 			/*
8870cd95859SKyle Evans 			 * initial_seals can't set additional seals if we've
8880cd95859SKyle Evans 			 * already been set F_SEAL_SEAL.  If F_SEAL_SEAL is set,
8890cd95859SKyle Evans 			 * then we've already removed that one from
8900cd95859SKyle Evans 			 * initial_seals.  This is currently redundant as we
8910cd95859SKyle Evans 			 * only allow setting F_SEAL_SEAL at creation time, but
8920cd95859SKyle Evans 			 * it's cheap to check and decreases the effort required
8930cd95859SKyle Evans 			 * to allow additional seals.
8940cd95859SKyle Evans 			 */
8950cd95859SKyle Evans 			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
8960cd95859SKyle Evans 			    initial_seals != 0)
8970cd95859SKyle Evans 				error = EPERM;
8980cd95859SKyle Evans 			else if ((flags & (O_CREAT | O_EXCL)) ==
8990cd95859SKyle Evans 			    (O_CREAT | O_EXCL))
9008e38aeffSJohn Baldwin 				error = EEXIST;
9018e38aeffSJohn Baldwin 			else {
9028e38aeffSJohn Baldwin #ifdef MAC
9038e38aeffSJohn Baldwin 				error = mac_posixshm_check_open(td->td_ucred,
9047ee1b208SEd Schouten 				    shmfd, FFLAGS(flags & O_ACCMODE));
9058e38aeffSJohn Baldwin 				if (error == 0)
9068e38aeffSJohn Baldwin #endif
9078e38aeffSJohn Baldwin 				error = shm_access(shmfd, td->td_ucred,
9087ee1b208SEd Schouten 				    FFLAGS(flags & O_ACCMODE));
9098e38aeffSJohn Baldwin 			}
9108e38aeffSJohn Baldwin 
9118e38aeffSJohn Baldwin 			/*
9128e38aeffSJohn Baldwin 			 * Truncate the file back to zero length if
9138e38aeffSJohn Baldwin 			 * O_TRUNC was specified and the object was
9148e38aeffSJohn Baldwin 			 * opened with read/write.
9158e38aeffSJohn Baldwin 			 */
9168e38aeffSJohn Baldwin 			if (error == 0 &&
9177ee1b208SEd Schouten 			    (flags & (O_ACCMODE | O_TRUNC)) ==
9188e38aeffSJohn Baldwin 			    (O_RDWR | O_TRUNC)) {
9190cd95859SKyle Evans 				VM_OBJECT_WLOCK(shmfd->shm_object);
9208e38aeffSJohn Baldwin #ifdef MAC
9218e38aeffSJohn Baldwin 				error = mac_posixshm_check_truncate(
9228e38aeffSJohn Baldwin 					td->td_ucred, fp->f_cred, shmfd);
9238e38aeffSJohn Baldwin 				if (error == 0)
9248e38aeffSJohn Baldwin #endif
9250cd95859SKyle Evans 					error = shm_dotruncate_locked(shmfd, 0,
9260cd95859SKyle Evans 					    rl_cookie);
9270cd95859SKyle Evans 				VM_OBJECT_WUNLOCK(shmfd->shm_object);
9288e38aeffSJohn Baldwin 			}
9290cd95859SKyle Evans 			if (error == 0) {
9300cd95859SKyle Evans 				/*
9310cd95859SKyle Evans 				 * Currently we only allow F_SEAL_SEAL to be
9320cd95859SKyle Evans 				 * set initially.  As noted above, this would
9330cd95859SKyle Evans 				 * need to be reworked should that change.
9340cd95859SKyle Evans 				 */
9350cd95859SKyle Evans 				shmfd->shm_seals |= initial_seals;
9368e38aeffSJohn Baldwin 				shm_hold(shmfd);
9378e38aeffSJohn Baldwin 			}
9380cd95859SKyle Evans 			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
9390cd95859SKyle Evans 			    &shmfd->shm_mtx);
9400cd95859SKyle Evans 		}
9418e38aeffSJohn Baldwin 		sx_xunlock(&shm_dict_lock);
9428e38aeffSJohn Baldwin 
9438e38aeffSJohn Baldwin 		if (error) {
94490f54cbfSMateusz Guzik 			fdclose(td, fp, fd);
9458e38aeffSJohn Baldwin 			fdrop(fp, td);
9468e38aeffSJohn Baldwin 			return (error);
9478e38aeffSJohn Baldwin 		}
9488e38aeffSJohn Baldwin 	}
9498e38aeffSJohn Baldwin 
950*3f07b9d9SKyle Evans 	shmfd->shm_flags = shmflags;
9517ee1b208SEd Schouten 	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
9528e38aeffSJohn Baldwin 
9538e38aeffSJohn Baldwin 	td->td_retval[0] = fd;
9548e38aeffSJohn Baldwin 	fdrop(fp, td);
9558e38aeffSJohn Baldwin 
9568e38aeffSJohn Baldwin 	return (0);
9578e38aeffSJohn Baldwin }
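/*
 * Illustrative userland sketch of the sealing path handled above (an
 * editorial example, not part of this file; it assumes the shm_open2()
 * wrapper and the F_ADD_SEALS fcntl provided by this revision's
 * headers).  An anonymous object is created sealable and then sealed
 * against growth, after which any ftruncate(2) to a larger size fails
 * with EPERM in shm_dotruncate_locked():
 *
 *	int fd = shm_open2(SHM_ANON, O_RDWR, 0600, SHM_ALLOW_SEALING,
 *	    NULL);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0)
 *		(void)fcntl(fd, F_ADD_SEALS, F_SEAL_GROW);
 */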
9588e38aeffSJohn Baldwin 
9597ee1b208SEd Schouten /* System calls. */
960a9ac5e14SKyle Evans #ifdef COMPAT_FREEBSD12
9617ee1b208SEd Schouten int
962a9ac5e14SKyle Evans freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
9637ee1b208SEd Schouten {
9647ee1b208SEd Schouten 
965535b1df9SKyle Evans 	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
966535b1df9SKyle Evans 	    uap->mode, NULL));
9677ee1b208SEd Schouten }
968a9ac5e14SKyle Evans #endif
9697ee1b208SEd Schouten 
9708e38aeffSJohn Baldwin int
9718451d0ddSKip Macy sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
9728e38aeffSJohn Baldwin {
9738e38aeffSJohn Baldwin 	char *path;
9748e38aeffSJohn Baldwin 	Fnv32_t fnv;
9758e38aeffSJohn Baldwin 	int error;
9768e38aeffSJohn Baldwin 
9772d5603feSDavid Bright 	error = shm_copyin_path(td, uap->path, &path);
9782d5603feSDavid Bright 	if (error != 0)
9798e38aeffSJohn Baldwin 		return (error);
9802d5603feSDavid Bright 
98115bcf785SRobert Watson 	AUDIT_ARG_UPATH1_CANON(path);
9828e38aeffSJohn Baldwin 	fnv = fnv_32_str(path, FNV1_32_INIT);
9838e38aeffSJohn Baldwin 	sx_xlock(&shm_dict_lock);
9848e38aeffSJohn Baldwin 	error = shm_remove(path, fnv, td->td_ucred);
9858e38aeffSJohn Baldwin 	sx_xunlock(&shm_dict_lock);
9864cf919edSMark Johnston 	free(path, M_SHMFD);
9878e38aeffSJohn Baldwin 
9888e38aeffSJohn Baldwin 	return (error);
9898e38aeffSJohn Baldwin }
9908e38aeffSJohn Baldwin 
9918e38aeffSJohn Baldwin int
9929afb12baSDavid Bright sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
9939afb12baSDavid Bright {
9949afb12baSDavid Bright 	char *path_from = NULL, *path_to = NULL;
9959afb12baSDavid Bright 	Fnv32_t fnv_from, fnv_to;
9969afb12baSDavid Bright 	struct shmfd *fd_from;
9979afb12baSDavid Bright 	struct shmfd *fd_to;
9989afb12baSDavid Bright 	int error;
9999afb12baSDavid Bright 	int flags;
10009afb12baSDavid Bright 
10019afb12baSDavid Bright 	flags = uap->flags;
10022d5603feSDavid Bright 	AUDIT_ARG_FFLAGS(flags);
10039afb12baSDavid Bright 
10049afb12baSDavid Bright 	/*
10059afb12baSDavid Bright 	 * Make sure the user passed only valid flags.
10069afb12baSDavid Bright 	 * If you add a new flag, please add a new term here.
10079afb12baSDavid Bright 	 */
10089afb12baSDavid Bright 	if ((flags & ~(
10099afb12baSDavid Bright 	    SHM_RENAME_NOREPLACE |
10109afb12baSDavid Bright 	    SHM_RENAME_EXCHANGE
10119afb12baSDavid Bright 	    )) != 0) {
10129afb12baSDavid Bright 		error = EINVAL;
10139afb12baSDavid Bright 		goto out;
10149afb12baSDavid Bright 	}
10159afb12baSDavid Bright 
10169afb12baSDavid Bright 	/*
10179afb12baSDavid Bright 	 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
10189afb12baSDavid Bright 	 * force the user to choose one or the other.
10199afb12baSDavid Bright 	 */
10209afb12baSDavid Bright 	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
10219afb12baSDavid Bright 	    (flags & SHM_RENAME_EXCHANGE) != 0) {
10229afb12baSDavid Bright 		error = EINVAL;
10239afb12baSDavid Bright 		goto out;
10249afb12baSDavid Bright 	}
10259afb12baSDavid Bright 
10262d5603feSDavid Bright 	/* Renaming to or from anonymous makes no sense */
10272d5603feSDavid Bright 	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
10282d5603feSDavid Bright 		error = EINVAL;
10292d5603feSDavid Bright 		goto out;
10302d5603feSDavid Bright 	}
10312d5603feSDavid Bright 
10322d5603feSDavid Bright 	error = shm_copyin_path(td, uap->path_from, &path_from);
10332d5603feSDavid Bright 	if (error != 0)
10349afb12baSDavid Bright 		goto out;
10359afb12baSDavid Bright 
10362d5603feSDavid Bright 	error = shm_copyin_path(td, uap->path_to, &path_to);
10372d5603feSDavid Bright 	if (error != 0)
10389afb12baSDavid Bright 		goto out;
10399afb12baSDavid Bright 
10402d5603feSDavid Bright 	AUDIT_ARG_UPATH1_CANON(path_from);
10412d5603feSDavid Bright 	AUDIT_ARG_UPATH2_CANON(path_to);
10422d5603feSDavid Bright 
10439afb12baSDavid Bright 	/* Rename with from/to equal is a no-op */
10442d5603feSDavid Bright 	if (strcmp(path_from, path_to) == 0)
10459afb12baSDavid Bright 		goto out;
10469afb12baSDavid Bright 
10479afb12baSDavid Bright 	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
10489afb12baSDavid Bright 	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
10499afb12baSDavid Bright 
10509afb12baSDavid Bright 	sx_xlock(&shm_dict_lock);
10519afb12baSDavid Bright 
10529afb12baSDavid Bright 	fd_from = shm_lookup(path_from, fnv_from);
10539afb12baSDavid Bright 	if (fd_from == NULL) {
10549afb12baSDavid Bright 		error = ENOENT;
10552d5603feSDavid Bright 		goto out_locked;
10569afb12baSDavid Bright 	}
10579afb12baSDavid Bright 
10589afb12baSDavid Bright 	fd_to = shm_lookup(path_to, fnv_to);
10599afb12baSDavid Bright 	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
10609afb12baSDavid Bright 		error = EEXIST;
10612d5603feSDavid Bright 		goto out_locked;
10629afb12baSDavid Bright 	}
10639afb12baSDavid Bright 
10649afb12baSDavid Bright 	/*
10659afb12baSDavid Bright 	 * Unconditionally prevents shm_remove from invalidating the 'from'
10669afb12baSDavid Bright 	 * shm's state.
10679afb12baSDavid Bright 	 */
10689afb12baSDavid Bright 	shm_hold(fd_from);
10699afb12baSDavid Bright 	error = shm_remove(path_from, fnv_from, td->td_ucred);
10709afb12baSDavid Bright 
10719afb12baSDavid Bright 	/*
10729afb12baSDavid Bright 	 * One of my assumptions failed if ENOENT (e.g. locking didn't
10739afb12baSDavid Bright 	 * protect us)
10749afb12baSDavid Bright 	 */
10759afb12baSDavid Bright 	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
10769afb12baSDavid Bright 	    path_from));
10772d5603feSDavid Bright 	if (error != 0) {
10789afb12baSDavid Bright 		shm_drop(fd_from);
10792d5603feSDavid Bright 		goto out_locked;
10809afb12baSDavid Bright 	}
10819afb12baSDavid Bright 
10829afb12baSDavid Bright 	/*
10839afb12baSDavid Bright 	 * If we are exchanging, we need to ensure the shm_remove below
10849afb12baSDavid Bright 	 * doesn't invalidate the dest shm's state.
10859afb12baSDavid Bright 	 */
10869afb12baSDavid Bright 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
10879afb12baSDavid Bright 		shm_hold(fd_to);
10889afb12baSDavid Bright 
10899afb12baSDavid Bright 	/*
10909afb12baSDavid Bright 	 * NOTE: if path_to is not already in the hash, c'est la vie;
10919afb12baSDavid Bright 	 * it simply means we have nothing already at path_to to unlink.
10929afb12baSDavid Bright 	 * That is the ENOENT case.
10939afb12baSDavid Bright 	 *
10949afb12baSDavid Bright 	 * If we somehow don't have access to unlink this guy, but
10959afb12baSDavid Bright 	 * did for the shm at path_from, then relink the shm to path_from
10969afb12baSDavid Bright 	 * and abort with EACCES.
10979afb12baSDavid Bright 	 *
10989afb12baSDavid Bright 	 * All other errors: that is weird; let's relink and abort the
10999afb12baSDavid Bright 	 * operation.
11009afb12baSDavid Bright 	 */
11019afb12baSDavid Bright 	error = shm_remove(path_to, fnv_to, td->td_ucred);
11022d5603feSDavid Bright 	if (error != 0 && error != ENOENT) {
11039afb12baSDavid Bright 		shm_insert(path_from, fnv_from, fd_from);
11049afb12baSDavid Bright 		shm_drop(fd_from);
11059afb12baSDavid Bright 		/* Don't free path_from now, since the hash references it */
11069afb12baSDavid Bright 		path_from = NULL;
11072d5603feSDavid Bright 		goto out_locked;
11089afb12baSDavid Bright 	}
11099afb12baSDavid Bright 
11102d5603feSDavid Bright 	error = 0;
11112d5603feSDavid Bright 
11129afb12baSDavid Bright 	shm_insert(path_to, fnv_to, fd_from);
11139afb12baSDavid Bright 
11149afb12baSDavid Bright 	/* Don't free path_to now, since the hash references it */
11159afb12baSDavid Bright 	path_to = NULL;
11169afb12baSDavid Bright 
11179afb12baSDavid Bright 	/* We kept a ref when we removed, and incremented again in insert */
11189afb12baSDavid Bright 	shm_drop(fd_from);
11199afb12baSDavid Bright 	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
11209afb12baSDavid Bright 	    fd_from->shm_refs));
11219afb12baSDavid Bright 
11229afb12baSDavid Bright 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
11239afb12baSDavid Bright 		shm_insert(path_from, fnv_from, fd_to);
11249afb12baSDavid Bright 		path_from = NULL;
11259afb12baSDavid Bright 		shm_drop(fd_to);
11269afb12baSDavid Bright 		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
11279afb12baSDavid Bright 		    fd_to->shm_refs));
11289afb12baSDavid Bright 	}
11299afb12baSDavid Bright 
11302d5603feSDavid Bright out_locked:
11319afb12baSDavid Bright 	sx_xunlock(&shm_dict_lock);
11329afb12baSDavid Bright 
11339afb12baSDavid Bright out:
11349afb12baSDavid Bright 	free(path_from, M_SHMFD);
11359afb12baSDavid Bright 	free(path_to, M_SHMFD);
11369afb12baSDavid Bright 	return (error);
11379afb12baSDavid Bright }
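
/*
 * Illustrative userland sketch (not part of this file) of how the rename
 * path above is exercised.  It assumes the shm_rename(2) wrapper and the
 * SHM_RENAME_* flags from <sys/mman.h>; the "/staging" and "/live" names
 * are arbitrary, and error handling is minimal.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd;
 *
 *		fd = shm_open("/staging", O_RDWR | O_CREAT, 0600);
 *		if (fd < 0)
 *			err(1, "shm_open");
 *		// Atomically move it into place; fail if "/live" exists.
 *		if (shm_rename("/staging", "/live", SHM_RENAME_NOREPLACE) != 0)
 *			err(1, "shm_rename");
 *		return (0);
 *	}
 */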
11389afb12baSDavid Bright 
11399afb12baSDavid Bright int
11407077c426SJohn Baldwin shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
11417077c426SJohn Baldwin     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
11427077c426SJohn Baldwin     vm_ooffset_t foff, struct thread *td)
11438e38aeffSJohn Baldwin {
11447077c426SJohn Baldwin 	struct shmfd *shmfd;
11457077c426SJohn Baldwin 	vm_prot_t maxprot;
11467077c426SJohn Baldwin 	int error;
1147dca52ab4SKyle Evans 	bool writecnt;
1148af755d3eSKyle Evans 	void *rl_cookie;
11497077c426SJohn Baldwin 
11507077c426SJohn Baldwin 	shmfd = fp->f_data;
11517077c426SJohn Baldwin 	maxprot = VM_PROT_NONE;
11527077c426SJohn Baldwin 
1153af755d3eSKyle Evans 	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1154af755d3eSKyle Evans 	    &shmfd->shm_mtx);
11557077c426SJohn Baldwin 	/* FREAD should always be set. */
11567077c426SJohn Baldwin 	if ((fp->f_flag & FREAD) != 0)
11577077c426SJohn Baldwin 		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
115858366f05SKyle Evans 
115958366f05SKyle Evans 	/*
116058366f05SKyle Evans 	 * If FWRITE is set, we can allow VM_PROT_WRITE unless it is a shared
1161c7841c6bSMark Johnston 	 * mapping with a write seal applied.  Private (copy-on-write) mappings
1162c7841c6bSMark Johnston 	 * are always writable.
116358366f05SKyle Evans 	 */
1164c7841c6bSMark Johnston 	if ((flags & MAP_SHARED) == 0) {
1165c7841c6bSMark Johnston 		cap_maxprot |= VM_PROT_WRITE;
11667077c426SJohn Baldwin 		maxprot |= VM_PROT_WRITE;
1167c7841c6bSMark Johnston 		writecnt = false;
1168c7841c6bSMark Johnston 	} else {
1169c7841c6bSMark Johnston 		if ((fp->f_flag & FWRITE) != 0 &&
1170c7841c6bSMark Johnston 		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1171c7841c6bSMark Johnston 			maxprot |= VM_PROT_WRITE;
1172af755d3eSKyle Evans 
117351a16c84SKyle Evans 		/*
117451a16c84SKyle Evans 		 * Any mappings from a writable descriptor may be upgraded to
117551a16c84SKyle Evans 		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
117651a16c84SKyle Evans 		 * applied between the open and subsequent mmap(2).  We want to
117751a16c84SKyle Evans 		 * reject application of a write seal as long as any such
117851a16c84SKyle Evans 		 * mapping exists so that the seal cannot be trivially bypassed.
117951a16c84SKyle Evans 		 */
118051a16c84SKyle Evans 		writecnt = (maxprot & VM_PROT_WRITE) != 0;
118151a16c84SKyle Evans 		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1182af755d3eSKyle Evans 			error = EACCES;
1183af755d3eSKyle Evans 			goto out;
1184af755d3eSKyle Evans 		}
1185c7841c6bSMark Johnston 	}
11867077c426SJohn Baldwin 	maxprot &= cap_maxprot;
11877077c426SJohn Baldwin 
1188987ff181SKonstantin Belousov 	/* See comment in vn_mmap(). */
1189987ff181SKonstantin Belousov 	if (
1190987ff181SKonstantin Belousov #ifdef _LP64
1191987ff181SKonstantin Belousov 	    objsize > OFF_MAX ||
1192987ff181SKonstantin Belousov #endif
1193af755d3eSKyle Evans 	    foff < 0 || foff > OFF_MAX - objsize) {
1194af755d3eSKyle Evans 		error = EINVAL;
1195af755d3eSKyle Evans 		goto out;
1196af755d3eSKyle Evans 	}
1197987ff181SKonstantin Belousov 
11987077c426SJohn Baldwin #ifdef MAC
11997077c426SJohn Baldwin 	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
12007077c426SJohn Baldwin 	if (error != 0)
1201af755d3eSKyle Evans 		goto out;
12027077c426SJohn Baldwin #endif
12038e38aeffSJohn Baldwin 
12048e38aeffSJohn Baldwin 	mtx_lock(&shm_timestamp_lock);
12058e38aeffSJohn Baldwin 	vfs_timestamp(&shmfd->shm_atime);
12068e38aeffSJohn Baldwin 	mtx_unlock(&shm_timestamp_lock);
12078e38aeffSJohn Baldwin 	vm_object_reference(shmfd->shm_object);
12087077c426SJohn Baldwin 
1209dca52ab4SKyle Evans 	if (writecnt)
1210dca52ab4SKyle Evans 		vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
12117077c426SJohn Baldwin 	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1212dca52ab4SKyle Evans 	    shmfd->shm_object, foff, writecnt, td);
1213dca52ab4SKyle Evans 	if (error != 0) {
1214dca52ab4SKyle Evans 		if (writecnt)
1215dca52ab4SKyle Evans 			vm_pager_release_writecount(shmfd->shm_object, 0,
1216dca52ab4SKyle Evans 			    objsize);
12177077c426SJohn Baldwin 		vm_object_deallocate(shmfd->shm_object);
1218dca52ab4SKyle Evans 	}
1219af755d3eSKyle Evans out:
1220af755d3eSKyle Evans 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
122134d3e89fSKonstantin Belousov 	return (error);
12228e38aeffSJohn Baldwin }
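
/*
 * Illustrative userland sketch (not part of this file): with an O_RDONLY
 * shm descriptor, the maxprot computation above rejects a MAP_SHARED
 * PROT_WRITE request with EACCES, while a MAP_PRIVATE mapping stays
 * writable because it is copy-on-write.  Assumes a pre-existing "/demo"
 * object of at least one page.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		size_t len = getpagesize();
 *		void *shr, *cow;
 *		int fd;
 *
 *		fd = shm_open("/demo", O_RDONLY, 0);
 *		if (fd < 0)
 *			err(1, "shm_open");
 *		shr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);		// fails with EACCES
 *		cow = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		    MAP_PRIVATE, fd, 0);	// succeeds
 *		return (shr == MAP_FAILED && cow != MAP_FAILED ? 0 : 1);
 *	}
 */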
12239c00bb91SKonstantin Belousov 
12249c00bb91SKonstantin Belousov static int
12259c00bb91SKonstantin Belousov shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
12269c00bb91SKonstantin Belousov     struct thread *td)
12279c00bb91SKonstantin Belousov {
12289c00bb91SKonstantin Belousov 	struct shmfd *shmfd;
12299c00bb91SKonstantin Belousov 	int error;
12309c00bb91SKonstantin Belousov 
12319c00bb91SKonstantin Belousov 	error = 0;
12329c00bb91SKonstantin Belousov 	shmfd = fp->f_data;
12339c00bb91SKonstantin Belousov 	mtx_lock(&shm_timestamp_lock);
12349c00bb91SKonstantin Belousov 	/*
12359c00bb91SKonstantin Belousov 	 * SUSv4 says that the execute bits of the permissions need not be
12369c00bb91SKonstantin Belousov 	 * affected.  Be consistent with the handling in shm_open().
12379c00bb91SKonstantin Belousov 	 */
12389c00bb91SKonstantin Belousov #ifdef MAC
12399c00bb91SKonstantin Belousov 	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
12409c00bb91SKonstantin Belousov 	if (error != 0)
12419c00bb91SKonstantin Belousov 		goto out;
12429c00bb91SKonstantin Belousov #endif
12439c00bb91SKonstantin Belousov 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
12449c00bb91SKonstantin Belousov 	    shmfd->shm_gid, VADMIN, active_cred, NULL);
12459c00bb91SKonstantin Belousov 	if (error != 0)
12469c00bb91SKonstantin Belousov 		goto out;
12479c00bb91SKonstantin Belousov 	shmfd->shm_mode = mode & ACCESSPERMS;
12489c00bb91SKonstantin Belousov out:
12499c00bb91SKonstantin Belousov 	mtx_unlock(&shm_timestamp_lock);
12509c00bb91SKonstantin Belousov 	return (error);
12519c00bb91SKonstantin Belousov }
12529c00bb91SKonstantin Belousov 
12539c00bb91SKonstantin Belousov static int
12549c00bb91SKonstantin Belousov shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
12559c00bb91SKonstantin Belousov     struct thread *td)
12569c00bb91SKonstantin Belousov {
12579c00bb91SKonstantin Belousov 	struct shmfd *shmfd;
12589c00bb91SKonstantin Belousov 	int error;
12599c00bb91SKonstantin Belousov 
126068889ed6SKonstantin Belousov 	error = 0;
12619c00bb91SKonstantin Belousov 	shmfd = fp->f_data;
12629c00bb91SKonstantin Belousov 	mtx_lock(&shm_timestamp_lock);
12639c00bb91SKonstantin Belousov #ifdef MAC
12649c00bb91SKonstantin Belousov 	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
12659c00bb91SKonstantin Belousov 	if (error != 0)
12669c00bb91SKonstantin Belousov 		goto out;
12679c00bb91SKonstantin Belousov #endif
12689c00bb91SKonstantin Belousov 	if (uid == (uid_t)-1)
12699c00bb91SKonstantin Belousov 		uid = shmfd->shm_uid;
12709c00bb91SKonstantin Belousov 	if (gid == (gid_t)-1)
12719c00bb91SKonstantin Belousov 		gid = shmfd->shm_gid;
12729c00bb91SKonstantin Belousov 	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
12739c00bb91SKonstantin Belousov 	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1274cc426dd3SMateusz Guzik 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
12759c00bb91SKonstantin Belousov 		goto out;
12769c00bb91SKonstantin Belousov 	shmfd->shm_uid = uid;
12779c00bb91SKonstantin Belousov 	shmfd->shm_gid = gid;
12789c00bb91SKonstantin Belousov out:
12799c00bb91SKonstantin Belousov 	mtx_unlock(&shm_timestamp_lock);
12809c00bb91SKonstantin Belousov 	return (error);
12819c00bb91SKonstantin Belousov }
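
/*
 * Illustrative userland sketch (not part of this file): shm descriptors
 * accept the usual fchmod(2)/fchown(2) calls, which arrive here via
 * shm_chmod() and shm_chown().  The mode is masked with ACCESSPERMS, and
 * the group id used below (68) is an arbitrary example that the caller is
 * assumed to be a member of.
 *
 *	#include <sys/stat.h>
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd;
 *
 *		fd = shm_open("/demo", O_RDWR | O_CREAT, 0600);
 *		if (fd < 0)
 *			err(1, "shm_open");
 *		if (fchmod(fd, 0640) != 0)
 *			err(1, "fchmod");
 *		if (fchown(fd, -1, 68) != 0)	// keep owner, change group
 *			err(1, "fchown");
 *		return (0);
 *	}
 */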
1282fb680e16SJohn Baldwin 
1283fb680e16SJohn Baldwin /*
1284fb680e16SJohn Baldwin  * Helper routines to allow the backing object of a shared memory file
1285fb680e16SJohn Baldwin  * descriptor to be mapped in the kernel.
1286fb680e16SJohn Baldwin  */
1287fb680e16SJohn Baldwin int
1288fb680e16SJohn Baldwin shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1289fb680e16SJohn Baldwin {
1290fb680e16SJohn Baldwin 	struct shmfd *shmfd;
1291fb680e16SJohn Baldwin 	vm_offset_t kva, ofs;
1292fb680e16SJohn Baldwin 	vm_object_t obj;
1293fb680e16SJohn Baldwin 	int rv;
1294fb680e16SJohn Baldwin 
1295fb680e16SJohn Baldwin 	if (fp->f_type != DTYPE_SHM)
1296fb680e16SJohn Baldwin 		return (EINVAL);
1297fb680e16SJohn Baldwin 	shmfd = fp->f_data;
1298fb680e16SJohn Baldwin 	obj = shmfd->shm_object;
129989f6b863SAttilio Rao 	VM_OBJECT_WLOCK(obj);
1300fb680e16SJohn Baldwin 	/*
1301fb680e16SJohn Baldwin 	 * XXXRW: This validation is probably insufficient, and subject to
1302fb680e16SJohn Baldwin 	 * sign errors.  It should be fixed.
1303fb680e16SJohn Baldwin 	 */
1304fb680e16SJohn Baldwin 	if (offset >= shmfd->shm_size ||
1305fb680e16SJohn Baldwin 	    offset + size > round_page(shmfd->shm_size)) {
130689f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(obj);
1307fb680e16SJohn Baldwin 		return (EINVAL);
1308fb680e16SJohn Baldwin 	}
1309fb680e16SJohn Baldwin 
1310fb680e16SJohn Baldwin 	shmfd->shm_kmappings++;
1311fb680e16SJohn Baldwin 	vm_object_reference_locked(obj);
131289f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(obj);
1313fb680e16SJohn Baldwin 
1314fb680e16SJohn Baldwin 	/* Map the object into the kernel_map and wire it. */
1315fb680e16SJohn Baldwin 	kva = vm_map_min(kernel_map);
1316fb680e16SJohn Baldwin 	ofs = offset & PAGE_MASK;
1317fb680e16SJohn Baldwin 	offset = trunc_page(offset);
1318fb680e16SJohn Baldwin 	size = round_page(size + ofs);
1319edb572a3SJohn Baldwin 	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
13205e3a17c0SJohn Baldwin 	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1321fb680e16SJohn Baldwin 	    VM_PROT_READ | VM_PROT_WRITE, 0);
1322fb680e16SJohn Baldwin 	if (rv == KERN_SUCCESS) {
1323fb680e16SJohn Baldwin 		rv = vm_map_wire(kernel_map, kva, kva + size,
1324fb680e16SJohn Baldwin 		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1325fb680e16SJohn Baldwin 		if (rv == KERN_SUCCESS) {
1326fb680e16SJohn Baldwin 			*memp = (void *)(kva + ofs);
1327fb680e16SJohn Baldwin 			return (0);
1328fb680e16SJohn Baldwin 		}
1329fb680e16SJohn Baldwin 		vm_map_remove(kernel_map, kva, kva + size);
1330fb680e16SJohn Baldwin 	} else
1331fb680e16SJohn Baldwin 		vm_object_deallocate(obj);
1332fb680e16SJohn Baldwin 
1333fb680e16SJohn Baldwin 	/* On failure, drop our mapping reference. */
133489f6b863SAttilio Rao 	VM_OBJECT_WLOCK(obj);
1335fb680e16SJohn Baldwin 	shmfd->shm_kmappings--;
133689f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(obj);
1337fb680e16SJohn Baldwin 
1338338e7cf2SJohn Baldwin 	return (vm_mmap_to_errno(rv));
1339fb680e16SJohn Baldwin }
1340fb680e16SJohn Baldwin 
1341fb680e16SJohn Baldwin /*
1342fb680e16SJohn Baldwin  * We require the caller to unmap the entire entry.  This allows us to
1343fb680e16SJohn Baldwin  * safely decrement shm_kmappings when a mapping is removed.
1344fb680e16SJohn Baldwin  */
1345fb680e16SJohn Baldwin int
1346fb680e16SJohn Baldwin shm_unmap(struct file *fp, void *mem, size_t size)
1347fb680e16SJohn Baldwin {
1348fb680e16SJohn Baldwin 	struct shmfd *shmfd;
1349fb680e16SJohn Baldwin 	vm_map_entry_t entry;
1350fb680e16SJohn Baldwin 	vm_offset_t kva, ofs;
1351fb680e16SJohn Baldwin 	vm_object_t obj;
1352fb680e16SJohn Baldwin 	vm_pindex_t pindex;
1353fb680e16SJohn Baldwin 	vm_prot_t prot;
1354fb680e16SJohn Baldwin 	boolean_t wired;
1355fb680e16SJohn Baldwin 	vm_map_t map;
1356fb680e16SJohn Baldwin 	int rv;
1357fb680e16SJohn Baldwin 
1358fb680e16SJohn Baldwin 	if (fp->f_type != DTYPE_SHM)
1359fb680e16SJohn Baldwin 		return (EINVAL);
1360fb680e16SJohn Baldwin 	shmfd = fp->f_data;
1361fb680e16SJohn Baldwin 	kva = (vm_offset_t)mem;
1362fb680e16SJohn Baldwin 	ofs = kva & PAGE_MASK;
1363fb680e16SJohn Baldwin 	kva = trunc_page(kva);
1364fb680e16SJohn Baldwin 	size = round_page(size + ofs);
1365fb680e16SJohn Baldwin 	map = kernel_map;
1366fb680e16SJohn Baldwin 	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1367fb680e16SJohn Baldwin 	    &obj, &pindex, &prot, &wired);
1368fb680e16SJohn Baldwin 	if (rv != KERN_SUCCESS)
1369fb680e16SJohn Baldwin 		return (EINVAL);
1370fb680e16SJohn Baldwin 	if (entry->start != kva || entry->end != kva + size) {
1371fb680e16SJohn Baldwin 		vm_map_lookup_done(map, entry);
1372fb680e16SJohn Baldwin 		return (EINVAL);
1373fb680e16SJohn Baldwin 	}
1374fb680e16SJohn Baldwin 	vm_map_lookup_done(map, entry);
1375fb680e16SJohn Baldwin 	if (obj != shmfd->shm_object)
1376fb680e16SJohn Baldwin 		return (EINVAL);
1377fb680e16SJohn Baldwin 	vm_map_remove(map, kva, kva + size);
137889f6b863SAttilio Rao 	VM_OBJECT_WLOCK(obj);
1379fb680e16SJohn Baldwin 	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1380fb680e16SJohn Baldwin 	shmfd->shm_kmappings--;
138189f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(obj);
1382fb680e16SJohn Baldwin 	return (0);
1383fb680e16SJohn Baldwin }
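
/*
 * Illustrative in-kernel sketch (not part of this file): shm_map() wires
 * the backing object into kernel_map, and shm_unmap() must later be given
 * exactly the same address and size.  The shm_peek() consumer, its fd
 * argument, and the use of cap_read_rights are assumptions for the
 * example; only the shm_map()/shm_unmap() calls reflect the interfaces
 * above.
 *
 *	static int
 *	shm_peek(struct thread *td, int fd, size_t len, void *buf)
 *	{
 *		struct file *fp;
 *		void *mem;
 *		int error;
 *
 *		error = fget(td, fd, &cap_read_rights, &fp);
 *		if (error != 0)
 *			return (error);
 *		error = shm_map(fp, len, 0, &mem);
 *		if (error == 0) {
 *			memcpy(buf, mem, len);
 *			error = shm_unmap(fp, mem, len);
 *		}
 *		fdrop(fp, td);
 *		return (error);
 *	}
 */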
1384e506e182SJohn Baldwin 
13859696feebSJohn Baldwin static int
138656d0e33eSKonstantin Belousov shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1387e506e182SJohn Baldwin {
1388cc7b259aSJamie Gritton 	const char *path, *pr_path;
1389cc7b259aSJamie Gritton 	size_t pr_pathlen;
139056d0e33eSKonstantin Belousov 	bool visible;
1391e506e182SJohn Baldwin 
139256d0e33eSKonstantin Belousov 	sx_assert(&shm_dict_lock, SA_LOCKED);
13939696feebSJohn Baldwin 	kif->kf_type = KF_TYPE_SHM;
139456d0e33eSKonstantin Belousov 	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
13959696feebSJohn Baldwin 	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
13969696feebSJohn Baldwin 	if (shmfd->shm_path != NULL) {
1398cc7b259aSJamie Gritton 		path = shmfd->shm_path;
1399cc7b259aSJamie Gritton 		pr_path = curthread->td_ucred->cr_prison->pr_path;
140044c16975SJamie Gritton 		if (strcmp(pr_path, "/") != 0) {
140144c16975SJamie Gritton 			/* Return the jail-rooted pathname. */
1402cc7b259aSJamie Gritton 			pr_pathlen = strlen(pr_path);
140356d0e33eSKonstantin Belousov 			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
140456d0e33eSKonstantin Belousov 			    path[pr_pathlen] == '/';
140556d0e33eSKonstantin Belousov 			if (list && !visible)
140656d0e33eSKonstantin Belousov 				return (EPERM);
140756d0e33eSKonstantin Belousov 			if (visible)
1408cc7b259aSJamie Gritton 				path += pr_pathlen;
1409cc7b259aSJamie Gritton 		}
1410cc7b259aSJamie Gritton 		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1412e506e182SJohn Baldwin 	}
14139696feebSJohn Baldwin 	return (0);
14149696feebSJohn Baldwin }
141556d0e33eSKonstantin Belousov 
141656d0e33eSKonstantin Belousov static int
141756d0e33eSKonstantin Belousov shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
141856d0e33eSKonstantin Belousov     struct filedesc *fdp __unused)
141956d0e33eSKonstantin Belousov {
142056d0e33eSKonstantin Belousov 	int res;
142156d0e33eSKonstantin Belousov 
142256d0e33eSKonstantin Belousov 	sx_slock(&shm_dict_lock);
142356d0e33eSKonstantin Belousov 	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
142456d0e33eSKonstantin Belousov 	sx_sunlock(&shm_dict_lock);
142556d0e33eSKonstantin Belousov 	return (res);
142656d0e33eSKonstantin Belousov }
142756d0e33eSKonstantin Belousov 
142856d0e33eSKonstantin Belousov static int
1429af755d3eSKyle Evans shm_add_seals(struct file *fp, int seals)
1430af755d3eSKyle Evans {
1431af755d3eSKyle Evans 	struct shmfd *shmfd;
1432af755d3eSKyle Evans 	void *rl_cookie;
1433af755d3eSKyle Evans 	vm_ooffset_t writemappings;
1434af755d3eSKyle Evans 	int error, nseals;
1435af755d3eSKyle Evans 
1436af755d3eSKyle Evans 	error = 0;
1437af755d3eSKyle Evans 	shmfd = fp->f_data;
1438af755d3eSKyle Evans 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1439af755d3eSKyle Evans 	    &shmfd->shm_mtx);
1440af755d3eSKyle Evans 
1441af755d3eSKyle Evans 	/* Even already-set seals should result in EPERM. */
1442af755d3eSKyle Evans 	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1443af755d3eSKyle Evans 		error = EPERM;
1444af755d3eSKyle Evans 		goto out;
1445af755d3eSKyle Evans 	}
1446af755d3eSKyle Evans 	nseals = seals & ~shmfd->shm_seals;
1447af755d3eSKyle Evans 	if ((nseals & F_SEAL_WRITE) != 0) {
1448af755d3eSKyle Evans 		/*
1449af755d3eSKyle Evans 		 * The rangelock above prevents writable mappings from being
1450af755d3eSKyle Evans 		 * added after we've started applying seals.  The RLOCK here
1451af755d3eSKyle Evans 		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
1452af755d3eSKyle Evans 		 * writemappings will be done without a rangelock.
1453af755d3eSKyle Evans 		 */
1454af755d3eSKyle Evans 		VM_OBJECT_RLOCK(shmfd->shm_object);
1455af755d3eSKyle Evans 		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1456af755d3eSKyle Evans 		VM_OBJECT_RUNLOCK(shmfd->shm_object);
1457af755d3eSKyle Evans 		/* kmappings are also writable */
1458af755d3eSKyle Evans 		if (writemappings > 0) {
1459af755d3eSKyle Evans 			error = EBUSY;
1460af755d3eSKyle Evans 			goto out;
1461af755d3eSKyle Evans 		}
1462af755d3eSKyle Evans 	}
1463af755d3eSKyle Evans 	shmfd->shm_seals |= nseals;
1464af755d3eSKyle Evans out:
1465af755d3eSKyle Evans 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1466af755d3eSKyle Evans 	return (error);
1467af755d3eSKyle Evans }
1468af755d3eSKyle Evans 
1469af755d3eSKyle Evans static int
1470af755d3eSKyle Evans shm_get_seals(struct file *fp, int *seals)
1471af755d3eSKyle Evans {
1472af755d3eSKyle Evans 	struct shmfd *shmfd;
1473af755d3eSKyle Evans 
1474af755d3eSKyle Evans 	shmfd = fp->f_data;
1475af755d3eSKyle Evans 	*seals = shmfd->shm_seals;
1476af755d3eSKyle Evans 	return (0);
1477af755d3eSKyle Evans }
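
/*
 * Illustrative userland sketch (not part of this file): file sealing is
 * driven through fcntl(2) with F_ADD_SEALS/F_GET_SEALS, which arrive here
 * via shm_add_seals() and shm_get_seals().  F_SEAL_WRITE is refused with
 * EBUSY while writable shared mappings exist, per the writemappings check
 * above.  The example uses memfd_create() so that the object starts
 * without F_SEAL_SEAL.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd, seals;
 *
 *		fd = memfd_create("seal-demo", MFD_ALLOW_SEALING);
 *		if (fd < 0)
 *			err(1, "memfd_create");
 *		if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) == -1)
 *			err(1, "F_ADD_SEALS");
 *		if ((seals = fcntl(fd, F_GET_SEALS)) == -1)
 *			err(1, "F_GET_SEALS");
 *		return ((seals & F_SEAL_GROW) != 0 ? 0 : 1);
 *	}
 */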
1478af755d3eSKyle Evans 
1479af755d3eSKyle Evans static int
1480f1040532SKyle Evans shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
1481f1040532SKyle Evans {
1482f1040532SKyle Evans 	void *rl_cookie;
1483f1040532SKyle Evans 	struct shmfd *shmfd;
1484f1040532SKyle Evans 	size_t size;
1485f1040532SKyle Evans 	int error;
1486f1040532SKyle Evans 
1487f1040532SKyle Evans 	/* This assumes that the caller already checked for overflow. */
1488f1040532SKyle Evans 	error = 0;
1489f1040532SKyle Evans 	shmfd = fp->f_data;
1490f1040532SKyle Evans 	size = offset + len;
149139eae263SKyle Evans 
149239eae263SKyle Evans 	/*
149339eae263SKyle Evans 	 * Take the rangelock only over the range we may be attempting to
149439eae263SKyle Evans 	 * grow, rather than blocking reads and writes to regions this
149539eae263SKyle Evans 	 * (potential) resize will not touch.  Any other attempt to resize
149639eae263SKyle Evans 	 * the shmfd must take a write lock from 0 to OFF_MAX, so the fact
149739eae263SKyle Evans 	 * that our range may extend beyond the current usable range of the
149839eae263SKyle Evans 	 * shmfd is not necessarily a concern.  If other mechanisms to grow a
149939eae263SKyle Evans 	 * shmfd are added, this may need to be re-evaluated.
150039eae263SKyle Evans 	 */
150139eae263SKyle Evans 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
1502f1040532SKyle Evans 	    &shmfd->shm_mtx);
1503f1040532SKyle Evans 	if (size > shmfd->shm_size) {
1504f1040532SKyle Evans 		VM_OBJECT_WLOCK(shmfd->shm_object);
1505f1040532SKyle Evans 		error = shm_dotruncate_locked(shmfd, size, rl_cookie);
1506f1040532SKyle Evans 		VM_OBJECT_WUNLOCK(shmfd->shm_object);
1507f1040532SKyle Evans 	}
1508f1040532SKyle Evans 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1509f1040532SKyle Evans 	/* Translate to posix_fallocate(2) return value as needed. */
1510f1040532SKyle Evans 	if (error == ENOMEM)
1511f1040532SKyle Evans 		error = ENOSPC;
1512f1040532SKyle Evans 	return (error);
1513f1040532SKyle Evans }
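
/*
 * Illustrative userland sketch (not part of this file): posix_fallocate(2)
 * on a shm descriptor reaches shm_fallocate() above and only ever grows
 * the object.  Note that posix_fallocate() returns an error number
 * directly rather than setting errno, and that ENOMEM is reported to the
 * caller as ENOSPC per the translation above.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd, error;
 *
 *		fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *		if (fd < 0)
 *			err(1, "shm_open");
 *		error = posix_fallocate(fd, 0, 1024 * 1024);
 *		if (error != 0)
 *			errc(1, error, "posix_fallocate");
 *		return (0);
 *	}
 */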
1514f1040532SKyle Evans 
1515f1040532SKyle Evans static int
151656d0e33eSKonstantin Belousov sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
151756d0e33eSKonstantin Belousov {
151856d0e33eSKonstantin Belousov 	struct shm_mapping *shmm;
151956d0e33eSKonstantin Belousov 	struct sbuf sb;
152056d0e33eSKonstantin Belousov 	struct kinfo_file kif;
152156d0e33eSKonstantin Belousov 	u_long i;
152256d0e33eSKonstantin Belousov 	ssize_t curlen;
152356d0e33eSKonstantin Belousov 	int error, error2;
152456d0e33eSKonstantin Belousov 
152556d0e33eSKonstantin Belousov 	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
152656d0e33eSKonstantin Belousov 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
152756d0e33eSKonstantin Belousov 	curlen = 0;
152856d0e33eSKonstantin Belousov 	error = 0;
152956d0e33eSKonstantin Belousov 	sx_slock(&shm_dict_lock);
153056d0e33eSKonstantin Belousov 	for (i = 0; i < shm_hash + 1; i++) {
153156d0e33eSKonstantin Belousov 		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
153256d0e33eSKonstantin Belousov 			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
153356d0e33eSKonstantin Belousov 			    &kif, true);
153456d0e33eSKonstantin Belousov 			if (error == EPERM)
153556d0e33eSKonstantin Belousov 				continue;
153656d0e33eSKonstantin Belousov 			if (error != 0)
153756d0e33eSKonstantin Belousov 				break;
153856d0e33eSKonstantin Belousov 			pack_kinfo(&kif);
153956d0e33eSKonstantin Belousov 			if (req->oldptr != NULL &&
154056d0e33eSKonstantin Belousov 			    kif.kf_structsize + curlen > req->oldlen)
154156d0e33eSKonstantin Belousov 				break;
154256d0e33eSKonstantin Belousov 			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
154356d0e33eSKonstantin Belousov 			    0 : ENOMEM;
154456d0e33eSKonstantin Belousov 			if (error != 0)
154556d0e33eSKonstantin Belousov 				break;
154656d0e33eSKonstantin Belousov 			curlen += kif.kf_structsize;
154756d0e33eSKonstantin Belousov 		}
154856d0e33eSKonstantin Belousov 	}
154956d0e33eSKonstantin Belousov 	sx_sunlock(&shm_dict_lock);
155056d0e33eSKonstantin Belousov 	error2 = sbuf_finish(&sb);
155156d0e33eSKonstantin Belousov 	sbuf_delete(&sb);
155256d0e33eSKonstantin Belousov 	return (error != 0 ? error : error2);
155356d0e33eSKonstantin Belousov }
155456d0e33eSKonstantin Belousov 
155556d0e33eSKonstantin Belousov SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
155656d0e33eSKonstantin Belousov     CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
155756d0e33eSKonstantin Belousov     NULL, 0, sysctl_posix_shm_list, "",
155856d0e33eSKonstantin Belousov     "POSIX SHM list");
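
/*
 * Illustrative userland sketch (not part of this file): the sysctl exported
 * above returns a packed array of variable-length struct kinfo_file
 * records and is what posixshmcontrol(1) consumes.  A minimal reader might
 * look like the following; the single-shot sizing (no retry on a growing
 * list) is a simplifying assumption.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/user.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct kinfo_file *kif;
 *		size_t len = 0;
 *		char *buf, *bp;
 *
 *		if (sysctlbyname("kern.ipc.posix_shm_list", NULL, &len,
 *		    NULL, 0) != 0)
 *			err(1, "sysctl size");
 *		buf = malloc(len);
 *		if (buf == NULL || sysctlbyname("kern.ipc.posix_shm_list",
 *		    buf, &len, NULL, 0) != 0)
 *			err(1, "sysctl read");
 *		for (bp = buf; bp < buf + len; bp += kif->kf_structsize) {
 *			kif = (struct kinfo_file *)bp;
 *			printf("%s\n", kif->kf_path);
 *		}
 *		free(buf);
 *		return (0);
 *	}
 */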
155920f70576SKyle Evans 
156020f70576SKyle Evans int
1561535b1df9SKyle Evans kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
1562535b1df9SKyle Evans     struct filecaps *caps)
156320f70576SKyle Evans {
156420f70576SKyle Evans 
1565535b1df9SKyle Evans 	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
156620f70576SKyle Evans }
156720f70576SKyle Evans 
156820f70576SKyle Evans /*
156920f70576SKyle Evans  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
157020f70576SKyle Evans  * caller, and libc will enforce it for the traditional shm_open() call.  This
157120f70576SKyle Evans  * allows other consumers, like memfd_create(), to opt in to CLOEXEC.  This
157220f70576SKyle Evans  * interface also includes a 'name' argument that is currently unused, but could
157320f70576SKyle Evans  * potentially be exported later via some interface for debugging purposes.
157420f70576SKyle Evans  * From the kernel's perspective, it is optional.  Individual consumers like
157520f70576SKyle Evans  * memfd_create() may require it in order to be compatible with other systems
157620f70576SKyle Evans  * implementing the same function.
157720f70576SKyle Evans  */
157820f70576SKyle Evans int
157920f70576SKyle Evans sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
158020f70576SKyle Evans {
158120f70576SKyle Evans 
158220f70576SKyle Evans 	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
1583535b1df9SKyle Evans 	    uap->shmflags, NULL, uap->name));
158420f70576SKyle Evans }
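
/*
 * Illustrative userland sketch (not part of this file): memfd_create() is
 * one such consumer, implemented in libc on top of this syscall, and it is
 * the caller that supplies the 'name' argument mentioned above.
 * MFD_ALLOW_SEALING asks for the object to be created without F_SEAL_SEAL
 * so that seals may be added later.
 *
 *	#include <sys/mman.h>
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd;
 *
 *		fd = memfd_create("scratch", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *		if (fd < 0)
 *			err(1, "memfd_create");
 *		if (ftruncate(fd, 4096) != 0)
 *			err(1, "ftruncate");
 *		return (0);
 *	}
 */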
1585