/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).  While most of the
 * implementation is here, vm_mmap.c contains mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
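
/*
 * Example (illustrative userspace sketch, not kernel code): a typical
 * consumer creates a named object, sizes it with ftruncate(2), and maps
 * it.  The name "/myshm" and the 4096-byte size are made up for the
 * example:
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	if (fd == -1)
 *		err(1, "shm_open");
 *	if (ftruncate(fd, 4096) == -1)
 *		err(1, "ftruncate");
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	...
 *	(void)shm_unlink("/myshm");
 */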

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])
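
/*
 * shm_hash is the bucket mask handed back by hashinit(), so SHM_HASH()
 * maps a 32-bit FNV hash of a path to its dictionary bucket.
 */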

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;
static fo_get_seals_t	shm_get_seals;
static fo_add_seals_t	shm_add_seals;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_get_seals = shm_get_seals,
	.fo_add_seals = shm_add_seals,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although this function may be entered with the tmpfs vnode
	 * lock held (uiomove_object() also serves tmpfs), it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire that vnode lock to
	 * page out the object's pages because it is an OBJT_SWAP type
	 * object.
	 */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
		    obj, idx, rv);
		return (EIO);
	}
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		vm_pager_page_unswapped(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_unwire(m, PQ_ACTIVE);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}
	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0)
		error = EPERM;
	else
		error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{

	switch (com) {
	case FIONBIO:
	case FIOASYNC:
		/*
		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, just like
		 * it would on an unlinked regular file.
		 */
		return (0);
	default:
		return (ENOTTY);
	}
}
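
/*
 * Example (userspace, illustrative): the FIONBIO case above is what lets
 * a plain fcntl(2) call succeed on a shm descriptor, assuming "fd" came
 * from shm_open(2):
 *
 *	if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1)
 *		err(1, "fcntl");
 */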

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sanish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
	int error;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;

	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;

	/* Construct a full pathname for jailed callers. */
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
	    strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(userpath_in, path + pr_pathlen,
	    MAXPATHLEN - pr_pathlen, NULL);
	if (error != 0)
		goto out;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif

	/* Require paths to start with a '/' character. */
	if (path[pr_pathlen] != '/') {
		error = EINVAL;
		goto out;
	}

	*path_out = path;

out:
	if (error != 0)
		free(path, M_SHMFD);

	return (error);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
	if (length == shmfd->shm_size)
		return (0);
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);

		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
			if (m != NULL) {
				MPASS(vm_page_all_valid(m));
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx,
				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
				if (m == NULL)
					goto retry;
				rv = vm_pager_get_pages(object, &m, 1, NULL,
				    NULL);
				if (rv == VM_PAGER_OK) {
					/*
					 * Since the page was not resident,
					 * and therefore not recently
					 * accessed, immediately enqueue it
					 * for asynchronous laundering.  The
					 * current operation is not regarded
					 * as an access.
					 */
					vm_page_launder(m);
				} else {
					vm_page_free(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(vm_page_all_valid(m),
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_page_xunbusy(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
			return (EPERM);

		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred))
			return (ENOMEM);
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	return (0);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);
	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_dotruncate_locked(shmfd, length, rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}
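
/*
 * Example (in-kernel, sketch): code that stashes a shmfd pointer takes a
 * reference and releases it when done; the final shm_drop() frees the
 * object:
 *
 *	shmfd = shm_hold(shmfd);
 *	...
 *	shm_drop(shmfd);
 */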

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

int
kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
    struct filecaps *fcaps, int initial_seals)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	void *rl_cookie;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	/*
	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
	 * If the decision is made later to allow additional seals, care must be
	 * taken below to ensure that the seals are properly set if the shmfd
	 * already existed -- this currently assumes that only F_SEAL_SEAL can
	 * be set and doesn't take further precautions to ensure the validity of
	 * the seals being added with respect to current mappings.
	 */
	if ((initial_seals & ~F_SEAL_SEAL) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

	/*
	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
	 * by POSIX.  We allow it to be unset here so that an in-kernel
	 * interface may be written as a thin layer around shm, optionally not
	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
	 * in sys_shm_open() to keep this implementation compliant.
	 */
	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
		shmfd->shm_seals = initial_seals;
	} else {
		error = shm_copyin_path(td, userpath, &path);
		if (error != 0) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}

		AUDIT_ARG_UPATH1_CANON(path);
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shmfd->shm_seals = initial_seals;
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
			    &shmfd->shm_mtx);

			/*
			 * kern_shm_open() likely shouldn't ever error out on
			 * trying to set a seal that already exists, unlike
			 * F_ADD_SEALS.  This would break terribly as
			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
			 * historical behavior where the underlying file could
			 * not be sealed.
			 */
			initial_seals &= ~shmfd->shm_seals;

			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);

			/*
			 * initial_seals can't set additional seals if
			 * F_SEAL_SEAL is already set.  If F_SEAL_SEAL is set,
			 * then we've already removed it from initial_seals
			 * above.  This check is currently redundant, since we
			 * only allow setting F_SEAL_SEAL at creation time, but
			 * it's cheap and reduces the effort required to allow
			 * additional seals later.
			 */
			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
			    initial_seals != 0)
				error = EPERM;
			else if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					error = shm_dotruncate_locked(shmfd, 0,
					    rl_cookie);
				VM_OBJECT_WUNLOCK(shmfd->shm_object);
			}
			if (error == 0) {
				/*
				 * Currently we only allow F_SEAL_SEAL to be
				 * set initially.  As noted above, this would
				 * need to be reworked should that change.
				 */
				shmfd->shm_seals |= initial_seals;
				shm_hold(shmfd);
			}
			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
			    &shmfd->shm_mtx);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, uap->mode,
	    NULL, F_SEAL_SEAL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	error = shm_copyin_path(td, uap->path, &path);
	if (error != 0)
		return (error);

	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_SHMFD);

	return (error);
}

int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
	char *path_from = NULL, *path_to = NULL;
	Fnv32_t fnv_from, fnv_to;
	struct shmfd *fd_from;
	struct shmfd *fd_to;
	int error;
	int flags;

	flags = uap->flags;
	AUDIT_ARG_FFLAGS(flags);

	/*
	 * Make sure the user passed only valid flags.
	 * If you add a new flag, please add a new term here.
	 */
	if ((flags & ~(
	    SHM_RENAME_NOREPLACE |
	    SHM_RENAME_EXCHANGE
	    )) != 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
	 * force the user to choose one or the other.
	 */
	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
	    (flags & SHM_RENAME_EXCHANGE) != 0) {
		error = EINVAL;
		goto out;
	}

	/* Renaming to or from anonymous makes no sense */
	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
		error = EINVAL;
		goto out;
	}

	error = shm_copyin_path(td, uap->path_from, &path_from);
	if (error != 0)
		goto out;

	error = shm_copyin_path(td, uap->path_to, &path_to);
	if (error != 0)
		goto out;

	AUDIT_ARG_UPATH1_CANON(path_from);
	AUDIT_ARG_UPATH2_CANON(path_to);

	/* Rename with from/to equal is a no-op */
	if (strcmp(path_from, path_to) == 0)
		goto out;

	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

	sx_xlock(&shm_dict_lock);

	fd_from = shm_lookup(path_from, fnv_from);
	if (fd_from == NULL) {
		error = ENOENT;
		goto out_locked;
	}

	fd_to = shm_lookup(path_to, fnv_to);
	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
		error = EEXIST;
		goto out_locked;
	}

	/*
	 * Unconditionally prevents shm_remove from invalidating the 'from'
	 * shm's state.
	 */
	shm_hold(fd_from);
	error = shm_remove(path_from, fnv_from, td->td_ucred);

	/*
	 * If this returns ENOENT, one of our assumptions has failed
	 * (e.g. the locking didn't protect us), so assert on it.
	 */
	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
	    path_from));
	if (error != 0) {
		shm_drop(fd_from);
		goto out_locked;
	}

	/*
	 * If we are exchanging, we need to ensure the shm_remove below
	 * doesn't invalidate the dest shm's state.
	 */
	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
		shm_hold(fd_to);

	/*
	 * NOTE: if path_to is not already in the hash, c'est la vie;
	 * it simply means we have nothing already at path_to to unlink.
	 * That is the ENOENT case.
	 *
	 * If we somehow don't have access to unlink this guy, but
	 * did for the shm at path_from, then relink the shm to path_from
	 * and abort with EACCES.
	 *
	 * All other errors: that is weird; let's relink and abort the
	 * operation.
	 */
	error = shm_remove(path_to, fnv_to, td->td_ucred);
	if (error != 0 && error != ENOENT) {
		shm_insert(path_from, fnv_from, fd_from);
		shm_drop(fd_from);
		/* Don't free path_from now, since the hash references it */
		path_from = NULL;
		goto out_locked;
	}

	error = 0;

	shm_insert(path_to, fnv_to, fd_from);

	/* Don't free path_to now, since the hash references it */
	path_to = NULL;

	/* We kept a ref when we removed, and incremented again in insert */
	shm_drop(fd_from);
	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
	    fd_from->shm_refs));

	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
		shm_insert(path_from, fnv_from, fd_to);
		path_from = NULL;
		shm_drop(fd_to);
		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
		    fd_to->shm_refs));
	}

out_locked:
	sx_xunlock(&shm_dict_lock);

out:
	free(path_from, M_SHMFD);
	free(path_to, M_SHMFD);
	return (error);
}
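
/*
 * Example (userspace, illustrative; both names are made up): atomically
 * replace the object at "/shm_live" with the one staged at "/shm_new":
 *
 *	if (shm_rename("/shm_new", "/shm_live", 0) == -1)
 *		err(1, "shm_rename");
 *
 * SHM_RENAME_EXCHANGE instead swaps the two objects, while
 * SHM_RENAME_NOREPLACE fails with EEXIST if the destination exists.
 */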

int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;
	bool writecnt;
	void *rl_cookie;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
	    &shmfd->shm_mtx);
	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
	if ((fp->f_flag & FWRITE) != 0)
		maxprot |= VM_PROT_WRITE;

	writecnt = (flags & MAP_SHARED) != 0 && (prot & VM_PROT_WRITE) != 0;

	if (writecnt && (shmfd->shm_seals & F_SEAL_WRITE) != 0) {
		error = EPERM;
		goto out;
	}

	/* Don't permit shared writable mappings on read-only descriptors. */
	if (writecnt && (maxprot & VM_PROT_WRITE) == 0) {
		error = EACCES;
		goto out;
	}
	maxprot &= cap_maxprot;

	/* See comment in vn_mmap(). */
	if (
#ifdef _LP64
	    objsize > OFF_MAX ||
#endif
	    foff < 0 || foff > OFF_MAX - objsize) {
		error = EINVAL;
		goto out;
	}

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		goto out;
#endif

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	if (writecnt)
		vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, writecnt, td);
	if (error != 0) {
		if (writecnt)
			vm_pager_release_writecount(shmfd->shm_object, 0,
			    objsize);
		vm_object_deallocate(shmfd->shm_object);
	}
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}
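
/*
 * Example (in-kernel, sketch): a consumer holding a DTYPE_SHM file
 * pointer can wire a page and access it directly, undoing the mapping
 * with shm_unmap() below; error handling is abbreviated:
 *
 *	void *mem;
 *	error = shm_map(fp, PAGE_SIZE, 0, &mem);
 *	if (error == 0) {
 *		memset(mem, 0, PAGE_SIZE);
 *		error = shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */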

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}

static int
shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
{
	const char *path, *pr_path;
	size_t pr_pathlen;
	bool visible;

	sx_assert(&shm_dict_lock, SA_LOCKED);
	kif->kf_type = KF_TYPE_SHM;
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		path = shmfd->shm_path;
		pr_path = curthread->td_ucred->cr_prison->pr_path;
		if (strcmp(pr_path, "/") != 0) {
			/* Return the jail-rooted pathname. */
			pr_pathlen = strlen(pr_path);
			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
			    path[pr_pathlen] == '/';
			if (list && !visible)
				return (EPERM);
			if (visible)
				path += pr_pathlen;
		}
		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
	}
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp __unused)
{
	int res;

	sx_slock(&shm_dict_lock);
	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
	sx_sunlock(&shm_dict_lock);
	return (res);
}

static int
shm_add_seals(struct file *fp, int seals)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	vm_ooffset_t writemappings;
	int error, nseals;

	error = 0;
	shmfd = fp->f_data;
	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);

	/* Even already-set seals should result in EPERM. */
	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
		error = EPERM;
		goto out;
	}
	nseals = seals & ~shmfd->shm_seals;
	if ((nseals & F_SEAL_WRITE) != 0) {
		/*
		 * The rangelock above prevents writable mappings from being
		 * added after we've started applying seals.  The RLOCK here
		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
		 * writemappings will be done without a rangelock.
		 */
		VM_OBJECT_RLOCK(shmfd->shm_object);
		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
		VM_OBJECT_RUNLOCK(shmfd->shm_object);
		/* kmappings are also writable */
		if (writemappings > 0) {
			error = EBUSY;
			goto out;
		}
	}
	shmfd->shm_seals |= nseals;
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

static int
shm_get_seals(struct file *fp, int *seals)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	*seals = shmfd->shm_seals;
	return (0);
}
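
/*
 * Example (userspace, illustrative): with a descriptor created via
 * shm_open2(2) and SHM_ALLOW_SEALING, a writer can freeze the object and
 * anyone can inspect the seals:
 *
 *	if (fcntl(fd, F_ADD_SEALS,
 *	    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_WRITE) == -1)
 *		err(1, "F_ADD_SEALS");
 *	int seals = fcntl(fd, F_GET_SEALS);
 */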

static int
sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
{
	struct shm_mapping *shmm;
	struct sbuf sb;
	struct kinfo_file kif;
	u_long i;
	ssize_t curlen;
	int error, error2;

	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
	curlen = 0;
	error = 0;
	sx_slock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
			    &kif, true);
			if (error == EPERM)
				continue;
			if (error != 0)
				break;
			pack_kinfo(&kif);
			if (req->oldptr != NULL &&
			    kif.kf_structsize + curlen > req->oldlen)
				break;
			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
			    0 : ENOMEM;
			if (error != 0)
				break;
			curlen += kif.kf_structsize;
		}
	}
	sx_sunlock(&shm_dict_lock);
	error2 = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error != 0 ? error : error2);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
    CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
    NULL, 0, sysctl_posix_shm_list, "",
    "POSIX SHM list");
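
/*
 * posixshmcontrol(1), mentioned in the comment at the top of this file,
 * is the primary consumer of the sysctl above.
 */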

int
kern_shm_open2(struct thread *td, const char *path, int flags, mode_t mode,
    int shmflags, const char *name __unused)
{
	int initial_seals;

	if ((shmflags & ~SHM_ALLOW_SEALING) != 0)
		return (EINVAL);

	initial_seals = F_SEAL_SEAL;
	if ((shmflags & SHM_ALLOW_SEALING) != 0)
		initial_seals &= ~F_SEAL_SEAL;
	return (kern_shm_open(td, path, flags, mode, NULL, initial_seals));
}

/*
 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
 * caller, and libc will enforce it for the traditional shm_open() call.  This
 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
 * interface also includes a 'name' argument that is currently unused, but could
 * potentially be exported later via some interface for debugging purposes.
 * From the kernel's perspective, it is optional.  Individual consumers like
 * memfd_create() may require it in order to be compatible with other systems
 * implementing the same function.
 */
int
sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
{

	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
	    uap->shmflags, uap->name));
}
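
/*
 * Example (userspace, sketch): memfd_create(), mentioned above, is built
 * in libc on top of this entry point; its effect is roughly
 *
 *	fd = shm_open2(SHM_ANON, O_RDWR | O_CLOEXEC, 0,
 *	    SHM_ALLOW_SEALING, name);
 *
 * with O_CLOEXEC and SHM_ALLOW_SEALING included only when the caller
 * asked for MFD_CLOEXEC and MFD_ALLOW_SEALING, respectively.
 */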
1508