xref: /freebsd/sys/kern/uipc_shm.c (revision f18976136625a7d016e97bfd9eabddf640b3e06d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5  * All rights reserved.
6  *
7  * Portions of this software were developed by BAE Systems, the University of
8  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
9  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
10  * Computing (TC) research program.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * Support for shared swap-backed anonymous memory objects via
36  * shm_open(2), shm_rename(2), and shm_unlink(2).
37  * While most of the implementation is here, vm_mmap.c contains the
38  * mapping logic changes.
39  *
40  * posixshmcontrol(1) allows users to inspect the state of the memory
41  * objects.  The per-uid swap resource limit controls the total amount
42  * of memory that a user can consume for anonymous objects, including
43  * shared ones.
44  */
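
/*
 * A minimal illustrative sketch of the userland life cycle that this
 * file backs; error handling is omitted and the name "/example" is
 * arbitrary:
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);		// resized via shm_dotruncate()
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);		// dispatched to shm_mmap()
 *	p[0] = 1;
 *	munmap(p, 4096);
 *	close(fd);			// shm_close() drops a reference
 *	shm_unlink("/example");		// removes the dictionary entry
 */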
45 
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48 
49 #include "opt_capsicum.h"
50 #include "opt_ktrace.h"
51 
52 #include <sys/param.h>
53 #include <sys/capsicum.h>
54 #include <sys/conf.h>
55 #include <sys/fcntl.h>
56 #include <sys/file.h>
57 #include <sys/filedesc.h>
58 #include <sys/filio.h>
59 #include <sys/fnv_hash.h>
60 #include <sys/kernel.h>
61 #include <sys/limits.h>
62 #include <sys/uio.h>
63 #include <sys/signal.h>
64 #include <sys/jail.h>
65 #include <sys/ktrace.h>
66 #include <sys/lock.h>
67 #include <sys/malloc.h>
68 #include <sys/mman.h>
69 #include <sys/mutex.h>
70 #include <sys/priv.h>
71 #include <sys/proc.h>
72 #include <sys/refcount.h>
73 #include <sys/resourcevar.h>
74 #include <sys/rwlock.h>
75 #include <sys/sbuf.h>
76 #include <sys/stat.h>
77 #include <sys/syscallsubr.h>
78 #include <sys/sysctl.h>
79 #include <sys/sysproto.h>
80 #include <sys/systm.h>
81 #include <sys/sx.h>
82 #include <sys/time.h>
83 #include <sys/vnode.h>
84 #include <sys/unistd.h>
85 #include <sys/user.h>
86 
87 #include <security/audit/audit.h>
88 #include <security/mac/mac_framework.h>
89 
90 #include <vm/vm.h>
91 #include <vm/vm_param.h>
92 #include <vm/pmap.h>
93 #include <vm/vm_extern.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_kern.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_page.h>
98 #include <vm/vm_pageout.h>
99 #include <vm/vm_pager.h>
100 #include <vm/swap_pager.h>
101 
102 struct shm_mapping {
103 	char		*sm_path;
104 	Fnv32_t		sm_fnv;
105 	struct shmfd	*sm_shmfd;
106 	LIST_ENTRY(shm_mapping) sm_link;
107 };
108 
109 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
110 static LIST_HEAD(, shm_mapping) *shm_dictionary;
111 static struct sx shm_dict_lock;
112 static struct mtx shm_timestamp_lock;
113 static u_long shm_hash;
114 static struct unrhdr64 shm_ino_unr;
115 static dev_t shm_dev_ino;
116 
117 #define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])
118 
119 static void	shm_init(void *arg);
120 static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
121 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
122 static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
123 static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
124     void *rl_cookie);
125 
126 static fo_rdwr_t	shm_read;
127 static fo_rdwr_t	shm_write;
128 static fo_truncate_t	shm_truncate;
129 static fo_ioctl_t	shm_ioctl;
130 static fo_stat_t	shm_stat;
131 static fo_close_t	shm_close;
132 static fo_chmod_t	shm_chmod;
133 static fo_chown_t	shm_chown;
134 static fo_seek_t	shm_seek;
135 static fo_fill_kinfo_t	shm_fill_kinfo;
136 static fo_mmap_t	shm_mmap;
137 static fo_get_seals_t	shm_get_seals;
138 static fo_add_seals_t	shm_add_seals;
139 
140 /* File descriptor operations. */
141 struct fileops shm_ops = {
142 	.fo_read = shm_read,
143 	.fo_write = shm_write,
144 	.fo_truncate = shm_truncate,
145 	.fo_ioctl = shm_ioctl,
146 	.fo_poll = invfo_poll,
147 	.fo_kqfilter = invfo_kqfilter,
148 	.fo_stat = shm_stat,
149 	.fo_close = shm_close,
150 	.fo_chmod = shm_chmod,
151 	.fo_chown = shm_chown,
152 	.fo_sendfile = vn_sendfile,
153 	.fo_seek = shm_seek,
154 	.fo_fill_kinfo = shm_fill_kinfo,
155 	.fo_mmap = shm_mmap,
156 	.fo_get_seals = shm_get_seals,
157 	.fo_add_seals = shm_add_seals,
158 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
159 };
160 
161 FEATURE(posix_shm, "POSIX shared memory");
162 
163 static int
164 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
165 {
166 	vm_page_t m;
167 	vm_pindex_t idx;
168 	size_t tlen;
169 	int error, offset, rv;
170 
171 	idx = OFF_TO_IDX(uio->uio_offset);
172 	offset = uio->uio_offset & PAGE_MASK;
173 	tlen = MIN(PAGE_SIZE - offset, len);
174 
175 	VM_OBJECT_WLOCK(obj);
176 
177 	/*
178 	 * Read I/O without either a corresponding resident page or swap
179 	 * page: use zero_region.  This is intended to avoid instantiating
180 	 * pages on read from a sparse region.
181 	 */
182 	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
183 	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
184 		VM_OBJECT_WUNLOCK(obj);
185 		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
186 	}
187 
188 	/*
189 	 * Parallel reads of the page content from disk are prevented
190 	 * by exclusive busy.
191 	 *
192 	 * Although the tmpfs vnode lock is held when we are called
193 	 * from tmpfs, it is nonetheless safe to sleep waiting for a
194 	 * free page.  The pageout daemon does not need to acquire the
195 	 * tmpfs vnode lock to page out the object's pages because the
196 	 * object is an OBJT_SWAP type object.
197 	 */
198 	rv = vm_page_grab_valid(&m, obj, idx,
199 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
200 	if (rv != VM_PAGER_OK) {
201 		VM_OBJECT_WUNLOCK(obj);
202 		printf("uiomove_object: vm_obj %p idx %ju pager error %d\n",
203 		    obj, (uintmax_t)idx, rv);
204 		return (EIO);
205 	}
206 	VM_OBJECT_WUNLOCK(obj);
207 	error = uiomove_fromphys(&m, offset, tlen, uio);
208 	if (uio->uio_rw == UIO_WRITE && error == 0) {
209 		VM_OBJECT_WLOCK(obj);
210 		vm_page_dirty(m);
211 		vm_pager_page_unswapped(m);
212 		VM_OBJECT_WUNLOCK(obj);
213 	}
214 	vm_page_unwire(m, PQ_ACTIVE);
215 
216 	return (error);
217 }
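
/*
 * A hedged userland illustration of the zero_region path above: given
 * an anonymous object with a hole,
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	char buf[4096];
 *
 *	ftruncate(fd, 1024 * 1024);
 *	read(fd, buf, sizeof(buf));	// returns zeroes
 *
 * the read is served from zero_region and no page is instantiated for
 * the hole; a write is what first allocates backing pages.
 */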
218 
219 int
220 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
221 {
222 	ssize_t resid;
223 	size_t len;
224 	int error;
225 
226 	error = 0;
227 	while ((resid = uio->uio_resid) > 0) {
228 		if (obj_size <= uio->uio_offset)
229 			break;
230 		len = MIN(obj_size - uio->uio_offset, resid);
231 		if (len == 0)
232 			break;
233 		error = uiomove_object_page(obj, len, uio);
234 		if (error != 0 || resid == uio->uio_resid)
235 			break;
236 	}
237 	return (error);
238 }
239 
240 static int
241 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
242 {
243 	struct shmfd *shmfd;
244 	off_t foffset;
245 	int error;
246 
247 	shmfd = fp->f_data;
248 	foffset = foffset_lock(fp, 0);
249 	error = 0;
250 	switch (whence) {
251 	case L_INCR:
252 		if (foffset < 0 ||
253 		    (offset > 0 && foffset > OFF_MAX - offset)) {
254 			error = EOVERFLOW;
255 			break;
256 		}
257 		offset += foffset;
258 		break;
259 	case L_XTND:
260 		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
261 			error = EOVERFLOW;
262 			break;
263 		}
264 		offset += shmfd->shm_size;
265 		break;
266 	case L_SET:
267 		break;
268 	default:
269 		error = EINVAL;
270 	}
271 	if (error == 0) {
272 		if (offset < 0 || offset > shmfd->shm_size)
273 			error = EINVAL;
274 		else
275 			td->td_uretoff.tdu_off = offset;
276 	}
277 	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
278 	return (error);
279 }
280 
281 static int
282 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
283     int flags, struct thread *td)
284 {
285 	struct shmfd *shmfd;
286 	void *rl_cookie;
287 	int error;
288 
289 	shmfd = fp->f_data;
290 #ifdef MAC
291 	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
292 	if (error)
293 		return (error);
294 #endif
295 	foffset_lock_uio(fp, uio, flags);
296 	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
297 	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
298 	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
299 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
300 	foffset_unlock_uio(fp, uio, flags);
301 	return (error);
302 }
303 
304 static int
305 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
306     int flags, struct thread *td)
307 {
308 	struct shmfd *shmfd;
309 	void *rl_cookie;
310 	int error;
311 
312 	shmfd = fp->f_data;
313 #ifdef MAC
314 	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
315 	if (error)
316 		return (error);
317 #endif
318 	foffset_lock_uio(fp, uio, flags);
319 	if ((flags & FOF_OFFSET) == 0) {
320 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
321 		    &shmfd->shm_mtx);
322 	} else {
323 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
324 		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
325 	}
326 	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0)
327 		error = EPERM;
328 	else
329 		error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
330 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
331 	foffset_unlock_uio(fp, uio, flags);
332 	return (error);
333 }
334 
335 static int
336 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
337     struct thread *td)
338 {
339 	struct shmfd *shmfd;
340 #ifdef MAC
341 	int error;
342 #endif
343 
344 	shmfd = fp->f_data;
345 #ifdef MAC
346 	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
347 	if (error)
348 		return (error);
349 #endif
350 	return (shm_dotruncate(shmfd, length));
351 }
352 
353 static int
354 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
355     struct thread *td)
356 {
357 
358 	switch (com) {
359 	case FIONBIO:
360 	case FIOASYNC:
361 		/*
362 		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
363 		 * just like it would on an unlinked regular file.
364 		 */
365 		return (0);
366 	default:
367 		return (ENOTTY);
368 	}
369 }
370 
371 static int
372 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
373     struct thread *td)
374 {
375 	struct shmfd *shmfd;
376 #ifdef MAC
377 	int error;
378 #endif
379 
380 	shmfd = fp->f_data;
381 
382 #ifdef MAC
383 	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
384 	if (error)
385 		return (error);
386 #endif
387 
388 	/*
389 	 * Attempt to return reasonable values for fstat() on a memory
390 	 * file descriptor.
391 	 */
392 	bzero(sb, sizeof(*sb));
393 	sb->st_blksize = PAGE_SIZE;
394 	sb->st_size = shmfd->shm_size;
395 	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
396 	mtx_lock(&shm_timestamp_lock);
397 	sb->st_atim = shmfd->shm_atime;
398 	sb->st_ctim = shmfd->shm_ctime;
399 	sb->st_mtim = shmfd->shm_mtime;
400 	sb->st_birthtim = shmfd->shm_birthtime;
401 	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
402 	sb->st_uid = shmfd->shm_uid;
403 	sb->st_gid = shmfd->shm_gid;
404 	mtx_unlock(&shm_timestamp_lock);
405 	sb->st_dev = shm_dev_ino;
406 	sb->st_ino = shmfd->shm_ino;
407 	sb->st_nlink = shmfd->shm_object->ref_count;
408 
409 	return (0);
410 }
411 
412 static int
413 shm_close(struct file *fp, struct thread *td)
414 {
415 	struct shmfd *shmfd;
416 
417 	shmfd = fp->f_data;
418 	fp->f_data = NULL;
419 	shm_drop(shmfd);
420 
421 	return (0);
422 }
423 
424 static int
425 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
426 {
427 	vm_object_t object;
428 	vm_page_t m;
429 	vm_pindex_t idx, nobjsize;
430 	vm_ooffset_t delta;
431 	int base, rv;
432 
433 	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
434 	object = shmfd->shm_object;
435 	VM_OBJECT_ASSERT_WLOCKED(object);
436 	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
437 	if (length == shmfd->shm_size)
438 		return (0);
439 	nobjsize = OFF_TO_IDX(length + PAGE_MASK);
440 
441 	/* Are we shrinking?  If so, trim the end. */
442 	if (length < shmfd->shm_size) {
443 		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
444 			return (EPERM);
445 
446 		/*
447 		 * Disallow any requests to shrink the size if this
448 		 * object is mapped into the kernel.
449 		 */
450 		if (shmfd->shm_kmappings > 0)
451 			return (EBUSY);
452 
453 		/*
454 		 * Zero the truncated part of the last page.
455 		 */
456 		base = length & PAGE_MASK;
457 		if (base != 0) {
458 			idx = OFF_TO_IDX(length);
459 retry:
460 			m = vm_page_lookup(object, idx);
461 			if (m != NULL) {
462 				if (vm_page_sleep_if_busy(m, "shmtrc"))
463 					goto retry;
464 			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
465 				m = vm_page_alloc(object, idx,
466 				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
467 				if (m == NULL)
468 					goto retry;
469 				rv = vm_pager_get_pages(object, &m, 1, NULL,
470 				    NULL);
471 				if (rv == VM_PAGER_OK) {
472 					/*
473 					 * Since the page was not resident,
474 					 * and therefore not recently
475 					 * accessed, immediately enqueue it
476 					 * for asynchronous laundering.  The
477 					 * current operation is not regarded
478 					 * as an access.
479 					 */
480 					vm_page_launder(m);
481 					vm_page_xunbusy(m);
482 				} else {
483 					vm_page_free(m);
484 					VM_OBJECT_WUNLOCK(object);
485 					return (EIO);
486 				}
487 			}
488 			if (m != NULL) {
489 				pmap_zero_page_area(m, base, PAGE_SIZE - base);
490 				KASSERT(m->valid == VM_PAGE_BITS_ALL,
491 				    ("shm_dotruncate: page %p is invalid", m));
492 				vm_page_dirty(m);
493 				vm_pager_page_unswapped(m);
494 			}
495 		}
496 		delta = IDX_TO_OFF(object->size - nobjsize);
497 
498 		/* Toss in memory pages. */
499 		if (nobjsize < object->size)
500 			vm_object_page_remove(object, nobjsize, object->size,
501 			    0);
502 
503 		/* Toss pages from swap. */
504 		if (object->type == OBJT_SWAP)
505 			swap_pager_freespace(object, nobjsize, delta);
506 
507 		/* Free the swap accounted for the shm. */
508 		swap_release_by_cred(delta, object->cred);
509 		object->charge -= delta;
510 	} else {
511 		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
512 			return (EPERM);
513 
514 		/* Try to reserve additional swap space. */
515 		delta = IDX_TO_OFF(nobjsize - object->size);
516 		if (!swap_reserve_by_cred(delta, object->cred))
517 			return (ENOMEM);
518 		object->charge += delta;
519 	}
520 	shmfd->shm_size = length;
521 	mtx_lock(&shm_timestamp_lock);
522 	vfs_timestamp(&shmfd->shm_ctime);
523 	shmfd->shm_mtime = shmfd->shm_ctime;
524 	mtx_unlock(&shm_timestamp_lock);
525 	object->size = nobjsize;
526 	return (0);
527 }
528 
529 int
530 shm_dotruncate(struct shmfd *shmfd, off_t length)
531 {
532 	void *rl_cookie;
533 	int error;
534 
535 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
536 	    &shmfd->shm_mtx);
537 	VM_OBJECT_WLOCK(shmfd->shm_object);
538 	error = shm_dotruncate_locked(shmfd, length, rl_cookie);
539 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
540 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
541 	return (error);
542 }
543 
544 /*
545  * shmfd object management including creation and reference counting
546  * routines.
547  */
548 struct shmfd *
549 shm_alloc(struct ucred *ucred, mode_t mode)
550 {
551 	struct shmfd *shmfd;
552 
553 	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
554 	shmfd->shm_size = 0;
555 	shmfd->shm_uid = ucred->cr_uid;
556 	shmfd->shm_gid = ucred->cr_gid;
557 	shmfd->shm_mode = mode;
558 	shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
559 	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
560 	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
561 	shmfd->shm_object->pg_color = 0;
562 	VM_OBJECT_WLOCK(shmfd->shm_object);
563 	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
564 	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
565 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
566 	vfs_timestamp(&shmfd->shm_birthtime);
567 	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
568 	    shmfd->shm_birthtime;
569 	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
570 	refcount_init(&shmfd->shm_refs, 1);
571 	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
572 	rangelock_init(&shmfd->shm_rl);
573 #ifdef MAC
574 	mac_posixshm_init(shmfd);
575 	mac_posixshm_create(ucred, shmfd);
576 #endif
577 
578 	return (shmfd);
579 }
580 
581 struct shmfd *
582 shm_hold(struct shmfd *shmfd)
583 {
584 
585 	refcount_acquire(&shmfd->shm_refs);
586 	return (shmfd);
587 }
588 
589 void
590 shm_drop(struct shmfd *shmfd)
591 {
592 
593 	if (refcount_release(&shmfd->shm_refs)) {
594 #ifdef MAC
595 		mac_posixshm_destroy(shmfd);
596 #endif
597 		rangelock_destroy(&shmfd->shm_rl);
598 		mtx_destroy(&shmfd->shm_mtx);
599 		vm_object_deallocate(shmfd->shm_object);
600 		free(shmfd, M_SHMFD);
601 	}
602 }
603 
604 /*
605  * Determine if the credentials have sufficient permissions for a
606  * specified combination of FREAD and FWRITE.
607  */
608 int
609 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
610 {
611 	accmode_t accmode;
612 	int error;
613 
614 	accmode = 0;
615 	if (flags & FREAD)
616 		accmode |= VREAD;
617 	if (flags & FWRITE)
618 		accmode |= VWRITE;
619 	mtx_lock(&shm_timestamp_lock);
620 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
621 	    accmode, ucred, NULL);
622 	mtx_unlock(&shm_timestamp_lock);
623 	return (error);
624 }
625 
626 /*
627  * Dictionary management.  We maintain an in-kernel dictionary to map
628  * paths to shmfd objects.  We use the FNV hash on the path to store
629  * the mappings in a hash table.
630  */
631 static void
632 shm_init(void *arg)
633 {
634 
635 	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
636 	sx_init(&shm_dict_lock, "shm dictionary");
637 	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
638 	new_unrhdr64(&shm_ino_unr, 1);
639 	shm_dev_ino = devfs_alloc_cdp_inode();
640 	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
641 }
642 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
643 
644 static struct shmfd *
645 shm_lookup(char *path, Fnv32_t fnv)
646 {
647 	struct shm_mapping *map;
648 
649 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
650 		if (map->sm_fnv != fnv)
651 			continue;
652 		if (strcmp(map->sm_path, path) == 0)
653 			return (map->sm_shmfd);
654 	}
655 
656 	return (NULL);
657 }
658 
659 static void
660 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
661 {
662 	struct shm_mapping *map;
663 
664 	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
665 	map->sm_path = path;
666 	map->sm_fnv = fnv;
667 	map->sm_shmfd = shm_hold(shmfd);
668 	shmfd->shm_path = path;
669 	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
670 }
671 
672 static int
673 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
674 {
675 	struct shm_mapping *map;
676 	int error;
677 
678 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
679 		if (map->sm_fnv != fnv)
680 			continue;
681 		if (strcmp(map->sm_path, path) == 0) {
682 #ifdef MAC
683 			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
684 			if (error)
685 				return (error);
686 #endif
687 			error = shm_access(map->sm_shmfd, ucred,
688 			    FREAD | FWRITE);
689 			if (error)
690 				return (error);
691 			map->sm_shmfd->shm_path = NULL;
692 			LIST_REMOVE(map, sm_link);
693 			shm_drop(map->sm_shmfd);
694 			free(map->sm_path, M_SHMFD);
695 			free(map, M_SHMFD);
696 			return (0);
697 		}
698 	}
699 
700 	return (ENOENT);
701 }
702 
703 int
704 kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
705     struct filecaps *fcaps, int initial_seals)
706 {
707 	struct filedesc *fdp;
708 	struct shmfd *shmfd;
709 	struct file *fp;
710 	char *path;
711 	const char *pr_path;
712 	void *rl_cookie;
713 	size_t pr_pathlen;
714 	Fnv32_t fnv;
715 	mode_t cmode;
716 	int fd, error;
717 
718 #ifdef CAPABILITY_MODE
719 	/*
720 	 * shm_open(2) is only allowed for anonymous objects.
721 	 */
722 	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
723 		return (ECAPMODE);
724 #endif
725 
726 	AUDIT_ARG_FFLAGS(flags);
727 	AUDIT_ARG_MODE(mode);
728 
729 	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
730 		return (EINVAL);
731 
732 	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
733 		return (EINVAL);
734 
735 	/*
736 	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
737 	 * If the decision is made later to allow additional seals, care must be
738 	 * taken below to ensure that the seals are properly set if the shmfd
739 	 * already existed -- this currently assumes that only F_SEAL_SEAL can
740 	 * be set and doesn't take further precautions to ensure the validity of
741 	 * the seals being added with respect to current mappings.
742 	 */
743 	if ((initial_seals & ~F_SEAL_SEAL) != 0)
744 		return (EINVAL);
745 
746 	fdp = td->td_proc->p_fd;
747 	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;
748 
749 	/*
750 	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
751 	 * by POSIX.  We allow it to be unset here so that an in-kernel
752 	 * interface may be written as a thin layer around shm, optionally not
753 	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
754 	 * in sys_shm_open() to keep this implementation compliant.
755 	 */
756 	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
757 	if (error)
758 		return (error);
759 
760 	/* A SHM_ANON path pointer creates an anonymous object. */
761 	if (userpath == SHM_ANON) {
762 		/* A read-only anonymous object is pointless. */
763 		if ((flags & O_ACCMODE) == O_RDONLY) {
764 			fdclose(td, fp, fd);
765 			fdrop(fp, td);
766 			return (EINVAL);
767 		}
768 		shmfd = shm_alloc(td->td_ucred, cmode);
769 		shmfd->shm_seals = initial_seals;
770 	} else {
771 		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
772 		pr_path = td->td_ucred->cr_prison->pr_path;
773 
774 		/* Construct a full pathname for jailed callers. */
775 		pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
776 		    : strlcpy(path, pr_path, MAXPATHLEN);
777 		error = copyinstr(userpath, path + pr_pathlen,
778 		    MAXPATHLEN - pr_pathlen, NULL);
779 #ifdef KTRACE
780 		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
781 			ktrnamei(path);
782 #endif
783 		/* Require paths to start with a '/' character. */
784 		if (error == 0 && path[pr_pathlen] != '/')
785 			error = EINVAL;
786 		if (error) {
787 			fdclose(td, fp, fd);
788 			fdrop(fp, td);
789 			free(path, M_SHMFD);
790 			return (error);
791 		}
792 
793 		AUDIT_ARG_UPATH1_CANON(path);
794 		fnv = fnv_32_str(path, FNV1_32_INIT);
795 		sx_xlock(&shm_dict_lock);
796 		shmfd = shm_lookup(path, fnv);
797 		if (shmfd == NULL) {
798 			/* Object does not yet exist; create it if requested. */
799 			if (flags & O_CREAT) {
800 #ifdef MAC
801 				error = mac_posixshm_check_create(td->td_ucred,
802 				    path);
803 				if (error == 0) {
804 #endif
805 					shmfd = shm_alloc(td->td_ucred, cmode);
806 					shmfd->shm_seals = initial_seals;
807 					shm_insert(path, fnv, shmfd);
808 #ifdef MAC
809 				}
810 #endif
811 			} else {
812 				free(path, M_SHMFD);
813 				error = ENOENT;
814 			}
815 		} else {
816 			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
817 			    &shmfd->shm_mtx);
818 
819 			/*
820 			 * Unlike F_ADD_SEALS, kern_shm_open() must not fail
821 			 * when asked to set a seal that is already set on the
822 			 * object.  shm_open(2) always passes F_SEAL_SEAL here
823 			 * to preserve the historical behavior where the
824 			 * underlying file could not be sealed, so erroring
825 			 * out would break that guarantee.
826 			 */
827 			initial_seals &= ~shmfd->shm_seals;
828 
829 			/*
830 			 * Object already exists, obtain a new
831 			 * reference if requested and permitted.
832 			 */
833 			free(path, M_SHMFD);
834 
835 			/*
836 			 * initial_seals can't add seals if F_SEAL_SEAL has
837 			 * already been set on the object; if F_SEAL_SEAL is
838 			 * set, we removed it from initial_seals above.  This
839 			 * check is currently redundant, since we only allow
840 			 * setting F_SEAL_SEAL at creation time, but it is
841 			 * cheap and reduces the effort required to allow
842 			 * additional seals later.
843 			 */
844 			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
845 			    initial_seals != 0)
846 				error = EPERM;
847 			else if ((flags & (O_CREAT | O_EXCL)) ==
848 			    (O_CREAT | O_EXCL))
849 				error = EEXIST;
850 			else {
851 #ifdef MAC
852 				error = mac_posixshm_check_open(td->td_ucred,
853 				    shmfd, FFLAGS(flags & O_ACCMODE));
854 				if (error == 0)
855 #endif
856 				error = shm_access(shmfd, td->td_ucred,
857 				    FFLAGS(flags & O_ACCMODE));
858 			}
859 
860 			/*
861 			 * Truncate the file back to zero length if
862 			 * O_TRUNC was specified and the object was
863 			 * opened with read/write.
864 			 */
865 			if (error == 0 &&
866 			    (flags & (O_ACCMODE | O_TRUNC)) ==
867 			    (O_RDWR | O_TRUNC)) {
868 				VM_OBJECT_WLOCK(shmfd->shm_object);
869 #ifdef MAC
870 				error = mac_posixshm_check_truncate(
871 					td->td_ucred, fp->f_cred, shmfd);
872 				if (error == 0)
873 #endif
874 					error = shm_dotruncate_locked(shmfd, 0,
875 					    rl_cookie);
876 				VM_OBJECT_WUNLOCK(shmfd->shm_object);
877 			}
878 			if (error == 0) {
879 				/*
880 				 * Currently we only allow F_SEAL_SEAL to be
881 				 * set initially.  As noted above, this would
882 				 * need to be reworked should that change.
883 				 */
884 				shmfd->shm_seals |= initial_seals;
885 				shm_hold(shmfd);
886 			}
887 			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
888 			    &shmfd->shm_mtx);
889 		}
890 		sx_xunlock(&shm_dict_lock);
891 
892 		if (error) {
893 			fdclose(td, fp, fd);
894 			fdrop(fp, td);
895 			return (error);
896 		}
897 	}
898 
899 	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
900 
901 	td->td_retval[0] = fd;
902 	fdrop(fp, td);
903 
904 	return (0);
905 }
906 
907 /* System calls. */
908 #ifdef COMPAT_FREEBSD12
909 int
910 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
911 {
912 
913 	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, uap->mode,
914 	    NULL, F_SEAL_SEAL));
915 }
916 #endif
917 
918 int
919 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
920 {
921 	char *path;
922 	const char *pr_path;
923 	size_t pr_pathlen;
924 	Fnv32_t fnv;
925 	int error;
926 
927 	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
928 	pr_path = td->td_ucred->cr_prison->pr_path;
929 	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
930 	    : strlcpy(path, pr_path, MAXPATHLEN);
931 	error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
932 	    NULL);
933 	if (error) {
934 		free(path, M_TEMP);
935 		return (error);
936 	}
937 #ifdef KTRACE
938 	if (KTRPOINT(curthread, KTR_NAMEI))
939 		ktrnamei(path);
940 #endif
941 	AUDIT_ARG_UPATH1_CANON(path);
942 	fnv = fnv_32_str(path, FNV1_32_INIT);
943 	sx_xlock(&shm_dict_lock);
944 	error = shm_remove(path, fnv, td->td_ucred);
945 	sx_xunlock(&shm_dict_lock);
946 	free(path, M_TEMP);
947 
948 	return (error);
949 }
950 
951 int
952 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
953 {
954 	char *path_from = NULL, *path_to = NULL;
955 	Fnv32_t fnv_from, fnv_to;
956 	struct shmfd *fd_from;
957 	struct shmfd *fd_to;
958 	int error;
959 	int flags;
960 
961 	flags = uap->flags;
962 
963 	/*
964 	 * Make sure the user passed only valid flags.
965 	 * If you add a new flag, please add a new term here.
966 	 */
967 	if ((flags & ~(
968 	    SHM_RENAME_NOREPLACE |
969 	    SHM_RENAME_EXCHANGE
970 	    )) != 0) {
971 		error = EINVAL;
972 		goto out;
973 	}
974 
975 	/*
976 	 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
977 	 * force the user to choose one or the other.
978 	 */
979 	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
980 	    (flags & SHM_RENAME_EXCHANGE) != 0) {
981 		error = EINVAL;
982 		goto out;
983 	}
984 
985 	/*
986 	 * Allocate from malloc zone M_SHMFD, since this path may end up
987 	 * freed later from M_SHMFD if we end up doing an insert.
988 	 */
989 	path_from = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
990 	error = copyinstr(uap->path_from, path_from, MAXPATHLEN, NULL);
991 	if (error)
992 		goto out;
993 
994 	path_to = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
995 	error = copyinstr(uap->path_to, path_to, MAXPATHLEN, NULL);
996 	if (error)
997 		goto out;
998 
999 	/* Rename with identical from/to paths is a no-op. */
1000 	if (strncmp(path_from, path_to, MAXPATHLEN) == 0)
1001 		goto out;
1002 
1003 	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1004 	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1005 
1006 	sx_xlock(&shm_dict_lock);
1007 
1008 	fd_from = shm_lookup(path_from, fnv_from);
1009 	if (fd_from == NULL) {
1010 		sx_xunlock(&shm_dict_lock);
1011 		error = ENOENT;
1012 		goto out;
1013 	}
1014 
1015 	fd_to = shm_lookup(path_to, fnv_to);
1016 	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1017 		sx_xunlock(&shm_dict_lock);
1018 		error = EEXIST;
1019 		goto out;
1020 	}
1021 
1022 	/*
1023 	 * Take a reference unconditionally to prevent shm_remove from
1024 	 * invalidating the 'from' shm's state.
1025 	 */
1026 	shm_hold(fd_from);
1027 	error = shm_remove(path_from, fnv_from, td->td_ucred);
1028 
1029 	/*
1030 	 * ENOENT here means one of our assumptions failed (e.g. the lock
1031 	 * did not protect us), since the lookup above just succeeded.
1032 	 */
1033 	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1034 	    path_from));
1035 	if (error) {
1036 		shm_drop(fd_from);
1037 		sx_xunlock(&shm_dict_lock);
1038 		goto out;
1039 	}
1040 
1041 	/*
1042 	 * If we are exchanging, we need to ensure the shm_remove below
1043 	 * doesn't invalidate the dest shm's state.
1044 	 */
1045 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1046 		shm_hold(fd_to);
1047 
1048 	/*
1049 	 * NOTE: if path_to is not already in the hash, that is fine; it
1050 	 * simply means there is nothing at path_to to unlink, which is
1051 	 * the ENOENT case.
1052 	 *
1053 	 * If we somehow lack access to unlink the shm at path_to, but
1054 	 * had it for the shm at path_from, then relink the shm to
1055 	 * path_from and abort with EACCES.
1056 	 *
1057 	 * Any other error is unexpected; relink and abort the operation
1058 	 * as well.
1059 	 */
1060 	error = shm_remove(path_to, fnv_to, td->td_ucred);
1061 	if (error && error != ENOENT) {
1062 		shm_insert(path_from, fnv_from, fd_from);
1063 		shm_drop(fd_from);
1064 		/* Don't free path_from now, since the hash references it */
1065 		path_from = NULL;
1066 		sx_xunlock(&shm_dict_lock);
1067 		goto out;
1068 	}
1069 
1070 	shm_insert(path_to, fnv_to, fd_from);
1071 
1072 	/* Don't free path_to now, since the hash references it */
1073 	path_to = NULL;
1074 
1075 	/* We kept a ref when we removed, and incremented again in insert */
1076 	shm_drop(fd_from);
1077 #ifdef DEBUG
1078 	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1079 	    fd_from->shm_refs));
1080 #endif
1081 
1082 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1083 		shm_insert(path_from, fnv_from, fd_to);
1084 		path_from = NULL;
1085 		shm_drop(fd_to);
1086 #ifdef DEBUG
1087 		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1088 		    fd_to->shm_refs));
1089 #endif
1090 	}
1091 
1092 	error = 0;
1093 	sx_xunlock(&shm_dict_lock);
1094 
1095 out:
1096 	if (path_from != NULL)
1097 		free(path_from, M_SHMFD);
1098 	if (path_to != NULL)
1099 		free(path_to, M_SHMFD);
1100 	return (error);
1101 }
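
/*
 * A brief illustration of the flag semantics above (hedged sketch;
 * "/a" and "/b" are arbitrary names):
 *
 *	shm_rename("/a", "/b", 0);			// replace any "/b"
 *	shm_rename("/a", "/b", SHM_RENAME_NOREPLACE);	// EEXIST if "/b" exists
 *	shm_rename("/a", "/b", SHM_RENAME_EXCHANGE);	// swap, if both exist
 */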
1102 
1103 int
1104 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1105     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1106     vm_ooffset_t foff, struct thread *td)
1107 {
1108 	struct shmfd *shmfd;
1109 	vm_prot_t maxprot;
1110 	int error;
1111 	bool writecnt;
1112 	void *rl_cookie;
1113 
1114 	shmfd = fp->f_data;
1115 	maxprot = VM_PROT_NONE;
1116 
1117 	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1118 	    &shmfd->shm_mtx);
1119 	/* FREAD should always be set. */
1120 	if ((fp->f_flag & FREAD) != 0)
1121 		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1122 	if ((fp->f_flag & FWRITE) != 0)
1123 		maxprot |= VM_PROT_WRITE;
1124 
1125 	writecnt = (flags & MAP_SHARED) != 0 && (prot & VM_PROT_WRITE) != 0;
1126 
1127 	if (writecnt && (shmfd->shm_seals & F_SEAL_WRITE) != 0) {
1128 		error = EPERM;
1129 		goto out;
1130 	}
1131 
1132 	/* Don't permit shared writable mappings on read-only descriptors. */
1133 	if (writecnt && (maxprot & VM_PROT_WRITE) == 0) {
1134 		error = EACCES;
1135 		goto out;
1136 	}
1137 	maxprot &= cap_maxprot;
1138 
1139 	/* See comment in vn_mmap(). */
1140 	if (
1141 #ifdef _LP64
1142 	    objsize > OFF_MAX ||
1143 #endif
1144 	    foff < 0 || foff > OFF_MAX - objsize) {
1145 		error = EINVAL;
1146 		goto out;
1147 	}
1148 
1149 #ifdef MAC
1150 	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1151 	if (error != 0)
1152 		goto out;
1153 #endif
1154 
1155 	mtx_lock(&shm_timestamp_lock);
1156 	vfs_timestamp(&shmfd->shm_atime);
1157 	mtx_unlock(&shm_timestamp_lock);
1158 	vm_object_reference(shmfd->shm_object);
1159 
1160 	if (writecnt)
1161 		vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
1162 	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1163 	    shmfd->shm_object, foff, writecnt, td);
1164 	if (error != 0) {
1165 		if (writecnt)
1166 			vm_pager_release_writecount(shmfd->shm_object, 0,
1167 			    objsize);
1168 		vm_object_deallocate(shmfd->shm_object);
1169 	}
1170 out:
1171 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1172 	return (error);
1173 }
1174 
1175 static int
1176 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1177     struct thread *td)
1178 {
1179 	struct shmfd *shmfd;
1180 	int error;
1181 
1182 	error = 0;
1183 	shmfd = fp->f_data;
1184 	mtx_lock(&shm_timestamp_lock);
1185 	/*
1186 	 * SUSv4 says that the x bits of permission need not be affected.
1187 	 * Be consistent with our shm_open() there.
1188 	 */
1189 #ifdef MAC
1190 	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1191 	if (error != 0)
1192 		goto out;
1193 #endif
1194 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
1195 	    shmfd->shm_gid, VADMIN, active_cred, NULL);
1196 	if (error != 0)
1197 		goto out;
1198 	shmfd->shm_mode = mode & ACCESSPERMS;
1199 out:
1200 	mtx_unlock(&shm_timestamp_lock);
1201 	return (error);
1202 }
1203 
1204 static int
1205 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1206     struct thread *td)
1207 {
1208 	struct shmfd *shmfd;
1209 	int error;
1210 
1211 	error = 0;
1212 	shmfd = fp->f_data;
1213 	mtx_lock(&shm_timestamp_lock);
1214 #ifdef MAC
1215 	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1216 	if (error != 0)
1217 		goto out;
1218 #endif
1219 	if (uid == (uid_t)-1)
1220 		uid = shmfd->shm_uid;
1221 	if (gid == (gid_t)-1)
1222 		gid = shmfd->shm_gid;
1223 	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1224 	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1225 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1226 		goto out;
1227 	shmfd->shm_uid = uid;
1228 	shmfd->shm_gid = gid;
1229 out:
1230 	mtx_unlock(&shm_timestamp_lock);
1231 	return (error);
1232 }
1233 
1234 /*
1235  * Helper routines to allow the backing object of a shared memory file
1236  * descriptor to be mapped in the kernel.
1237  */
1238 int
1239 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1240 {
1241 	struct shmfd *shmfd;
1242 	vm_offset_t kva, ofs;
1243 	vm_object_t obj;
1244 	int rv;
1245 
1246 	if (fp->f_type != DTYPE_SHM)
1247 		return (EINVAL);
1248 	shmfd = fp->f_data;
1249 	obj = shmfd->shm_object;
1250 	VM_OBJECT_WLOCK(obj);
1251 	/*
1252 	 * XXXRW: This validation is probably insufficient, and subject to
1253 	 * sign errors.  It should be fixed.
1254 	 */
1255 	if (offset >= shmfd->shm_size ||
1256 	    offset + size > round_page(shmfd->shm_size)) {
1257 		VM_OBJECT_WUNLOCK(obj);
1258 		return (EINVAL);
1259 	}
1260 
1261 	shmfd->shm_kmappings++;
1262 	vm_object_reference_locked(obj);
1263 	VM_OBJECT_WUNLOCK(obj);
1264 
1265 	/* Map the object into the kernel_map and wire it. */
1266 	kva = vm_map_min(kernel_map);
1267 	ofs = offset & PAGE_MASK;
1268 	offset = trunc_page(offset);
1269 	size = round_page(size + ofs);
1270 	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1271 	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1272 	    VM_PROT_READ | VM_PROT_WRITE, 0);
1273 	if (rv == KERN_SUCCESS) {
1274 		rv = vm_map_wire(kernel_map, kva, kva + size,
1275 		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1276 		if (rv == KERN_SUCCESS) {
1277 			*memp = (void *)(kva + ofs);
1278 			return (0);
1279 		}
1280 		vm_map_remove(kernel_map, kva, kva + size);
1281 	} else
1282 		vm_object_deallocate(obj);
1283 
1284 	/* On failure, drop our mapping reference. */
1285 	VM_OBJECT_WLOCK(obj);
1286 	shmfd->shm_kmappings--;
1287 	VM_OBJECT_WUNLOCK(obj);
1288 
1289 	return (vm_mmap_to_errno(rv));
1290 }
1291 
1292 /*
1293  * We require the caller to unmap the entire entry.  This allows us to
1294  * safely decrement shm_kmappings when a mapping is removed.
1295  */
1296 int
1297 shm_unmap(struct file *fp, void *mem, size_t size)
1298 {
1299 	struct shmfd *shmfd;
1300 	vm_map_entry_t entry;
1301 	vm_offset_t kva, ofs;
1302 	vm_object_t obj;
1303 	vm_pindex_t pindex;
1304 	vm_prot_t prot;
1305 	boolean_t wired;
1306 	vm_map_t map;
1307 	int rv;
1308 
1309 	if (fp->f_type != DTYPE_SHM)
1310 		return (EINVAL);
1311 	shmfd = fp->f_data;
1312 	kva = (vm_offset_t)mem;
1313 	ofs = kva & PAGE_MASK;
1314 	kva = trunc_page(kva);
1315 	size = round_page(size + ofs);
1316 	map = kernel_map;
1317 	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1318 	    &obj, &pindex, &prot, &wired);
1319 	if (rv != KERN_SUCCESS)
1320 		return (EINVAL);
1321 	if (entry->start != kva || entry->end != kva + size) {
1322 		vm_map_lookup_done(map, entry);
1323 		return (EINVAL);
1324 	}
1325 	vm_map_lookup_done(map, entry);
1326 	if (obj != shmfd->shm_object)
1327 		return (EINVAL);
1328 	vm_map_remove(map, kva, kva + size);
1329 	VM_OBJECT_WLOCK(obj);
1330 	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1331 	shmfd->shm_kmappings--;
1332 	VM_OBJECT_WUNLOCK(obj);
1333 	return (0);
1334 }
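
/*
 * A sketch, under the assumption of an in-kernel caller holding a
 * DTYPE_SHM file pointer fp, of how these two helpers pair up:
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, off, &mem);	// maps and wires pages
 *	if (error == 0) {
 *		// ... access len bytes at mem ...
 *		error = shm_unmap(fp, mem, len);// must span the entry
 *	}
 *
 * While the mapping exists, shm_kmappings is nonzero and
 * shm_dotruncate() refuses to shrink the object (EBUSY).
 */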
1335 
1336 static int
1337 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1338 {
1339 	const char *path, *pr_path;
1340 	size_t pr_pathlen;
1341 	bool visible;
1342 
1343 	sx_assert(&shm_dict_lock, SA_LOCKED);
1344 	kif->kf_type = KF_TYPE_SHM;
1345 	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1346 	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1347 	if (shmfd->shm_path != NULL) {
1348 		path = shmfd->shm_path;
1349 		pr_path = curthread->td_ucred->cr_prison->pr_path;
1350 		if (strcmp(pr_path, "/") != 0) {
1351 			/* Return the jail-rooted pathname. */
1352 			pr_pathlen = strlen(pr_path);
1353 			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1354 			    path[pr_pathlen] == '/';
1355 			if (list && !visible)
1356 				return (EPERM);
1357 			if (visible)
1358 				path += pr_pathlen;
1359 		}
1360 		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1361 	}
1364 	return (0);
1365 }
1366 
1367 static int
1368 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1369     struct filedesc *fdp __unused)
1370 {
1371 	int res;
1372 
1373 	sx_slock(&shm_dict_lock);
1374 	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1375 	sx_sunlock(&shm_dict_lock);
1376 	return (res);
1377 }
1378 
1379 static int
1380 shm_add_seals(struct file *fp, int seals)
1381 {
1382 	struct shmfd *shmfd;
1383 	void *rl_cookie;
1384 	vm_ooffset_t writemappings;
1385 	int error, nseals;
1386 
1387 	error = 0;
1388 	shmfd = fp->f_data;
1389 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1390 	    &shmfd->shm_mtx);
1391 
1392 	/* Even already-set seals should result in EPERM. */
1393 	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1394 		error = EPERM;
1395 		goto out;
1396 	}
1397 	nseals = seals & ~shmfd->shm_seals;
1398 	if ((nseals & F_SEAL_WRITE) != 0) {
1399 		/*
1400 		 * The rangelock above prevents writable mappings from being
1401 		 * added after we've started applying seals.  The RLOCK here
1402 		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
1403 		 * writemappings will be done without a rangelock.
1404 		 */
1405 		VM_OBJECT_RLOCK(shmfd->shm_object);
1406 		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1407 		VM_OBJECT_RUNLOCK(shmfd->shm_object);
1408 		/* kmappings are also writable */
1409 		if (writemappings > 0) {
1410 			error = EBUSY;
1411 			goto out;
1412 		}
1413 	}
1414 	shmfd->shm_seals |= nseals;
1415 out:
1416 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1417 	return (error);
1418 }
1419 
1420 static int
1421 shm_get_seals(struct file *fp, int *seals)
1422 {
1423 	struct shmfd *shmfd;
1424 
1425 	shmfd = fp->f_data;
1426 	*seals = shmfd->shm_seals;
1427 	return (0);
1428 }
1429 
1430 static int
1431 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
1432 {
1433 	struct shm_mapping *shmm;
1434 	struct sbuf sb;
1435 	struct kinfo_file kif;
1436 	u_long i;
1437 	ssize_t curlen;
1438 	int error, error2;
1439 
1440 	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
1441 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
1442 	curlen = 0;
1443 	error = 0;
1444 	sx_slock(&shm_dict_lock);
1445 	for (i = 0; i < shm_hash + 1; i++) {
1446 		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
1447 			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
1448 			    &kif, true);
1449 			if (error == EPERM)
1450 				continue;
1451 			if (error != 0)
1452 				break;
1453 			pack_kinfo(&kif);
1454 			if (req->oldptr != NULL &&
1455 			    kif.kf_structsize + curlen > req->oldlen)
1456 				break;
1457 			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
1458 			    0 : ENOMEM;
1459 			if (error != 0)
1460 				break;
1461 			curlen += kif.kf_structsize;
1462 		}
1463 	}
1464 	sx_sunlock(&shm_dict_lock);
1465 	error2 = sbuf_finish(&sb);
1466 	sbuf_delete(&sb);
1467 	return (error != 0 ? error : error2);
1468 }
1469 
1470 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
1471     CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
1472     NULL, 0, sysctl_posix_shm_list, "",
1473     "POSIX SHM list");
1474 
1475 int
1476 kern_shm_open2(struct thread *td, const char *path, int flags, mode_t mode,
1477     int shmflags, const char *name __unused)
1478 {
1479 	int initial_seals;
1480 
1481 	if ((shmflags & ~SHM_ALLOW_SEALING) != 0)
1482 		return (EINVAL);
1483 
1484 	initial_seals = F_SEAL_SEAL;
1485 	if ((shmflags & SHM_ALLOW_SEALING) != 0)
1486 		initial_seals &= ~F_SEAL_SEAL;
1487 	return (kern_shm_open(td, path, flags, mode, NULL, initial_seals));
1488 }
1489 
1490 /*
1491  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
1492  * caller, and libc will enforce it for the traditional shm_open() call.  This
1493  * allows other consumers, like memfd_create(), to opt in to CLOEXEC.  This
1494  * interface also includes a 'name' argument that is currently unused, but could
1495  * potentially be exported later via some interface for debugging purposes.
1496  * From the kernel's perspective, it is optional.  Individual consumers like
1497  * memfd_create() may require it in order to be compatible with other systems
1498  * implementing the same function.
1499  */
1500 int
1501 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
1502 {
1503 
1504 	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
1505 	    uap->shmflags, uap->name));
1506 }
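
/*
 * A hedged userland illustration of the sealing path enabled above: on
 * FreeBSD, memfd_create(3) in libc is layered over shm_open2(), so a
 * caller that opted in to sealing may later apply seals via fcntl(2):
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *
 *	ftruncate(fd, 4096);
 *	// ... fill the object ...
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL);
 *
 * Afterwards shm_dotruncate() rejects any size change and
 * shm_add_seals() rejects further seals (EPERM).
 */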
1507