xref: /freebsd/sys/kern/uipc_shm.c (revision e7dd6e9402cae324c2190a70081854c3c8a8feb9)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
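
/*
 * Illustrative userland usage (a sketch, not part of this file): a
 * typical consumer creates an object, sizes it with ftruncate(2), and
 * maps it with mmap(2).  Only standard POSIX interfaces are used below;
 * error handling is omitted for brevity.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	p[0] = 1;	(visible to all other mappers of "/example")
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/example");
 */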

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;
static fo_get_seals_t	shm_get_seals;
static fo_add_seals_t	shm_add_seals;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_get_seals = shm_get_seals,
	.fo_add_seals = shm_add_seals,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock is held here when this path is
	 * reached from tmpfs, it is nonetheless safe to sleep waiting
	 * for a free page.  The pageout daemon does not need to acquire
	 * that lock to page out obj's pages because obj is an OBJT_SWAP
	 * type object.
	 */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
		    obj, idx, rv);
		return (EIO);
	}
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		vm_pager_page_unswapped(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_unwire(m, PQ_ACTIVE);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}
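
/*
 * Note: L_SET, L_INCR and L_XTND are the historical BSD names for
 * SEEK_SET, SEEK_CUR and SEEK_END, so the switch above implements the
 * usual lseek(2) semantics on a shm descriptor, e.g. (sketch):
 *
 *	off_t end = lseek(fd, 0, SEEK_END);	(yields shm_size)
 *
 * Offsets beyond shm_size are rejected with EINVAL rather than being
 * allowed to create a sparse tail, unlike regular files.
 */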

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}
	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0)
		error = EPERM;
	else
		error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{

	switch (com) {
	case FIONBIO:
	case FIOASYNC:
		/*
		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
		 * just like it would on an unlinked regular file.
		 */
		return (0);
	default:
		return (ENOTTY);
	}
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
	if (length == shmfd->shm_size)
		return (0);
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);

		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "shmtrc"))
					goto retry;
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx,
				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
				if (m == NULL)
					goto retry;
				rv = vm_pager_get_pages(object, &m, 1, NULL,
				    NULL);
				if (rv == VM_PAGER_OK) {
					/*
					 * Since the page was not resident,
					 * and therefore not recently
					 * accessed, immediately enqueue it
					 * for asynchronous laundering.  The
					 * current operation is not regarded
					 * as an access.
					 */
					vm_page_launder(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);
		/* Free the swap accounted for the shm object. */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
			return (EPERM);

		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred))
			return (ENOMEM);
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	return (0);
}
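
/*
 * Worked example (illustrative): shrinking a 10000-byte object to 6000
 * bytes with PAGE_SIZE 4096.  The object spans 3 pages (indices 0-2)
 * and nobjsize = OFF_TO_IDX(6000 + PAGE_MASK) = 2.  Since
 * base = 6000 & PAGE_MASK = 1904 is non-zero, bytes 1904..4095 of the
 * page at index 1 (file offsets 6000..8191) are zeroed; the page at
 * index 2 is removed, its swap space freed, and the swap charge is
 * reduced by delta = IDX_TO_OFF(3 - 2) = 4096 bytes.
 */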

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);
	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_dotruncate_locked(shmfd, length, rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
	shmfd->shm_object->pg_color = 0;
	VM_OBJECT_WLOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}
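
/*
 * For example, shm_remove() below insists on both read and write
 * permission before allowing an unlink:
 *
 *	error = shm_access(map->sm_shmfd, ucred, FREAD | FWRITE);
 *
 * which maps to a vaccess() check for VREAD | VWRITE against the
 * object's owner, group, and mode.
 */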

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
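
/*
 * For instance (illustrative), a path such as "/example" is hashed once
 * at lookup/insert time:
 *
 *	fnv = fnv_32_str("/example", FNV1_32_INIT);
 *
 * and all entries whose hash shares the low bits (fnv & shm_hash) land
 * in the bucket SHM_HASH(fnv); strcmp() on sm_path resolves collisions.
 */
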
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

int
kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
    struct filecaps *fcaps, int initial_seals)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	const char *pr_path;
	void *rl_cookie;
	size_t pr_pathlen;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	/*
	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
	 * If the decision is made later to allow additional seals, care must be
	 * taken below to ensure that the seals are properly set if the shmfd
	 * already existed -- this currently assumes that only F_SEAL_SEAL can
	 * be set and doesn't take further precautions to ensure the validity of
	 * the seals being added with respect to current mappings.
	 */
	if ((initial_seals & ~F_SEAL_SEAL) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

	/*
	 * A shm object created via shm_open(2) must always have O_CLOEXEC
	 * set, as mandated by POSIX.  We allow it to be unset here so that
	 * an in-kernel interface may be written as a thin layer around shm,
	 * optionally not setting CLOEXEC.  For shm_open(2) itself, O_CLOEXEC
	 * is set unconditionally in sys_shm_open() to keep this
	 * implementation compliant.
	 */
	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
		shmfd->shm_seals = initial_seals;
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		pr_path = td->td_ucred->cr_prison->pr_path;

		/* Construct a full pathname for jailed callers. */
		pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
		    : strlcpy(path, pr_path, MAXPATHLEN);
		error = copyinstr(userpath, path + pr_pathlen,
		    MAXPATHLEN - pr_pathlen, NULL);
#ifdef KTRACE
		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(path);
#endif
		/* Require paths to start with a '/' character. */
		if (error == 0 && path[pr_pathlen] != '/')
			error = EINVAL;
		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		AUDIT_ARG_UPATH1_CANON(path);
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shmfd->shm_seals = initial_seals;
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
			    &shmfd->shm_mtx);

			/*
			 * kern_shm_open() likely shouldn't ever error out on
			 * trying to set a seal that already exists, unlike
			 * F_ADD_SEALS.  This would break terribly as
			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
			 * historical behavior where the underlying file could
			 * not be sealed.
			 */
			initial_seals &= ~shmfd->shm_seals;

			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);

			/*
			 * initial_seals cannot set additional seals if
			 * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL
			 * is set, then it has already been removed from
			 * initial_seals above.  This check is currently
			 * redundant, since we only allow setting F_SEAL_SEAL
			 * at creation time, but it is cheap and reduces the
			 * effort required to allow additional seals later.
			 */
			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
			    initial_seals != 0)
				error = EPERM;
			else if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					error = shm_dotruncate_locked(shmfd, 0,
					    rl_cookie);
				VM_OBJECT_WUNLOCK(shmfd->shm_object);
			}
			if (error == 0) {
				/*
				 * Currently we only allow F_SEAL_SEAL to be
				 * set initially.  As noted above, this would
				 * need to be reworked should that change.
				 */
				shmfd->shm_seals |= initial_seals;
				shm_hold(shmfd);
			}
			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
			    &shmfd->shm_mtx);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, uap->mode,
	    NULL, F_SEAL_SEAL));
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	Fnv32_t fnv;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
	    : strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
	    NULL);
	if (error) {
		free(path, M_TEMP);
		return (error);
	}
#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif
	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_TEMP);

	return (error);
}

int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;
	bool writecnt;
	void *rl_cookie;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
	    &shmfd->shm_mtx);
	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
	if ((fp->f_flag & FWRITE) != 0)
		maxprot |= VM_PROT_WRITE;

	writecnt = (flags & MAP_SHARED) != 0 && (prot & VM_PROT_WRITE) != 0;

	if (writecnt && (shmfd->shm_seals & F_SEAL_WRITE) != 0) {
		error = EPERM;
		goto out;
	}

	/* Don't permit shared writable mappings on read-only descriptors. */
	if (writecnt && (maxprot & VM_PROT_WRITE) == 0) {
		error = EACCES;
		goto out;
	}
	maxprot &= cap_maxprot;

	/* See comment in vn_mmap(). */
	if (
#ifdef _LP64
	    objsize > OFF_MAX ||
#endif
	    foff < 0 || foff > OFF_MAX - objsize) {
		error = EINVAL;
		goto out;
	}

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		goto out;
#endif

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	if (writecnt)
		vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, writecnt, td);
	if (error != 0) {
		if (writecnt)
			vm_pager_release_writecount(shmfd->shm_object, 0,
			    objsize);
		vm_object_deallocate(shmfd->shm_object);
	}
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open() implementation.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}
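
/*
 * Sketch of an in-kernel consumer of the pair above (illustrative; the
 * surrounding code is hypothetical, error handling abbreviated):
 *
 *	void *mem;
 *	error = shm_map(fp, len, 0, &mem);
 *	if (error == 0) {
 *		(access len bytes at mem)
 *		error = shm_unmap(fp, mem, len);
 *	}
 *
 * While such a mapping exists, shm_kmappings is non-zero and
 * shm_dotruncate_locked() refuses to shrink the object (EBUSY).
 */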

static int
shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
{
	const char *path, *pr_path;
	size_t pr_pathlen;
	bool visible;

	sx_assert(&shm_dict_lock, SA_LOCKED);
	kif->kf_type = KF_TYPE_SHM;
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		path = shmfd->shm_path;
		pr_path = curthread->td_ucred->cr_prison->pr_path;
		if (strcmp(pr_path, "/") != 0) {
			/* Return the jail-rooted pathname. */
			pr_pathlen = strlen(pr_path);
			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
			    path[pr_pathlen] == '/';
			if (list && !visible)
				return (EPERM);
			if (visible)
				path += pr_pathlen;
		}
		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
	}
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp __unused)
{
	int res;

	sx_slock(&shm_dict_lock);
	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
	sx_sunlock(&shm_dict_lock);
	return (res);
}

static int
shm_add_seals(struct file *fp, int seals)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	vm_ooffset_t writemappings;
	int error, nseals;

	error = 0;
	shmfd = fp->f_data;
	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);

	/* Even already-set seals should result in EPERM. */
	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
		error = EPERM;
		goto out;
	}
	nseals = seals & ~shmfd->shm_seals;
	if ((nseals & F_SEAL_WRITE) != 0) {
		/*
		 * The rangelock above prevents writable mappings from being
		 * added after we've started applying seals.  The RLOCK here
		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
		 * writemappings will be done without a rangelock.
		 */
		VM_OBJECT_RLOCK(shmfd->shm_object);
		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
		VM_OBJECT_RUNLOCK(shmfd->shm_object);
		/* kmappings are also writable */
		if (writemappings > 0) {
			error = EBUSY;
			goto out;
		}
	}
	shmfd->shm_seals |= nseals;
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

static int
shm_get_seals(struct file *fp, int *seals)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	*seals = shmfd->shm_seals;
	return (0);
}
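
/*
 * Example userland sealing flow (illustrative, error handling omitted):
 * a sealable object is created with memfd_create(2), which clears the
 * default F_SEAL_SEAL via SHM_ALLOW_SEALING, and is then locked down
 * with fcntl(2):
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL);
 *
 * At this point fcntl(fd, F_GET_SEALS) reports all three seals, and any
 * further F_ADD_SEALS call fails with EPERM in shm_add_seals() above.
 */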

static int
sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
{
	struct shm_mapping *shmm;
	struct sbuf sb;
	struct kinfo_file kif;
	u_long i;
	ssize_t curlen;
	int error, error2;

	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
	curlen = 0;
	error = 0;
	sx_slock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
			    &kif, true);
			if (error == EPERM)
				continue;
			if (error != 0)
				break;
			pack_kinfo(&kif);
			if (req->oldptr != NULL &&
			    kif.kf_structsize + curlen > req->oldlen)
				break;
			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
			    0 : ENOMEM;
			if (error != 0)
				break;
			curlen += kif.kf_structsize;
		}
	}
	sx_sunlock(&shm_dict_lock);
	error2 = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error != 0 ? error : error2);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
    CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
    NULL, 0, sysctl_posix_shm_list, "",
    "POSIX SHM list");

int
kern_shm_open2(struct thread *td, const char *path, int flags, mode_t mode,
    int shmflags, const char *name __unused)
{
	int initial_seals;

	if ((shmflags & ~SHM_ALLOW_SEALING) != 0)
		return (EINVAL);

	initial_seals = F_SEAL_SEAL;
	if ((shmflags & SHM_ALLOW_SEALING) != 0)
		initial_seals &= ~F_SEAL_SEAL;
	return (kern_shm_open(td, path, flags, mode, NULL, initial_seals));
}

/*
 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
 * caller, and libc will enforce it for the traditional shm_open() call.  This
 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
 * interface also includes a 'name' argument that is currently unused, but could
 * potentially be exported later via some interface for debugging purposes.
 * From the kernel's perspective, it is optional.  Individual consumers like
 * memfd_create() may require it in order to be compatible with other systems
 * implementing the same function.
 */
int
sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
{

	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
	    uap->shmflags, uap->name));
}
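
/*
 * Sketch of how libc wrappers might sit on top of this syscall
 * (illustrative; the actual implementations live in libc, not here):
 *
 *	int
 *	shm_open(const char *path, int flags, mode_t mode)
 *	{
 *		return (shm_open2(path, flags | O_CLOEXEC, mode, 0, NULL));
 *	}
 *
 *	int
 *	memfd_create(const char *name, unsigned int mfd_flags)
 *	{
 *		(translate MFD_* flags to oflags/shmflags, then:)
 *		return (shm_open2(SHM_ANON, O_RDWR | oflags, 0,
 *		    (mfd_flags & MFD_ALLOW_SEALING) ? SHM_ALLOW_SEALING : 0,
 *		    name));
 *	}
 */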