xref: /freebsd/sys/kern/uipc_shm.c (revision 7ec4b29b086b67b8c778d2bc50c90e9e0655c03e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5  * Copyright 2020 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by BAE Systems, the University of
9  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11  * Computing (TC) research program.
12  *
13  * Portions of this software were developed by Konstantin Belousov
14  * under sponsorship from the FreeBSD Foundation.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 /*
39  * Support for shared swap-backed anonymous memory objects via
40  * shm_open(2), shm_rename(2), and shm_unlink(2).
41  * While most of the implementation is here, vm_mmap.c contains
42  * mapping logic changes.
43  *
44  * posixshmcontrol(1) allows users to inspect the state of the memory
45  * objects.  The per-uid swap resource limit controls the total
46  * amount of memory that a user can consume for anonymous objects,
47  * including shared ones.
48  */
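/*
 * Example (illustrative userspace sketch, not part of this file):
 * a typical shm_open(2) sequence that creates a named object, sizes
 * it with ftruncate(2), and maps it.  The path "/myshm" is
 * hypothetical; SHM_ANON may be passed instead to get an anonymous
 * object.
 *
 *	int fd = shm_open("/myshm", O_CREAT | O_RDWR, 0600);
 *	if (fd < 0)
 *		err(1, "shm_open");
 *	if (ftruncate(fd, 4096) != 0)
 *		err(1, "ftruncate");
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 */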
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 #include "opt_capsicum.h"
54 #include "opt_ktrace.h"
55 
56 #include <sys/param.h>
57 #include <sys/capsicum.h>
58 #include <sys/conf.h>
59 #include <sys/fcntl.h>
60 #include <sys/file.h>
61 #include <sys/filedesc.h>
62 #include <sys/filio.h>
63 #include <sys/fnv_hash.h>
64 #include <sys/kernel.h>
65 #include <sys/limits.h>
66 #include <sys/uio.h>
67 #include <sys/signal.h>
68 #include <sys/jail.h>
69 #include <sys/ktrace.h>
70 #include <sys/lock.h>
71 #include <sys/malloc.h>
72 #include <sys/mman.h>
73 #include <sys/mutex.h>
74 #include <sys/priv.h>
75 #include <sys/proc.h>
76 #include <sys/refcount.h>
77 #include <sys/resourcevar.h>
78 #include <sys/rwlock.h>
79 #include <sys/sbuf.h>
80 #include <sys/stat.h>
81 #include <sys/syscallsubr.h>
82 #include <sys/sysctl.h>
83 #include <sys/sysproto.h>
84 #include <sys/systm.h>
85 #include <sys/sx.h>
86 #include <sys/time.h>
87 #include <sys/vmmeter.h>
88 #include <sys/vnode.h>
89 #include <sys/unistd.h>
90 #include <sys/user.h>
91 
92 #include <security/audit/audit.h>
93 #include <security/mac/mac_framework.h>
94 
95 #include <vm/vm.h>
96 #include <vm/vm_param.h>
97 #include <vm/pmap.h>
98 #include <vm/vm_extern.h>
99 #include <vm/vm_map.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_object.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_pageout.h>
104 #include <vm/vm_pager.h>
105 #include <vm/swap_pager.h>
106 
107 struct shm_mapping {
108 	char		*sm_path;
109 	Fnv32_t		sm_fnv;
110 	struct shmfd	*sm_shmfd;
111 	LIST_ENTRY(shm_mapping) sm_link;
112 };
113 
114 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
115 static LIST_HEAD(, shm_mapping) *shm_dictionary;
116 static struct sx shm_dict_lock;
117 static struct mtx shm_timestamp_lock;
118 static u_long shm_hash;
119 static struct unrhdr64 shm_ino_unr;
120 static dev_t shm_dev_ino;
121 
122 #define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])
123 
124 static void	shm_init(void *arg);
125 static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
126 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
127 static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
128 static void	shm_doremove(struct shm_mapping *map);
129 static int	shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
130     void *rl_cookie);
131 static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
132     void *rl_cookie);
133 static int	shm_copyin_path(struct thread *td, const char *userpath_in,
134     char **path_out);
135 static int	shm_deallocate(struct shmfd *shmfd, off_t *offset,
136     off_t *length, int flags);
137 
138 static fo_rdwr_t	shm_read;
139 static fo_rdwr_t	shm_write;
140 static fo_truncate_t	shm_truncate;
141 static fo_ioctl_t	shm_ioctl;
142 static fo_stat_t	shm_stat;
143 static fo_close_t	shm_close;
144 static fo_chmod_t	shm_chmod;
145 static fo_chown_t	shm_chown;
146 static fo_seek_t	shm_seek;
147 static fo_fill_kinfo_t	shm_fill_kinfo;
148 static fo_mmap_t	shm_mmap;
149 static fo_get_seals_t	shm_get_seals;
150 static fo_add_seals_t	shm_add_seals;
151 static fo_fallocate_t	shm_fallocate;
152 static fo_fspacectl_t	shm_fspacectl;
153 
154 /* File descriptor operations. */
155 struct fileops shm_ops = {
156 	.fo_read = shm_read,
157 	.fo_write = shm_write,
158 	.fo_truncate = shm_truncate,
159 	.fo_ioctl = shm_ioctl,
160 	.fo_poll = invfo_poll,
161 	.fo_kqfilter = invfo_kqfilter,
162 	.fo_stat = shm_stat,
163 	.fo_close = shm_close,
164 	.fo_chmod = shm_chmod,
165 	.fo_chown = shm_chown,
166 	.fo_sendfile = vn_sendfile,
167 	.fo_seek = shm_seek,
168 	.fo_fill_kinfo = shm_fill_kinfo,
169 	.fo_mmap = shm_mmap,
170 	.fo_get_seals = shm_get_seals,
171 	.fo_add_seals = shm_add_seals,
172 	.fo_fallocate = shm_fallocate,
173 	.fo_fspacectl = shm_fspacectl,
174 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
175 };
176 
177 FEATURE(posix_shm, "POSIX shared memory");
178 
179 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
180     "");
181 
182 static int largepage_reclaim_tries = 1;
183 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
184     CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
185     "Number of contig reclaims before giving up for default alloc policy");
186 
187 static int
188 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
189 {
190 	vm_page_t m;
191 	vm_pindex_t idx;
192 	size_t tlen;
193 	int error, offset, rv;
194 
195 	idx = OFF_TO_IDX(uio->uio_offset);
196 	offset = uio->uio_offset & PAGE_MASK;
197 	tlen = MIN(PAGE_SIZE - offset, len);
198 
199 	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
200 	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
201 	if (rv == VM_PAGER_OK)
202 		goto found;
203 
204 	/*
205 	 * Read I/O without either a corresponding resident page or swap
206 	 * page: use zero_region.  This is intended to avoid instantiating
207 	 * pages on read from a sparse region.
208 	 */
209 	VM_OBJECT_WLOCK(obj);
210 	m = vm_page_lookup(obj, idx);
211 	if (uio->uio_rw == UIO_READ && m == NULL &&
212 	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
213 		VM_OBJECT_WUNLOCK(obj);
214 		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
215 	}
216 
217 	/*
218 	 * Although the tmpfs vnode lock is held here, it is
219 	 * nonetheless safe to sleep waiting for a free page.  The
220 	 * pageout daemon does not need to acquire the tmpfs vnode
221 	 * lock to page out tobj's pages because tobj is an OBJT_SWAP
222 	 * type object.
223 	 */
224 	rv = vm_page_grab_valid(&m, obj, idx,
225 	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
226 	if (rv != VM_PAGER_OK) {
227 		VM_OBJECT_WUNLOCK(obj);
228 		if (bootverbose) {
229 			printf("uiomove_object: vm_obj %p idx %jd "
230 			    "pager error %d\n", obj, idx, rv);
231 		}
232 		return (EIO);
233 	}
234 	VM_OBJECT_WUNLOCK(obj);
235 
236 found:
237 	error = uiomove_fromphys(&m, offset, tlen, uio);
238 	if (uio->uio_rw == UIO_WRITE && error == 0)
239 		vm_page_set_dirty(m);
240 	vm_page_activate(m);
241 	vm_page_sunbusy(m);
242 
243 	return (error);
244 }
245 
246 int
247 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
248 {
249 	ssize_t resid;
250 	size_t len;
251 	int error;
252 
253 	error = 0;
254 	while ((resid = uio->uio_resid) > 0) {
255 		if (obj_size <= uio->uio_offset)
256 			break;
257 		len = MIN(obj_size - uio->uio_offset, resid);
258 		if (len == 0)
259 			break;
260 		error = uiomove_object_page(obj, len, uio);
261 		if (error != 0 || resid == uio->uio_resid)
262 			break;
263 	}
264 	return (error);
265 }
266 
267 static u_long count_largepages[MAXPAGESIZES];
268 
269 static int
270 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
271     int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
272 {
273 	vm_page_t m __diagused;
274 	int psind;
275 
276 	psind = object->un_pager.phys.data_val;
277 	if (psind == 0 || pidx >= object->size)
278 		return (VM_PAGER_FAIL);
279 	*first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
280 
281 	/*
282 	 * We only busy the first page in the superpage run.  It is
283 	 * useless to busy the whole run since we only remove a full
284 	 * superpage, and it takes too long to busy e.g. 512 * 512 ==
285 	 * 262144 pages constituting a 1G amd64 superpage.
286 	 */
287 	m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
288 	MPASS(m != NULL);
289 
290 	*last = *first + atop(pagesizes[psind]) - 1;
291 	return (VM_PAGER_OK);
292 }
293 
294 static boolean_t
295 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
296     int *before, int *after)
297 {
298 	int psind;
299 
300 	psind = object->un_pager.phys.data_val;
301 	if (psind == 0 || pindex >= object->size)
302 		return (FALSE);
303 	if (before != NULL) {
304 		*before = pindex - rounddown2(pindex, pagesizes[psind] /
305 		    PAGE_SIZE);
306 	}
307 	if (after != NULL) {
308 		*after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
309 		    pindex;
310 	}
311 	return (TRUE);
312 }
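/*
 * Worked example for the arithmetic above, assuming a hypothetical
 * psind whose superpage is 2M (pagesizes[psind] / PAGE_SIZE == 512
 * on amd64): for pindex == 700, *before == 700 - rounddown2(700, 512)
 * == 188 and *after == roundup2(700, 512) - 700 == 324, i.e. the
 * distances from pindex to the start and end of its superpage run.
 */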
313 
314 static void
315 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
316     vm_ooffset_t foff, struct ucred *cred)
317 {
318 }
319 
320 static void
321 shm_largepage_phys_dtor(vm_object_t object)
322 {
323 	int psind;
324 
325 	psind = object->un_pager.phys.data_val;
326 	if (psind != 0) {
327 		atomic_subtract_long(&count_largepages[psind],
328 		    object->size / (pagesizes[psind] / PAGE_SIZE));
329 		vm_wire_sub(object->size);
330 	} else {
331 		KASSERT(object->size == 0,
332 		    ("largepage phys obj %p not initialized but size %#jx > 0",
333 		    object, (uintmax_t)object->size));
334 	}
335 }
336 
337 static const struct phys_pager_ops shm_largepage_phys_ops = {
338 	.phys_pg_populate =	shm_largepage_phys_populate,
339 	.phys_pg_haspage =	shm_largepage_phys_haspage,
340 	.phys_pg_ctor =		shm_largepage_phys_ctor,
341 	.phys_pg_dtor =		shm_largepage_phys_dtor,
342 };
343 
344 bool
345 shm_largepage(struct shmfd *shmfd)
346 {
347 	return (shmfd->shm_object->type == OBJT_PHYS);
348 }
349 
350 static int
351 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
352 {
353 	struct shmfd *shmfd;
354 	off_t foffset;
355 	int error;
356 
357 	shmfd = fp->f_data;
358 	foffset = foffset_lock(fp, 0);
359 	error = 0;
360 	switch (whence) {
361 	case L_INCR:
362 		if (foffset < 0 ||
363 		    (offset > 0 && foffset > OFF_MAX - offset)) {
364 			error = EOVERFLOW;
365 			break;
366 		}
367 		offset += foffset;
368 		break;
369 	case L_XTND:
370 		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
371 			error = EOVERFLOW;
372 			break;
373 		}
374 		offset += shmfd->shm_size;
375 		break;
376 	case L_SET:
377 		break;
378 	default:
379 		error = EINVAL;
380 	}
381 	if (error == 0) {
382 		if (offset < 0 || offset > shmfd->shm_size)
383 			error = EINVAL;
384 		else
385 			td->td_uretoff.tdu_off = offset;
386 	}
387 	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
388 	return (error);
389 }
390 
391 static int
392 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
393     int flags, struct thread *td)
394 {
395 	struct shmfd *shmfd;
396 	void *rl_cookie;
397 	int error;
398 
399 	shmfd = fp->f_data;
400 #ifdef MAC
401 	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
402 	if (error)
403 		return (error);
404 #endif
405 	foffset_lock_uio(fp, uio, flags);
406 	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
407 	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
408 	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
409 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
410 	foffset_unlock_uio(fp, uio, flags);
411 	return (error);
412 }
413 
414 static int
415 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
416     int flags, struct thread *td)
417 {
418 	struct shmfd *shmfd;
419 	void *rl_cookie;
420 	int error;
421 	off_t size;
422 
423 	shmfd = fp->f_data;
424 #ifdef MAC
425 	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
426 	if (error)
427 		return (error);
428 #endif
429 	if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
430 		return (EINVAL);
431 	foffset_lock_uio(fp, uio, flags);
432 	if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
433 		/*
434 		 * Overflow is only an error if we're supposed to expand on
435 		 * write.  Otherwise, we'll just truncate the write to the
436 		 * size of the file, which can only grow up to OFF_MAX.
437 		 */
438 		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
439 			foffset_unlock_uio(fp, uio, flags);
440 			return (EFBIG);
441 		}
442 
443 		size = shmfd->shm_size;
444 	} else {
445 		size = uio->uio_offset + uio->uio_resid;
446 	}
447 	if ((flags & FOF_OFFSET) == 0) {
448 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
449 		    &shmfd->shm_mtx);
450 	} else {
451 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
452 		    size, &shmfd->shm_mtx);
453 	}
454 	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
455 		error = EPERM;
456 	} else {
457 		error = 0;
458 		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
459 		    size > shmfd->shm_size) {
460 			error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
461 		}
462 		if (error == 0)
463 			error = uiomove_object(shmfd->shm_object,
464 			    shmfd->shm_size, uio);
465 	}
466 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
467 	foffset_unlock_uio(fp, uio, flags);
468 	return (error);
469 }
470 
471 static int
472 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
473     struct thread *td)
474 {
475 	struct shmfd *shmfd;
476 #ifdef MAC
477 	int error;
478 #endif
479 
480 	shmfd = fp->f_data;
481 #ifdef MAC
482 	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
483 	if (error)
484 		return (error);
485 #endif
486 	return (shm_dotruncate(shmfd, length));
487 }
488 
489 static int
490 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
491     struct thread *td)
492 {
493 	struct shmfd *shmfd;
494 	struct shm_largepage_conf *conf;
495 	void *rl_cookie;
496 
497 	shmfd = fp->f_data;
498 	switch (com) {
499 	case FIONBIO:
500 	case FIOASYNC:
501 		/*
502 		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
503 		 * just like it would on an unlinked regular file
504 		 */
505 		return (0);
506 	case FIOSSHMLPGCNF:
507 		if (!shm_largepage(shmfd))
508 			return (ENOTTY);
509 		conf = data;
510 		if (shmfd->shm_lp_psind != 0 &&
511 		    conf->psind != shmfd->shm_lp_psind)
512 			return (EINVAL);
513 		if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
514 		    pagesizes[conf->psind] == 0)
515 			return (EINVAL);
516 		if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
517 		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
518 		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
519 			return (EINVAL);
520 
521 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
522 		    &shmfd->shm_mtx);
523 		shmfd->shm_lp_psind = conf->psind;
524 		shmfd->shm_lp_alloc_policy = conf->alloc_policy;
525 		shmfd->shm_object->un_pager.phys.data_val = conf->psind;
526 		rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
527 		return (0);
528 	case FIOGSHMLPGCNF:
529 		if (!shm_largepage(shmfd))
530 			return (ENOTTY);
531 		conf = data;
532 		rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX,
533 		    &shmfd->shm_mtx);
534 		conf->psind = shmfd->shm_lp_psind;
535 		conf->alloc_policy = shmfd->shm_lp_alloc_policy;
536 		rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
537 		return (0);
538 	default:
539 		return (ENOTTY);
540 	}
541 }
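/*
 * Example (illustrative userspace sketch): configuring a largepage
 * shmfd through FIOSSHMLPGCNF.  psind == 1 is an assumption (the
 * first non-4K page size, e.g. 2M on amd64); the supported libc
 * wrapper for this sequence is shm_create_largepage(3).
 *
 *	struct shm_largepage_conf conf;
 *	int fd;
 *
 *	fd = shm_open2(SHM_ANON, O_RDWR, 0600, SHM_LARGEPAGE, NULL);
 *	if (fd < 0)
 *		err(1, "shm_open2");
 *	memset(&conf, 0, sizeof(conf));
 *	conf.psind = 1;
 *	conf.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
 *	if (ioctl(fd, FIOSSHMLPGCNF, &conf) == -1)
 *		err(1, "FIOSSHMLPGCNF");
 */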
542 
543 static int
544 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
545 {
546 	struct shmfd *shmfd;
547 #ifdef MAC
548 	int error;
549 #endif
550 
551 	shmfd = fp->f_data;
552 
553 #ifdef MAC
554 	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
555 	if (error)
556 		return (error);
557 #endif
558 
559 	/*
560 	 * Attempt to return sane-ish values for fstat() on a memory file
561 	 * descriptor.
562 	 */
563 	bzero(sb, sizeof(*sb));
564 	sb->st_blksize = PAGE_SIZE;
565 	sb->st_size = shmfd->shm_size;
566 	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
567 	mtx_lock(&shm_timestamp_lock);
568 	sb->st_atim = shmfd->shm_atime;
569 	sb->st_ctim = shmfd->shm_ctime;
570 	sb->st_mtim = shmfd->shm_mtime;
571 	sb->st_birthtim = shmfd->shm_birthtime;
572 	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
573 	sb->st_uid = shmfd->shm_uid;
574 	sb->st_gid = shmfd->shm_gid;
575 	mtx_unlock(&shm_timestamp_lock);
576 	sb->st_dev = shm_dev_ino;
577 	sb->st_ino = shmfd->shm_ino;
578 	sb->st_nlink = shmfd->shm_object->ref_count;
579 	if (shm_largepage(shmfd))
580 		sb->st_blocks = shmfd->shm_object->size /
581 		    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
582 	return (0);
583 }
584 
585 static int
586 shm_close(struct file *fp, struct thread *td)
587 {
588 	struct shmfd *shmfd;
589 
590 	shmfd = fp->f_data;
591 	fp->f_data = NULL;
592 	shm_drop(shmfd);
593 
594 	return (0);
595 }
596 
597 static int
598 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out) {
599 	int error;
600 	char *path;
601 	const char *pr_path;
602 	size_t pr_pathlen;
603 
604 	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
605 	pr_path = td->td_ucred->cr_prison->pr_path;
606 
607 	/* Construct a full pathname for jailed callers. */
608 	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
609 	    strlcpy(path, pr_path, MAXPATHLEN);
610 	error = copyinstr(userpath_in, path + pr_pathlen,
611 	    MAXPATHLEN - pr_pathlen, NULL);
612 	if (error != 0)
613 		goto out;
614 
615 #ifdef KTRACE
616 	if (KTRPOINT(curthread, KTR_NAMEI))
617 		ktrnamei(path);
618 #endif
619 
620 	/* Require paths to start with a '/' character. */
621 	if (path[pr_pathlen] != '/') {
622 		error = EINVAL;
623 		goto out;
624 	}
625 
626 	*path_out = path;
627 
628 out:
629 	if (error != 0)
630 		free(path, M_SHMFD);
631 
632 	return (error);
633 }
634 
635 static int
636 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
637     int end)
638 {
639 	vm_page_t m;
640 	int rv;
641 
642 	VM_OBJECT_ASSERT_WLOCKED(object);
643 	KASSERT(base >= 0, ("%s: base %d", __func__, base));
644 	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
645 	    end));
646 
647 retry:
648 	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
649 	if (m != NULL) {
650 		MPASS(vm_page_all_valid(m));
651 	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
652 		m = vm_page_alloc(object, idx,
653 		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
654 		if (m == NULL)
655 			goto retry;
656 		vm_object_pip_add(object, 1);
657 		VM_OBJECT_WUNLOCK(object);
658 		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
659 		VM_OBJECT_WLOCK(object);
660 		vm_object_pip_wakeup(object);
661 		if (rv == VM_PAGER_OK) {
662 			/*
663 			 * Since the page was not resident, and therefore not
664 			 * recently accessed, immediately enqueue it for
665 			 * asynchronous laundering.  The current operation is
666 			 * not regarded as an access.
667 			 */
668 			vm_page_launder(m);
669 		} else {
670 			vm_page_free(m);
671 			VM_OBJECT_WUNLOCK(object);
672 			return (EIO);
673 		}
674 	}
675 	if (m != NULL) {
676 		pmap_zero_page_area(m, base, end - base);
677 		KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
678 		    __func__, m));
679 		vm_page_set_dirty(m);
680 		vm_page_xunbusy(m);
681 	}
682 
683 	return (0);
684 }
685 
686 static int
687 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
688 {
689 	vm_object_t object;
690 	vm_pindex_t nobjsize;
691 	vm_ooffset_t delta;
692 	int base, error;
693 
694 	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
695 	object = shmfd->shm_object;
696 	VM_OBJECT_ASSERT_WLOCKED(object);
697 	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
698 	if (length == shmfd->shm_size)
699 		return (0);
700 	nobjsize = OFF_TO_IDX(length + PAGE_MASK);
701 
702 	/* Are we shrinking?  If so, trim the end. */
703 	if (length < shmfd->shm_size) {
704 		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
705 			return (EPERM);
706 
707 		/*
708 		 * Disallow any requests to shrink the size if this
709 		 * object is mapped into the kernel.
710 		 */
711 		if (shmfd->shm_kmappings > 0)
712 			return (EBUSY);
713 
714 		/*
715 		 * Zero the truncated part of the last page.
716 		 */
717 		base = length & PAGE_MASK;
718 		if (base != 0) {
719 			error = shm_partial_page_invalidate(object,
720 			    OFF_TO_IDX(length), base, PAGE_SIZE);
721 			if (error)
722 				return (error);
723 		}
724 		delta = IDX_TO_OFF(object->size - nobjsize);
725 
726 		if (nobjsize < object->size)
727 			vm_object_page_remove(object, nobjsize, object->size,
728 			    0);
729 
730 		/* Free the swap accounted for shm */
731 		swap_release_by_cred(delta, object->cred);
732 		object->charge -= delta;
733 	} else {
734 		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
735 			return (EPERM);
736 
737 		/* Try to reserve additional swap space. */
738 		delta = IDX_TO_OFF(nobjsize - object->size);
739 		if (!swap_reserve_by_cred(delta, object->cred))
740 			return (ENOMEM);
741 		object->charge += delta;
742 	}
743 	shmfd->shm_size = length;
744 	mtx_lock(&shm_timestamp_lock);
745 	vfs_timestamp(&shmfd->shm_ctime);
746 	shmfd->shm_mtime = shmfd->shm_ctime;
747 	mtx_unlock(&shm_timestamp_lock);
748 	object->size = nobjsize;
749 	return (0);
750 }
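/*
 * Worked example for shm_dotruncate_locked(), assuming 4K pages:
 * shrinking a 3-page object (shm_size == 12288) to length == 4097
 * gives nobjsize == OFF_TO_IDX(4097 + PAGE_MASK) == 2 and
 * base == (4097 & PAGE_MASK) == 1, so bytes 1..4095 of page index 1
 * are zeroed, page index 2 is removed, and delta == IDX_TO_OFF(1) ==
 * 4096 bytes of swap reservation are released.
 */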
751 
752 static int
753 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
754 {
755 	vm_object_t object;
756 	vm_page_t m;
757 	vm_pindex_t newobjsz;
758 	vm_pindex_t oldobjsz __unused;
759 	int aflags, error, i, psind, try;
760 
761 	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
762 	object = shmfd->shm_object;
763 	VM_OBJECT_ASSERT_WLOCKED(object);
764 	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
765 
766 	oldobjsz = object->size;
767 	newobjsz = OFF_TO_IDX(length);
768 	if (length == shmfd->shm_size)
769 		return (0);
770 	psind = shmfd->shm_lp_psind;
771 	if (psind == 0 && length != 0)
772 		return (EINVAL);
773 	if ((length & (pagesizes[psind] - 1)) != 0)
774 		return (EINVAL);
775 
776 	if (length < shmfd->shm_size) {
777 		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
778 			return (EPERM);
779 		if (shmfd->shm_kmappings > 0)
780 			return (EBUSY);
781 		return (ENOTSUP);	/* Pages are unmanaged. */
782 #if 0
783 		vm_object_page_remove(object, newobjsz, oldobjsz, 0);
784 		object->size = newobjsz;
785 		shmfd->shm_size = length;
786 		return (0);
787 #endif
788 	}
789 
790 	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
791 		return (EPERM);
792 
793 	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
794 	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
795 		aflags |= VM_ALLOC_WAITFAIL;
796 	try = 0;
797 
798 	/*
799 	 * Extend shmfd and object, keeping all already fully
800 	 * allocated large pages intact even on error, because the
801 	 * dropped object lock might have allowed them to be mapped.
802 	 */
803 	while (object->size < newobjsz) {
804 		m = vm_page_alloc_contig(object, object->size, aflags,
805 		    pagesizes[psind] / PAGE_SIZE, 0, ~0,
806 		    pagesizes[psind], 0,
807 		    VM_MEMATTR_DEFAULT);
808 		if (m == NULL) {
809 			VM_OBJECT_WUNLOCK(object);
810 			if (shmfd->shm_lp_alloc_policy ==
811 			    SHM_LARGEPAGE_ALLOC_NOWAIT ||
812 			    (shmfd->shm_lp_alloc_policy ==
813 			    SHM_LARGEPAGE_ALLOC_DEFAULT &&
814 			    try >= largepage_reclaim_tries)) {
815 				VM_OBJECT_WLOCK(object);
816 				return (ENOMEM);
817 			}
818 			error = vm_page_reclaim_contig(aflags,
819 			    pagesizes[psind] / PAGE_SIZE, 0, ~0,
820 			    pagesizes[psind], 0) ? 0 :
821 			    vm_wait_intr(object);
822 			if (error != 0) {
823 				VM_OBJECT_WLOCK(object);
824 				return (error);
825 			}
826 			try++;
827 			VM_OBJECT_WLOCK(object);
828 			continue;
829 		}
830 		try = 0;
831 		for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
832 			if ((m[i].flags & PG_ZERO) == 0)
833 				pmap_zero_page(&m[i]);
834 			vm_page_valid(&m[i]);
835 			vm_page_xunbusy(&m[i]);
836 		}
837 		object->size += OFF_TO_IDX(pagesizes[psind]);
838 		shmfd->shm_size += pagesizes[psind];
839 		atomic_add_long(&count_largepages[psind], 1);
840 		vm_wire_add(atop(pagesizes[psind]));
841 	}
842 	return (0);
843 }
844 
845 static int
846 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
847 {
848 	int error;
849 
850 	VM_OBJECT_WLOCK(shmfd->shm_object);
851 	error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
852 	    length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
853 	    rl_cookie);
854 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
855 	return (error);
856 }
857 
858 int
859 shm_dotruncate(struct shmfd *shmfd, off_t length)
860 {
861 	void *rl_cookie;
862 	int error;
863 
864 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
865 	    &shmfd->shm_mtx);
866 	error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
867 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
868 	return (error);
869 }
870 
871 /*
872  * shmfd object management including creation and reference counting
873  * routines.
874  */
875 struct shmfd *
876 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
877 {
878 	struct shmfd *shmfd;
879 
880 	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
881 	shmfd->shm_size = 0;
882 	shmfd->shm_uid = ucred->cr_uid;
883 	shmfd->shm_gid = ucred->cr_gid;
884 	shmfd->shm_mode = mode;
885 	if (largepage) {
886 		shmfd->shm_object = phys_pager_allocate(NULL,
887 		    &shm_largepage_phys_ops, NULL, shmfd->shm_size,
888 		    VM_PROT_DEFAULT, 0, ucred);
889 		shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
890 	} else {
891 		shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
892 		    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
893 	}
894 	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
895 	vfs_timestamp(&shmfd->shm_birthtime);
896 	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
897 	    shmfd->shm_birthtime;
898 	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
899 	refcount_init(&shmfd->shm_refs, 1);
900 	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
901 	rangelock_init(&shmfd->shm_rl);
902 #ifdef MAC
903 	mac_posixshm_init(shmfd);
904 	mac_posixshm_create(ucred, shmfd);
905 #endif
906 
907 	return (shmfd);
908 }
909 
910 struct shmfd *
911 shm_hold(struct shmfd *shmfd)
912 {
913 
914 	refcount_acquire(&shmfd->shm_refs);
915 	return (shmfd);
916 }
917 
918 void
919 shm_drop(struct shmfd *shmfd)
920 {
921 
922 	if (refcount_release(&shmfd->shm_refs)) {
923 #ifdef MAC
924 		mac_posixshm_destroy(shmfd);
925 #endif
926 		rangelock_destroy(&shmfd->shm_rl);
927 		mtx_destroy(&shmfd->shm_mtx);
928 		vm_object_deallocate(shmfd->shm_object);
929 		free(shmfd, M_SHMFD);
930 	}
931 }
932 
933 /*
934  * Determine if the credentials have sufficient permissions for a
935  * specified combination of FREAD and FWRITE.
936  */
937 int
938 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
939 {
940 	accmode_t accmode;
941 	int error;
942 
943 	accmode = 0;
944 	if (flags & FREAD)
945 		accmode |= VREAD;
946 	if (flags & FWRITE)
947 		accmode |= VWRITE;
948 	mtx_lock(&shm_timestamp_lock);
949 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
950 	    accmode, ucred);
951 	mtx_unlock(&shm_timestamp_lock);
952 	return (error);
953 }
954 
955 static void
956 shm_init(void *arg)
957 {
958 	char name[32];
959 	int i;
960 
961 	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
962 	sx_init(&shm_dict_lock, "shm dictionary");
963 	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
964 	new_unrhdr64(&shm_ino_unr, 1);
965 	shm_dev_ino = devfs_alloc_cdp_inode();
966 	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
967 
968 	for (i = 1; i < MAXPAGESIZES; i++) {
969 		if (pagesizes[i] == 0)
970 			break;
971 #define	M	(1024 * 1024)
972 #define	G	(1024 * M)
973 		if (pagesizes[i] >= G)
974 			snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
975 		else if (pagesizes[i] >= M)
976 			snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
977 		else
978 			snprintf(name, sizeof(name), "%lu", pagesizes[i]);
979 #undef G
980 #undef M
981 		SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
982 		    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
983 		    "number of non-transient largepages allocated");
984 	}
985 }
986 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
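/*
 * On a typical amd64 machine with 2M and 1G superpages configured,
 * the loop in shm_init() is expected to surface counters like the
 * following (illustrative output, not captured from a live system):
 *
 *	$ sysctl vm.largepages
 *	vm.largepages.2M: 0
 *	vm.largepages.1G: 0
 */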
987 
988 /*
989  * Remove all shared memory objects that belong to a prison.
990  */
991 void
992 shm_remove_prison(struct prison *pr)
993 {
994 	struct shm_mapping *shmm, *tshmm;
995 	u_long i;
996 
997 	sx_xlock(&shm_dict_lock);
998 	for (i = 0; i < shm_hash + 1; i++) {
999 		LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
1000 			if (shmm->sm_shmfd->shm_object->cred &&
1001 			    shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
1002 				shm_doremove(shmm);
1003 		}
1004 	}
1005 	sx_xunlock(&shm_dict_lock);
1006 }
1007 
1008 /*
1009  * Dictionary management.  We maintain an in-kernel dictionary to map
1010  * paths to shmfd objects.  We use the FNV hash on the path to store
1011  * the mappings in a hash table.
1012  */
1013 static struct shmfd *
1014 shm_lookup(char *path, Fnv32_t fnv)
1015 {
1016 	struct shm_mapping *map;
1017 
1018 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1019 		if (map->sm_fnv != fnv)
1020 			continue;
1021 		if (strcmp(map->sm_path, path) == 0)
1022 			return (map->sm_shmfd);
1023 	}
1024 
1025 	return (NULL);
1026 }
1027 
1028 static void
1029 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1030 {
1031 	struct shm_mapping *map;
1032 
1033 	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1034 	map->sm_path = path;
1035 	map->sm_fnv = fnv;
1036 	map->sm_shmfd = shm_hold(shmfd);
1037 	shmfd->shm_path = path;
1038 	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1039 }
1040 
1041 static int
1042 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1043 {
1044 	struct shm_mapping *map;
1045 	int error;
1046 
1047 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1048 		if (map->sm_fnv != fnv)
1049 			continue;
1050 		if (strcmp(map->sm_path, path) == 0) {
1051 #ifdef MAC
1052 			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1053 			if (error)
1054 				return (error);
1055 #endif
1056 			error = shm_access(map->sm_shmfd, ucred,
1057 			    FREAD | FWRITE);
1058 			if (error)
1059 				return (error);
1060 			shm_doremove(map);
1061 			return (0);
1062 		}
1063 	}
1064 
1065 	return (ENOENT);
1066 }
1067 
1068 static void
1069 shm_doremove(struct shm_mapping *map)
1070 {
1071 	map->sm_shmfd->shm_path = NULL;
1072 	LIST_REMOVE(map, sm_link);
1073 	shm_drop(map->sm_shmfd);
1074 	free(map->sm_path, M_SHMFD);
1075 	free(map, M_SHMFD);
1076 }
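/*
 * Illustrative flow (hedged sketch) tying the dictionary routines
 * together, in the way kern_shm_open2() and sys_shm_unlink() use them
 * below: the path is hashed once and the same hash accompanies every
 * dictionary operation under shm_dict_lock.  shmfd_new stands for a
 * hypothetical freshly allocated shmfd.
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	if (shmfd == NULL)
 *		shm_insert(path, fnv, shmfd_new);
 *	sx_xunlock(&shm_dict_lock);
 */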
1077 
1078 int
1079 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1080     int shmflags, struct filecaps *fcaps, const char *name __unused)
1081 {
1082 	struct pwddesc *pdp;
1083 	struct shmfd *shmfd;
1084 	struct file *fp;
1085 	char *path;
1086 	void *rl_cookie;
1087 	Fnv32_t fnv;
1088 	mode_t cmode;
1089 	int error, fd, initial_seals;
1090 	bool largepage;
1091 
1092 	if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1093 	    SHM_LARGEPAGE)) != 0)
1094 		return (EINVAL);
1095 
1096 	initial_seals = F_SEAL_SEAL;
1097 	if ((shmflags & SHM_ALLOW_SEALING) != 0)
1098 		initial_seals &= ~F_SEAL_SEAL;
1099 
1100 #ifdef CAPABILITY_MODE
1101 	/*
1102 	 * shm_open(2) is only allowed for anonymous objects.
1103 	 */
1104 	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
1105 		return (ECAPMODE);
1106 #endif
1107 
1108 	AUDIT_ARG_FFLAGS(flags);
1109 	AUDIT_ARG_MODE(mode);
1110 
1111 	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1112 		return (EINVAL);
1113 
1114 	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1115 		return (EINVAL);
1116 
1117 	largepage = (shmflags & SHM_LARGEPAGE) != 0;
1118 	if (largepage && !PMAP_HAS_LARGEPAGES)
1119 		return (ENOTTY);
1120 
1121 	/*
1122 	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1123 	 * If the decision is made later to allow additional seals, care must be
1124 	 * taken below to ensure that the seals are properly set if the shmfd
1125 	 * already existed -- this currently assumes that only F_SEAL_SEAL can
1126 	 * be set and doesn't take further precautions to ensure the validity of
1127 	 * the seals being added with respect to current mappings.
1128 	 */
1129 	if ((initial_seals & ~F_SEAL_SEAL) != 0)
1130 		return (EINVAL);
1131 
1132 	pdp = td->td_proc->p_pd;
1133 	cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1134 
1135 	/*
1136 	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
1137 	 * by POSIX.  We allow it to be unset here so that an in-kernel
1138 	 * interface may be written as a thin layer around shm, optionally not
1139 	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
1140 	 * in sys_shm_open() to keep this implementation compliant.
1141 	 */
1142 	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1143 	if (error)
1144 		return (error);
1145 
1146 	/* A SHM_ANON path pointer creates an anonymous object. */
1147 	if (userpath == SHM_ANON) {
1148 		/* A read-only anonymous object is pointless. */
1149 		if ((flags & O_ACCMODE) == O_RDONLY) {
1150 			fdclose(td, fp, fd);
1151 			fdrop(fp, td);
1152 			return (EINVAL);
1153 		}
1154 		shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1155 		shmfd->shm_seals = initial_seals;
1156 		shmfd->shm_flags = shmflags;
1157 	} else {
1158 		error = shm_copyin_path(td, userpath, &path);
1159 		if (error != 0) {
1160 			fdclose(td, fp, fd);
1161 			fdrop(fp, td);
1162 			return (error);
1163 		}
1164 
1165 		AUDIT_ARG_UPATH1_CANON(path);
1166 		fnv = fnv_32_str(path, FNV1_32_INIT);
1167 		sx_xlock(&shm_dict_lock);
1168 		shmfd = shm_lookup(path, fnv);
1169 		if (shmfd == NULL) {
1170 			/* Object does not yet exist, create it if requested. */
1171 			if (flags & O_CREAT) {
1172 #ifdef MAC
1173 				error = mac_posixshm_check_create(td->td_ucred,
1174 				    path);
1175 				if (error == 0) {
1176 #endif
1177 					shmfd = shm_alloc(td->td_ucred, cmode,
1178 					    largepage);
1179 					shmfd->shm_seals = initial_seals;
1180 					shmfd->shm_flags = shmflags;
1181 					shm_insert(path, fnv, shmfd);
1182 #ifdef MAC
1183 				}
1184 #endif
1185 			} else {
1186 				free(path, M_SHMFD);
1187 				error = ENOENT;
1188 			}
1189 		} else {
1190 			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1191 			    &shmfd->shm_mtx);
1192 
1193 			/*
1194 			 * kern_shm_open() likely shouldn't ever error out on
1195 			 * trying to set a seal that already exists, unlike
1196 			 * F_ADD_SEALS.  This would break terribly as
1197 			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
1198 			 * historical behavior where the underlying file could
1199 			 * not be sealed.
1200 			 */
1201 			initial_seals &= ~shmfd->shm_seals;
1202 
1203 			/*
1204 			 * Object already exists, obtain a new
1205 			 * reference if requested and permitted.
1206 			 */
1207 			free(path, M_SHMFD);
1208 
1209 			/*
1210 			 * initial_seals can't set additional seals if we've
1211 			 * already been set F_SEAL_SEAL.  If F_SEAL_SEAL is set,
1212 			 * then we've already removed that one from
1213 			 * initial_seals.  This is currently redundant as we
1214 			 * only allow setting F_SEAL_SEAL at creation time, but
1215 			 * it's cheap to check and decreases the effort required
1216 			 * to allow additional seals.
1217 			 */
1218 			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1219 			    initial_seals != 0)
1220 				error = EPERM;
1221 			else if ((flags & (O_CREAT | O_EXCL)) ==
1222 			    (O_CREAT | O_EXCL))
1223 				error = EEXIST;
1224 			else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1225 				error = EINVAL;
1226 			else {
1227 #ifdef MAC
1228 				error = mac_posixshm_check_open(td->td_ucred,
1229 				    shmfd, FFLAGS(flags & O_ACCMODE));
1230 				if (error == 0)
1231 #endif
1232 				error = shm_access(shmfd, td->td_ucred,
1233 				    FFLAGS(flags & O_ACCMODE));
1234 			}
1235 
1236 			/*
1237 			 * Truncate the file back to zero length if
1238 			 * O_TRUNC was specified and the object was
1239 			 * opened with read/write.
1240 			 */
1241 			if (error == 0 &&
1242 			    (flags & (O_ACCMODE | O_TRUNC)) ==
1243 			    (O_RDWR | O_TRUNC)) {
1244 				VM_OBJECT_WLOCK(shmfd->shm_object);
1245 #ifdef MAC
1246 				error = mac_posixshm_check_truncate(
1247 					td->td_ucred, fp->f_cred, shmfd);
1248 				if (error == 0)
1249 #endif
1250 					error = shm_dotruncate_locked(shmfd, 0,
1251 					    rl_cookie);
1252 				VM_OBJECT_WUNLOCK(shmfd->shm_object);
1253 			}
1254 			if (error == 0) {
1255 				/*
1256 				 * Currently we only allow F_SEAL_SEAL to be
1257 				 * set initially.  As noted above, this would
1258 				 * need to be reworked should that change.
1259 				 */
1260 				shmfd->shm_seals |= initial_seals;
1261 				shm_hold(shmfd);
1262 			}
1263 			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
1264 			    &shmfd->shm_mtx);
1265 		}
1266 		sx_xunlock(&shm_dict_lock);
1267 
1268 		if (error) {
1269 			fdclose(td, fp, fd);
1270 			fdrop(fp, td);
1271 			return (error);
1272 		}
1273 	}
1274 
1275 	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1276 
1277 	td->td_retval[0] = fd;
1278 	fdrop(fp, td);
1279 
1280 	return (0);
1281 }
1282 
1283 /* System calls. */
1284 #ifdef COMPAT_FREEBSD12
1285 int
1286 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1287 {
1288 
1289 	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1290 	    uap->mode, NULL));
1291 }
1292 #endif
1293 
1294 int
1295 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1296 {
1297 	char *path;
1298 	Fnv32_t fnv;
1299 	int error;
1300 
1301 	error = shm_copyin_path(td, uap->path, &path);
1302 	if (error != 0)
1303 		return (error);
1304 
1305 	AUDIT_ARG_UPATH1_CANON(path);
1306 	fnv = fnv_32_str(path, FNV1_32_INIT);
1307 	sx_xlock(&shm_dict_lock);
1308 	error = shm_remove(path, fnv, td->td_ucred);
1309 	sx_xunlock(&shm_dict_lock);
1310 	free(path, M_SHMFD);
1311 
1312 	return (error);
1313 }
1314 
1315 int
1316 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1317 {
1318 	char *path_from = NULL, *path_to = NULL;
1319 	Fnv32_t fnv_from, fnv_to;
1320 	struct shmfd *fd_from;
1321 	struct shmfd *fd_to;
1322 	int error;
1323 	int flags;
1324 
1325 	flags = uap->flags;
1326 	AUDIT_ARG_FFLAGS(flags);
1327 
1328 	/*
1329 	 * Make sure the user passed only valid flags.
1330 	 * If you add a new flag, please add a new term here.
1331 	 */
1332 	if ((flags & ~(
1333 	    SHM_RENAME_NOREPLACE |
1334 	    SHM_RENAME_EXCHANGE
1335 	    )) != 0) {
1336 		error = EINVAL;
1337 		goto out;
1338 	}
1339 
1340 	/*
1341 	 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1342 	 * force the user to choose one or the other.
1343 	 */
1344 	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1345 	    (flags & SHM_RENAME_EXCHANGE) != 0) {
1346 		error = EINVAL;
1347 		goto out;
1348 	}
1349 
1350 	/* Renaming to or from anonymous makes no sense */
1351 	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1352 		error = EINVAL;
1353 		goto out;
1354 	}
1355 
1356 	error = shm_copyin_path(td, uap->path_from, &path_from);
1357 	if (error != 0)
1358 		goto out;
1359 
1360 	error = shm_copyin_path(td, uap->path_to, &path_to);
1361 	if (error != 0)
1362 		goto out;
1363 
1364 	AUDIT_ARG_UPATH1_CANON(path_from);
1365 	AUDIT_ARG_UPATH2_CANON(path_to);
1366 
1367 	/* Rename with from/to equal is a no-op */
1368 	if (strcmp(path_from, path_to) == 0)
1369 		goto out;
1370 
1371 	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1372 	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1373 
1374 	sx_xlock(&shm_dict_lock);
1375 
1376 	fd_from = shm_lookup(path_from, fnv_from);
1377 	if (fd_from == NULL) {
1378 		error = ENOENT;
1379 		goto out_locked;
1380 	}
1381 
1382 	fd_to = shm_lookup(path_to, fnv_to);
1383 	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1384 		error = EEXIST;
1385 		goto out_locked;
1386 	}
1387 
1388 	/*
1389 	 * Unconditionally prevents shm_remove from invalidating the 'from'
1390 	 * shm's state.
1391 	 */
1392 	shm_hold(fd_from);
1393 	error = shm_remove(path_from, fnv_from, td->td_ucred);
1394 
1395 	/*
1396 	 * One of my assumptions failed if ENOENT (e.g. locking didn't
1397 	 * protect us)
1398 	 */
1399 	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1400 	    path_from));
1401 	if (error != 0) {
1402 		shm_drop(fd_from);
1403 		goto out_locked;
1404 	}
1405 
1406 	/*
1407 	 * If we are exchanging, we need to ensure the shm_remove below
1408 	 * doesn't invalidate the dest shm's state.
1409 	 */
1410 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1411 		shm_hold(fd_to);
1412 
1413 	/*
1414 	 * NOTE: if path_to is not already in the hash, c'est la vie;
1415 	 * it simply means we have nothing already at path_to to unlink.
1416 	 * That is the ENOENT case.
1417 	 *
1418 	 * If we somehow don't have access to unlink this guy, but
1419 	 * did for the shm at path_from, then relink the shm to path_from
1420 	 * and abort with EACCES.
1421 	 *
1422 	 * All other errors: that is weird; let's relink and abort the
1423 	 * operation.
1424 	 */
1425 	error = shm_remove(path_to, fnv_to, td->td_ucred);
1426 	if (error != 0 && error != ENOENT) {
1427 		shm_insert(path_from, fnv_from, fd_from);
1428 		shm_drop(fd_from);
1429 		/* Don't free path_from now, since the hash references it */
1430 		path_from = NULL;
1431 		goto out_locked;
1432 	}
1433 
1434 	error = 0;
1435 
1436 	shm_insert(path_to, fnv_to, fd_from);
1437 
1438 	/* Don't free path_to now, since the hash references it */
1439 	path_to = NULL;
1440 
1441 	/* We kept a ref when we removed, and incremented again in insert */
1442 	shm_drop(fd_from);
1443 	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1444 	    fd_from->shm_refs));
1445 
1446 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1447 		shm_insert(path_from, fnv_from, fd_to);
1448 		path_from = NULL;
1449 		shm_drop(fd_to);
1450 		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1451 		    fd_to->shm_refs));
1452 	}
1453 
1454 out_locked:
1455 	sx_xunlock(&shm_dict_lock);
1456 
1457 out:
1458 	free(path_from, M_SHMFD);
1459 	free(path_to, M_SHMFD);
1460 	return (error);
1461 }
1462 
1463 static int
1464 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1465     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1466     vm_ooffset_t foff, struct thread *td)
1467 {
1468 	struct vmspace *vms;
1469 	vm_map_entry_t next_entry, prev_entry;
1470 	vm_offset_t align, mask, maxaddr;
1471 	int docow, error, rv, try;
1472 	bool curmap;
1473 
1474 	if (shmfd->shm_lp_psind == 0)
1475 		return (EINVAL);
1476 
1477 	/* MAP_PRIVATE is disabled */
1478 	if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1479 	    MAP_NOCORE |
1480 #ifdef MAP_32BIT
1481 	    MAP_32BIT |
1482 #endif
1483 	    MAP_ALIGNMENT_MASK)) != 0)
1484 		return (EINVAL);
1485 
1486 	vms = td->td_proc->p_vmspace;
1487 	curmap = map == &vms->vm_map;
1488 	if (curmap) {
1489 		error = kern_mmap_racct_check(td, map, size);
1490 		if (error != 0)
1491 			return (error);
1492 	}
1493 
1494 	docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1495 	docow |= MAP_INHERIT_SHARE;
1496 	if ((flags & MAP_NOCORE) != 0)
1497 		docow |= MAP_DISABLE_COREDUMP;
1498 
1499 	mask = pagesizes[shmfd->shm_lp_psind] - 1;
1500 	if ((foff & mask) != 0)
1501 		return (EINVAL);
1502 	maxaddr = vm_map_max(map);
1503 #ifdef MAP_32BIT
1504 	if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1505 		maxaddr = MAP_32BIT_MAX_ADDR;
1506 #endif
1507 	if (size == 0 || (size & mask) != 0 ||
1508 	    (*addr != 0 && ((*addr & mask) != 0 ||
1509 	    *addr + size < *addr || *addr + size > maxaddr)))
1510 		return (EINVAL);
1511 
1512 	align = flags & MAP_ALIGNMENT_MASK;
1513 	if (align == 0) {
1514 		align = pagesizes[shmfd->shm_lp_psind];
1515 	} else if (align == MAP_ALIGNED_SUPER) {
1516 		if (shmfd->shm_lp_psind != 1)
1517 			return (EINVAL);
1518 		align = pagesizes[1];
1519 	} else {
1520 		align >>= MAP_ALIGNMENT_SHIFT;
1521 		align = 1ULL << align;
1522 		/* Also handles overflow. */
1523 		if (align < pagesizes[shmfd->shm_lp_psind])
1524 			return (EINVAL);
1525 	}
1526 
1527 	vm_map_lock(map);
1528 	if ((flags & MAP_FIXED) == 0) {
1529 		try = 1;
1530 		if (curmap && (*addr == 0 ||
1531 		    (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1532 		    *addr < round_page((vm_offset_t)vms->vm_daddr +
1533 		    lim_max(td, RLIMIT_DATA))))) {
1534 			*addr = roundup2((vm_offset_t)vms->vm_daddr +
1535 			    lim_max(td, RLIMIT_DATA),
1536 			    pagesizes[shmfd->shm_lp_psind]);
1537 		}
1538 again:
1539 		rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1540 		if (rv != KERN_SUCCESS) {
1541 			if (try == 1) {
1542 				try = 2;
1543 				*addr = vm_map_min(map);
1544 				if ((*addr & mask) != 0)
1545 					*addr = (*addr + mask) & ~mask;
1546 				goto again;
1547 			}
1548 			goto fail1;
1549 		}
1550 	} else if ((flags & MAP_EXCL) == 0) {
1551 		rv = vm_map_delete(map, *addr, *addr + size);
1552 		if (rv != KERN_SUCCESS)
1553 			goto fail1;
1554 	} else {
1555 		error = ENOSPC;
1556 		if (vm_map_lookup_entry(map, *addr, &prev_entry))
1557 			goto fail;
1558 		next_entry = vm_map_entry_succ(prev_entry);
1559 		if (next_entry->start < *addr + size)
1560 			goto fail;
1561 	}
1562 
1563 	rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1564 	    prot, max_prot, docow);
1565 fail1:
1566 	error = vm_mmap_to_errno(rv);
1567 fail:
1568 	vm_map_unlock(map);
1569 	return (error);
1570 }
1571 
1572 static int
1573 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1574     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1575     vm_ooffset_t foff, struct thread *td)
1576 {
1577 	struct shmfd *shmfd;
1578 	vm_prot_t maxprot;
1579 	int error;
1580 	bool writecnt;
1581 	void *rl_cookie;
1582 
1583 	shmfd = fp->f_data;
1584 	maxprot = VM_PROT_NONE;
1585 
1586 	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1587 	    &shmfd->shm_mtx);
1588 	/* FREAD should always be set. */
1589 	if ((fp->f_flag & FREAD) != 0)
1590 		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1591 
1592 	/*
1593 	 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
1594 	 * mapping with a write seal applied.  Private mappings are always
1595 	 * writeable.
1596 	 */
1597 	if ((flags & MAP_SHARED) == 0) {
1598 		cap_maxprot |= VM_PROT_WRITE;
1599 		maxprot |= VM_PROT_WRITE;
1600 		writecnt = false;
1601 	} else {
1602 		if ((fp->f_flag & FWRITE) != 0 &&
1603 		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1604 			maxprot |= VM_PROT_WRITE;
1605 
1606 		/*
1607 		 * Any mappings from a writable descriptor may be upgraded to
1608 		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1609 		 * applied between the open and subsequent mmap(2).  We want to
1610 		 * reject application of a write seal as long as any such
1611 		 * mapping exists so that the seal cannot be trivially bypassed.
1612 		 */
1613 		writecnt = (maxprot & VM_PROT_WRITE) != 0;
1614 		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1615 			error = EACCES;
1616 			goto out;
1617 		}
1618 	}
1619 	maxprot &= cap_maxprot;
1620 
1621 	/* See comment in vn_mmap(). */
1622 	if (
1623 #ifdef _LP64
1624 	    objsize > OFF_MAX ||
1625 #endif
1626 	    foff > OFF_MAX - objsize) {
1627 		error = EINVAL;
1628 		goto out;
1629 	}
1630 
1631 #ifdef MAC
1632 	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1633 	if (error != 0)
1634 		goto out;
1635 #endif
1636 
1637 	mtx_lock(&shm_timestamp_lock);
1638 	vfs_timestamp(&shmfd->shm_atime);
1639 	mtx_unlock(&shm_timestamp_lock);
1640 	vm_object_reference(shmfd->shm_object);
1641 
1642 	if (shm_largepage(shmfd)) {
1643 		writecnt = false;
1644 		error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1645 		    maxprot, flags, foff, td);
1646 	} else {
1647 		if (writecnt) {
1648 			vm_pager_update_writecount(shmfd->shm_object, 0,
1649 			    objsize);
1650 		}
1651 		error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1652 		    shmfd->shm_object, foff, writecnt, td);
1653 	}
1654 	if (error != 0) {
1655 		if (writecnt)
1656 			vm_pager_release_writecount(shmfd->shm_object, 0,
1657 			    objsize);
1658 		vm_object_deallocate(shmfd->shm_object);
1659 	}
1660 out:
1661 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1662 	return (error);
1663 }
1664 
1665 static int
1666 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1667     struct thread *td)
1668 {
1669 	struct shmfd *shmfd;
1670 	int error;
1671 
1672 	error = 0;
1673 	shmfd = fp->f_data;
1674 	mtx_lock(&shm_timestamp_lock);
1675 	/*
1676 	 * SUSv4 says that x bits of permission need not be affected.
1677 	 * Be consistent with our shm_open there.
1678 	 */
1679 #ifdef MAC
1680 	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1681 	if (error != 0)
1682 		goto out;
1683 #endif
1684 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1685 	    VADMIN, active_cred);
1686 	if (error != 0)
1687 		goto out;
1688 	shmfd->shm_mode = mode & ACCESSPERMS;
1689 out:
1690 	mtx_unlock(&shm_timestamp_lock);
1691 	return (error);
1692 }
1693 
1694 static int
1695 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1696     struct thread *td)
1697 {
1698 	struct shmfd *shmfd;
1699 	int error;
1700 
1701 	error = 0;
1702 	shmfd = fp->f_data;
1703 	mtx_lock(&shm_timestamp_lock);
1704 #ifdef MAC
1705 	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1706 	if (error != 0)
1707 		goto out;
1708 #endif
1709 	if (uid == (uid_t)-1)
1710 		uid = shmfd->shm_uid;
1711 	if (gid == (gid_t)-1)
1712 		gid = shmfd->shm_gid;
1713 	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1714 	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1715 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1716 		goto out;
1717 	shmfd->shm_uid = uid;
1718 	shmfd->shm_gid = gid;
1719 out:
1720 	mtx_unlock(&shm_timestamp_lock);
1721 	return (error);
1722 }
1723 
1724 /*
1725  * Helper routines to allow the backing object of a shared memory file
1726  * descriptor to be mapped in the kernel.
1727  */
1728 int
1729 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1730 {
1731 	struct shmfd *shmfd;
1732 	vm_offset_t kva, ofs;
1733 	vm_object_t obj;
1734 	int rv;
1735 
1736 	if (fp->f_type != DTYPE_SHM)
1737 		return (EINVAL);
1738 	shmfd = fp->f_data;
1739 	obj = shmfd->shm_object;
1740 	VM_OBJECT_WLOCK(obj);
1741 	/*
1742 	 * XXXRW: This validation is probably insufficient, and subject to
1743 	 * sign errors.  It should be fixed.
1744 	 */
1745 	if (offset >= shmfd->shm_size ||
1746 	    offset + size > round_page(shmfd->shm_size)) {
1747 		VM_OBJECT_WUNLOCK(obj);
1748 		return (EINVAL);
1749 	}
1750 
1751 	shmfd->shm_kmappings++;
1752 	vm_object_reference_locked(obj);
1753 	VM_OBJECT_WUNLOCK(obj);
1754 
1755 	/* Map the object into the kernel_map and wire it. */
1756 	kva = vm_map_min(kernel_map);
1757 	ofs = offset & PAGE_MASK;
1758 	offset = trunc_page(offset);
1759 	size = round_page(size + ofs);
1760 	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1761 	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1762 	    VM_PROT_READ | VM_PROT_WRITE, 0);
1763 	if (rv == KERN_SUCCESS) {
1764 		rv = vm_map_wire(kernel_map, kva, kva + size,
1765 		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1766 		if (rv == KERN_SUCCESS) {
1767 			*memp = (void *)(kva + ofs);
1768 			return (0);
1769 		}
1770 		vm_map_remove(kernel_map, kva, kva + size);
1771 	} else
1772 		vm_object_deallocate(obj);
1773 
1774 	/* On failure, drop our mapping reference. */
1775 	VM_OBJECT_WLOCK(obj);
1776 	shmfd->shm_kmappings--;
1777 	VM_OBJECT_WUNLOCK(obj);
1778 
1779 	return (vm_mmap_to_errno(rv));
1780 }
1781 
1782 /*
1783  * We require the caller to unmap the entire entry.  This allows us to
1784  * safely decrement shm_kmappings when a mapping is removed.
1785  */
1786 int
1787 shm_unmap(struct file *fp, void *mem, size_t size)
1788 {
1789 	struct shmfd *shmfd;
1790 	vm_map_entry_t entry;
1791 	vm_offset_t kva, ofs;
1792 	vm_object_t obj;
1793 	vm_pindex_t pindex;
1794 	vm_prot_t prot;
1795 	boolean_t wired;
1796 	vm_map_t map;
1797 	int rv;
1798 
1799 	if (fp->f_type != DTYPE_SHM)
1800 		return (EINVAL);
1801 	shmfd = fp->f_data;
1802 	kva = (vm_offset_t)mem;
1803 	ofs = kva & PAGE_MASK;
1804 	kva = trunc_page(kva);
1805 	size = round_page(size + ofs);
1806 	map = kernel_map;
1807 	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1808 	    &obj, &pindex, &prot, &wired);
1809 	if (rv != KERN_SUCCESS)
1810 		return (EINVAL);
1811 	if (entry->start != kva || entry->end != kva + size) {
1812 		vm_map_lookup_done(map, entry);
1813 		return (EINVAL);
1814 	}
1815 	vm_map_lookup_done(map, entry);
1816 	if (obj != shmfd->shm_object)
1817 		return (EINVAL);
1818 	vm_map_remove(map, kva, kva + size);
1819 	VM_OBJECT_WLOCK(obj);
1820 	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1821 	shmfd->shm_kmappings--;
1822 	VM_OBJECT_WUNLOCK(obj);
1823 	return (0);
1824 }
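/*
 * Illustrative in-kernel usage of the helpers above (hedged sketch;
 * fp is assumed to be a DTYPE_SHM file obtained elsewhere): map one
 * page of the object at offset 0, use it, then unmap the same entry.
 * While such a mapping exists, shm_dotruncate() refuses to shrink the
 * object (shm_kmappings > 0 yields EBUSY).
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, PAGE_SIZE, 0, &mem);
 *	if (error == 0) {
 *		memset(mem, 0, PAGE_SIZE);
 *		error = shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */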
1825 
1826 static int
1827 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1828 {
1829 	const char *path, *pr_path;
1830 	size_t pr_pathlen;
1831 	bool visible;
1832 
1833 	sx_assert(&shm_dict_lock, SA_LOCKED);
1834 	kif->kf_type = KF_TYPE_SHM;
1835 	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1836 	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1837 	if (shmfd->shm_path != NULL) {
1838 		path = shmfd->shm_path;
1839 		pr_path = curthread->td_ucred->cr_prison->pr_path;
1840 		if (strcmp(pr_path, "/") != 0) {
1841 			/* Return the jail-rooted pathname. */
1842 			pr_pathlen = strlen(pr_path);
1843 			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1844 			    path[pr_pathlen] == '/';
1845 			if (list && !visible)
1846 				return (EPERM);
1847 			if (visible)
1848 				path += pr_pathlen;
1849 		}
1850 		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1851 	}
1854 	return (0);
1855 }
1856 
1857 static int
1858 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1859     struct filedesc *fdp __unused)
1860 {
1861 	int res;
1862 
1863 	sx_slock(&shm_dict_lock);
1864 	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1865 	sx_sunlock(&shm_dict_lock);
1866 	return (res);
1867 }
1868 
1869 static int
1870 shm_add_seals(struct file *fp, int seals)
1871 {
1872 	struct shmfd *shmfd;
1873 	void *rl_cookie;
1874 	vm_ooffset_t writemappings;
1875 	int error, nseals;
1876 
1877 	error = 0;
1878 	shmfd = fp->f_data;
1879 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1880 	    &shmfd->shm_mtx);
1881 
1882 	/* Once F_SEAL_SEAL is set, even already-set seals draw EPERM. */
1883 	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1884 		error = EPERM;
1885 		goto out;
1886 	}
1887 	nseals = seals & ~shmfd->shm_seals;
1888 	if ((nseals & F_SEAL_WRITE) != 0) {
1889 		if (shm_largepage(shmfd)) {
1890 			error = ENOTSUP;
1891 			goto out;
1892 		}
1893 
1894 		/*
1895 		 * The rangelock above prevents writable mappings from being
1896 		 * added after we've started applying seals.  The RLOCK here
1897 		 * is to avoid torn reads on ILP32 arches, since unmapping,
1898 		 * which reduces writemappings, is done without a rangelock.
1899 		 */
1900 		VM_OBJECT_RLOCK(shmfd->shm_object);
1901 		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1902 		VM_OBJECT_RUNLOCK(shmfd->shm_object);
1903 		/* kmappings are also writable */
1904 		if (writemappings > 0) {
1905 			error = EBUSY;
1906 			goto out;
1907 		}
1908 	}
1909 	shmfd->shm_seals |= nseals;
1910 out:
1911 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1912 	return (error);
1913 }
1914 
1915 static int
1916 shm_get_seals(struct file *fp, int *seals)
1917 {
1918 	struct shmfd *shmfd;
1919 
1920 	shmfd = fp->f_data;
1921 	*seals = shmfd->shm_seals;
1922 	return (0);
1923 }
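
/*
 * Editorial sketch of the userspace view, assuming a descriptor created
 * with sealing permitted (e.g. memfd_create(2) with MFD_ALLOW_SEALING):
 *
 *	int fd, seals;
 *
 *	fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	(void)ftruncate(fd, 4096);
 *	(void)fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	seals = fcntl(fd, F_GET_SEALS);
 *	(void)fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL);
 *
 * After the F_SEAL_SEAL request, any later F_ADD_SEALS fails with
 * EPERM, even for seals already set.  Independently, F_SEAL_WRITE is
 * refused with EBUSY while writable mappings (including kernel
 * mappings) of the object exist.
 */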
1924 
1925 static int
1926 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1927 {
1928 	vm_object_t object;
1929 	vm_pindex_t pistart, pi, piend;
1930 	vm_ooffset_t off, len;
1931 	int startofs, endofs, end;
1932 	int error;
1933 
1934 	off = *offset;
1935 	len = *length;
1936 	KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
1937 	if (off + len > shmfd->shm_size)
1938 		len = shmfd->shm_size - off;
1939 	object = shmfd->shm_object;
1940 	startofs = off & PAGE_MASK;
1941 	endofs = (off + len) & PAGE_MASK;
1942 	pistart = OFF_TO_IDX(off);
1943 	piend = OFF_TO_IDX(off + len);
1944 	pi = OFF_TO_IDX(off + PAGE_MASK);
1945 	error = 0;
1946 
1947 	/* Handle the case where the offset is at or beyond the shm size. */
1948 	if ((off_t)len <= 0) {
1949 		*length = 0;
1950 		return (0);
1951 	}
1952 
1953 	VM_OBJECT_WLOCK(object);
1954 
1955 	if (startofs != 0) {
1956 		end = pistart != piend ? PAGE_SIZE : endofs;
1957 		error = shm_partial_page_invalidate(object, pistart, startofs,
1958 		    end);
1959 		if (error)
1960 			goto out;
1961 		off += end - startofs;
1962 		len -= end - startofs;
1963 	}
1964 
1965 	if (pi < piend) {
1966 		vm_object_page_remove(object, pi, piend, 0);
1967 		off += IDX_TO_OFF(piend - pi);
1968 		len -= IDX_TO_OFF(piend - pi);
1969 	}
1970 
1971 	if (endofs != 0 && pistart != piend) {
1972 		error = shm_partial_page_invalidate(object, piend, 0, endofs);
1973 		if (error)
1974 			goto out;
1975 		off += endofs;
1976 		len -= endofs;
1977 	}
1978 
1979 out:
1980 	VM_OBJECT_WUNLOCK(object);
1981 	*offset = off;
1982 	*length = len;
1983 	return (error);
1984 }
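
/*
 * Editorial worked example, assuming PAGE_SIZE == 4096: off == 6144 and
 * len == 10240 yield startofs == 2048, endofs == 0, pistart == 1,
 * piend == 4 and pi == 2.  The tail of page 1 is zeroed by
 * shm_partial_page_invalidate(), whole pages 2 and 3 are freed by
 * vm_object_page_remove(), and with endofs == 0 there is no trailing
 * partial page to invalidate.
 */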
1985 
1986 static int
1987 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
1988     struct ucred *active_cred, struct thread *td)
1989 {
1990 	void *rl_cookie;
1991 	struct shmfd *shmfd;
1992 	off_t off, len;
1993 	int error;
1994 
1995 	KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: invalid cmd"));
1996 	KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
1997 	    ("shm_fspacectl: unsupported flags"));
1998 	KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
1999 	    ("shm_fspacectl: offset/length overflow or underflow"));
2000 	error = EINVAL;
2001 	shmfd = fp->f_data;
2002 	off = *offset;
2003 	len = *length;
2004 
2005 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, off, off + len,
2006 	    &shmfd->shm_mtx);
2007 	switch (cmd) {
2008 	case SPACECTL_DEALLOC:
2009 		if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2010 			error = EPERM;
2011 			break;
2012 		}
2013 		error = shm_deallocate(shmfd, &off, &len, flags);
2014 		*offset = off;
2015 		*length = len;
2016 		break;
2017 	default:
2018 		__assert_unreachable();
2019 	}
2020 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2021 	return (error);
2022 }
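
/*
 * Editorial sketch of the corresponding userspace call, punching a
 * 64K hole at the start of a shared memory object:
 *
 *	struct spacectl_range range = { .r_offset = 0, .r_len = 65536 };
 *
 *	if (fspacectl(fd, SPACECTL_DEALLOC, &range, 0, &range) == -1)
 *		err(1, "fspacectl");
 *
 * The range written back on return describes whatever part of the
 * request remains unprocessed; r_len reaches 0 once all of it is done.
 */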
2023
2025 static int
2026 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2027 {
2028 	void *rl_cookie;
2029 	struct shmfd *shmfd;
2030 	size_t size;
2031 	int error;
2032 
2033 	/* This assumes that the caller already checked for overflow. */
2034 	error = 0;
2035 	shmfd = fp->f_data;
2036 	size = offset + len;
2037 
2038 	/*
2039 	 * Just grab the rangelock for the range that we may be attempting to
2040 	 * grow, rather than blocking read/write for regions we won't be
2041 	 * touching while this (potential) resize is in progress.  Other
2042 	 * attempts to resize the shmfd will have to take a write lock from 0 to
2043 	 * OFF_MAX, so this being potentially beyond the current usable range of
2044 	 * the shmfd is not necessarily a concern.  If other mechanisms are
2045 	 * added to grow a shmfd, this may need to be re-evaluated.
2046 	 */
2047 	rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
2048 	    &shmfd->shm_mtx);
2049 	if (size > shmfd->shm_size)
2050 		error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2051 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
2052 	/* Translate to posix_fallocate(2) return value as needed. */
2053 	if (error == ENOMEM)
2054 		error = ENOSPC;
2055 	return (error);
2056 }
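
/*
 * Editorial note: this fileops method backs posix_fallocate(2) on
 * shmfds.  A sketch of the userspace view, reserving 1 MB:
 *
 *	int error;
 *
 *	error = posix_fallocate(fd, 0, 1024 * 1024);
 *	if (error != 0)
 *		errc(1, error, "posix_fallocate");
 *
 * posix_fallocate() hands back the error number rather than setting
 * errno, hence errc(3); the ENOMEM-to-ENOSPC translation above is what
 * makes a failed grow report ENOSPC as the standard requires.
 */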
2057 
2058 static int
2059 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2060 {
2061 	struct shm_mapping *shmm;
2062 	struct sbuf sb;
2063 	struct kinfo_file kif;
2064 	u_long i;
2065 	int error, error2;
2066 
2067 	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2068 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2069 	error = 0;
2070 	sx_slock(&shm_dict_lock);
2071 	for (i = 0; i <= shm_hash; i++) {
2072 		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2073 			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2074 			    &kif, true);
2075 			if (error == EPERM) {
2076 				error = 0;
2077 				continue;
2078 			}
2079 			if (error != 0)
2080 				break;
2081 			pack_kinfo(&kif);
2082 			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2083 			    0 : ENOMEM;
2084 			if (error != 0)
2085 				break;
2086 		}
2087 	}
2088 	sx_sunlock(&shm_dict_lock);
2089 	error2 = sbuf_finish(&sb);
2090 	sbuf_delete(&sb);
2091 	return (error != 0 ? error : error2);
2092 }
2093 
2094 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2095     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2096     NULL, 0, sysctl_posix_shm_list, "",
2097     "POSIX SHM list");
2098 
2099 int
2100 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2101     struct filecaps *caps)
2102 {
2103 
2104 	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2105 }
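
/*
 * Editorial note: this wrapper preserves the pre-shm_open2 KPI; the
 * COMPAT_FREEBSD12 shm_open(2) syscall, for instance, is expected to
 * reduce to
 *
 *	kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC, uap->mode,
 *	    NULL);
 */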
2106 
2107 /*
2108  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2109  * caller, and libc will enforce it for the traditional shm_open() call.  This
2110  * allows other consumers, like memfd_create(), to opt in to CLOEXEC.  This
2111  * interface also includes a 'name' argument that is currently unused, but
2112  * may later be exported via some interface for debugging purposes.  From
2113  * the kernel's perspective, it is optional.  Individual consumers like
2114  * memfd_create() may require it in order to be compatible with other systems
2115  * implementing the same function.
2116  */
2117 int
2118 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2119 {
2120 
2121 	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2122 	    uap->shmflags, NULL, uap->name));
2123 }
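
/*
 * Editorial sketch of how libc is expected to sit on top of this
 * syscall: shm_open(3) forces CLOEXEC itself,
 *
 *	shm_open(path, flags, mode) ==
 *	    shm_open2(path, flags | O_CLOEXEC, mode, 0, NULL);
 *
 * while memfd_create(3) passes SHM_ANON with a non-NULL name and adds
 * SHM_GROW_ON_WRITE, plus SHM_ALLOW_SEALING when the caller requests
 * MFD_ALLOW_SEALING.
 */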
2124