/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).  Most of the
 * implementation lives here; vm_mmap.c contains the changes to the
 * mapping logic.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit bounds the total amount of
 * memory a user can consume for anonymous objects, shared ones
 * included.
 */
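
/*
 * Example (illustrative userspace sketch, not part of the kernel build;
 * the object name "/example" is hypothetical): the canonical lifecycle
 * is shm_open(), ftruncate() to size the object, mmap(), and finally
 * shm_unlink().
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	example(void)
 *	{
 *		char *p;
 *		int fd;
 *
 *		fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *		if (fd < 0)
 *			return (-1);
 *		if (ftruncate(fd, 4096) != 0)
 *			return (-1);
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    fd, 0);
 *		if (p == MAP_FAILED)
 *			return (-1);
 *		strlcpy(p, "hello", 4096);
 *		munmap(p, 4096);
 *		close(fd);
 *		return (shm_unlink("/example"));
 *	}
 */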

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;
static fo_get_seals_t	shm_get_seals;
static fo_add_seals_t	shm_add_seals;
static fo_fallocate_t	shm_fallocate;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_get_seals = shm_get_seals,
	.fo_add_seals = shm_add_seals,
	.fo_fallocate = shm_fallocate,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
	if (rv == VM_PAGER_OK)
		goto found;

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	VM_OBJECT_WLOCK(obj);
	m = vm_page_lookup(obj, idx);
	if (uio->uio_rw == UIO_READ && m == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Although a tmpfs vnode lock may be held here (this routine
	 * also backs tmpfs I/O), it is nonetheless safe to sleep
	 * waiting for a free page.  The pageout daemon does not need to
	 * acquire that lock to page out obj's pages, because obj is an
	 * OBJT_SWAP object.
	 */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
		    obj, idx, rv);
		return (EIO);
	}
	VM_OBJECT_WUNLOCK(obj);

found:
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0)
		vm_page_set_dirty(m);
	vm_page_activate(m);
	vm_page_sunbusy(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}
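
/*
 * Example (illustrative userspace sketch): shm descriptors are seekable,
 * so the current object size can be read back with SEEK_END:
 *
 *	off_t size = lseek(fd, 0, SEEK_END);
 *
 * Unlike a regular file, seeking past the end fails with EINVAL rather
 * than succeeding and creating a hole on a later write.
 */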

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}
	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0)
		error = EPERM;
	else
		error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{

	switch (com) {
	case FIONBIO:
	case FIOASYNC:
		/*
		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, just like
		 * it would on an unlinked regular file.
		 */
		return (0);
	default:
		return (ENOTTY);
	}
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sanish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
	int error;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;

	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;

	/* Construct a full pathname for jailed callers. */
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
	    strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(userpath_in, path + pr_pathlen,
	    MAXPATHLEN - pr_pathlen, NULL);
	if (error != 0)
		goto out;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif

	/* Require paths to start with a '/' character. */
	if (path[pr_pathlen] != '/') {
		error = EINVAL;
		goto out;
	}

	*path_out = path;

out:
	if (error != 0)
		free(path, M_SHMFD);

	return (error);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
	if (length == shmfd->shm_size)
		return (0);
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);

		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
			if (m != NULL) {
				MPASS(vm_page_all_valid(m));
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx,
				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
				if (m == NULL)
					goto retry;
				vm_object_pip_add(object, 1);
				VM_OBJECT_WUNLOCK(object);
				rv = vm_pager_get_pages(object, &m, 1, NULL,
				    NULL);
				VM_OBJECT_WLOCK(object);
				vm_object_pip_wakeup(object);
				if (rv == VM_PAGER_OK) {
					/*
					 * Since the page was not resident,
					 * and therefore not recently
					 * accessed, immediately enqueue it
					 * for asynchronous laundering.  The
					 * current operation is not regarded
					 * as an access.
					 */
					vm_page_launder(m);
				} else {
					vm_page_free(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(vm_page_all_valid(m),
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_set_dirty(m);
				vm_page_xunbusy(m);
			}
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for shm. */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
			return (EPERM);

		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred))
			return (ENOMEM);
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	return (0);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);
	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_dotruncate_locked(shmfd, length, rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
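
/*
 * Sketch of the lookup pattern the dictionary routines below are built
 * for (this is how kern_shm_open2() and sys_shm_unlink() use them):
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	...
 *	sx_xunlock(&shm_dict_lock);
 */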

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	void *rl_cookie;
	Fnv32_t fnv;
	mode_t cmode;
	int error, fd, initial_seals;

	if ((shmflags & ~SHM_ALLOW_SEALING) != 0)
		return (EINVAL);

	initial_seals = F_SEAL_SEAL;
	if ((shmflags & SHM_ALLOW_SEALING) != 0)
		initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	/*
	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
	 * If the decision is made later to allow additional seals, care must be
	 * taken below to ensure that the seals are properly set if the shmfd
	 * already existed -- this currently assumes that only F_SEAL_SEAL can
	 * be set and doesn't take further precautions to ensure the validity of
	 * the seals being added with respect to current mappings.
	 */
	if ((initial_seals & ~F_SEAL_SEAL) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

	/*
	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
	 * by POSIX.  We allow it to be unset here so that an in-kernel
	 * interface may be written as a thin layer around shm, optionally not
	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
	 * in sys_shm_open() to keep this implementation compliant.
	 */
	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
		shmfd->shm_seals = initial_seals;
	} else {
		error = shm_copyin_path(td, userpath, &path);
		if (error != 0) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}

		AUDIT_ARG_UPATH1_CANON(path);
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shmfd->shm_seals = initial_seals;
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
			    &shmfd->shm_mtx);

			/*
			 * Unlike F_ADD_SEALS, kern_shm_open() must not error
			 * out when asked to set a seal that is already set:
			 * shm_open(2) always passes F_SEAL_SEAL to preserve
			 * the historical behavior in which the underlying
			 * file could not be sealed, so failing here would
			 * break every re-open of an existing object.
			 */
			initial_seals &= ~shmfd->shm_seals;

			/*
			 * Object already exists, obtain a new reference if
			 * requested and permitted.
			 */
			free(path, M_SHMFD);

			/*
			 * initial_seals cannot add seals once F_SEAL_SEAL is
			 * set on the object; if F_SEAL_SEAL is set, it has
			 * already been stripped from initial_seals above.
			 * The check is currently redundant, since only
			 * F_SEAL_SEAL may be set at creation time, but it is
			 * cheap and reduces the work needed to allow
			 * additional seals later.
			 */
			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
			    initial_seals != 0)
				error = EPERM;
			else if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					error = shm_dotruncate_locked(shmfd, 0,
					    rl_cookie);
				VM_OBJECT_WUNLOCK(shmfd->shm_object);
			}
			if (error == 0) {
				/*
				 * Currently we only allow F_SEAL_SEAL to be
				 * set initially.  As noted above, this would
				 * need to be reworked should that change.
				 */
				shmfd->shm_seals |= initial_seals;
				shm_hold(shmfd);
			}
			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
			    &shmfd->shm_mtx);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}
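
/*
 * Example (illustrative userspace sketch): memfd_create(3) is the usual
 * consumer of SHM_ALLOW_SEALING.  Without MFD_ALLOW_SEALING the
 * descriptor starts out with F_SEAL_SEAL applied, and any later
 * F_ADD_SEALS fails with EPERM:
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0)
 *		(void)fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 */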

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
	    uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	error = shm_copyin_path(td, uap->path, &path);
	if (error != 0)
		return (error);

	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_SHMFD);

	return (error);
}

int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
	char *path_from = NULL, *path_to = NULL;
	Fnv32_t fnv_from, fnv_to;
	struct shmfd *fd_from;
	struct shmfd *fd_to;
	int error;
	int flags;

	flags = uap->flags;
	AUDIT_ARG_FFLAGS(flags);

	/*
	 * Make sure the user passed only valid flags.
	 * If you add a new flag, please add a new term here.
	 */
	if ((flags & ~(
	    SHM_RENAME_NOREPLACE |
	    SHM_RENAME_EXCHANGE
	    )) != 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * EXCHANGE and NOREPLACE don't quite make sense together.  Let's
	 * force the user to choose one or the other.
	 */
	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
	    (flags & SHM_RENAME_EXCHANGE) != 0) {
		error = EINVAL;
		goto out;
	}

	/* Renaming to or from anonymous makes no sense. */
	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
		error = EINVAL;
		goto out;
	}

	error = shm_copyin_path(td, uap->path_from, &path_from);
	if (error != 0)
		goto out;

	error = shm_copyin_path(td, uap->path_to, &path_to);
	if (error != 0)
		goto out;

	AUDIT_ARG_UPATH1_CANON(path_from);
	AUDIT_ARG_UPATH2_CANON(path_to);

	/* Rename with from/to equal is a no-op. */
	if (strcmp(path_from, path_to) == 0)
		goto out;

	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

	sx_xlock(&shm_dict_lock);

	fd_from = shm_lookup(path_from, fnv_from);
	if (fd_from == NULL) {
		error = ENOENT;
		goto out_locked;
	}

	fd_to = shm_lookup(path_to, fnv_to);
	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
		error = EEXIST;
		goto out_locked;
	}

	/*
	 * Take a hold on fd_from unconditionally so that shm_remove()
	 * below cannot invalidate the 'from' shm's state.
	 */
	shm_hold(fd_from);
	error = shm_remove(path_from, fnv_from, td->td_ucred);

	/*
	 * An ENOENT here means one of our assumptions failed (e.g. the
	 * dictionary lock did not protect us).
	 */
	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
	    path_from));
	if (error != 0) {
		shm_drop(fd_from);
		goto out_locked;
	}

	/*
	 * If we are exchanging, we need to ensure the shm_remove below
	 * doesn't invalidate the dest shm's state.
	 */
	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
		shm_hold(fd_to);

	/*
	 * NOTE: if path_to is not already in the hash, there is simply
	 * nothing at path_to to unlink; that is the benign ENOENT case.
	 *
	 * If we lack permission to unlink the shm at path_to but had it
	 * for the one at path_from, relink the shm at path_from and
	 * abort with EACCES.
	 *
	 * For any other error, relink and abort the operation as well.
	 */
	error = shm_remove(path_to, fnv_to, td->td_ucred);
	if (error != 0 && error != ENOENT) {
		shm_insert(path_from, fnv_from, fd_from);
		shm_drop(fd_from);
		/* Don't free path_from now, since the hash references it. */
		path_from = NULL;
		goto out_locked;
	}

	error = 0;

	shm_insert(path_to, fnv_to, fd_from);

	/* Don't free path_to now, since the hash references it. */
	path_to = NULL;

	/* We kept a ref when we removed, and incremented again in insert. */
	shm_drop(fd_from);
	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
	    fd_from->shm_refs));

	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
		shm_insert(path_from, fnv_from, fd_to);
		path_from = NULL;
		shm_drop(fd_to);
		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
		    fd_to->shm_refs));
	}

out_locked:
	sx_xunlock(&shm_dict_lock);

out:
	free(path_from, M_SHMFD);
	free(path_to, M_SHMFD);
	return (error);
}
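
/*
 * Example (illustrative userspace sketch): shm_rename(2) can atomically
 * publish a staged object over a live name, or swap two objects:
 *
 *	shm_rename("/staging", "/live", 0);
 *	shm_rename("/a", "/b", SHM_RENAME_EXCHANGE);
 *
 * With SHM_RENAME_NOREPLACE, the first call would instead fail with
 * EEXIST if "/live" already existed.  The paths here are hypothetical.
 */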

static int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;
	bool writecnt;
	void *rl_cookie;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
	    &shmfd->shm_mtx);
	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;

	/*
	 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
	 * mapping with a write seal applied.  Private mappings are always
	 * writeable.
	 */
	if ((flags & MAP_SHARED) == 0) {
		cap_maxprot |= VM_PROT_WRITE;
		maxprot |= VM_PROT_WRITE;
		writecnt = false;
	} else {
		if ((fp->f_flag & FWRITE) != 0 &&
		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
			maxprot |= VM_PROT_WRITE;
		writecnt = (prot & VM_PROT_WRITE) != 0;
		if (writecnt && (shmfd->shm_seals & F_SEAL_WRITE) != 0) {
			error = EPERM;
			goto out;
		}

		/*
		 * Don't permit shared writable mappings on read-only
		 * descriptors.
		 */
		if (writecnt && (maxprot & VM_PROT_WRITE) == 0) {
			error = EACCES;
			goto out;
		}
	}
	maxprot &= cap_maxprot;

	/* See comment in vn_mmap(). */
	if (
#ifdef _LP64
	    objsize > OFF_MAX ||
#endif
	    foff < 0 || foff > OFF_MAX - objsize) {
		error = EINVAL;
		goto out;
	}

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		goto out;
#endif

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	if (writecnt)
		vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, writecnt, td);
	if (error != 0) {
		if (writecnt)
			vm_pager_release_writecount(shmfd->shm_object, 0,
			    objsize);
		vm_object_deallocate(shmfd->shm_object);
	}
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}
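
/*
 * Example (illustrative userspace sketch): the F_SEAL_WRITE handling
 * above means that, once the seal is applied, new shared writable
 * mappings are refused with EPERM, while private copy-on-write mappings
 * are still permitted:
 *
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		(fails with MAP_FAILED / EPERM)
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 *		(succeeds)
 */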

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}
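
/*
 * Example (illustrative in-kernel sketch, assuming "fp" is a held
 * DTYPE_SHM file of at least PAGE_SIZE bytes): map the first page of
 * the backing object into the kernel, use it, and unmap it.  While the
 * mapping exists, shm_kmappings is non-zero and shrinking truncations
 * fail with EBUSY.
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, PAGE_SIZE, 0, &mem);
 *	if (error == 0) {
 *		memset(mem, 0, PAGE_SIZE);
 *		error = shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */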

static int
shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
{
	const char *path, *pr_path;
	size_t pr_pathlen;
	bool visible;

	sx_assert(&shm_dict_lock, SA_LOCKED);
	kif->kf_type = KF_TYPE_SHM;
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		path = shmfd->shm_path;
		pr_path = curthread->td_ucred->cr_prison->pr_path;
		if (strcmp(pr_path, "/") != 0) {
			/* Return the jail-rooted pathname. */
			pr_pathlen = strlen(pr_path);
			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
			    path[pr_pathlen] == '/';
			if (list && !visible)
				return (EPERM);
			if (visible)
				path += pr_pathlen;
		}
		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
	}
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp __unused)
{
	int res;

	sx_slock(&shm_dict_lock);
	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
	sx_sunlock(&shm_dict_lock);
	return (res);
}

static int
shm_add_seals(struct file *fp, int seals)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	vm_ooffset_t writemappings;
	int error, nseals;

	error = 0;
	shmfd = fp->f_data;
	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);

	/* Even already-set seals should result in EPERM. */
	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
		error = EPERM;
		goto out;
	}
	nseals = seals & ~shmfd->shm_seals;
	if ((nseals & F_SEAL_WRITE) != 0) {
		/*
		 * The rangelock above prevents writable mappings from being
		 * added after we've started applying seals.  The RLOCK here
		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
		 * writemappings will be done without a rangelock.
		 */
		VM_OBJECT_RLOCK(shmfd->shm_object);
		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
		VM_OBJECT_RUNLOCK(shmfd->shm_object);
		/* kmappings are also writable. */
		if (writemappings > 0) {
			error = EBUSY;
			goto out;
		}
	}
	shmfd->shm_seals |= nseals;
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}

static int
shm_get_seals(struct file *fp, int *seals)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	*seals = shmfd->shm_seals;
	return (0);
}

static int
shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
{
	void *rl_cookie;
	struct shmfd *shmfd;
	size_t size;
	int error;

	/* This assumes that the caller already checked for overflow. */
	error = 0;
	shmfd = fp->f_data;
	size = offset + len;

	/*
	 * Just grab the rangelock for the range that we may be attempting to
	 * grow, rather than blocking read/write for regions we won't be
	 * touching while this (potential) resize is in progress.  Other
	 * attempts to resize the shmfd will have to take a write lock from 0 to
	 * OFF_MAX, so this being potentially beyond the current usable range of
	 * the shmfd is not necessarily a concern.  If other mechanisms are
	 * added to grow a shmfd, this may need to be re-evaluated.
	 */
	rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
	    &shmfd->shm_mtx);
	if (size > shmfd->shm_size) {
		VM_OBJECT_WLOCK(shmfd->shm_object);
		error = shm_dotruncate_locked(shmfd, size, rl_cookie);
		VM_OBJECT_WUNLOCK(shmfd->shm_object);
	}
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	/* Translate to posix_fallocate(2) return value as needed. */
	if (error == ENOMEM)
		error = ENOSPC;
	return (error);
}
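
/*
 * Example (illustrative userspace sketch): posix_fallocate(2) on a shm
 * descriptor grows the object as needed; a failed swap reservation
 * surfaces as ENOSPC, translated from ENOMEM above.  Note that it
 * returns the error number directly rather than setting errno:
 *
 *	int error = posix_fallocate(fd, 0, 1024 * 1024);
 */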

static int
sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
{
	struct shm_mapping *shmm;
	struct sbuf sb;
	struct kinfo_file kif;
	u_long i;
	ssize_t curlen;
	int error, error2;

	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
	curlen = 0;
	error = 0;
	sx_slock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
			    &kif, true);
			if (error == EPERM)
				continue;
			if (error != 0)
				break;
			pack_kinfo(&kif);
			if (req->oldptr != NULL &&
			    kif.kf_structsize + curlen > req->oldlen)
				break;
			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
			    0 : ENOMEM;
			if (error != 0)
				break;
			curlen += kif.kf_structsize;
		}
	}
	sx_sunlock(&shm_dict_lock);
	error2 = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error != 0 ? error : error2);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
    CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
    NULL, 0, sysctl_posix_shm_list, "",
    "POSIX SHM list");

int
kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
    struct filecaps *caps)
{

	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
}

/*
 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
 * caller, and libc will enforce it for the traditional shm_open() call.  This
 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
 * interface also includes a 'name' argument that is currently unused, but
 * could potentially be exported later via some interface for debugging
 * purposes.  From the kernel's perspective, it is optional.  Individual
 * consumers like memfd_create() may require it in order to be compatible with
 * other systems implementing the same function.
 */
int
sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
{

	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
	    uap->shmflags, NULL, uap->name));
}
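
/*
 * Example (illustrative sketch of the userspace side, not the actual
 * libc source): the exported shm_open(3) behaves as if it were a thin
 * wrapper that forces O_CLOEXEC and requests no seals:
 *
 *	int
 *	shm_open(const char *path, int flags, mode_t mode)
 *	{
 *		return (shm_open2(path, flags | O_CLOEXEC, mode, 0, NULL));
 *	}
 */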
1561