/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).
 * While most of the implementation is here, vm_mmap.c contains
 * mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount
 * of memory that a user can consume for anonymous objects, including
 * shared ones.
 */
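
/*
 * Illustrative userspace usage (a sketch, not part of this file; the
 * object name and sizes are arbitrary):
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 */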

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static void	shm_doremove(struct shm_mapping *map);
static int	shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);
static int	shm_deallocate(struct shmfd *shmfd, off_t *offset,
    off_t *length, int flags);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;
static fo_get_seals_t	shm_get_seals;
static fo_add_seals_t	shm_add_seals;
static fo_fallocate_t	shm_fallocate;
static fo_fspacectl_t	shm_fspacectl;

/* File descriptor operations. */
const struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_get_seals = shm_get_seals,
	.fo_add_seals = shm_add_seals,
	.fo_fallocate = shm_fallocate,
	.fo_fspacectl = shm_fspacectl,
	.fo_cmp = file_kcmp_generic,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
};

FEATURE(posix_shm, "POSIX shared memory");

static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "");

static int largepage_reclaim_tries = 1;
SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
    CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
    "Number of contig reclaims before giving up for default alloc policy");

#define	shm_rangelock_unlock(shmfd, cookie)				\
	rangelock_unlock(&(shmfd)->shm_rl, (cookie))
#define	shm_rangelock_rlock(shmfd, start, end)				\
	rangelock_rlock(&(shmfd)->shm_rl, (start), (end))
#define	shm_rangelock_tryrlock(shmfd, start, end)			\
	rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end))
#define	shm_rangelock_wlock(shmfd, start, end)				\
	rangelock_wlock(&(shmfd)->shm_rl, (start), (end))

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
	if (rv == VM_PAGER_OK)
		goto found;

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	VM_OBJECT_WLOCK(obj);
	m = vm_page_lookup(obj, idx);
	if (uio->uio_rw == UIO_READ && m == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out obj's pages because obj is an OBJT_SWAP
	 * type object.
	 */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		if (bootverbose) {
			printf("uiomove_object: vm_obj %p idx %jd "
			    "pager error %d\n", obj, idx, rv);
		}
		return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
	}
	VM_OBJECT_WUNLOCK(obj);

found:
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0)
		vm_page_set_dirty(m);
	vm_page_activate(m);
	vm_page_sunbusy(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static u_long count_largepages[MAXPAGESIZES];

static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	vm_page_t m __diagused;
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pidx >= object->size)
		return (VM_PAGER_FAIL);
	*first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);

	/*
	 * We only busy the first page in the superpage run.  It is
	 * useless to busy the whole run since we only remove a full
	 * superpage, and it takes too long to busy e.g. 512 * 512 ==
	 * 262144 pages constituting a 1G amd64 superpage.
	 */
	m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
	MPASS(m != NULL);

	*last = *first + atop(pagesizes[psind]) - 1;
	return (VM_PAGER_OK);
}

static boolean_t
shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pindex >= object->size)
		return (FALSE);
	if (before != NULL) {
		*before = pindex - rounddown2(pindex, pagesizes[psind] /
		    PAGE_SIZE);
	}
	if (after != NULL) {
		*after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
		    pindex;
	}
	return (TRUE);
}

static void
shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
}

static void
shm_largepage_phys_dtor(vm_object_t object)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind != 0) {
		atomic_subtract_long(&count_largepages[psind],
		    object->size / (pagesizes[psind] / PAGE_SIZE));
		vm_wire_sub(object->size);
	} else {
		KASSERT(object->size == 0,
		    ("largepage phys obj %p not initialized but size %#jx > 0",
		    object, (uintmax_t)object->size));
	}
}

static const struct phys_pager_ops shm_largepage_phys_ops = {
	.phys_pg_populate =	shm_largepage_phys_populate,
	.phys_pg_haspage =	shm_largepage_phys_haspage,
	.phys_pg_ctor =		shm_largepage_phys_ctor,
	.phys_pg_dtor =		shm_largepage_phys_dtor,
};

bool
shm_largepage(struct shmfd *shmfd)
{
	return (shmfd->shm_object->type == OBJT_PHYS);
}

static void
shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
{
	struct shmfd *shm;
	vm_size_t c;

	swap_pager_freespace(obj, start, size, &c);
	if (c == 0)
		return;

	shm = obj->un_pager.swp.swp_priv;
	if (shm == NULL)
		return;
	KASSERT(shm->shm_pages >= c,
	    ("shm %p pages %jd free %jd", shm,
	    (uintmax_t)shm->shm_pages, (uintmax_t)c));
	shm->shm_pages -= c;
}

static void
shm_page_inserted(vm_object_t obj, vm_page_t m)
{
	struct shmfd *shm;

	shm = obj->un_pager.swp.swp_priv;
	if (shm == NULL)
		return;
	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
		shm->shm_pages += 1;
}

static void
shm_page_removed(vm_object_t obj, vm_page_t m)
{
	struct shmfd *shm;

	shm = obj->un_pager.swp.swp_priv;
	if (shm == NULL)
		return;
	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
		KASSERT(shm->shm_pages >= 1,
		    ("shm %p pages %jd free 1", shm,
		    (uintmax_t)shm->shm_pages));
		shm->shm_pages -= 1;
	}
}

static struct pagerops shm_swap_pager_ops = {
	.pgo_kvme_type = KVME_TYPE_SWAP,
	.pgo_freespace = shm_pager_freespace,
	.pgo_page_inserted = shm_page_inserted,
	.pgo_page_removed = shm_page_removed,
};
static int shmfd_pager_type = -1;

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	shm_rangelock_unlock(shmfd, rl_cookie);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;
	off_t newsize;

	KASSERT((flags & FOF_OFFSET) == 0 || uio->uio_offset >= 0,
	    ("%s: negative offset", __func__));

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
		return (EINVAL);
	foffset_lock_uio(fp, uio, flags);
	if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
		/*
		 * Overflow is only an error if we're supposed to expand on
		 * write.  Otherwise, we'll just truncate the write to the
		 * size of the file, which can only grow up to OFF_MAX.
		 */
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
			foffset_unlock_uio(fp, uio, flags);
			return (EFBIG);
		}

		newsize = atomic_load_64(&shmfd->shm_size);
	} else {
		newsize = uio->uio_offset + uio->uio_resid;
	}
	if ((flags & FOF_OFFSET) == 0)
		rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
	else
		rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset,
		    MAX(newsize, uio->uio_offset));
	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
		error = EPERM;
	} else {
		error = 0;
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
		    newsize > shmfd->shm_size) {
			error = shm_dotruncate_cookie(shmfd, newsize,
			    rl_cookie);
		}
		if (error == 0)
			error = uiomove_object(shmfd->shm_object,
			    shmfd->shm_size, uio);
	}
	shm_rangelock_unlock(shmfd, rl_cookie);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	struct shm_largepage_conf *conf;
	void *rl_cookie;

	shmfd = fp->f_data;
	switch (com) {
	case FIONBIO:
	case FIOASYNC:
		/*
		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, just like
		 * it would on an unlinked regular file.
		 */
		return (0);
	case FIOSSHMLPGCNF:
		if (!shm_largepage(shmfd))
			return (ENOTTY);
		conf = data;
		if (shmfd->shm_lp_psind != 0 &&
		    conf->psind != shmfd->shm_lp_psind)
			return (EINVAL);
		if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
		    pagesizes[conf->psind] == 0)
			return (EINVAL);
		if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
			return (EINVAL);

		rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
		shmfd->shm_lp_psind = conf->psind;
		shmfd->shm_lp_alloc_policy = conf->alloc_policy;
		shmfd->shm_object->un_pager.phys.data_val = conf->psind;
		shm_rangelock_unlock(shmfd, rl_cookie);
		return (0);
	case FIOGSHMLPGCNF:
		if (!shm_largepage(shmfd))
			return (ENOTTY);
		conf = data;
		rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
		conf->psind = shmfd->shm_lp_psind;
		conf->alloc_policy = shmfd->shm_lp_alloc_policy;
		shm_rangelock_unlock(shmfd, rl_cookie);
		return (0);
	default:
		return (ENOTTY);
	}
}
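
/*
 * Illustrative largepage configuration from userspace (a sketch; the
 * psind value is machine-dependent, e.g. psind 1 is typically the 2M
 * superpage on amd64, and fd must refer to an object created with
 * SHM_LARGEPAGE):
 *
 *	struct shm_largepage_conf slc = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	ioctl(fd, FIOSSHMLPGCNF, &slc);
 */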

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sanish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;
	if (shm_largepage(shmfd)) {
		sb->st_blocks = shmfd->shm_object->size /
		    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
	} else {
		sb->st_blocks = shmfd->shm_pages;
	}

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
	int error;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;

	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;

	/* Construct a full pathname for jailed callers. */
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
	    strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(userpath_in, path + pr_pathlen,
	    MAXPATHLEN - pr_pathlen, NULL);
	if (error != 0)
		goto out;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif

	/* Require paths to start with a '/' character. */
	if (path[pr_pathlen] != '/') {
		error = EINVAL;
		goto out;
	}

	*path_out = path;

out:
	if (error != 0)
		free(path, M_SHMFD);

	return (error);
}

static int
shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
    int end)
{
	int error;

	error = vm_page_grab_zero_partial(object, idx, base, end);
	if (error == EIO)
		VM_OBJECT_WUNLOCK(object);
	return (error);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_pindex_t nobjsize;
	vm_ooffset_t delta;
	int base, error;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
	if (length == shmfd->shm_size)
		return (0);
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);

		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			error = shm_partial_page_invalidate(object,
			    OFF_TO_IDX(length), base, PAGE_SIZE);
			if (error)
				return (error);
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Free the swap accounted for the shm. */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
			return (EPERM);

		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred))
			return (ENOMEM);
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	return (0);
}

static int
shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t newobjsz;
	vm_pindex_t oldobjsz __unused;
	int aflags, error, i, psind, try;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);

	oldobjsz = object->size;
	newobjsz = OFF_TO_IDX(length);
	if (length == shmfd->shm_size)
		return (0);
	psind = shmfd->shm_lp_psind;
	if (psind == 0 && length != 0)
		return (EINVAL);
	if ((length & (pagesizes[psind] - 1)) != 0)
		return (EINVAL);

	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);
		return (ENOTSUP);	/* Pages are unmanaged. */
#if 0
		vm_object_page_remove(object, newobjsz, oldobjsz, 0);
		object->size = newobjsz;
		shmfd->shm_size = length;
		return (0);
#endif
	}

	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
		return (EPERM);

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
		aflags |= VM_ALLOC_WAITFAIL;
	try = 0;

	/*
	 * Extend shmfd and object, keeping all already fully
	 * allocated large pages intact even on error, because the
	 * dropped object lock might have allowed them to be mapped.
	 */
	while (object->size < newobjsz) {
		m = vm_page_alloc_contig(object, object->size, aflags,
		    pagesizes[psind] / PAGE_SIZE, 0, ~0,
		    pagesizes[psind], 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_NOWAIT ||
			    (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_DEFAULT &&
			    try >= largepage_reclaim_tries)) {
				VM_OBJECT_WLOCK(object);
				return (ENOMEM);
			}
			error = vm_page_reclaim_contig(aflags,
			    pagesizes[psind] / PAGE_SIZE, 0, ~0,
			    pagesizes[psind], 0);
			if (error == ENOMEM)
				error = vm_wait_intr(object);
			if (error != 0) {
				VM_OBJECT_WLOCK(object);
				return (error);
			}
			try++;
			VM_OBJECT_WLOCK(object);
			continue;
		}
		try = 0;
		for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
			if ((m[i].flags & PG_ZERO) == 0)
				pmap_zero_page(&m[i]);
			vm_page_valid(&m[i]);
			vm_page_xunbusy(&m[i]);
		}
		object->size += OFF_TO_IDX(pagesizes[psind]);
		shmfd->shm_size += pagesizes[psind];
		atomic_add_long(&count_largepages[psind], 1);
		vm_wire_add(atop(pagesizes[psind]));
	}
	return (0);
}

static int
shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	int error;

	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
	    length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
	    rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	return (error);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
	error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
{
	struct shmfd *shmfd;
	vm_object_t obj;

	if (largepage) {
		obj = phys_pager_allocate(NULL, &shm_largepage_phys_ops,
		    NULL, 0, VM_PROT_DEFAULT, 0, ucred);
	} else {
		obj = vm_pager_allocate(shmfd_pager_type, NULL, 0,
		    VM_PROT_DEFAULT, 0, ucred);
	}
	if (obj == NULL) {
		/*
		 * Swap reservation limits can cause object allocation
		 * to fail.
		 */
		return (NULL);
	}

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	if (largepage) {
		obj->un_pager.phys.phys_priv = shmfd;
		shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
	} else {
		obj->un_pager.swp.swp_priv = shmfd;
	}

	VM_OBJECT_WLOCK(obj);
	vm_object_set_flag(obj, OBJ_POSIXSHM);
	VM_OBJECT_WUNLOCK(obj);
	shmfd->shm_object = obj;
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{
	vm_object_t obj;

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		obj = shmfd->shm_object;
		VM_OBJECT_WLOCK(obj);
		if (shm_largepage(shmfd))
			obj->un_pager.phys.phys_priv = NULL;
		else
			obj->un_pager.swp.swp_priv = NULL;
		VM_OBJECT_WUNLOCK(obj);
		vm_object_deallocate(obj);
		free(shmfd, M_SHMFD);
	}
}
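
/*
 * Illustrative reference lifecycle for the routines above (a sketch;
 * the credential and mode are arbitrary):
 *
 *	shmfd = shm_alloc(ucred, 0600, false);	-- one reference
 *	shm_hold(shmfd);			-- extra reference
 *	shm_drop(shmfd);			-- drop extra reference
 *	shm_drop(shmfd);			-- final drop frees shmfd
 */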

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static void
shm_init(void *arg)
{
	char name[32];
	int i;

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
	shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
	    OBJT_SWAP);
	MPASS(shmfd_pager_type != -1);

	for (i = 1; i < MAXPAGESIZES; i++) {
		if (pagesizes[i] == 0)
			break;
#define	M	(1024 * 1024)
#define	G	(1024 * M)
		if (pagesizes[i] >= G)
			snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
		else if (pagesizes[i] >= M)
			snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
		else
			snprintf(name, sizeof(name), "%lu", pagesizes[i]);
#undef G
#undef M
		SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
		    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
		    "number of non-transient largepages allocated");
	}
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

/*
 * Remove all shared memory objects that belong to a prison.
 */
void
shm_remove_prison(struct prison *pr)
{
	struct shm_mapping *shmm, *tshmm;
	u_long i;

	sx_xlock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
			if (shmm->sm_shmfd->shm_object->cred &&
			    shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
				shm_doremove(shmm);
		}
	}
	sx_xunlock(&shm_dict_lock);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			shm_doremove(map);
			return (0);
		}
	}

	return (ENOENT);
}

static void
shm_doremove(struct shm_mapping *map)
{
	map->sm_shmfd->shm_path = NULL;
	LIST_REMOVE(map, sm_link);
	shm_drop(map->sm_shmfd);
	free(map->sm_path, M_SHMFD);
	free(map, M_SHMFD);
}

int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
	struct pwddesc *pdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	void *rl_cookie;
	Fnv32_t fnv;
	mode_t cmode;
	int error, fd, initial_seals;
	bool largepage;

	if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
	    SHM_LARGEPAGE)) != 0)
		return (EINVAL);

	initial_seals = F_SEAL_SEAL;
	if ((shmflags & SHM_ALLOW_SEALING) != 0)
		initial_seals &= ~F_SEAL_SEAL;

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	largepage = (shmflags & SHM_LARGEPAGE) != 0;
	if (largepage && !PMAP_HAS_LARGEPAGES)
		return (ENOTTY);

	/*
	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
	 * If the decision is made later to allow additional seals, care must be
	 * taken below to ensure that the seals are properly set if the shmfd
	 * already existed -- this currently assumes that only F_SEAL_SEAL can
	 * be set and doesn't take further precautions to ensure the validity of
	 * the seals being added with respect to current mappings.
	 */
	if ((initial_seals & ~F_SEAL_SEAL) != 0)
		return (EINVAL);

	if (userpath != SHM_ANON) {
		error = shm_copyin_path(td, userpath, &path);
		if (error != 0)
			return (error);

#ifdef CAPABILITY_MODE
		/*
		 * shm_open(2) is only allowed for anonymous objects.
		 */
		if (CAP_TRACING(td))
			ktrcapfail(CAPFAIL_NAMEI, path);
		if (IN_CAPABILITY_MODE(td)) {
			error = ECAPMODE;
			goto outnofp;
		}
#endif

		AUDIT_ARG_UPATH1_CANON(path);
	} else {
		path = NULL;
	}

	pdp = td->td_proc->p_pd;
	cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;

	/*
	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
	 * by POSIX.  We allow it to be unset here so that an in-kernel
	 * interface may be written as a thin layer around shm, optionally not
	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
	 * in sys_shm_open() to keep this implementation compliant.
	 */
	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
	if (error != 0)
		goto outnofp;

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			error = EINVAL;
			goto out;
		}
		shmfd = shm_alloc(td->td_ucred, cmode, largepage);
		if (shmfd == NULL) {
			error = ENOMEM;
			goto out;
		}
		shmfd->shm_seals = initial_seals;
		shmfd->shm_flags = shmflags;
	} else {
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode,
					    largepage);
					if (shmfd == NULL) {
						error = ENOMEM;
					} else {
						shmfd->shm_seals =
						    initial_seals;
						shmfd->shm_flags = shmflags;
						shm_insert(path, fnv, shmfd);
						path = NULL;
					}
#ifdef MAC
				}
#endif
			} else {
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new reference if
			 * requested and permitted.
			 */
			rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);

			/*
			 * kern_shm_open() likely shouldn't ever error out on
			 * trying to set a seal that already exists, unlike
			 * F_ADD_SEALS.  This would break terribly as
			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
			 * historical behavior where the underlying file could
			 * not be sealed.
			 */
			initial_seals &= ~shmfd->shm_seals;

			/*
			 * initial_seals cannot set additional seals if
			 * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is
			 * set, then we have already removed it from
			 * initial_seals.  This is currently redundant as we
			 * only allow setting F_SEAL_SEAL at creation time, but
			 * it's cheap to check and decreases the effort required
			 * to allow additional seals.
			 */
			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
			    initial_seals != 0)
				error = EPERM;
			else if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else if (shmflags != 0 && shmflags != shmfd->shm_flags)
				error = EINVAL;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					error = shm_dotruncate_locked(shmfd, 0,
					    rl_cookie);
				VM_OBJECT_WUNLOCK(shmfd->shm_object);
			}
			if (error == 0) {
				/*
				 * Currently we only allow F_SEAL_SEAL to be
				 * set initially.  As noted above, this would
				 * need to be reworked should that change.
				 */
				shmfd->shm_seals |= initial_seals;
				shm_hold(shmfd);
			}
			shm_rangelock_unlock(shmfd, rl_cookie);
		}
		sx_xunlock(&shm_dict_lock);

		if (error != 0)
			goto out;
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);
	free(path, M_SHMFD);

	return (0);

out:
	fdclose(td, fp, fd);
	fdrop(fp, td);
outnofp:
	free(path, M_SHMFD);

	return (error);
}

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
	    uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	error = shm_copyin_path(td, uap->path, &path);
	if (error != 0)
		return (error);

	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_SHMFD);

	return (error);
}

int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
	char *path_from = NULL, *path_to = NULL;
	Fnv32_t fnv_from, fnv_to;
	struct shmfd *fd_from;
	struct shmfd *fd_to;
	int error;
	int flags;

	flags = uap->flags;
	AUDIT_ARG_FFLAGS(flags);

	/*
	 * Make sure the user passed only valid flags.
	 * If you add a new flag, please add a new term here.
	 */
	if ((flags & ~(
	    SHM_RENAME_NOREPLACE |
	    SHM_RENAME_EXCHANGE
	    )) != 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * EXCHANGE and NOREPLACE don't quite make sense together.  Let's
	 * force the user to choose one or the other.
	 */
	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
	    (flags & SHM_RENAME_EXCHANGE) != 0) {
		error = EINVAL;
		goto out;
	}

	/* Renaming to or from anonymous makes no sense. */
	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
		error = EINVAL;
		goto out;
	}

	error = shm_copyin_path(td, uap->path_from, &path_from);
	if (error != 0)
		goto out;

	error = shm_copyin_path(td, uap->path_to, &path_to);
	if (error != 0)
		goto out;

	AUDIT_ARG_UPATH1_CANON(path_from);
	AUDIT_ARG_UPATH2_CANON(path_to);

	/* Rename with from/to equal is a no-op. */
	if (strcmp(path_from, path_to) == 0)
		goto out;

	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

	sx_xlock(&shm_dict_lock);

	fd_from = shm_lookup(path_from, fnv_from);
	if (fd_from == NULL) {
		error = ENOENT;
		goto out_locked;
	}

	fd_to = shm_lookup(path_to, fnv_to);
	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
		error = EEXIST;
		goto out_locked;
	}

	/*
	 * Unconditionally prevents shm_remove from invalidating the 'from'
	 * shm's state.
	 */
	shm_hold(fd_from);
	error = shm_remove(path_from, fnv_from, td->td_ucred);

	/*
	 * One of my assumptions failed if ENOENT (e.g. locking didn't
	 * protect us).
	 */
	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
	    path_from));
	if (error != 0) {
		shm_drop(fd_from);
		goto out_locked;
	}

	/*
	 * If we are exchanging, we need to ensure the shm_remove below
	 * doesn't invalidate the dest shm's state.
	 */
	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
		shm_hold(fd_to);

	/*
	 * NOTE: if path_to is not already in the hash, c'est la vie;
	 * it simply means we have nothing already at path_to to unlink.
	 * That is the ENOENT case.
	 *
	 * If we somehow don't have access to unlink this guy, but
	 * did for the shm at path_from, then relink the shm to path_from
	 * and abort with EACCES.
	 *
	 * All other errors: that is weird; let's relink and abort the
	 * operation.
	 */
	error = shm_remove(path_to, fnv_to, td->td_ucred);
	if (error != 0 && error != ENOENT) {
		shm_insert(path_from, fnv_from, fd_from);
		shm_drop(fd_from);
		/* Don't free path_from now, since the hash references it. */
		path_from = NULL;
		goto out_locked;
	}

	error = 0;

	shm_insert(path_to, fnv_to, fd_from);

	/* Don't free path_to now, since the hash references it. */
	path_to = NULL;

	/* We kept a ref when we removed, and incremented again in insert. */
	shm_drop(fd_from);
	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
	    fd_from->shm_refs));

	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
		shm_insert(path_from, fnv_from, fd_to);
		path_from = NULL;
		shm_drop(fd_to);
		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
		    fd_to->shm_refs));
	}

out_locked:
	sx_xunlock(&shm_dict_lock);

out:
	free(path_from, M_SHMFD);
	free(path_to, M_SHMFD);
	return (error);
}

static int
shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
    vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct vmspace *vms;
	vm_map_entry_t next_entry, prev_entry;
	vm_offset_t align, mask, maxaddr;
	int docow, error, rv, try;
	bool curmap;

	if (shmfd->shm_lp_psind == 0)
		return (EINVAL);

	/* MAP_PRIVATE is disabled. */
	if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
	    MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);

	vms = td->td_proc->p_vmspace;
	curmap = map == &vms->vm_map;
	if (curmap) {
		error = kern_mmap_racct_check(td, map, size);
		if (error != 0)
			return (error);
	}

	docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
	docow |= MAP_INHERIT_SHARE;
	if ((flags & MAP_NOCORE) != 0)
		docow |= MAP_DISABLE_COREDUMP;

	mask = pagesizes[shmfd->shm_lp_psind] - 1;
	if ((foff & mask) != 0)
		return (EINVAL);
	maxaddr = vm_map_max(map);
	if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
		maxaddr = MAP_32BIT_MAX_ADDR;
	if (size == 0 || (size & mask) != 0 ||
	    (*addr != 0 && ((*addr & mask) != 0 ||
	    *addr + size < *addr || *addr + size > maxaddr)))
		return (EINVAL);

	align = flags & MAP_ALIGNMENT_MASK;
	if (align == 0) {
		align = pagesizes[shmfd->shm_lp_psind];
	} else if (align == MAP_ALIGNED_SUPER) {
		/*
		 * MAP_ALIGNED_SUPER is only supported on superpage sizes,
		 * i.e., [1, VM_NRESERVLEVEL].  shmfd->shm_lp_psind < 1 is
		 * handled above.
		 */
		if (
#if VM_NRESERVLEVEL > 0
		    shmfd->shm_lp_psind > VM_NRESERVLEVEL
#else
		    shmfd->shm_lp_psind > 1
#endif
		    )
			return (EINVAL);
		align = pagesizes[shmfd->shm_lp_psind];
	} else {
		align >>= MAP_ALIGNMENT_SHIFT;
		align = 1ULL << align;
		/* Also handles overflow. */
		if (align < pagesizes[shmfd->shm_lp_psind])
			return (EINVAL);
	}

	vm_map_lock(map);
	if ((flags & MAP_FIXED) == 0) {
		try = 1;
		if (curmap && (*addr == 0 ||
		    (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    *addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))) {
			*addr = roundup2((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA),
			    pagesizes[shmfd->shm_lp_psind]);
		}
again:
		rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
		if (rv != KERN_SUCCESS) {
			if (try == 1) {
				try = 2;
				*addr = vm_map_min(map);
				if ((*addr & mask) != 0)
					*addr = (*addr + mask) & ~mask;
				goto again;
			}
			goto fail1;
		}
	} else if ((flags & MAP_EXCL) == 0) {
		rv = vm_map_delete(map, *addr, *addr + size);
		if (rv != KERN_SUCCESS)
			goto fail1;
	} else {
		error = ENOSPC;
		if (vm_map_lookup_entry(map, *addr, &prev_entry))
			goto fail;
		next_entry = vm_map_entry_succ(prev_entry);
		if (next_entry->start < *addr + size)
			goto fail;
	}

	rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
	    prot, max_prot, docow);
fail1:
	error = vm_mmap_to_errno(rv);
fail:
	vm_map_unlock(map);
	return (error);
}

static int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t max_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;
	bool writecnt;
	void *rl_cookie;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;

	/*
	 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
	 * mapping with a write seal applied.  Private mappings are always
	 * writeable.
	 */
	if ((flags & MAP_SHARED) == 0) {
		if ((max_maxprot & VM_PROT_WRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		writecnt = false;
	} else {
		if ((fp->f_flag & FWRITE) != 0 &&
		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
			maxprot |= VM_PROT_WRITE;

		/*
		 * Any mappings from a writable descriptor may be upgraded to
		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
		 * applied between the open and subsequent mmap(2).  We want to
		 * reject application of a write seal as long as any such
		 * mapping exists so that the seal cannot be trivially bypassed.
		 */
		writecnt = (maxprot & VM_PROT_WRITE) != 0;
		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
			error = EACCES;
			goto out;
		}
	}
	maxprot &= max_maxprot;

	/* See comment in vn_mmap(). */
	if (
#ifdef _LP64
	    objsize > OFF_MAX ||
#endif
	    foff > OFF_MAX - objsize) {
		error = EINVAL;
		goto out;
	}

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		goto out;
#endif

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	if (shm_largepage(shmfd)) {
		writecnt = false;
		error = shm_mmap_large(shmfd, map, addr, objsize, prot,
		    maxprot, flags, foff, td);
	} else {
		if (writecnt) {
			vm_pager_update_writecount(shmfd->shm_object, 0,
			    objsize);
		}
		error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
		    shmfd->shm_object, foff, writecnt, td);
	}
	if (error != 0) {
		if (writecnt)
			vm_pager_release_writecount(shmfd->shm_object, 0,
			    objsize);
		vm_object_deallocate(shmfd->shm_object);
	}
out:
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    VADMIN, active_cred);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

1768 static int
shm_chown(struct file * fp,uid_t uid,gid_t gid,struct ucred * active_cred,struct thread * td)1769 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1770     struct thread *td)
1771 {
1772 	struct shmfd *shmfd;
1773 	int error;
1774 
1775 	error = 0;
1776 	shmfd = fp->f_data;
1777 	mtx_lock(&shm_timestamp_lock);
1778 #ifdef MAC
1779 	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1780 	if (error != 0)
1781 		goto out;
1782 #endif
1783 	if (uid == (uid_t)-1)
1784 		uid = shmfd->shm_uid;
1785 	if (gid == (gid_t)-1)
1786                  gid = shmfd->shm_gid;
1787 	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1788 	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1789 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1790 		goto out;
1791 	shmfd->shm_uid = uid;
1792 	shmfd->shm_gid = gid;
1793 out:
1794 	mtx_unlock(&shm_timestamp_lock);
1795 	return (error);
1796 }
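
/*
 * Companion sketch for shm_chown(): fchown(2) on the same descriptor,
 * where an ID of -1 leaves that field unchanged, mirroring the
 * (uid_t)-1/(gid_t)-1 handling above.  'some_gid' is hypothetical.
 *
 *	#include <unistd.h>
 *	#include <err.h>
 *
 *	if (fchown(fd, (uid_t)-1, some_gid) == -1)
 *		err(1, "fchown");	// EPERM unless owner/member/privileged
 */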
1797 
1798 /*
1799  * Helper routines to allow the backing object of a shared memory file
1800  * descriptor to be mapped in the kernel.
1801  */
1802 int
1803 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1804 {
1805 	struct shmfd *shmfd;
1806 	vm_offset_t kva, ofs;
1807 	vm_object_t obj;
1808 	int rv;
1809 
1810 	if (fp->f_type != DTYPE_SHM)
1811 		return (EINVAL);
1812 	shmfd = fp->f_data;
1813 	obj = shmfd->shm_object;
1814 	VM_OBJECT_WLOCK(obj);
1815 	/*
1816 	 * XXXRW: This validation is probably insufficient, and subject to
1817 	 * sign errors.  It should be fixed.
1818 	 */
1819 	if (offset >= shmfd->shm_size ||
1820 	    offset + size > round_page(shmfd->shm_size)) {
1821 		VM_OBJECT_WUNLOCK(obj);
1822 		return (EINVAL);
1823 	}
1824 
1825 	shmfd->shm_kmappings++;
1826 	vm_object_reference_locked(obj);
1827 	VM_OBJECT_WUNLOCK(obj);
1828 
1829 	/* Map the object into the kernel_map and wire it. */
1830 	kva = vm_map_min(kernel_map);
1831 	ofs = offset & PAGE_MASK;
1832 	offset = trunc_page(offset);
1833 	size = round_page(size + ofs);
1834 	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1835 	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1836 	    VM_PROT_READ | VM_PROT_WRITE, 0);
1837 	if (rv == KERN_SUCCESS) {
1838 		rv = vm_map_wire(kernel_map, kva, kva + size,
1839 		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1840 		if (rv == KERN_SUCCESS) {
1841 			*memp = (void *)(kva + ofs);
1842 			return (0);
1843 		}
1844 		vm_map_remove(kernel_map, kva, kva + size);
1845 	} else
1846 		vm_object_deallocate(obj);
1847 
1848 	/* On failure, drop our mapping reference. */
1849 	VM_OBJECT_WLOCK(obj);
1850 	shmfd->shm_kmappings--;
1851 	VM_OBJECT_WUNLOCK(obj);
1852 
1853 	return (vm_mmap_to_errno(rv));
1854 }
1855 
1856 /*
1857  * We require the caller to unmap the entire entry.  This allows us to
1858  * safely decrement shm_kmappings when a mapping is removed.
1859  */
1860 int
1861 shm_unmap(struct file *fp, void *mem, size_t size)
1862 {
1863 	struct shmfd *shmfd;
1864 	vm_map_entry_t entry;
1865 	vm_offset_t kva, ofs;
1866 	vm_object_t obj;
1867 	vm_pindex_t pindex;
1868 	vm_prot_t prot;
1869 	boolean_t wired;
1870 	vm_map_t map;
1871 	int rv;
1872 
1873 	if (fp->f_type != DTYPE_SHM)
1874 		return (EINVAL);
1875 	shmfd = fp->f_data;
1876 	kva = (vm_offset_t)mem;
1877 	ofs = kva & PAGE_MASK;
1878 	kva = trunc_page(kva);
1879 	size = round_page(size + ofs);
1880 	map = kernel_map;
1881 	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1882 	    &obj, &pindex, &prot, &wired);
1883 	if (rv != KERN_SUCCESS)
1884 		return (EINVAL);
1885 	if (entry->start != kva || entry->end != kva + size) {
1886 		vm_map_lookup_done(map, entry);
1887 		return (EINVAL);
1888 	}
1889 	vm_map_lookup_done(map, entry);
1890 	if (obj != shmfd->shm_object)
1891 		return (EINVAL);
1892 	vm_map_remove(map, kva, kva + size);
1893 	VM_OBJECT_WLOCK(obj);
1894 	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1895 	shmfd->shm_kmappings--;
1896 	VM_OBJECT_WUNLOCK(obj);
1897 	return (0);
1898 }
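
/*
 * Minimal in-kernel usage sketch for the pair above (assumptions: 'fp' is
 * a held struct file of type DTYPE_SHM, e.g. obtained via fget(), and
 * 'off'/'len' lie within the object; error handling abbreviated).  The
 * same base and size must be passed to shm_unmap() so the entire map
 * entry is removed and shm_kmappings stays balanced.
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, off, &mem);
 *	if (error == 0) {
 *		// ... use the wired kernel mapping at 'mem' ...
 *		error = shm_unmap(fp, mem, len);
 *	}
 */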
1899 
1900 static int
1901 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1902 {
1903 	const char *path, *pr_path;
1904 	size_t pr_pathlen;
1905 	bool visible;
1906 
1907 	sx_assert(&shm_dict_lock, SA_LOCKED);
1908 	kif->kf_type = KF_TYPE_SHM;
1909 	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1910 	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1911 	if (shmfd->shm_path != NULL) {
1912 		path = shmfd->shm_path;
1913 		pr_path = curthread->td_ucred->cr_prison->pr_path;
1914 		if (strcmp(pr_path, "/") != 0) {
1915 			/* Return the jail-rooted pathname. */
1916 			pr_pathlen = strlen(pr_path);
1917 			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1918 			    path[pr_pathlen] == '/';
1919 			if (list && !visible)
1920 				return (EPERM);
1921 			if (visible)
1922 				path += pr_pathlen;
1923 		}
1924 		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1925 	}
1926 	return (0);
1927 }
1928 
1929 static int
1930 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1931     struct filedesc *fdp __unused)
1932 {
1933 	int res;
1934 
1935 	sx_slock(&shm_dict_lock);
1936 	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1937 	sx_sunlock(&shm_dict_lock);
1938 	return (res);
1939 }
1940 
1941 static int
1942 shm_add_seals(struct file *fp, int seals)
1943 {
1944 	struct shmfd *shmfd;
1945 	void *rl_cookie;
1946 	vm_ooffset_t writemappings;
1947 	int error, nseals;
1948 
1949 	error = 0;
1950 	shmfd = fp->f_data;
1951 	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1952 
1953 	/* Even already-set seals should result in EPERM. */
1954 	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1955 		error = EPERM;
1956 		goto out;
1957 	}
1958 	nseals = seals & ~shmfd->shm_seals;
1959 	if ((nseals & F_SEAL_WRITE) != 0) {
1960 		if (shm_largepage(shmfd)) {
1961 			error = ENOTSUP;
1962 			goto out;
1963 		}
1964 
1965 		/*
1966 		 * The rangelock above prevents writable mappings from being
1967 		 * added after we've started applying seals.  The RLOCK here
1968 		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
1969 		 * writemappings will be done without a rangelock.
1970 		 */
1971 		VM_OBJECT_RLOCK(shmfd->shm_object);
1972 		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1973 		VM_OBJECT_RUNLOCK(shmfd->shm_object);
1974 		/* kmappings are also writable */
1975 		if (writemappings > 0) {
1976 			error = EBUSY;
1977 			goto out;
1978 		}
1979 	}
1980 	shmfd->shm_seals |= nseals;
1981 out:
1982 	shm_rangelock_unlock(shmfd, rl_cookie);
1983 	return (error);
1984 }
1985 
1986 static int
1987 shm_get_seals(struct file *fp, int *seals)
1988 {
1989 	struct shmfd *shmfd;
1990 
1991 	shmfd = fp->f_data;
1992 	*seals = shmfd->shm_seals;
1993 	return (0);
1994 }
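
/*
 * Illustrative sealing sketch (not part of this file), assuming a libc
 * with memfd_create(3): F_ADD_SEALS lands in shm_add_seals() and
 * F_GET_SEALS in shm_get_seals().  Once F_SEAL_SEAL is applied, any
 * further F_ADD_SEALS fails with EPERM, even for already-set seals.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL);
 *	int seals = fcntl(fd, F_GET_SEALS);	// GROW | SHRINK | SEAL
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	// EPERM: seal set is sealed
 */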
1995 
1996 static int
1997 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1998 {
1999 	vm_object_t object;
2000 	vm_pindex_t pistart, pi, piend;
2001 	vm_ooffset_t off, len;
2002 	int startofs, endofs, end;
2003 	int error;
2004 
2005 	off = *offset;
2006 	len = *length;
2007 	KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
2008 	if (off + len > shmfd->shm_size)
2009 		len = shmfd->shm_size - off;
2010 	object = shmfd->shm_object;
2011 	startofs = off & PAGE_MASK;
2012 	endofs = (off + len) & PAGE_MASK;
2013 	pistart = OFF_TO_IDX(off);
2014 	piend = OFF_TO_IDX(off + len);
2015 	pi = OFF_TO_IDX(off + PAGE_MASK);
2016 	error = 0;
2017 
2018 	/* Handle the case when offset is on or beyond shm size. */
2019 	if ((off_t)len <= 0) {
2020 		*length = 0;
2021 		return (0);
2022 	}
2023 
2024 	VM_OBJECT_WLOCK(object);
2025 
2026 	if (startofs != 0) {
2027 		end = pistart != piend ? PAGE_SIZE : endofs;
2028 		error = shm_partial_page_invalidate(object, pistart, startofs,
2029 		    end);
2030 		if (error)
2031 			goto out;
2032 		off += end - startofs;
2033 		len -= end - startofs;
2034 	}
2035 
2036 	if (pi < piend) {
2037 		vm_object_page_remove(object, pi, piend, 0);
2038 		off += IDX_TO_OFF(piend - pi);
2039 		len -= IDX_TO_OFF(piend - pi);
2040 	}
2041 
2042 	if (endofs != 0 && pistart != piend) {
2043 		error = shm_partial_page_invalidate(object, piend, 0, endofs);
2044 		if (error)
2045 			goto out;
2046 		off += endofs;
2047 		len -= endofs;
2048 	}
2049 
2050 out:
2051 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
2052 	*offset = off;
2053 	*length = len;
2054 	return (error);
2055 }
2056 
2057 static int
2058 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
2059     struct ucred *active_cred, struct thread *td)
2060 {
2061 	void *rl_cookie;
2062 	struct shmfd *shmfd;
2063 	off_t off, len;
2064 	int error;
2065 
2066 	KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
2067 	KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
2068 	    ("shm_fspacectl: unsupported flags"));
2069 	KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
2070 	    ("shm_fspacectl: offset/length overflow or underflow"));
2071 	error = EINVAL;
2072 	shmfd = fp->f_data;
2073 	off = *offset;
2074 	len = *length;
2075 
2076 	rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
2077 	switch (cmd) {
2078 	case SPACECTL_DEALLOC:
2079 		if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2080 			error = EPERM;
2081 			break;
2082 		}
2083 		error = shm_deallocate(shmfd, &off, &len, flags);
2084 		*offset = off;
2085 		*length = len;
2086 		break;
2087 	default:
2088 		__assert_unreachable();
2089 	}
2090 	shm_rangelock_unlock(shmfd, rl_cookie);
2091 	return (error);
2092 }
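
/*
 * Illustrative userspace sketch: fspacectl(2) with SPACECTL_DEALLOC is the
 * route into shm_fspacectl()/shm_deallocate(), punching a hole by freeing
 * the backing pages.  The range values are hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <err.h>
 *
 *	struct spacectl_range rqsr = {
 *		.r_offset = 0,
 *		.r_len = 1024 * 1024,
 *	};
 *	if (fspacectl(fd, SPACECTL_DEALLOC, &rqsr, 0, NULL) == -1)
 *		err(1, "fspacectl");	// EPERM if F_SEAL_WRITE is set
 */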
2093 
2094 
2095 static int
2096 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2097 {
2098 	void *rl_cookie;
2099 	struct shmfd *shmfd;
2100 	size_t size;
2101 	int error;
2102 
2103 	/* This assumes that the caller already checked for overflow. */
2104 	error = 0;
2105 	shmfd = fp->f_data;
2106 	size = offset + len;
2107 
2108 	/*
2109 	 * Just grab the rangelock for the range that we may be attempting to
2110 	 * grow, rather than blocking read/write for regions we won't be
2111 	 * touching while this (potential) resize is in progress.  Other
2112 	 * attempts to resize the shmfd will have to take a write lock from 0 to
2113 	 * OFF_MAX, so this being potentially beyond the current usable range of
2114 	 * the shmfd is not necessarily a concern.  If other mechanisms are
2115 	 * added to grow a shmfd, this may need to be re-evaluated.
2116 	 */
2117 	rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
2118 	if (size > shmfd->shm_size)
2119 		error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2120 	shm_rangelock_unlock(shmfd, rl_cookie);
2121 	/* Translate to posix_fallocate(2) return value as needed. */
2122 	if (error == ENOMEM)
2123 		error = ENOSPC;
2124 	return (error);
2125 }
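
/*
 * Illustrative sketch: posix_fallocate(2) on a shm descriptor reaches
 * shm_fallocate() and can only grow the object.  It returns the error
 * number directly (it does not set errno), with ENOMEM translated to
 * ENOSPC as above.
 *
 *	#include <fcntl.h>
 *	#include <err.h>
 *
 *	int error = posix_fallocate(fd, 0, 2 * 1024 * 1024);
 *	if (error != 0)
 *		errc(1, error, "posix_fallocate");	// e.g. ENOSPC
 */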
2126 
2127 static int
2128 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2129 {
2130 	struct shm_mapping *shmm;
2131 	struct sbuf sb;
2132 	struct kinfo_file kif;
2133 	u_long i;
2134 	int error, error2;
2135 
2136 	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2137 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2138 	error = 0;
2139 	sx_slock(&shm_dict_lock);
2140 	for (i = 0; i < shm_hash + 1; i++) {
2141 		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2142 			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2143 			    &kif, true);
2144 			if (error == EPERM) {
2145 				error = 0;
2146 				continue;
2147 			}
2148 			if (error != 0)
2149 				break;
2150 			pack_kinfo(&kif);
2151 			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2152 			    0 : ENOMEM;
2153 			if (error != 0)
2154 				break;
2155 		}
2156 	}
2157 	sx_sunlock(&shm_dict_lock);
2158 	error2 = sbuf_finish(&sb);
2159 	sbuf_delete(&sb);
2160 	return (error != 0 ? error : error2);
2161 }
2162 
2163 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2164     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2165     NULL, 0, sysctl_posix_shm_list, "",
2166     "POSIX SHM list");
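
/*
 * Illustrative consumer sketch for this sysctl (posixshmcontrol(1) uses
 * the same interface): the output is a packed sequence of struct
 * kinfo_file records, each kf_structsize bytes long.  Races between the
 * two calls and allocation failures are ignored for brevity.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/user.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	size_t len = 0;
 *	sysctlbyname("kern.ipc.posix_shm_list", NULL, &len, NULL, 0);
 *	char *buf = malloc(len);
 *	sysctlbyname("kern.ipc.posix_shm_list", buf, &len, NULL, 0);
 *	for (char *p = buf; p < buf + len;) {
 *		struct kinfo_file *kif = (struct kinfo_file *)(void *)p;
 *		printf("%s\n", kif->kf_path);
 *		p += kif->kf_structsize;
 *	}
 */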
2167 
2168 int
2169 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2170     struct filecaps *caps)
2171 {
2172 
2173 	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2174 }
2175 
2176 /*
2177  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2178  * caller, and libc will enforce it for the traditional shm_open() call.  This
2179  * allows other consumers, like memfd_create(), to opt in to CLOEXEC.  This
2180  * interface also includes a 'name' argument that is currently unused, but could
2181  * potentially be exported later via some interface for debugging purposes.
2182  * From the kernel's perspective, it is optional.  Individual consumers like
2183  * memfd_create() may require it in order to be compatible with other systems
2184  * implementing the same function.
2185  */
2186 int
2187 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2188 {
2189 
2190 	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2191 	    uap->shmflags, NULL, uap->name));
2192 }
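
/*
 * Illustrative sketch of the CLOEXEC split described above: libc's
 * shm_open(3) requests close-on-exec on the caller's behalf, so its
 * descriptors always carry FD_CLOEXEC, while memfd_create(3) sets it
 * only when MFD_CLOEXEC is passed.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <assert.h>
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	assert(fcntl(fd, F_GETFD) & FD_CLOEXEC);	// enforced by libc
 */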
2193 
2194 int
2195 shm_get_path(struct vm_object *obj, char *path, size_t sz)
2196 {
2197 	struct shmfd *shmfd;
2198 	int error;
2199 
2200 	error = 0;
2201 	shmfd = NULL;
2202 	sx_slock(&shm_dict_lock);
2203 	VM_OBJECT_RLOCK(obj);
2204 	if ((obj->flags & OBJ_POSIXSHM) == 0) {
2205 		error = EINVAL;
2206 	} else {
2207 		if (obj->type == shmfd_pager_type)
2208 			shmfd = obj->un_pager.swp.swp_priv;
2209 		else if (obj->type == OBJT_PHYS)
2210 			shmfd = obj->un_pager.phys.phys_priv;
2211 		if (shmfd == NULL) {
2212 			error = ENXIO;
2213 		} else {
2214 			strlcpy(path, shmfd->shm_path == NULL ? "anon" :
2215 			    shmfd->shm_path, sz);
2216 		}
2217 	}
2218 	if (error != 0)
2219 		path[0] = '\0';
2220 	VM_OBJECT_RUNLOCK(obj);
2221 	sx_sunlock(&shm_dict_lock);
2222 	return (error);
2223 }
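
/*
 * Minimal in-kernel sketch (assumption: 'obj' is a referenced VM object
 * that may or may not belong to POSIX shm):
 *
 *	char path[MAXPATHLEN];
 *
 *	if (shm_get_path(obj, path, sizeof(path)) == 0)
 *		printf("backed by shm object %s\n", path);	// "anon" if unnamed
 */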
2224