xref: /freebsd/sys/kern/uipc_shm.c (revision bb1dc6cf9c3671c82318e22825d86d54c8d672cb)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5  * Copyright 2020 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by BAE Systems, the University of
9  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11  * Computing (TC) research program.
12  *
13  * Portions of this software were developed by Konstantin Belousov
14  * under sponsorship from the FreeBSD Foundation.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 /*
39  * Support for shared swap-backed anonymous memory objects via
40  * shm_open(2), shm_rename(2), and shm_unlink(2).
41  * While most of the implementation is here, vm_mmap.c contains
42  * mapping logic changes.
43  *
44  * posixshmcontrol(1) allows users to inspect the state of the memory
45  * objects.  The per-uid swap resource limit controls the total
46  * amount of memory that a user can consume for anonymous objects,
47  * including shared ones.
48  */
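
/*
 * A minimal userland sketch of the interface implemented below
 * (illustrative only; error handling is elided and the object name
 * "/myshm" is ours):
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	p[0] = 1;			// visible to all mappers
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/myshm");		// drop the name; the object is
 *					// destroyed with its last reference
 */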
49 
50 #include <sys/cdefs.h>
51 #include "opt_capsicum.h"
52 #include "opt_ktrace.h"
53 
54 #include <sys/param.h>
55 #include <sys/capsicum.h>
56 #include <sys/conf.h>
57 #include <sys/fcntl.h>
58 #include <sys/file.h>
59 #include <sys/filedesc.h>
60 #include <sys/filio.h>
61 #include <sys/fnv_hash.h>
62 #include <sys/kernel.h>
63 #include <sys/limits.h>
64 #include <sys/uio.h>
65 #include <sys/signal.h>
66 #include <sys/jail.h>
67 #include <sys/ktrace.h>
68 #include <sys/lock.h>
69 #include <sys/malloc.h>
70 #include <sys/mman.h>
71 #include <sys/mutex.h>
72 #include <sys/priv.h>
73 #include <sys/proc.h>
74 #include <sys/refcount.h>
75 #include <sys/resourcevar.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/stat.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/sysctl.h>
81 #include <sys/sysproto.h>
82 #include <sys/systm.h>
83 #include <sys/sx.h>
84 #include <sys/time.h>
85 #include <sys/vmmeter.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
88 #include <sys/user.h>
89 
90 #include <security/audit/audit.h>
91 #include <security/mac/mac_framework.h>
92 
93 #include <vm/vm.h>
94 #include <vm/vm_param.h>
95 #include <vm/pmap.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_kern.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_pageout.h>
102 #include <vm/vm_pager.h>
103 #include <vm/swap_pager.h>
104 
105 struct shm_mapping {
106 	char		*sm_path;
107 	Fnv32_t		sm_fnv;
108 	struct shmfd	*sm_shmfd;
109 	LIST_ENTRY(shm_mapping) sm_link;
110 };
111 
112 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
113 static LIST_HEAD(, shm_mapping) *shm_dictionary;
114 static struct sx shm_dict_lock;
115 static struct mtx shm_timestamp_lock;
116 static u_long shm_hash;
117 static struct unrhdr64 shm_ino_unr;
118 static dev_t shm_dev_ino;
119 
120 #define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])
121 
122 static void	shm_init(void *arg);
123 static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
124 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
125 static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
126 static void	shm_doremove(struct shm_mapping *map);
127 static int	shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
128     void *rl_cookie);
129 static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
130     void *rl_cookie);
131 static int	shm_copyin_path(struct thread *td, const char *userpath_in,
132     char **path_out);
133 static int	shm_deallocate(struct shmfd *shmfd, off_t *offset,
134     off_t *length, int flags);
135 
136 static fo_rdwr_t	shm_read;
137 static fo_rdwr_t	shm_write;
138 static fo_truncate_t	shm_truncate;
139 static fo_ioctl_t	shm_ioctl;
140 static fo_stat_t	shm_stat;
141 static fo_close_t	shm_close;
142 static fo_chmod_t	shm_chmod;
143 static fo_chown_t	shm_chown;
144 static fo_seek_t	shm_seek;
145 static fo_fill_kinfo_t	shm_fill_kinfo;
146 static fo_mmap_t	shm_mmap;
147 static fo_get_seals_t	shm_get_seals;
148 static fo_add_seals_t	shm_add_seals;
149 static fo_fallocate_t	shm_fallocate;
150 static fo_fspacectl_t	shm_fspacectl;
151 
152 /* File descriptor operations. */
153 const struct fileops shm_ops = {
154 	.fo_read = shm_read,
155 	.fo_write = shm_write,
156 	.fo_truncate = shm_truncate,
157 	.fo_ioctl = shm_ioctl,
158 	.fo_poll = invfo_poll,
159 	.fo_kqfilter = invfo_kqfilter,
160 	.fo_stat = shm_stat,
161 	.fo_close = shm_close,
162 	.fo_chmod = shm_chmod,
163 	.fo_chown = shm_chown,
164 	.fo_sendfile = vn_sendfile,
165 	.fo_seek = shm_seek,
166 	.fo_fill_kinfo = shm_fill_kinfo,
167 	.fo_mmap = shm_mmap,
168 	.fo_get_seals = shm_get_seals,
169 	.fo_add_seals = shm_add_seals,
170 	.fo_fallocate = shm_fallocate,
171 	.fo_fspacectl = shm_fspacectl,
172 	.fo_cmp = file_kcmp_generic,
173 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
174 };
175 
176 FEATURE(posix_shm, "POSIX shared memory");
177 
178 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
179     "");
180 
181 static int largepage_reclaim_tries = 1;
182 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
183     CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
184     "Number of contig reclaims before giving up for default alloc policy");
185 
186 #define	shm_rangelock_unlock(shmfd, cookie)				\
187 	rangelock_unlock(&(shmfd)->shm_rl, (cookie))
188 #define	shm_rangelock_rlock(shmfd, start, end)				\
189 	rangelock_rlock(&(shmfd)->shm_rl, (start), (end))
190 #define	shm_rangelock_tryrlock(shmfd, start, end)			\
191 	rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end))
192 #define	shm_rangelock_wlock(shmfd, start, end)				\
193 	rangelock_wlock(&(shmfd)->shm_rl, (start), (end))
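
/*
 * These wrappers pair up in the pattern used throughout this file,
 * e.g. (a sketch):
 *
 *	void *rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
 *	// ... operate on the whole object ...
 *	shm_rangelock_unlock(shmfd, rl_cookie);
 */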
194 
195 static int
196 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
197 {
198 	vm_page_t m;
199 	vm_pindex_t idx;
200 	size_t tlen;
201 	int error, offset, rv;
202 
203 	idx = OFF_TO_IDX(uio->uio_offset);
204 	offset = uio->uio_offset & PAGE_MASK;
205 	tlen = MIN(PAGE_SIZE - offset, len);
206 
207 	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
208 	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
209 	if (rv == VM_PAGER_OK)
210 		goto found;
211 
212 	/*
213 	 * Read I/O without either a corresponding resident page or swap
214 	 * page: use zero_region.  This is intended to avoid instantiating
215 	 * pages on read from a sparse region.
216 	 */
217 	VM_OBJECT_WLOCK(obj);
218 	m = vm_page_lookup(obj, idx);
219 	if (uio->uio_rw == UIO_READ && m == NULL &&
220 	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
221 		VM_OBJECT_WUNLOCK(obj);
222 		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
223 	}
224 
225 	/*
226 	 * Although the tmpfs vnode lock is held here, it is
227 	 * nonetheless safe to sleep waiting for a free page.  The
228 	 * pageout daemon does not need to acquire the tmpfs vnode
229 	 * lock to page out tobj's pages because tobj is an OBJT_SWAP
230 	 * type object.
231 	 */
232 	rv = vm_page_grab_valid(&m, obj, idx,
233 	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
234 	if (rv != VM_PAGER_OK) {
235 		VM_OBJECT_WUNLOCK(obj);
236 		if (bootverbose) {
237 			printf("uiomove_object: vm_obj %p idx %jd "
238 			    "pager error %d\n", obj, idx, rv);
239 		}
240 		return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
241 	}
242 	VM_OBJECT_WUNLOCK(obj);
243 
244 found:
245 	error = uiomove_fromphys(&m, offset, tlen, uio);
246 	if (uio->uio_rw == UIO_WRITE && error == 0)
247 		vm_page_set_dirty(m);
248 	vm_page_activate(m);
249 	vm_page_sunbusy(m);
250 
251 	return (error);
252 }
253 
254 int
255 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
256 {
257 	ssize_t resid;
258 	size_t len;
259 	int error;
260 
261 	error = 0;
262 	while ((resid = uio->uio_resid) > 0) {
263 		if (obj_size <= uio->uio_offset)
264 			break;
265 		len = MIN(obj_size - uio->uio_offset, resid);
266 		if (len == 0)
267 			break;
268 		error = uiomove_object_page(obj, len, uio);
269 		if (error != 0 || resid == uio->uio_resid)
270 			break;
271 	}
272 	return (error);
273 }
274 
275 static u_long count_largepages[MAXPAGESIZES];
276 
277 static int
278 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
279     int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
280 {
281 	vm_page_t m __diagused;
282 	int psind;
283 
284 	psind = object->un_pager.phys.data_val;
285 	if (psind == 0 || pidx >= object->size)
286 		return (VM_PAGER_FAIL);
287 	*first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
288 
289 	/*
290 	 * We only busy the first page in the superpage run.  It is
291 	 * useless to busy the whole run since we only remove full
292 	 * superpages, and it takes too long to busy e.g. the 512 * 512 ==
293 	 * 262144 pages constituting a 1G amd64 superpage.
294 	 */
295 	m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
296 	MPASS(m != NULL);
297 
298 	*last = *first + atop(pagesizes[psind]) - 1;
299 	return (VM_PAGER_OK);
300 }
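
/*
 * For example, with 4K base pages and a 2M large page
 * (pagesizes[psind] / PAGE_SIZE == 512), a fault at pidx 1000 yields
 * *first == 512 and *last == 1023, i.e. the whole superpage run
 * containing the faulting index.
 */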
301 
302 static boolean_t
303 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
304     int *before, int *after)
305 {
306 	int psind;
307 
308 	psind = object->un_pager.phys.data_val;
309 	if (psind == 0 || pindex >= object->size)
310 		return (FALSE);
311 	if (before != NULL) {
312 		*before = pindex - rounddown2(pindex, pagesizes[psind] /
313 		    PAGE_SIZE);
314 	}
315 	if (after != NULL) {
316 		*after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
317 		    pindex;
318 	}
319 	return (TRUE);
320 }
321 
322 static void
323 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
324     vm_ooffset_t foff, struct ucred *cred)
325 {
326 }
327 
328 static void
329 shm_largepage_phys_dtor(vm_object_t object)
330 {
331 	int psind;
332 
333 	psind = object->un_pager.phys.data_val;
334 	if (psind != 0) {
335 		atomic_subtract_long(&count_largepages[psind],
336 		    object->size / (pagesizes[psind] / PAGE_SIZE));
337 		vm_wire_sub(object->size);
338 	} else {
339 		KASSERT(object->size == 0,
340 		    ("largepage phys obj %p not initialized bit size %#jx > 0",
341 		    object, (uintmax_t)object->size));
342 	}
343 }
344 
345 static const struct phys_pager_ops shm_largepage_phys_ops = {
346 	.phys_pg_populate =	shm_largepage_phys_populate,
347 	.phys_pg_haspage =	shm_largepage_phys_haspage,
348 	.phys_pg_ctor =		shm_largepage_phys_ctor,
349 	.phys_pg_dtor =		shm_largepage_phys_dtor,
350 };
351 
352 bool
353 shm_largepage(struct shmfd *shmfd)
354 {
355 	return (shmfd->shm_object->type == OBJT_PHYS);
356 }
357 
358 static void
359 shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
360 {
361 	struct shmfd *shm;
362 	vm_size_t c;
363 
364 	swap_pager_freespace(obj, start, size, &c);
365 	if (c == 0)
366 		return;
367 
368 	shm = obj->un_pager.swp.swp_priv;
369 	if (shm == NULL)
370 		return;
371 	KASSERT(shm->shm_pages >= c,
372 	    ("shm %p pages %jd free %jd", shm,
373 	    (uintmax_t)shm->shm_pages, (uintmax_t)c));
374 	shm->shm_pages -= c;
375 }
376 
377 static void
378 shm_page_inserted(vm_object_t obj, vm_page_t m)
379 {
380 	struct shmfd *shm;
381 
382 	shm = obj->un_pager.swp.swp_priv;
383 	if (shm == NULL)
384 		return;
385 	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
386 		shm->shm_pages += 1;
387 }
388 
389 static void
390 shm_page_removed(vm_object_t obj, vm_page_t m)
391 {
392 	struct shmfd *shm;
393 
394 	shm = obj->un_pager.swp.swp_priv;
395 	if (shm == NULL)
396 		return;
397 	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
398 		KASSERT(shm->shm_pages >= 1,
399 		    ("shm %p pages %jd free 1", shm,
400 		    (uintmax_t)shm->shm_pages));
401 		shm->shm_pages -= 1;
402 	}
403 }
404 
405 static struct pagerops shm_swap_pager_ops = {
406 	.pgo_kvme_type = KVME_TYPE_SWAP,
407 	.pgo_freespace = shm_pager_freespace,
408 	.pgo_page_inserted = shm_page_inserted,
409 	.pgo_page_removed = shm_page_removed,
410 };
411 static int shmfd_pager_type = -1;
412 
413 static int
414 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
415 {
416 	struct shmfd *shmfd;
417 	off_t foffset;
418 	int error;
419 
420 	shmfd = fp->f_data;
421 	foffset = foffset_lock(fp, 0);
422 	error = 0;
423 	switch (whence) {
424 	case L_INCR:
425 		if (foffset < 0 ||
426 		    (offset > 0 && foffset > OFF_MAX - offset)) {
427 			error = EOVERFLOW;
428 			break;
429 		}
430 		offset += foffset;
431 		break;
432 	case L_XTND:
433 		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
434 			error = EOVERFLOW;
435 			break;
436 		}
437 		offset += shmfd->shm_size;
438 		break;
439 	case L_SET:
440 		break;
441 	default:
442 		error = EINVAL;
443 	}
444 	if (error == 0) {
445 		if (offset < 0 || offset > shmfd->shm_size)
446 			error = EINVAL;
447 		else
448 			td->td_uretoff.tdu_off = offset;
449 	}
450 	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
451 	return (error);
452 }
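
/*
 * Note that, unlike with a regular file, lseek(2) cannot move the
 * offset of a shared memory object beyond its current size; e.g.
 * (a sketch, assuming fd refers to a 4096-byte object):
 *
 *	off_t end = lseek(fd, 0, SEEK_END);	// end == 4096
 *	lseek(fd, 8192, SEEK_SET);		// fails with EINVAL
 */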
453 
454 static int
455 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
456     int flags, struct thread *td)
457 {
458 	struct shmfd *shmfd;
459 	void *rl_cookie;
460 	int error;
461 
462 	shmfd = fp->f_data;
463 #ifdef MAC
464 	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
465 	if (error)
466 		return (error);
467 #endif
468 	foffset_lock_uio(fp, uio, flags);
469 	rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
470 	    uio->uio_offset + uio->uio_resid);
471 	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
472 	shm_rangelock_unlock(shmfd, rl_cookie);
473 	foffset_unlock_uio(fp, uio, flags);
474 	return (error);
475 }
476 
477 static int
478 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
479     int flags, struct thread *td)
480 {
481 	struct shmfd *shmfd;
482 	void *rl_cookie;
483 	int error;
484 	off_t size;
485 
486 	shmfd = fp->f_data;
487 #ifdef MAC
488 	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
489 	if (error)
490 		return (error);
491 #endif
492 	if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
493 		return (EINVAL);
494 	foffset_lock_uio(fp, uio, flags);
495 	if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
496 		/*
497 		 * Overflow is only an error if we're supposed to expand on
498 		 * write.  Otherwise, we'll just truncate the write to the
499 		 * size of the file, which can only grow up to OFF_MAX.
500 		 */
501 		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
502 			foffset_unlock_uio(fp, uio, flags);
503 			return (EFBIG);
504 		}
505 
506 		size = shmfd->shm_size;
507 	} else {
508 		size = uio->uio_offset + uio->uio_resid;
509 	}
510 	if ((flags & FOF_OFFSET) == 0)
511 		rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
512 	else
513 		rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, size);
514 	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
515 		error = EPERM;
516 	} else {
517 		error = 0;
518 		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
519 		    size > shmfd->shm_size) {
520 			error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
521 		}
522 		if (error == 0)
523 			error = uiomove_object(shmfd->shm_object,
524 			    shmfd->shm_size, uio);
525 	}
526 	shm_rangelock_unlock(shmfd, rl_cookie);
527 	foffset_unlock_uio(fp, uio, flags);
528 	return (error);
529 }
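
/*
 * The SHM_GROW_ON_WRITE handling above is what lets, e.g.,
 * memfd_create(2) objects grow on write, while writes to an object
 * without the flag are truncated at its current size.  A sketch:
 *
 *	int fd = memfd_create("demo", 0);
 *	char c = 'x';
 *	pwrite(fd, &c, 1, 12345);	// extends the object to 12346 bytes
 */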
530 
531 static int
532 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
533     struct thread *td)
534 {
535 	struct shmfd *shmfd;
536 #ifdef MAC
537 	int error;
538 #endif
539 
540 	shmfd = fp->f_data;
541 #ifdef MAC
542 	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
543 	if (error)
544 		return (error);
545 #endif
546 	return (shm_dotruncate(shmfd, length));
547 }
548 
549 int
550 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
551     struct thread *td)
552 {
553 	struct shmfd *shmfd;
554 	struct shm_largepage_conf *conf;
555 	void *rl_cookie;
556 
557 	shmfd = fp->f_data;
558 	switch (com) {
559 	case FIONBIO:
560 	case FIOASYNC:
561 		/*
562 		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
563 		 * just like it would on an unlinked regular file.
564 		 */
565 		return (0);
566 	case FIOSSHMLPGCNF:
567 		if (!shm_largepage(shmfd))
568 			return (ENOTTY);
569 		conf = data;
570 		if (shmfd->shm_lp_psind != 0 &&
571 		    conf->psind != shmfd->shm_lp_psind)
572 			return (EINVAL);
573 		if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
574 		    pagesizes[conf->psind] == 0)
575 			return (EINVAL);
576 		if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
577 		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
578 		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
579 			return (EINVAL);
580 
581 		rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
582 		shmfd->shm_lp_psind = conf->psind;
583 		shmfd->shm_lp_alloc_policy = conf->alloc_policy;
584 		shmfd->shm_object->un_pager.phys.data_val = conf->psind;
585 		shm_rangelock_unlock(shmfd, rl_cookie);
586 		return (0);
587 	case FIOGSHMLPGCNF:
588 		if (!shm_largepage(shmfd))
589 			return (ENOTTY);
590 		conf = data;
591 		rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
592 		conf->psind = shmfd->shm_lp_psind;
593 		conf->alloc_policy = shmfd->shm_lp_alloc_policy;
594 		shm_rangelock_unlock(shmfd, rl_cookie);
595 		return (0);
596 	default:
597 		return (ENOTTY);
598 	}
599 }
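
/*
 * A sketch of driving these ioctls from userland (assumes fd was
 * created with SHM_LARGEPAGE, e.g. by shm_create_largepage(2), and
 * that psind 1 names a supported page size such as 2M on amd64):
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);	// configure
 *	ioctl(fd, FIOGSHMLPGCNF, &conf);	// read back
 */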
600 
601 static int
602 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
603 {
604 	struct shmfd *shmfd;
605 #ifdef MAC
606 	int error;
607 #endif
608 
609 	shmfd = fp->f_data;
610 
611 #ifdef MAC
612 	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
613 	if (error)
614 		return (error);
615 #endif
616 
617 	/*
618 	 * Attempt to return sane-ish values for fstat() on a memory file
619 	 * descriptor.
620 	 */
621 	bzero(sb, sizeof(*sb));
622 	sb->st_blksize = PAGE_SIZE;
623 	sb->st_size = shmfd->shm_size;
624 	mtx_lock(&shm_timestamp_lock);
625 	sb->st_atim = shmfd->shm_atime;
626 	sb->st_ctim = shmfd->shm_ctime;
627 	sb->st_mtim = shmfd->shm_mtime;
628 	sb->st_birthtim = shmfd->shm_birthtime;
629 	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
630 	sb->st_uid = shmfd->shm_uid;
631 	sb->st_gid = shmfd->shm_gid;
632 	mtx_unlock(&shm_timestamp_lock);
633 	sb->st_dev = shm_dev_ino;
634 	sb->st_ino = shmfd->shm_ino;
635 	sb->st_nlink = shmfd->shm_object->ref_count;
636 	if (shm_largepage(shmfd)) {
637 		sb->st_blocks = shmfd->shm_object->size /
638 		    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
639 	} else {
640 		sb->st_blocks = shmfd->shm_pages;
641 	}
642 
643 	return (0);
644 }
645 
646 static int
647 shm_close(struct file *fp, struct thread *td)
648 {
649 	struct shmfd *shmfd;
650 
651 	shmfd = fp->f_data;
652 	fp->f_data = NULL;
653 	shm_drop(shmfd);
654 
655 	return (0);
656 }
657 
658 static int
659 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out) {
660 	int error;
661 	char *path;
662 	const char *pr_path;
663 	size_t pr_pathlen;
664 
665 	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
666 	pr_path = td->td_ucred->cr_prison->pr_path;
667 
668 	/* Construct a full pathname for jailed callers. */
669 	pr_pathlen = strcmp(pr_path, "/") ==
670 	    0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
671 	error = copyinstr(userpath_in, path + pr_pathlen,
672 	    MAXPATHLEN - pr_pathlen, NULL);
673 	if (error != 0)
674 		goto out;
675 
676 #ifdef KTRACE
677 	if (KTRPOINT(curthread, KTR_NAMEI))
678 		ktrnamei(path);
679 #endif
680 
681 	/* Require paths to start with a '/' character. */
682 	if (path[pr_pathlen] != '/') {
683 		error = EINVAL;
684 		goto out;
685 	}
686 
687 	*path_out = path;
688 
689 out:
690 	if (error != 0)
691 		free(path, M_SHMFD);
692 
693 	return (error);
694 }
695 
696 static int
697 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
698     int end)
699 {
700 	int error;
701 
702 	error = vm_page_grab_zero_partial(object, idx, base, end);
703 	if (error == EIO)
704 		VM_OBJECT_WUNLOCK(object);
705 	return (error);
706 }
707 
708 static int
709 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
710 {
711 	vm_object_t object;
712 	vm_pindex_t nobjsize;
713 	vm_ooffset_t delta;
714 	int base, error;
715 
716 	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
717 	object = shmfd->shm_object;
718 	VM_OBJECT_ASSERT_WLOCKED(object);
719 	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
720 	if (length == shmfd->shm_size)
721 		return (0);
722 	nobjsize = OFF_TO_IDX(length + PAGE_MASK);
723 
724 	/* Are we shrinking?  If so, trim the end. */
725 	if (length < shmfd->shm_size) {
726 		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
727 			return (EPERM);
728 
729 		/*
730 		 * Disallow any requests to shrink the size if this
731 		 * object is mapped into the kernel.
732 		 */
733 		if (shmfd->shm_kmappings > 0)
734 			return (EBUSY);
735 
736 		/*
737 		 * Zero the truncated part of the last page.
738 		 */
739 		base = length & PAGE_MASK;
740 		if (base != 0) {
741 			error = shm_partial_page_invalidate(object,
742 			    OFF_TO_IDX(length), base, PAGE_SIZE);
743 			if (error)
744 				return (error);
745 		}
746 		delta = IDX_TO_OFF(object->size - nobjsize);
747 
748 		if (nobjsize < object->size)
749 			vm_object_page_remove(object, nobjsize, object->size,
750 			    0);
751 
752 		/* Free the swap accounted for shm */
753 		swap_release_by_cred(delta, object->cred);
754 		object->charge -= delta;
755 	} else {
756 		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
757 			return (EPERM);
758 
759 		/* Try to reserve additional swap space. */
760 		delta = IDX_TO_OFF(nobjsize - object->size);
761 		if (!swap_reserve_by_cred(delta, object->cred))
762 			return (ENOMEM);
763 		object->charge += delta;
764 	}
765 	shmfd->shm_size = length;
766 	mtx_lock(&shm_timestamp_lock);
767 	vfs_timestamp(&shmfd->shm_ctime);
768 	shmfd->shm_mtime = shmfd->shm_ctime;
769 	mtx_unlock(&shm_timestamp_lock);
770 	object->size = nobjsize;
771 	return (0);
772 }
773 
774 static int
775 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
776 {
777 	vm_object_t object;
778 	vm_page_t m;
779 	vm_pindex_t newobjsz;
780 	vm_pindex_t oldobjsz __unused;
781 	int aflags, error, i, psind, try;
782 
783 	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
784 	object = shmfd->shm_object;
785 	VM_OBJECT_ASSERT_WLOCKED(object);
786 	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
787 
788 	oldobjsz = object->size;
789 	newobjsz = OFF_TO_IDX(length);
790 	if (length == shmfd->shm_size)
791 		return (0);
792 	psind = shmfd->shm_lp_psind;
793 	if (psind == 0 && length != 0)
794 		return (EINVAL);
795 	if ((length & (pagesizes[psind] - 1)) != 0)
796 		return (EINVAL);
797 
798 	if (length < shmfd->shm_size) {
799 		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
800 			return (EPERM);
801 		if (shmfd->shm_kmappings > 0)
802 			return (EBUSY);
803 		return (ENOTSUP);	/* Pages are unmanaged. */
804 #if 0
805 		vm_object_page_remove(object, newobjsz, oldobjsz, 0);
806 		object->size = newobjsz;
807 		shmfd->shm_size = length;
808 		return (0);
809 #endif
810 	}
811 
812 	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
813 		return (EPERM);
814 
815 	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
816 	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
817 		aflags |= VM_ALLOC_WAITFAIL;
818 	try = 0;
819 
820 	/*
821 	 * Extend the shmfd and object, keeping all already fully
822 	 * allocated large pages intact even on error, because the
823 	 * dropped object lock might have allowed them to be mapped.
824 	 */
825 	while (object->size < newobjsz) {
826 		m = vm_page_alloc_contig(object, object->size, aflags,
827 		    pagesizes[psind] / PAGE_SIZE, 0, ~0,
828 		    pagesizes[psind], 0,
829 		    VM_MEMATTR_DEFAULT);
830 		if (m == NULL) {
831 			VM_OBJECT_WUNLOCK(object);
832 			if (shmfd->shm_lp_alloc_policy ==
833 			    SHM_LARGEPAGE_ALLOC_NOWAIT ||
834 			    (shmfd->shm_lp_alloc_policy ==
835 			    SHM_LARGEPAGE_ALLOC_DEFAULT &&
836 			    try >= largepage_reclaim_tries)) {
837 				VM_OBJECT_WLOCK(object);
838 				return (ENOMEM);
839 			}
840 			error = vm_page_reclaim_contig(aflags,
841 			    pagesizes[psind] / PAGE_SIZE, 0, ~0,
842 			    pagesizes[psind], 0);
843 			if (error == ENOMEM)
844 				error = vm_wait_intr(object);
845 			if (error != 0) {
846 				VM_OBJECT_WLOCK(object);
847 				return (error);
848 			}
849 			try++;
850 			VM_OBJECT_WLOCK(object);
851 			continue;
852 		}
853 		try = 0;
854 		for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
855 			if ((m[i].flags & PG_ZERO) == 0)
856 				pmap_zero_page(&m[i]);
857 			vm_page_valid(&m[i]);
858 			vm_page_xunbusy(&m[i]);
859 		}
860 		object->size += OFF_TO_IDX(pagesizes[psind]);
861 		shmfd->shm_size += pagesizes[psind];
862 		atomic_add_long(&count_largepages[psind], 1);
863 		vm_wire_add(atop(pagesizes[psind]));
864 	}
865 	return (0);
866 }
867 
868 static int
869 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
870 {
871 	int error;
872 
873 	VM_OBJECT_WLOCK(shmfd->shm_object);
874 	error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
875 	    length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
876 	    rl_cookie);
877 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
878 	return (error);
879 }
880 
881 int
882 shm_dotruncate(struct shmfd *shmfd, off_t length)
883 {
884 	void *rl_cookie;
885 	int error;
886 
887 	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
888 	error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
889 	shm_rangelock_unlock(shmfd, rl_cookie);
890 	return (error);
891 }
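
/*
 * From userland this path is reached via ftruncate(2); combined with
 * seals it behaves as, e.g. (a sketch):
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 1024 * 1024);	// grow: reserves swap for 1M
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK);
 *	ftruncate(fd, 4096);		// shrink now fails with EPERM
 */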
892 
893 /*
894  * shmfd object management including creation and reference counting
895  * routines.
896  */
897 struct shmfd *
898 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
899 {
900 	struct shmfd *shmfd;
901 	vm_object_t obj;
902 
903 	if (largepage) {
904 		obj = phys_pager_allocate(NULL, &shm_largepage_phys_ops,
905 		    NULL, 0, VM_PROT_DEFAULT, 0, ucred);
906 	} else {
907 		obj = vm_pager_allocate(shmfd_pager_type, NULL, 0,
908 		    VM_PROT_DEFAULT, 0, ucred);
909 	}
910 	if (obj == NULL) {
911 		/*
912 		 * swap reservation limits can cause object allocation
913 		 * to fail.
914 		 */
915 		return (NULL);
916 	}
917 
918 	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
919 	shmfd->shm_uid = ucred->cr_uid;
920 	shmfd->shm_gid = ucred->cr_gid;
921 	shmfd->shm_mode = mode;
922 	if (largepage) {
923 		obj->un_pager.phys.phys_priv = shmfd;
924 		shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
925 	} else {
926 		obj->un_pager.swp.swp_priv = shmfd;
927 	}
928 
929 	VM_OBJECT_WLOCK(obj);
930 	vm_object_set_flag(obj, OBJ_POSIXSHM);
931 	VM_OBJECT_WUNLOCK(obj);
932 	shmfd->shm_object = obj;
933 	vfs_timestamp(&shmfd->shm_birthtime);
934 	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
935 	    shmfd->shm_birthtime;
936 	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
937 	refcount_init(&shmfd->shm_refs, 1);
938 	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
939 	rangelock_init(&shmfd->shm_rl);
940 #ifdef MAC
941 	mac_posixshm_init(shmfd);
942 	mac_posixshm_create(ucred, shmfd);
943 #endif
944 
945 	return (shmfd);
946 }
947 
948 struct shmfd *
949 shm_hold(struct shmfd *shmfd)
950 {
951 
952 	refcount_acquire(&shmfd->shm_refs);
953 	return (shmfd);
954 }
955 
956 void
957 shm_drop(struct shmfd *shmfd)
958 {
959 	vm_object_t obj;
960 
961 	if (refcount_release(&shmfd->shm_refs)) {
962 #ifdef MAC
963 		mac_posixshm_destroy(shmfd);
964 #endif
965 		rangelock_destroy(&shmfd->shm_rl);
966 		mtx_destroy(&shmfd->shm_mtx);
967 		obj = shmfd->shm_object;
968 		VM_OBJECT_WLOCK(obj);
969 		if (shm_largepage(shmfd))
970 			obj->un_pager.phys.phys_priv = NULL;
971 		else
972 			obj->un_pager.swp.swp_priv = NULL;
973 		VM_OBJECT_WUNLOCK(obj);
974 		vm_object_deallocate(obj);
975 		free(shmfd, M_SHMFD);
976 	}
977 }
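
/*
 * Callers that need the shmfd to outlive their current reference take
 * a hold first; e.g. the pattern used by shm_insert() and
 * sys_shm_rename():
 *
 *	shm_hold(shmfd);
 *	// ... shmfd stays valid here ...
 *	shm_drop(shmfd);	// the final drop frees the object
 */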
978 
979 /*
980  * Determine if the credentials have sufficient permissions for a
981  * specified combination of FREAD and FWRITE.
982  */
983 int
984 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
985 {
986 	accmode_t accmode;
987 	int error;
988 
989 	accmode = 0;
990 	if (flags & FREAD)
991 		accmode |= VREAD;
992 	if (flags & FWRITE)
993 		accmode |= VWRITE;
994 	mtx_lock(&shm_timestamp_lock);
995 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
996 	    accmode, ucred);
997 	mtx_unlock(&shm_timestamp_lock);
998 	return (error);
999 }
1000 
1001 static void
1002 shm_init(void *arg)
1003 {
1004 	char name[32];
1005 	int i;
1006 
1007 	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
1008 	sx_init(&shm_dict_lock, "shm dictionary");
1009 	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
1010 	new_unrhdr64(&shm_ino_unr, 1);
1011 	shm_dev_ino = devfs_alloc_cdp_inode();
1012 	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
1013 	shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
1014 	    OBJT_SWAP);
1015 	MPASS(shmfd_pager_type != -1);
1016 
1017 	for (i = 1; i < MAXPAGESIZES; i++) {
1018 		if (pagesizes[i] == 0)
1019 			break;
1020 #define	M	(1024 * 1024)
1021 #define	G	(1024 * M)
1022 		if (pagesizes[i] >= G)
1023 			snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
1024 		else if (pagesizes[i] >= M)
1025 			snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
1026 		else
1027 			snprintf(name, sizeof(name), "%lu", pagesizes[i]);
1028 #undef G
1029 #undef M
1030 		SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
1031 		    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
1032 		    "number of non-transient largepages allocated");
1033 	}
1034 }
1035 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
1036 
1037 /*
1038  * Remove all shared memory objects that belong to a prison.
1039  */
1040 void
1041 shm_remove_prison(struct prison *pr)
1042 {
1043 	struct shm_mapping *shmm, *tshmm;
1044 	u_long i;
1045 
1046 	sx_xlock(&shm_dict_lock);
1047 	for (i = 0; i < shm_hash + 1; i++) {
1048 		LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
1049 			if (shmm->sm_shmfd->shm_object->cred &&
1050 			    shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
1051 				shm_doremove(shmm);
1052 		}
1053 	}
1054 	sx_xunlock(&shm_dict_lock);
1055 }
1056 
1057 /*
1058  * Dictionary management.  We maintain an in-kernel dictionary to map
1059  * paths to shmfd objects.  We use the FNV hash on the path to store
1060  * the mappings in a hash table.
1061  */
1062 static struct shmfd *
1063 shm_lookup(char *path, Fnv32_t fnv)
1064 {
1065 	struct shm_mapping *map;
1066 
1067 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1068 		if (map->sm_fnv != fnv)
1069 			continue;
1070 		if (strcmp(map->sm_path, path) == 0)
1071 			return (map->sm_shmfd);
1072 	}
1073 
1074 	return (NULL);
1075 }
1076 
1077 static void
1078 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1079 {
1080 	struct shm_mapping *map;
1081 
1082 	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1083 	map->sm_path = path;
1084 	map->sm_fnv = fnv;
1085 	map->sm_shmfd = shm_hold(shmfd);
1086 	shmfd->shm_path = path;
1087 	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1088 }
1089 
1090 static int
1091 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1092 {
1093 	struct shm_mapping *map;
1094 	int error;
1095 
1096 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1097 		if (map->sm_fnv != fnv)
1098 			continue;
1099 		if (strcmp(map->sm_path, path) == 0) {
1100 #ifdef MAC
1101 			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1102 			if (error)
1103 				return (error);
1104 #endif
1105 			error = shm_access(map->sm_shmfd, ucred,
1106 			    FREAD | FWRITE);
1107 			if (error)
1108 				return (error);
1109 			shm_doremove(map);
1110 			return (0);
1111 		}
1112 	}
1113 
1114 	return (ENOENT);
1115 }
1116 
1117 static void
1118 shm_doremove(struct shm_mapping *map)
1119 {
1120 	map->sm_shmfd->shm_path = NULL;
1121 	LIST_REMOVE(map, sm_link);
1122 	shm_drop(map->sm_shmfd);
1123 	free(map->sm_path, M_SHMFD);
1124 	free(map, M_SHMFD);
1125 }
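
/*
 * Lookups hash the path once and then probe under the dictionary
 * lock; e.g. the pattern kern_shm_open2() below follows:
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	// ... create or reference the object ...
 *	sx_xunlock(&shm_dict_lock);
 */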
1126 
1127 int
1128 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1129     int shmflags, struct filecaps *fcaps, const char *name __unused)
1130 {
1131 	struct pwddesc *pdp;
1132 	struct shmfd *shmfd;
1133 	struct file *fp;
1134 	char *path;
1135 	void *rl_cookie;
1136 	Fnv32_t fnv;
1137 	mode_t cmode;
1138 	int error, fd, initial_seals;
1139 	bool largepage;
1140 
1141 	if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1142 	    SHM_LARGEPAGE)) != 0)
1143 		return (EINVAL);
1144 
1145 	initial_seals = F_SEAL_SEAL;
1146 	if ((shmflags & SHM_ALLOW_SEALING) != 0)
1147 		initial_seals &= ~F_SEAL_SEAL;
1148 
1149 	AUDIT_ARG_FFLAGS(flags);
1150 	AUDIT_ARG_MODE(mode);
1151 
1152 	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1153 		return (EINVAL);
1154 
1155 	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1156 		return (EINVAL);
1157 
1158 	largepage = (shmflags & SHM_LARGEPAGE) != 0;
1159 	if (largepage && !PMAP_HAS_LARGEPAGES)
1160 		return (ENOTTY);
1161 
1162 	/*
1163 	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1164 	 * If the decision is made later to allow additional seals, care must be
1165 	 * taken below to ensure that the seals are properly set if the shmfd
1166 	 * already existed -- this currently assumes that only F_SEAL_SEAL can
1167 	 * be set and doesn't take further precautions to ensure the validity of
1168 	 * the seals being added with respect to current mappings.
1169 	 */
1170 	if ((initial_seals & ~F_SEAL_SEAL) != 0)
1171 		return (EINVAL);
1172 
1173 	if (userpath != SHM_ANON) {
1174 		error = shm_copyin_path(td, userpath, &path);
1175 		if (error != 0)
1176 			return (error);
1177 
1178 #ifdef CAPABILITY_MODE
1179 		/*
1180 		 * shm_open(2) is only allowed for anonymous objects.
1181 		 */
1182 		if (CAP_TRACING(td))
1183 			ktrcapfail(CAPFAIL_NAMEI, path);
1184 		if (IN_CAPABILITY_MODE(td)) {
1185 			error = ECAPMODE;
1186 			goto outnofp;
1187 		}
1188 #endif
1189 
1190 		AUDIT_ARG_UPATH1_CANON(path);
1191 	} else {
1192 		path = NULL;
1193 	}
1194 
1195 	pdp = td->td_proc->p_pd;
1196 	cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1197 
1198 	/*
1199 	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
1200 	 * by POSIX.  We allow it to be unset here so that an in-kernel
1201 	 * interface may be written as a thin layer around shm, optionally not
1202 	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
1203 	 * in sys_shm_open() to keep this implementation compliant.
1204 	 */
1205 	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1206 	if (error != 0)
1207 		goto outnofp;
1208 
1209 	/* A SHM_ANON path pointer creates an anonymous object. */
1210 	if (userpath == SHM_ANON) {
1211 		/* A read-only anonymous object is pointless. */
1212 		if ((flags & O_ACCMODE) == O_RDONLY) {
1213 			error = EINVAL;
1214 			goto out;
1215 		}
1216 		shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1217 		if (shmfd == NULL) {
1218 			error = ENOMEM;
1219 			goto out;
1220 		}
1221 		shmfd->shm_seals = initial_seals;
1222 		shmfd->shm_flags = shmflags;
1223 	} else {
1224 		fnv = fnv_32_str(path, FNV1_32_INIT);
1225 		sx_xlock(&shm_dict_lock);
1226 		shmfd = shm_lookup(path, fnv);
1227 		if (shmfd == NULL) {
1228 			/* Object does not yet exist, create it if requested. */
1229 			if (flags & O_CREAT) {
1230 #ifdef MAC
1231 				error = mac_posixshm_check_create(td->td_ucred,
1232 				    path);
1233 				if (error == 0) {
1234 #endif
1235 					shmfd = shm_alloc(td->td_ucred, cmode,
1236 					    largepage);
1237 					if (shmfd == NULL) {
1238 						error = ENOMEM;
1239 					} else {
1240 						shmfd->shm_seals =
1241 						    initial_seals;
1242 						shmfd->shm_flags = shmflags;
1243 						shm_insert(path, fnv, shmfd);
1244 						path = NULL;
1245 					}
1246 #ifdef MAC
1247 				}
1248 #endif
1249 			} else {
1250 				error = ENOENT;
1251 			}
1252 		} else {
1253 			/*
1254 			 * Object already exists, obtain a new reference if
1255 			 * requested and permitted.
1256 			 */
1257 			rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1258 
1259 			/*
1260 			 * kern_shm_open() likely shouldn't ever error out on
1261 			 * trying to set a seal that already exists, unlike
1262 			 * F_ADD_SEALS.  This would break terribly as
1263 			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
1264 			 * historical behavior where the underlying file could
1265 			 * not be sealed.
1266 			 */
1267 			initial_seals &= ~shmfd->shm_seals;
1268 
1269 			/*
1270 			 * initial_seals can't set additional seals if
1271 			 * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is set,
1272 			 * then we've already removed that one from
1273 			 * initial_seals.  This is currently redundant as we
1274 			 * only allow setting F_SEAL_SEAL at creation time, but
1275 			 * it's cheap to check and decreases the effort required
1276 			 * to allow additional seals.
1277 			 */
1278 			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1279 			    initial_seals != 0)
1280 				error = EPERM;
1281 			else if ((flags & (O_CREAT | O_EXCL)) ==
1282 			    (O_CREAT | O_EXCL))
1283 				error = EEXIST;
1284 			else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1285 				error = EINVAL;
1286 			else {
1287 #ifdef MAC
1288 				error = mac_posixshm_check_open(td->td_ucred,
1289 				    shmfd, FFLAGS(flags & O_ACCMODE));
1290 				if (error == 0)
1291 #endif
1292 				error = shm_access(shmfd, td->td_ucred,
1293 				    FFLAGS(flags & O_ACCMODE));
1294 			}
1295 
1296 			/*
1297 			 * Truncate the file back to zero length if
1298 			 * O_TRUNC was specified and the object was
1299 			 * opened with read/write.
1300 			 */
1301 			if (error == 0 &&
1302 			    (flags & (O_ACCMODE | O_TRUNC)) ==
1303 			    (O_RDWR | O_TRUNC)) {
1304 				VM_OBJECT_WLOCK(shmfd->shm_object);
1305 #ifdef MAC
1306 				error = mac_posixshm_check_truncate(
1307 					td->td_ucred, fp->f_cred, shmfd);
1308 				if (error == 0)
1309 #endif
1310 					error = shm_dotruncate_locked(shmfd, 0,
1311 					    rl_cookie);
1312 				VM_OBJECT_WUNLOCK(shmfd->shm_object);
1313 			}
1314 			if (error == 0) {
1315 				/*
1316 				 * Currently we only allow F_SEAL_SEAL to be
1317 				 * set initially.  As noted above, this would
1318 				 * need to be reworked should that change.
1319 				 */
1320 				shmfd->shm_seals |= initial_seals;
1321 				shm_hold(shmfd);
1322 			}
1323 			shm_rangelock_unlock(shmfd, rl_cookie);
1324 		}
1325 		sx_xunlock(&shm_dict_lock);
1326 
1327 		if (error != 0)
1328 			goto out;
1329 	}
1330 
1331 	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1332 
1333 	td->td_retval[0] = fd;
1334 	fdrop(fp, td);
1335 	free(path, M_SHMFD);
1336 
1337 	return (0);
1338 
1339 out:
1340 	fdclose(td, fp, fd);
1341 	fdrop(fp, td);
1342 outnofp:
1343 	free(path, M_SHMFD);
1344 
1345 	return (error);
1346 }
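
/*
 * Two contrasting userland calls through this path (a sketch):
 *
 *	shm_open(SHM_ANON, O_RDWR, 0);		// anonymous, never named
 *	shm_open(SHM_ANON, O_RDONLY, 0);	// EINVAL: a read-only
 *						// anonymous object is
 *						// pointless
 */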
1347 
1348 /* System calls. */
1349 #ifdef COMPAT_FREEBSD12
1350 int
1351 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1352 {
1353 
1354 	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1355 	    uap->mode, NULL));
1356 }
1357 #endif
1358 
1359 int
1360 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1361 {
1362 	char *path;
1363 	Fnv32_t fnv;
1364 	int error;
1365 
1366 	error = shm_copyin_path(td, uap->path, &path);
1367 	if (error != 0)
1368 		return (error);
1369 
1370 	AUDIT_ARG_UPATH1_CANON(path);
1371 	fnv = fnv_32_str(path, FNV1_32_INIT);
1372 	sx_xlock(&shm_dict_lock);
1373 	error = shm_remove(path, fnv, td->td_ucred);
1374 	sx_xunlock(&shm_dict_lock);
1375 	free(path, M_SHMFD);
1376 
1377 	return (error);
1378 }
1379 
1380 int
1381 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1382 {
1383 	char *path_from = NULL, *path_to = NULL;
1384 	Fnv32_t fnv_from, fnv_to;
1385 	struct shmfd *fd_from;
1386 	struct shmfd *fd_to;
1387 	int error;
1388 	int flags;
1389 
1390 	flags = uap->flags;
1391 	AUDIT_ARG_FFLAGS(flags);
1392 
1393 	/*
1394 	 * Make sure the user passed only valid flags.
1395 	 * If you add a new flag, please add a new term here.
1396 	 */
1397 	if ((flags & ~(
1398 	    SHM_RENAME_NOREPLACE |
1399 	    SHM_RENAME_EXCHANGE
1400 	    )) != 0) {
1401 		error = EINVAL;
1402 		goto out;
1403 	}
1404 
1405 	/*
1406 	 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1407 	 * force the user to choose one or the other.
1408 	 */
1409 	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1410 	    (flags & SHM_RENAME_EXCHANGE) != 0) {
1411 		error = EINVAL;
1412 		goto out;
1413 	}
1414 
1415 	/* Renaming to or from anonymous makes no sense */
1416 	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1417 		error = EINVAL;
1418 		goto out;
1419 	}
1420 
1421 	error = shm_copyin_path(td, uap->path_from, &path_from);
1422 	if (error != 0)
1423 		goto out;
1424 
1425 	error = shm_copyin_path(td, uap->path_to, &path_to);
1426 	if (error != 0)
1427 		goto out;
1428 
1429 	AUDIT_ARG_UPATH1_CANON(path_from);
1430 	AUDIT_ARG_UPATH2_CANON(path_to);
1431 
1432 	/* Rename with from/to equal is a no-op */
1433 	if (strcmp(path_from, path_to) == 0)
1434 		goto out;
1435 
1436 	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1437 	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1438 
1439 	sx_xlock(&shm_dict_lock);
1440 
1441 	fd_from = shm_lookup(path_from, fnv_from);
1442 	if (fd_from == NULL) {
1443 		error = ENOENT;
1444 		goto out_locked;
1445 	}
1446 
1447 	fd_to = shm_lookup(path_to, fnv_to);
1448 	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1449 		error = EEXIST;
1450 		goto out_locked;
1451 	}
1452 
1453 	/*
1454 	 * Unconditionally prevents shm_remove from invalidating the 'from'
1455 	 * shm's state.
1456 	 */
1457 	shm_hold(fd_from);
1458 	error = shm_remove(path_from, fnv_from, td->td_ucred);
1459 
1460 	/*
1461 	 * One of our assumptions failed if this returns ENOENT (e.g.,
1462 	 * locking didn't protect us).
1463 	 */
1464 	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1465 	    path_from));
1466 	if (error != 0) {
1467 		shm_drop(fd_from);
1468 		goto out_locked;
1469 	}
1470 
1471 	/*
1472 	 * If we are exchanging, we need to ensure the shm_remove below
1473 	 * doesn't invalidate the dest shm's state.
1474 	 */
1475 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1476 		shm_hold(fd_to);
1477 
1478 	/*
1479 	 * NOTE: if path_to is not already in the hash, c'est la vie;
1480 	 * it simply means we have nothing already at path_to to unlink.
1481 	 * That is the ENOENT case.
1482 	 *
1483 	 * If we somehow don't have access to unlink this guy, but
1484 	 * did for the shm at path_from, then relink the shm to path_from
1485 	 * and abort with EACCES.
1486 	 *
1487 	 * All other errors: that is weird; let's relink and abort the
1488 	 * operation.
1489 	 */
1490 	error = shm_remove(path_to, fnv_to, td->td_ucred);
1491 	if (error != 0 && error != ENOENT) {
1492 		shm_insert(path_from, fnv_from, fd_from);
1493 		shm_drop(fd_from);
1494 		/* Don't free path_from now, since the hash references it */
1495 		path_from = NULL;
1496 		goto out_locked;
1497 	}
1498 
1499 	error = 0;
1500 
1501 	shm_insert(path_to, fnv_to, fd_from);
1502 
1503 	/* Don't free path_to now, since the hash references it */
1504 	path_to = NULL;
1505 
1506 	/* We kept a ref when we removed, and incremented again in insert */
1507 	shm_drop(fd_from);
1508 	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1509 	    fd_from->shm_refs));
1510 
1511 	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1512 		shm_insert(path_from, fnv_from, fd_to);
1513 		path_from = NULL;
1514 		shm_drop(fd_to);
1515 		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1516 		    fd_to->shm_refs));
1517 	}
1518 
1519 out_locked:
1520 	sx_xunlock(&shm_dict_lock);
1521 
1522 out:
1523 	free(path_from, M_SHMFD);
1524 	free(path_to, M_SHMFD);
1525 	return (error);
1526 }
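
/*
 * The flag semantics as seen from userland (a sketch; both paths must
 * name non-anonymous objects):
 *
 *	shm_rename("/a", "/b", 0);			// replaces "/b"
 *	shm_rename("/a", "/b", SHM_RENAME_NOREPLACE);	// EEXIST if "/b"
 *							// exists
 *	shm_rename("/a", "/b", SHM_RENAME_EXCHANGE);	// swaps the two
 */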
1527 
1528 static int
1529 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1530     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1531     vm_ooffset_t foff, struct thread *td)
1532 {
1533 	struct vmspace *vms;
1534 	vm_map_entry_t next_entry, prev_entry;
1535 	vm_offset_t align, mask, maxaddr;
1536 	int docow, error, rv, try;
1537 	bool curmap;
1538 
1539 	if (shmfd->shm_lp_psind == 0)
1540 		return (EINVAL);
1541 
1542 	/* MAP_PRIVATE is disabled */
1543 	if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1544 	    MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
1545 		return (EINVAL);
1546 
1547 	vms = td->td_proc->p_vmspace;
1548 	curmap = map == &vms->vm_map;
1549 	if (curmap) {
1550 		error = kern_mmap_racct_check(td, map, size);
1551 		if (error != 0)
1552 			return (error);
1553 	}
1554 
1555 	docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1556 	docow |= MAP_INHERIT_SHARE;
1557 	if ((flags & MAP_NOCORE) != 0)
1558 		docow |= MAP_DISABLE_COREDUMP;
1559 
1560 	mask = pagesizes[shmfd->shm_lp_psind] - 1;
1561 	if ((foff & mask) != 0)
1562 		return (EINVAL);
1563 	maxaddr = vm_map_max(map);
1564 	if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1565 		maxaddr = MAP_32BIT_MAX_ADDR;
1566 	if (size == 0 || (size & mask) != 0 ||
1567 	    (*addr != 0 && ((*addr & mask) != 0 ||
1568 	    *addr + size < *addr || *addr + size > maxaddr)))
1569 		return (EINVAL);
1570 
1571 	align = flags & MAP_ALIGNMENT_MASK;
1572 	if (align == 0) {
1573 		align = pagesizes[shmfd->shm_lp_psind];
1574 	} else if (align == MAP_ALIGNED_SUPER) {
1575 		/*
1576 		 * MAP_ALIGNED_SUPER is only supported on superpage sizes,
1577 		 * i.e., [1, VM_NRESERVLEVEL].  shmfd->shm_lp_psind < 1 is
1578 		 * handled above.
1579 		 */
1580 		if (
1581 #if VM_NRESERVLEVEL > 0
1582 		    shmfd->shm_lp_psind > VM_NRESERVLEVEL
1583 #else
1584 		    shmfd->shm_lp_psind > 1
1585 #endif
1586 		    )
1587 			return (EINVAL);
1588 		align = pagesizes[shmfd->shm_lp_psind];
1589 	} else {
1590 		align >>= MAP_ALIGNMENT_SHIFT;
1591 		align = 1ULL << align;
1592 		/* Also handles overflow. */
1593 		if (align < pagesizes[shmfd->shm_lp_psind])
1594 			return (EINVAL);
1595 	}
1596 
1597 	vm_map_lock(map);
1598 	if ((flags & MAP_FIXED) == 0) {
1599 		try = 1;
1600 		if (curmap && (*addr == 0 ||
1601 		    (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1602 		    *addr < round_page((vm_offset_t)vms->vm_daddr +
1603 		    lim_max(td, RLIMIT_DATA))))) {
1604 			*addr = roundup2((vm_offset_t)vms->vm_daddr +
1605 			    lim_max(td, RLIMIT_DATA),
1606 			    pagesizes[shmfd->shm_lp_psind]);
1607 		}
1608 again:
1609 		rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1610 		if (rv != KERN_SUCCESS) {
1611 			if (try == 1) {
1612 				try = 2;
1613 				*addr = vm_map_min(map);
1614 				if ((*addr & mask) != 0)
1615 					*addr = (*addr + mask) & mask;
1616 				goto again;
1617 			}
1618 			goto fail1;
1619 		}
1620 	} else if ((flags & MAP_EXCL) == 0) {
1621 		rv = vm_map_delete(map, *addr, *addr + size);
1622 		if (rv != KERN_SUCCESS)
1623 			goto fail1;
1624 	} else {
1625 		error = ENOSPC;
1626 		if (vm_map_lookup_entry(map, *addr, &prev_entry))
1627 			goto fail;
1628 		next_entry = vm_map_entry_succ(prev_entry);
1629 		if (next_entry->start < *addr + size)
1630 			goto fail;
1631 	}
1632 
1633 	rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1634 	    prot, max_prot, docow);
1635 fail1:
1636 	error = vm_mmap_to_errno(rv);
1637 fail:
1638 	vm_map_unlock(map);
1639 	return (error);
1640 }
1641 
1642 static int
1643 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1644     vm_prot_t prot, vm_prot_t max_maxprot, int flags,
1645     vm_ooffset_t foff, struct thread *td)
1646 {
1647 	struct shmfd *shmfd;
1648 	vm_prot_t maxprot;
1649 	int error;
1650 	bool writecnt;
1651 	void *rl_cookie;
1652 
1653 	shmfd = fp->f_data;
1654 	maxprot = VM_PROT_NONE;
1655 
1656 	rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
1657 	/* FREAD should always be set. */
1658 	if ((fp->f_flag & FREAD) != 0)
1659 		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1660 
1661 	/*
1662 	 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
1663 	 * mapping with a write seal applied.  Private mappings are always
1664 	 * writeable.
1665 	 */
1666 	if ((flags & MAP_SHARED) == 0) {
1667 		if ((max_maxprot & VM_PROT_WRITE) != 0)
1668 			maxprot |= VM_PROT_WRITE;
1669 		writecnt = false;
1670 	} else {
1671 		if ((fp->f_flag & FWRITE) != 0 &&
1672 		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1673 			maxprot |= VM_PROT_WRITE;
1674 
1675 		/*
1676 		 * Any mappings from a writable descriptor may be upgraded to
1677 		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1678 		 * applied between the open and subsequent mmap(2).  We want to
1679 		 * reject application of a write seal as long as any such
1680 		 * mapping exists so that the seal cannot be trivially bypassed.
1681 		 */
1682 		writecnt = (maxprot & VM_PROT_WRITE) != 0;
1683 		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1684 			error = EACCES;
1685 			goto out;
1686 		}
1687 	}
1688 	maxprot &= max_maxprot;
1689 
1690 	/* See comment in vn_mmap(). */
1691 	if (
1692 #ifdef _LP64
1693 	    objsize > OFF_MAX ||
1694 #endif
1695 	    foff > OFF_MAX - objsize) {
1696 		error = EINVAL;
1697 		goto out;
1698 	}
1699 
1700 #ifdef MAC
1701 	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1702 	if (error != 0)
1703 		goto out;
1704 #endif
1705 
1706 	mtx_lock(&shm_timestamp_lock);
1707 	vfs_timestamp(&shmfd->shm_atime);
1708 	mtx_unlock(&shm_timestamp_lock);
1709 	vm_object_reference(shmfd->shm_object);
1710 
1711 	if (shm_largepage(shmfd)) {
1712 		writecnt = false;
1713 		error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1714 		    maxprot, flags, foff, td);
1715 	} else {
1716 		if (writecnt) {
1717 			vm_pager_update_writecount(shmfd->shm_object, 0,
1718 			    objsize);
1719 		}
1720 		error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1721 		    shmfd->shm_object, foff, writecnt, td);
1722 	}
1723 	if (error != 0) {
1724 		if (writecnt)
1725 			vm_pager_release_writecount(shmfd->shm_object, 0,
1726 			    objsize);
1727 		vm_object_deallocate(shmfd->shm_object);
1728 	}
1729 out:
1730 	shm_rangelock_unlock(shmfd, rl_cookie);
1731 	return (error);
1732 }
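
/*
 * The writecnt accounting above is what makes write seals reliable
 * against existing mappings; e.g. (a sketch, assuming a sealable fd
 * from memfd_create(2)):
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	// fails with EBUSY while
 *						// the writable mapping
 *						// exists
 */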
1733 
1734 static int
1735 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1736     struct thread *td)
1737 {
1738 	struct shmfd *shmfd;
1739 	int error;
1740 
1741 	error = 0;
1742 	shmfd = fp->f_data;
1743 	mtx_lock(&shm_timestamp_lock);
1744 	/*
1745 	 * SUSv4 says that x bits of permission need not be affected.
1746 	 * Be consistent with our shm_open there.
1747 	 */
1748 #ifdef MAC
1749 	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1750 	if (error != 0)
1751 		goto out;
1752 #endif
1753 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1754 	    VADMIN, active_cred);
1755 	if (error != 0)
1756 		goto out;
1757 	shmfd->shm_mode = mode & ACCESSPERMS;
1758 out:
1759 	mtx_unlock(&shm_timestamp_lock);
1760 	return (error);
1761 }
1762 
1763 static int
1764 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1765     struct thread *td)
1766 {
1767 	struct shmfd *shmfd;
1768 	int error;
1769 
1770 	error = 0;
1771 	shmfd = fp->f_data;
1772 	mtx_lock(&shm_timestamp_lock);
1773 #ifdef MAC
1774 	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1775 	if (error != 0)
1776 		goto out;
1777 #endif
1778 	if (uid == (uid_t)-1)
1779 		uid = shmfd->shm_uid;
1780 	if (gid == (gid_t)-1)
1781 		gid = shmfd->shm_gid;
1782 	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1783 	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1784 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1785 		goto out;
1786 	shmfd->shm_uid = uid;
1787 	shmfd->shm_gid = gid;
1788 out:
1789 	mtx_unlock(&shm_timestamp_lock);
1790 	return (error);
1791 }
1792 
1793 /*
1794  * Helper routines to allow the backing object of a shared memory file
1795  * descriptor to be mapped in the kernel.
1796  */
1797 int
1798 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1799 {
1800 	struct shmfd *shmfd;
1801 	vm_offset_t kva, ofs;
1802 	vm_object_t obj;
1803 	int rv;
1804 
1805 	if (fp->f_type != DTYPE_SHM)
1806 		return (EINVAL);
1807 	shmfd = fp->f_data;
1808 	obj = shmfd->shm_object;
1809 	VM_OBJECT_WLOCK(obj);
1810 	/*
1811 	 * XXXRW: This validation is probably insufficient, and subject to
1812 	 * sign errors.  It should be fixed.
1813 	 */
1814 	if (offset >= shmfd->shm_size ||
1815 	    offset + size > round_page(shmfd->shm_size)) {
1816 		VM_OBJECT_WUNLOCK(obj);
1817 		return (EINVAL);
1818 	}
1819 
1820 	shmfd->shm_kmappings++;
1821 	vm_object_reference_locked(obj);
1822 	VM_OBJECT_WUNLOCK(obj);
1823 
1824 	/* Map the object into the kernel_map and wire it. */
1825 	kva = vm_map_min(kernel_map);
1826 	ofs = offset & PAGE_MASK;
1827 	offset = trunc_page(offset);
1828 	size = round_page(size + ofs);
1829 	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1830 	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1831 	    VM_PROT_READ | VM_PROT_WRITE, 0);
1832 	if (rv == KERN_SUCCESS) {
1833 		rv = vm_map_wire(kernel_map, kva, kva + size,
1834 		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1835 		if (rv == KERN_SUCCESS) {
1836 			*memp = (void *)(kva + ofs);
1837 			return (0);
1838 		}
1839 		vm_map_remove(kernel_map, kva, kva + size);
1840 	} else
1841 		vm_object_deallocate(obj);
1842 
1843 	/* On failure, drop our mapping reference. */
1844 	VM_OBJECT_WLOCK(obj);
1845 	shmfd->shm_kmappings--;
1846 	VM_OBJECT_WUNLOCK(obj);
1847 
1848 	return (vm_mmap_to_errno(rv));
1849 }
1850 
1851 /*
1852  * We require the caller to unmap the entire entry.  This allows us to
1853  * safely decrement shm_kmappings when a mapping is removed.
1854  */
1855 int
1856 shm_unmap(struct file *fp, void *mem, size_t size)
1857 {
1858 	struct shmfd *shmfd;
1859 	vm_map_entry_t entry;
1860 	vm_offset_t kva, ofs;
1861 	vm_object_t obj;
1862 	vm_pindex_t pindex;
1863 	vm_prot_t prot;
1864 	boolean_t wired;
1865 	vm_map_t map;
1866 	int rv;
1867 
1868 	if (fp->f_type != DTYPE_SHM)
1869 		return (EINVAL);
1870 	shmfd = fp->f_data;
1871 	kva = (vm_offset_t)mem;
1872 	ofs = kva & PAGE_MASK;
1873 	kva = trunc_page(kva);
1874 	size = round_page(size + ofs);
1875 	map = kernel_map;
1876 	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1877 	    &obj, &pindex, &prot, &wired);
1878 	if (rv != KERN_SUCCESS)
1879 		return (EINVAL);
1880 	if (entry->start != kva || entry->end != kva + size) {
1881 		vm_map_lookup_done(map, entry);
1882 		return (EINVAL);
1883 	}
1884 	vm_map_lookup_done(map, entry);
1885 	if (obj != shmfd->shm_object)
1886 		return (EINVAL);
1887 	vm_map_remove(map, kva, kva + size);
1888 	VM_OBJECT_WLOCK(obj);
1889 	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1890 	shmfd->shm_kmappings--;
1891 	VM_OBJECT_WUNLOCK(obj);
1892 	return (0);
1893 }
1894 
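/*
 * A minimal in-kernel usage sketch for the pair above, assuming fp is
 * a DTYPE_SHM file held by the caller:
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, off, &mem);
 *	if (error == 0) {
 *		(access len bytes at mem)
 *		error = shm_unmap(fp, mem, len);
 *	}
 *
 * The same mem and len must be passed to shm_unmap() so that the
 * entire kernel_map entry is removed and shm_kmappings stays
 * balanced.
 */
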
1895 static int
1896 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1897 {
1898 	const char *path, *pr_path;
1899 	size_t pr_pathlen;
1900 	bool visible;
1901 
1902 	sx_assert(&shm_dict_lock, SA_LOCKED);
1903 	kif->kf_type = KF_TYPE_SHM;
1904 	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1905 	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1906 	if (shmfd->shm_path != NULL) {
1907 		path = shmfd->shm_path;
1908 		pr_path = curthread->td_ucred->cr_prison->pr_path;
1909 		if (strcmp(pr_path, "/") != 0) {
1910 			/* Return the jail-rooted pathname. */
1911 			pr_pathlen = strlen(pr_path);
1912 			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1913 			    path[pr_pathlen] == '/';
1914 			if (list && !visible)
1915 				return (EPERM);
1916 			if (visible)
1917 				path += pr_pathlen;
1918 		}
1919 		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1920 	}
1921 	return (0);
1922 }
1923 
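/*
 * Worked example of the jail-rooted path logic above (hypothetical
 * values): with pr_path = "/jails/a" and shm_path = "/jails/a/mem/obj",
 * the prefix matches and is followed by '/', so the entry is visible
 * and is reported as "/mem/obj".  With shm_path = "/other/obj" the
 * prefix test fails: a listing request returns EPERM (the entry is
 * hidden), while a direct query on the fd reports the full path.
 */
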
1924 static int
1925 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1926     struct filedesc *fdp __unused)
1927 {
1928 	int res;
1929 
1930 	sx_slock(&shm_dict_lock);
1931 	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1932 	sx_sunlock(&shm_dict_lock);
1933 	return (res);
1934 }
1935 
1936 static int
1937 shm_add_seals(struct file *fp, int seals)
1938 {
1939 	struct shmfd *shmfd;
1940 	void *rl_cookie;
1941 	vm_ooffset_t writemappings;
1942 	int error, nseals;
1943 
1944 	error = 0;
1945 	shmfd = fp->f_data;
1946 	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1947 
1948 	/* Even already-set seals should result in EPERM. */
1949 	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1950 		error = EPERM;
1951 		goto out;
1952 	}
1953 	nseals = seals & ~shmfd->shm_seals;
1954 	if ((nseals & F_SEAL_WRITE) != 0) {
1955 		if (shm_largepage(shmfd)) {
1956 			error = ENOTSUP;
1957 			goto out;
1958 		}
1959 
1960 		/*
1961 		 * The rangelock above prevents writable mappings from being
1962 		 * added after we've started applying seals.  The RLOCK here
1963 		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
1964 		 * writemappings will be done without a rangelock.
1965 		 */
1966 		VM_OBJECT_RLOCK(shmfd->shm_object);
1967 		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1968 		VM_OBJECT_RUNLOCK(shmfd->shm_object);
1969 		/* kmappings are also writable */
1970 		if (writemappings > 0) {
1971 			error = EBUSY;
1972 			goto out;
1973 		}
1974 	}
1975 	shmfd->shm_seals |= nseals;
1976 out:
1977 	shm_rangelock_unlock(shmfd, rl_cookie);
1978 	return (error);
1979 }
1980 
1981 static int
1982 shm_get_seals(struct file *fp, int *seals)
1983 {
1984 	struct shmfd *shmfd;
1985 
1986 	shmfd = fp->f_data;
1987 	*seals = shmfd->shm_seals;
1988 	return (0);
1989 }
1990 
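/*
 * shm_add_seals() and shm_get_seals() implement the F_ADD_SEALS and
 * F_GET_SEALS fcntl(2) commands for shm descriptors.  A hedged
 * userspace sketch, assuming an object created with sealing allowed:
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *
 *	(resize, fill, and unmap any writable mappings here)
 *
 *	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) == -1)
 *		err(1, "F_ADD_SEALS");
 *	int seals = fcntl(fd, F_GET_SEALS);
 *
 * F_ADD_SEALS fails with EBUSY while writable mappings exist and,
 * once F_SEAL_SEAL is in place, with EPERM, even for seals that are
 * already set.
 */
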
1991 static int
1992 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1993 {
1994 	vm_object_t object;
1995 	vm_pindex_t pistart, pi, piend;
1996 	vm_ooffset_t off, len;
1997 	int startofs, endofs, end;
1998 	int error;
1999 
2000 	off = *offset;
2001 	len = *length;
2002 	KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
2003 	if (off + len > shmfd->shm_size)
2004 		len = shmfd->shm_size - off;
2005 	object = shmfd->shm_object;
2006 	startofs = off & PAGE_MASK;
2007 	endofs = (off + len) & PAGE_MASK;
2008 	pistart = OFF_TO_IDX(off);
2009 	piend = OFF_TO_IDX(off + len);
2010 	pi = OFF_TO_IDX(off + PAGE_MASK);
2011 	error = 0;
2012 
2013 	/* Handle the case when offset is on or beyond shm size. */
2014 	if ((off_t)len <= 0) {
2015 		*length = 0;
2016 		return (0);
2017 	}
2018 
2019 	VM_OBJECT_WLOCK(object);
2020 
2021 	if (startofs != 0) {
2022 		end = pistart != piend ? PAGE_SIZE : endofs;
2023 		error = shm_partial_page_invalidate(object, pistart, startofs,
2024 		    end);
2025 		if (error)
2026 			goto out;
2027 		off += end - startofs;
2028 		len -= end - startofs;
2029 	}
2030 
2031 	if (pi < piend) {
2032 		vm_object_page_remove(object, pi, piend, 0);
2033 		off += IDX_TO_OFF(piend - pi);
2034 		len -= IDX_TO_OFF(piend - pi);
2035 	}
2036 
2037 	if (endofs != 0 && pistart != piend) {
2038 		error = shm_partial_page_invalidate(object, piend, 0, endofs);
2039 		if (error)
2040 			goto out;
2041 		off += endofs;
2042 		len -= endofs;
2043 	}
2044 
2045 out:
2046 	VM_OBJECT_WUNLOCK(object);
2047 	*offset = off;
2048 	*length = len;
2049 	return (error);
2050 }
2051 
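/*
 * Worked example of the page arithmetic above, assuming PAGE_SIZE ==
 * 4096: for off = 1000 and len = 10000 (so off + len = 11000),
 * startofs = 1000, endofs = 2808, pistart = 0, pi = 1 and piend = 2.
 * The tail of page 0 (bytes 1000-4095) and the head of page 2 (bytes
 * 0-2807) are zeroed via shm_partial_page_invalidate(), while page 1
 * is removed outright by vm_object_page_remove().
 */
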
2052 static int
2053 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
2054     struct ucred *active_cred, struct thread *td)
2055 {
2056 	void *rl_cookie;
2057 	struct shmfd *shmfd;
2058 	off_t off, len;
2059 	int error;
2060 
2061 	KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
2062 	KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
2063 	    ("shm_fspacectl: unsupported flags"));
2064 	KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
2065 	    ("shm_fspacectl: offset/length overflow or underflow"));
2066 	error = EINVAL;
2067 	shmfd = fp->f_data;
2068 	off = *offset;
2069 	len = *length;
2070 
2071 	rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
2072 	switch (cmd) {
2073 	case SPACECTL_DEALLOC:
2074 		if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2075 			error = EPERM;
2076 			break;
2077 		}
2078 		error = shm_deallocate(shmfd, &off, &len, flags);
2079 		*offset = off;
2080 		*length = len;
2081 		break;
2082 	default:
2083 		__assert_unreachable();
2084 	}
2085 	shm_rangelock_unlock(shmfd, rl_cookie);
2086 	return (error);
2087 }
2088 
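/*
 * A hedged userspace sketch of the corresponding fspacectl(2) call,
 * punching a hole in a shared memory object:
 *
 *	struct spacectl_range range = { .r_offset = off, .r_len = len };
 *
 *	if (fspacectl(fd, SPACECTL_DEALLOC, &range, 0, &range) == -1)
 *		err(1, "fspacectl");
 *
 * On return, range reflects the unprocessed remainder; it is zero on
 * full success.  EPERM is returned when F_SEAL_WRITE has been
 * applied, matching the check above.
 */
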
2090 static int
2091 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2092 {
2093 	void *rl_cookie;
2094 	struct shmfd *shmfd;
2095 	size_t size;
2096 	int error;
2097 
2098 	/* This assumes that the caller already checked for overflow. */
2099 	error = 0;
2100 	shmfd = fp->f_data;
2101 	size = offset + len;
2102 
2103 	/*
2104 	 * Just grab the rangelock for the range that we may be attempting to
2105 	 * grow, rather than blocking read/write for regions we won't be
2106 	 * touching while this (potential) resize is in progress.  Other
2107 	 * attempts to resize the shmfd will have to take a write lock from 0 to
2108 	 * OFF_MAX, so this being potentially beyond the current usable range of
2109 	 * the shmfd is not necessarily a concern.  If other mechanisms are
2110 	 * added to grow a shmfd, this may need to be re-evaluated.
2111 	 */
2112 	rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
2113 	if (size > shmfd->shm_size)
2114 		error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2115 	shm_rangelock_unlock(shmfd, rl_cookie);
2116 	/* Translate to posix_fallocate(2) return value as needed. */
2117 	if (error == ENOMEM)
2118 		error = ENOSPC;
2119 	return (error);
2120 }
2121 
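/*
 * shm_fallocate() backs posix_fallocate(2) on shm descriptors; note
 * the ENOMEM -> ENOSPC translation above.  A minimal sketch:
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	int error = posix_fallocate(fd, 0, 1024 * 1024);
 *
 * posix_fallocate(2) returns the error number directly rather than
 * setting errno; ENOSPC here indicates that the swap reservation
 * failed.
 */
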
2122 static int
2123 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2124 {
2125 	struct shm_mapping *shmm;
2126 	struct sbuf sb;
2127 	struct kinfo_file kif;
2128 	u_long i;
2129 	int error, error2;
2130 
2131 	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2132 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2133 	error = 0;
2134 	sx_slock(&shm_dict_lock);
2135 	for (i = 0; i < shm_hash + 1; i++) {
2136 		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2137 			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2138 			    &kif, true);
2139 			if (error == EPERM) {
2140 				error = 0;
2141 				continue;
2142 			}
2143 			if (error != 0)
2144 				break;
2145 			pack_kinfo(&kif);
2146 			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2147 			    0 : ENOMEM;
2148 			if (error != 0)
2149 				break;
2150 		}
2151 	}
2152 	sx_sunlock(&shm_dict_lock);
2153 	error2 = sbuf_finish(&sb);
2154 	sbuf_delete(&sb);
2155 	return (error != 0 ? error : error2);
2156 }
2157 
2158 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2159     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2160     NULL, 0, sysctl_posix_shm_list, "",
2161     "POSIX SHM list");
2162 
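/*
 * These records are consumed by posixshmcontrol(1).  A hedged sketch
 * of reading them directly, walking the packed struct kinfo_file
 * entries by their kf_structsize:
 *
 *	size_t len;
 *	if (sysctlbyname("kern.ipc.posix_shm_list", NULL, &len,
 *	    NULL, 0) == -1)
 *		err(1, "sysctl");
 *	char *buf = malloc(len);
 *	if (sysctlbyname("kern.ipc.posix_shm_list", buf, &len,
 *	    NULL, 0) == -1)
 *		err(1, "sysctl");
 *	for (char *p = buf; p < buf + len;) {
 *		struct kinfo_file *kif = (struct kinfo_file *)(void *)p;
 *		printf("%s %ju\n", kif->kf_path,
 *		    (uintmax_t)kif->kf_un.kf_file.kf_file_size);
 *		p += kif->kf_structsize;
 *	}
 */
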
2163 int
2164 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2165     struct filecaps *caps)
2166 {
2167 
2168 	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2169 }
2170 
2171 /*
2172  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2173  * caller, and libc will enforce it for the traditional shm_open() call.  This
2174  * allows other consumers, like memfd_create(), to opt-in for CLOEXEC.  This
2175  * interface also includes a 'name' argument that is currently unused, but could
2176  * potentially be exported later via some interface for debugging purposes.
2177  * From the kernel's perspective, it is optional.  Individual consumers like
2178  * memfd_create() may require it in order to be compatible with other systems
2179  * implementing the same function.
2180  */
2181 int
2182 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2183 {
2184 
2185 	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2186 	    uap->shmflags, NULL, uap->name));
2187 }
2188 
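/*
 * A hedged sketch of the memfd_create(3) wrapper mentioned above,
 * which libc builds on shm_open2() with an anonymous object:
 *
 *	int fd = memfd_create("demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *
 * The "demo" string corresponds to the otherwise-unused name argument
 * of shm_open2(); MFD_CLOEXEC is the caller-controlled CLOEXEC
 * behavior described in the comment above.
 */
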
2189 int
2190 shm_get_path(struct vm_object *obj, char *path, size_t sz)
2191 {
2192 	struct shmfd *shmfd;
2193 	int error;
2194 
2195 	error = 0;
2196 	shmfd = NULL;
2197 	sx_slock(&shm_dict_lock);
2198 	VM_OBJECT_RLOCK(obj);
2199 	if ((obj->flags & OBJ_POSIXSHM) == 0) {
2200 		error = EINVAL;
2201 	} else {
2202 		if (obj->type == shmfd_pager_type)
2203 			shmfd = obj->un_pager.swp.swp_priv;
2204 		else if (obj->type == OBJT_PHYS)
2205 			shmfd = obj->un_pager.phys.phys_priv;
2206 		if (shmfd == NULL) {
2207 			error = ENXIO;
2208 		} else {
2209 			strlcpy(path, shmfd->shm_path == NULL ? "anon" :
2210 			    shmfd->shm_path, sz);
2211 		}
2212 	}
2213 	if (error != 0)
2214 		path[0] = '\0';
2215 	VM_OBJECT_RUNLOCK(obj);
2216 	sx_sunlock(&shm_dict_lock);
2217 	return (error);
2218 }
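
/*
 * A minimal in-kernel usage sketch for shm_get_path(), e.g. from a
 * debugging or tracing hook that holds a reference on obj:
 *
 *	char path[MAXPATHLEN];
 *
 *	if (shm_get_path(obj, path, sizeof(path)) == 0)
 *		printf("backed by shm object %s\n", path);
 *
 * Anonymous objects report the placeholder "anon"; objects that are
 * not POSIX shm yield EINVAL and an empty string.
 */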
2219