xref: /freebsd/sys/kern/uipc_shm.c (revision 6683132d54bd6d589889e43dabdc53d35e38a028)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5  * All rights reserved.
6  *
7  * Portions of this software were developed by BAE Systems, the University of
8  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
9  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
10  * Computing (TC) research program.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * Support for shared swap-backed anonymous memory objects via
36  * shm_open(2) and shm_unlink(2).  While most of the implementation is
37  * here, the associated mapping logic changes live in vm_mmap.c.
38  *
39  * posixshmcontrol(1) allows users to inspect the state of the memory
40  * objects.  The per-uid swap resource limit controls the total
41  * amount of memory that a user can consume for anonymous objects,
42  * including shared ones.
43  */
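/*
 * As a quick orientation for readers, the sketch below shows the typical
 * userland lifecycle that this file implements.  It is illustrative only
 * (the "/example" name is hypothetical) and is compiled out via #if 0 so
 * it is not part of the kernel build.
 */
#if 0
#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	char *p;
	int fd;

	/* Create (or open) a named POSIX shm object and size it. */
	fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
	if (fd == -1)
		err(1, "shm_open");
	if (ftruncate(fd, 4096) == -1)
		err(1, "ftruncate");

	/* Map it shared; other processes opening "/example" see updates. */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	strlcpy(p, "hello", 4096);

	/* Drop the name; the object persists until the last unmap/close. */
	if (shm_unlink("/example") == -1)
		err(1, "shm_unlink");
	munmap(p, 4096);
	close(fd);
	return (0);
}
#endif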
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include "opt_capsicum.h"
49 #include "opt_ktrace.h"
50 
51 #include <sys/param.h>
52 #include <sys/capsicum.h>
53 #include <sys/conf.h>
54 #include <sys/fcntl.h>
55 #include <sys/file.h>
56 #include <sys/filedesc.h>
57 #include <sys/filio.h>
58 #include <sys/fnv_hash.h>
59 #include <sys/kernel.h>
60 #include <sys/uio.h>
61 #include <sys/signal.h>
62 #include <sys/jail.h>
63 #include <sys/ktrace.h>
64 #include <sys/lock.h>
65 #include <sys/malloc.h>
66 #include <sys/mman.h>
67 #include <sys/mutex.h>
68 #include <sys/priv.h>
69 #include <sys/proc.h>
70 #include <sys/refcount.h>
71 #include <sys/resourcevar.h>
72 #include <sys/rwlock.h>
73 #include <sys/sbuf.h>
74 #include <sys/stat.h>
75 #include <sys/syscallsubr.h>
76 #include <sys/sysctl.h>
77 #include <sys/sysproto.h>
78 #include <sys/systm.h>
79 #include <sys/sx.h>
80 #include <sys/time.h>
81 #include <sys/vnode.h>
82 #include <sys/unistd.h>
83 #include <sys/user.h>
84 
85 #include <security/audit/audit.h>
86 #include <security/mac/mac_framework.h>
87 
88 #include <vm/vm.h>
89 #include <vm/vm_param.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_extern.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_object.h>
95 #include <vm/vm_page.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_pager.h>
98 #include <vm/swap_pager.h>
99 
100 struct shm_mapping {
101 	char		*sm_path;
102 	Fnv32_t		sm_fnv;
103 	struct shmfd	*sm_shmfd;
104 	LIST_ENTRY(shm_mapping) sm_link;
105 };
106 
107 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
108 static LIST_HEAD(, shm_mapping) *shm_dictionary;
109 static struct sx shm_dict_lock;
110 static struct mtx shm_timestamp_lock;
111 static u_long shm_hash;
112 static struct unrhdr64 shm_ino_unr;
113 static dev_t shm_dev_ino;
114 
115 #define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])
116 
117 static void	shm_init(void *arg);
118 static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
119 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
120 static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
121 
122 static fo_rdwr_t	shm_read;
123 static fo_rdwr_t	shm_write;
124 static fo_truncate_t	shm_truncate;
125 static fo_ioctl_t	shm_ioctl;
126 static fo_stat_t	shm_stat;
127 static fo_close_t	shm_close;
128 static fo_chmod_t	shm_chmod;
129 static fo_chown_t	shm_chown;
130 static fo_seek_t	shm_seek;
131 static fo_fill_kinfo_t	shm_fill_kinfo;
132 static fo_mmap_t	shm_mmap;
133 
134 /* File descriptor operations. */
135 struct fileops shm_ops = {
136 	.fo_read = shm_read,
137 	.fo_write = shm_write,
138 	.fo_truncate = shm_truncate,
139 	.fo_ioctl = shm_ioctl,
140 	.fo_poll = invfo_poll,
141 	.fo_kqfilter = invfo_kqfilter,
142 	.fo_stat = shm_stat,
143 	.fo_close = shm_close,
144 	.fo_chmod = shm_chmod,
145 	.fo_chown = shm_chown,
146 	.fo_sendfile = vn_sendfile,
147 	.fo_seek = shm_seek,
148 	.fo_fill_kinfo = shm_fill_kinfo,
149 	.fo_mmap = shm_mmap,
150 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
151 };
152 
153 FEATURE(posix_shm, "POSIX shared memory");
154 
155 static int
156 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
157 {
158 	vm_page_t m;
159 	vm_pindex_t idx;
160 	size_t tlen;
161 	int error, offset, rv;
162 
163 	idx = OFF_TO_IDX(uio->uio_offset);
164 	offset = uio->uio_offset & PAGE_MASK;
165 	tlen = MIN(PAGE_SIZE - offset, len);
166 
167 	VM_OBJECT_WLOCK(obj);
168 
169 	/*
170 	 * Read I/O without either a corresponding resident page or swap
171 	 * page: use zero_region.  This is intended to avoid instantiating
172 	 * pages on read from a sparse region.
173 	 */
174 	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
175 	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
176 		VM_OBJECT_WUNLOCK(obj);
177 		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
178 	}
179 
180 	/*
181 	 * Parallel reads of the page content from disk are prevented
182 	 * by exclusive busy.
183 	 *
184 	 * Although the tmpfs vnode lock may be held here (uiomove_object()
185 	 * also services tmpfs I/O), it is nonetheless safe to sleep
186 	 * waiting for a free page.  The pageout daemon does not need to
187 	 * acquire that lock to page out the object's pages, because this
188 	 * is an OBJT_SWAP type object.
189 	 */
190 	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
191 	if (m->valid != VM_PAGE_BITS_ALL) {
192 		vm_page_xbusy(m);
193 		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
194 			rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
195 			if (rv != VM_PAGER_OK) {
196 				printf(
197 	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
198 				    obj, idx, m->valid, rv);
199 				vm_page_lock(m);
200 				vm_page_free(m);
201 				vm_page_unlock(m);
202 				VM_OBJECT_WUNLOCK(obj);
203 				return (EIO);
204 			}
205 		} else
206 			vm_page_zero_invalid(m, TRUE);
207 		vm_page_xunbusy(m);
208 	}
209 	vm_page_lock(m);
210 	vm_page_hold(m);
211 	if (vm_page_active(m))
212 		vm_page_reference(m);
213 	else
214 		vm_page_activate(m);
215 	vm_page_unlock(m);
216 	VM_OBJECT_WUNLOCK(obj);
217 	error = uiomove_fromphys(&m, offset, tlen, uio);
218 	if (uio->uio_rw == UIO_WRITE && error == 0) {
219 		VM_OBJECT_WLOCK(obj);
220 		vm_page_dirty(m);
221 		vm_pager_page_unswapped(m);
222 		VM_OBJECT_WUNLOCK(obj);
223 	}
224 	vm_page_lock(m);
225 	vm_page_unhold(m);
226 	vm_page_unlock(m);
227 
228 	return (error);
229 }
230 
231 int
232 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
233 {
234 	ssize_t resid;
235 	size_t len;
236 	int error;
237 
238 	error = 0;
239 	while ((resid = uio->uio_resid) > 0) {
240 		if (obj_size <= uio->uio_offset)
241 			break;
242 		len = MIN(obj_size - uio->uio_offset, resid);
243 		if (len == 0)
244 			break;
245 		error = uiomove_object_page(obj, len, uio);
246 		if (error != 0 || resid == uio->uio_resid)
247 			break;
248 	}
249 	return (error);
250 }
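/*
 * Illustrative note: because uiomove_object_page() services reads of
 * never-written ranges from zero_region, read(2) on a sparse object
 * returns zeroes without instantiating backing pages.  A minimal
 * userland sketch (error handling omitted, not part of the kernel
 * build):
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	char buf[4096];
	int fd;

	fd = shm_open(SHM_ANON, O_RDWR, 0600);
	ftruncate(fd, 1024 * 1024);	/* 1 MB, entirely sparse. */
	/* Returns zeroes; no page of the object is faulted in for this. */
	(void)read(fd, buf, sizeof(buf));
	close(fd);
	return (0);
}
#endif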
251 
252 static int
253 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
254 {
255 	struct shmfd *shmfd;
256 	off_t foffset;
257 	int error;
258 
259 	shmfd = fp->f_data;
260 	foffset = foffset_lock(fp, 0);
261 	error = 0;
262 	switch (whence) {
263 	case L_INCR:
264 		if (foffset < 0 ||
265 		    (offset > 0 && foffset > OFF_MAX - offset)) {
266 			error = EOVERFLOW;
267 			break;
268 		}
269 		offset += foffset;
270 		break;
271 	case L_XTND:
272 		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
273 			error = EOVERFLOW;
274 			break;
275 		}
276 		offset += shmfd->shm_size;
277 		break;
278 	case L_SET:
279 		break;
280 	default:
281 		error = EINVAL;
282 	}
283 	if (error == 0) {
284 		if (offset < 0 || offset > shmfd->shm_size)
285 			error = EINVAL;
286 		else
287 			td->td_uretoff.tdu_off = offset;
288 	}
289 	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
290 	return (error);
291 }
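/*
 * A hedged userland sketch of the seek semantics implemented above:
 * SEEK_END is relative to the current object size, and any resulting
 * offset outside [0, shm_size] is rejected with EINVAL (unlike regular
 * files, which permit seeking past EOF).  Error handling omitted; not
 * part of the kernel build.
 */
#if 0
#include <sys/mman.h>
#include <assert.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = shm_open(SHM_ANON, O_RDWR, 0600);

	ftruncate(fd, 8192);
	assert(lseek(fd, 0, SEEK_END) == 8192);
	assert(lseek(fd, 1, SEEK_END) == -1);	/* EINVAL: beyond size. */
	close(fd);
	return (0);
}
#endif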
292 
293 static int
294 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
295     int flags, struct thread *td)
296 {
297 	struct shmfd *shmfd;
298 	void *rl_cookie;
299 	int error;
300 
301 	shmfd = fp->f_data;
302 #ifdef MAC
303 	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
304 	if (error)
305 		return (error);
306 #endif
307 	foffset_lock_uio(fp, uio, flags);
308 	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
309 	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
310 	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
311 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
312 	foffset_unlock_uio(fp, uio, flags);
313 	return (error);
314 }
315 
316 static int
317 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
318     int flags, struct thread *td)
319 {
320 	struct shmfd *shmfd;
321 	void *rl_cookie;
322 	int error;
323 
324 	shmfd = fp->f_data;
325 #ifdef MAC
326 	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
327 	if (error)
328 		return (error);
329 #endif
330 	foffset_lock_uio(fp, uio, flags);
331 	if ((flags & FOF_OFFSET) == 0) {
332 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
333 		    &shmfd->shm_mtx);
334 	} else {
335 		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
336 		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
337 	}
338 
339 	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
340 	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
341 	foffset_unlock_uio(fp, uio, flags);
342 	return (error);
343 }
344 
345 static int
346 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
347     struct thread *td)
348 {
349 	struct shmfd *shmfd;
350 #ifdef MAC
351 	int error;
352 #endif
353 
354 	shmfd = fp->f_data;
355 #ifdef MAC
356 	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
357 	if (error)
358 		return (error);
359 #endif
360 	return (shm_dotruncate(shmfd, length));
361 }
362 
363 int
364 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
365     struct thread *td)
366 {
367 
368 	switch (com) {
369 	case FIONBIO:
370 	case FIOASYNC:
371 		/*
372 		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
373 		 * just like it would on an unlinked regular file.
374 		 */
375 		return (0);
376 	default:
377 		return (ENOTTY);
378 	}
379 }
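/*
 * The FIONBIO/FIOASYNC cases above exist because the generic fcntl(2)
 * F_SETFL path delivers those ioctls to fo_ioctl().  A tiny illustration
 * (not part of the kernel build):
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>

int
main(void)
{
	int fd = shm_open(SHM_ANON, O_RDWR, 0600);

	/* kern_fcntl() implements F_SETFL via the FIONBIO ioctl. */
	return (fcntl(fd, F_SETFL, O_NONBLOCK) == -1);
}
#endif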
380 
381 static int
382 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
383     struct thread *td)
384 {
385 	struct shmfd *shmfd;
386 #ifdef MAC
387 	int error;
388 #endif
389 
390 	shmfd = fp->f_data;
391 
392 #ifdef MAC
393 	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
394 	if (error)
395 		return (error);
396 #endif
397 
398 	/*
399 	 * Attempt to return sane-ish values for fstat() on a memory file
400 	 * descriptor.
401 	 */
402 	bzero(sb, sizeof(*sb));
403 	sb->st_blksize = PAGE_SIZE;
404 	sb->st_size = shmfd->shm_size;
405 	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
406 	mtx_lock(&shm_timestamp_lock);
407 	sb->st_atim = shmfd->shm_atime;
408 	sb->st_ctim = shmfd->shm_ctime;
409 	sb->st_mtim = shmfd->shm_mtime;
410 	sb->st_birthtim = shmfd->shm_birthtime;
411 	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
412 	sb->st_uid = shmfd->shm_uid;
413 	sb->st_gid = shmfd->shm_gid;
414 	mtx_unlock(&shm_timestamp_lock);
415 	sb->st_dev = shm_dev_ino;
416 	sb->st_ino = shmfd->shm_ino;
417 	sb->st_nlink = shmfd->shm_object->ref_count;
418 
419 	return (0);
420 }
421 
422 static int
423 shm_close(struct file *fp, struct thread *td)
424 {
425 	struct shmfd *shmfd;
426 
427 	shmfd = fp->f_data;
428 	fp->f_data = NULL;
429 	shm_drop(shmfd);
430 
431 	return (0);
432 }
433 
434 int
435 shm_dotruncate(struct shmfd *shmfd, off_t length)
436 {
437 	vm_object_t object;
438 	vm_page_t m;
439 	vm_pindex_t idx, nobjsize;
440 	vm_ooffset_t delta;
441 	int base, rv;
442 
443 	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
444 	object = shmfd->shm_object;
445 	VM_OBJECT_WLOCK(object);
446 	if (length == shmfd->shm_size) {
447 		VM_OBJECT_WUNLOCK(object);
448 		return (0);
449 	}
450 	nobjsize = OFF_TO_IDX(length + PAGE_MASK);
451 
452 	/* Are we shrinking?  If so, trim the end. */
453 	if (length < shmfd->shm_size) {
454 		/*
455 		 * Disallow any requests to shrink the size if this
456 		 * object is mapped into the kernel.
457 		 */
458 		if (shmfd->shm_kmappings > 0) {
459 			VM_OBJECT_WUNLOCK(object);
460 			return (EBUSY);
461 		}
462 
463 		/*
464 		 * Zero the truncated part of the last page.
465 		 */
466 		base = length & PAGE_MASK;
467 		if (base != 0) {
468 			idx = OFF_TO_IDX(length);
469 retry:
470 			m = vm_page_lookup(object, idx);
471 			if (m != NULL) {
472 				if (vm_page_sleep_if_busy(m, "shmtrc"))
473 					goto retry;
474 			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
475 				m = vm_page_alloc(object, idx,
476 				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
477 				if (m == NULL)
478 					goto retry;
479 				rv = vm_pager_get_pages(object, &m, 1, NULL,
480 				    NULL);
481 				vm_page_lock(m);
482 				if (rv == VM_PAGER_OK) {
483 					/*
484 					 * Since the page was not resident,
485 					 * and therefore not recently
486 					 * accessed, immediately enqueue it
487 					 * for asynchronous laundering.  The
488 					 * current operation is not regarded
489 					 * as an access.
490 					 */
491 					vm_page_launder(m);
492 					vm_page_unlock(m);
493 					vm_page_xunbusy(m);
494 				} else {
495 					vm_page_free(m);
496 					vm_page_unlock(m);
497 					VM_OBJECT_WUNLOCK(object);
498 					return (EIO);
499 				}
500 			}
501 			if (m != NULL) {
502 				pmap_zero_page_area(m, base, PAGE_SIZE - base);
503 				KASSERT(m->valid == VM_PAGE_BITS_ALL,
504 				    ("shm_dotruncate: page %p is invalid", m));
505 				vm_page_dirty(m);
506 				vm_pager_page_unswapped(m);
507 			}
508 		}
509 		delta = IDX_TO_OFF(object->size - nobjsize);
510 
511 		/* Toss in-memory pages. */
512 		if (nobjsize < object->size)
513 			vm_object_page_remove(object, nobjsize, object->size,
514 			    0);
515 
516 		/* Toss pages from swap. */
517 		if (object->type == OBJT_SWAP)
518 			swap_pager_freespace(object, nobjsize, delta);
519 
520 		/* Free the swap reservation accounted to the shm object. */
521 		swap_release_by_cred(delta, object->cred);
522 		object->charge -= delta;
523 	} else {
524 		/* Try to reserve additional swap space. */
525 		delta = IDX_TO_OFF(nobjsize - object->size);
526 		if (!swap_reserve_by_cred(delta, object->cred)) {
527 			VM_OBJECT_WUNLOCK(object);
528 			return (ENOMEM);
529 		}
530 		object->charge += delta;
531 	}
532 	shmfd->shm_size = length;
533 	mtx_lock(&shm_timestamp_lock);
534 	vfs_timestamp(&shmfd->shm_ctime);
535 	shmfd->shm_mtime = shmfd->shm_ctime;
536 	mtx_unlock(&shm_timestamp_lock);
537 	object->size = nobjsize;
538 	VM_OBJECT_WUNLOCK(object);
539 	return (0);
540 }
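/*
 * Sketch of the user-visible contract of shm_dotruncate(): growing an
 * object reserves swap against the owner's credential (so ftruncate(2)
 * can fail with ENOMEM), while shrinking discards pages and zeroes the
 * tail of the final partial page.  Illustrative userland code, not part
 * of the kernel build:
 */
#if 0
#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = shm_open(SHM_ANON, O_RDWR, 0600);

	if (ftruncate(fd, 100) == -1)	/* grow: reserves swap */
		err(1, "swap reservation failed");
	(void)pwrite(fd, "secret", 6, 90);
	(void)ftruncate(fd, 10);	/* shrink: discards and zeroes */
	(void)ftruncate(fd, 100);	/* grow back */
	/* Bytes 10..99 now read back as zeroes; "secret" is gone. */
	close(fd);
	return (0);
}
#endif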
541 
542 /*
543  * shmfd object management including creation and reference counting
544  * routines.
545  */
546 struct shmfd *
547 shm_alloc(struct ucred *ucred, mode_t mode)
548 {
549 	struct shmfd *shmfd;
550 
551 	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
552 	shmfd->shm_size = 0;
553 	shmfd->shm_uid = ucred->cr_uid;
554 	shmfd->shm_gid = ucred->cr_gid;
555 	shmfd->shm_mode = mode;
556 	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
557 	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
558 	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
559 	shmfd->shm_object->pg_color = 0;
560 	VM_OBJECT_WLOCK(shmfd->shm_object);
561 	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
562 	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
563 	VM_OBJECT_WUNLOCK(shmfd->shm_object);
564 	vfs_timestamp(&shmfd->shm_birthtime);
565 	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
566 	    shmfd->shm_birthtime;
567 	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
568 	refcount_init(&shmfd->shm_refs, 1);
569 	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
570 	rangelock_init(&shmfd->shm_rl);
571 #ifdef MAC
572 	mac_posixshm_init(shmfd);
573 	mac_posixshm_create(ucred, shmfd);
574 #endif
575 
576 	return (shmfd);
577 }
578 
579 struct shmfd *
580 shm_hold(struct shmfd *shmfd)
581 {
582 
583 	refcount_acquire(&shmfd->shm_refs);
584 	return (shmfd);
585 }
586 
587 void
588 shm_drop(struct shmfd *shmfd)
589 {
590 
591 	if (refcount_release(&shmfd->shm_refs)) {
592 #ifdef MAC
593 		mac_posixshm_destroy(shmfd);
594 #endif
595 		rangelock_destroy(&shmfd->shm_rl);
596 		mtx_destroy(&shmfd->shm_mtx);
597 		vm_object_deallocate(shmfd->shm_object);
598 		free(shmfd, M_SHMFD);
599 	}
600 }
601 
602 /*
603  * Determine if the credentials have sufficient permissions for a
604  * specified combination of FREAD and FWRITE.
605  */
606 int
607 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
608 {
609 	accmode_t accmode;
610 	int error;
611 
612 	accmode = 0;
613 	if (flags & FREAD)
614 		accmode |= VREAD;
615 	if (flags & FWRITE)
616 		accmode |= VWRITE;
617 	mtx_lock(&shm_timestamp_lock);
618 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
619 	    accmode, ucred, NULL);
620 	mtx_unlock(&shm_timestamp_lock);
621 	return (error);
622 }
623 
624 /*
625  * Dictionary management.  We maintain an in-kernel dictionary to map
626  * paths to shmfd objects.  We use the FNV hash on the path to store
627  * the mappings in a hash table.
628  */
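/*
 * For reference, lookup keys are computed with fnv_32_str() from
 * <sys/fnv_hash.h>, and SHM_HASH() masks the result with shm_hash to
 * select a bucket.  The 32-bit FNV-1 loop is equivalent to this
 * illustrative sketch; the kernel uses the real fnv_32_str():
 */
#if 0
static uint32_t
fnv1_32(const char *s)
{
	uint32_t h = 2166136261u;	/* FNV1_32_INIT */

	for (; *s != '\0'; s++) {
		h *= 16777619u;		/* FNV_32_PRIME */
		h ^= (unsigned char)*s;
	}
	return (h);
}
#endif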
629 static void
630 shm_init(void *arg)
631 {
632 
633 	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
634 	sx_init(&shm_dict_lock, "shm dictionary");
635 	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
636 	new_unrhdr64(&shm_ino_unr, 1);
637 	shm_dev_ino = devfs_alloc_cdp_inode();
638 	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
639 }
640 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
641 
642 static struct shmfd *
643 shm_lookup(char *path, Fnv32_t fnv)
644 {
645 	struct shm_mapping *map;
646 
647 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
648 		if (map->sm_fnv != fnv)
649 			continue;
650 		if (strcmp(map->sm_path, path) == 0)
651 			return (map->sm_shmfd);
652 	}
653 
654 	return (NULL);
655 }
656 
657 static void
658 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
659 {
660 	struct shm_mapping *map;
661 
662 	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
663 	map->sm_path = path;
664 	map->sm_fnv = fnv;
665 	map->sm_shmfd = shm_hold(shmfd);
666 	shmfd->shm_path = path;
667 	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
668 }
669 
670 static int
671 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
672 {
673 	struct shm_mapping *map;
674 	int error;
675 
676 	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
677 		if (map->sm_fnv != fnv)
678 			continue;
679 		if (strcmp(map->sm_path, path) == 0) {
680 #ifdef MAC
681 			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
682 			if (error)
683 				return (error);
684 #endif
685 			error = shm_access(map->sm_shmfd, ucred,
686 			    FREAD | FWRITE);
687 			if (error)
688 				return (error);
689 			map->sm_shmfd->shm_path = NULL;
690 			LIST_REMOVE(map, sm_link);
691 			shm_drop(map->sm_shmfd);
692 			free(map->sm_path, M_SHMFD);
693 			free(map, M_SHMFD);
694 			return (0);
695 		}
696 	}
697 
698 	return (ENOENT);
699 }
700 
701 int
702 kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
703     struct filecaps *fcaps)
704 {
705 	struct filedesc *fdp;
706 	struct shmfd *shmfd;
707 	struct file *fp;
708 	char *path;
709 	const char *pr_path;
710 	size_t pr_pathlen;
711 	Fnv32_t fnv;
712 	mode_t cmode;
713 	int fd, error;
714 
715 #ifdef CAPABILITY_MODE
716 	/*
717 	 * shm_open(2) is only allowed for anonymous objects.
718 	 */
719 	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
720 		return (ECAPMODE);
721 #endif
722 
723 	AUDIT_ARG_FFLAGS(flags);
724 	AUDIT_ARG_MODE(mode);
725 
726 	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
727 		return (EINVAL);
728 
729 	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
730 		return (EINVAL);
731 
732 	fdp = td->td_proc->p_fd;
733 	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;
734 
735 	error = falloc_caps(td, &fp, &fd, O_CLOEXEC, fcaps);
736 	if (error)
737 		return (error);
738 
739 	/* A SHM_ANON path pointer creates an anonymous object. */
740 	if (userpath == SHM_ANON) {
741 		/* A read-only anonymous object is pointless. */
742 		if ((flags & O_ACCMODE) == O_RDONLY) {
743 			fdclose(td, fp, fd);
744 			fdrop(fp, td);
745 			return (EINVAL);
746 		}
747 		shmfd = shm_alloc(td->td_ucred, cmode);
748 	} else {
749 		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
750 		pr_path = td->td_ucred->cr_prison->pr_path;
751 
752 		/* Construct a full pathname for jailed callers. */
753 		pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
754 		    : strlcpy(path, pr_path, MAXPATHLEN);
755 		error = copyinstr(userpath, path + pr_pathlen,
756 		    MAXPATHLEN - pr_pathlen, NULL);
757 #ifdef KTRACE
758 		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
759 			ktrnamei(path);
760 #endif
761 		/* Require paths to start with a '/' character. */
762 		if (error == 0 && path[pr_pathlen] != '/')
763 			error = EINVAL;
764 		if (error) {
765 			fdclose(td, fp, fd);
766 			fdrop(fp, td);
767 			free(path, M_SHMFD);
768 			return (error);
769 		}
770 
771 		AUDIT_ARG_UPATH1_CANON(path);
772 		fnv = fnv_32_str(path, FNV1_32_INIT);
773 		sx_xlock(&shm_dict_lock);
774 		shmfd = shm_lookup(path, fnv);
775 		if (shmfd == NULL) {
776 			/* Object does not yet exist; create it if requested. */
777 			if (flags & O_CREAT) {
778 #ifdef MAC
779 				error = mac_posixshm_check_create(td->td_ucred,
780 				    path);
781 				if (error == 0) {
782 #endif
783 					shmfd = shm_alloc(td->td_ucred, cmode);
784 					shm_insert(path, fnv, shmfd);
785 #ifdef MAC
786 				}
787 #endif
788 			} else {
789 				free(path, M_SHMFD);
790 				error = ENOENT;
791 			}
792 		} else {
793 			/*
794 			 * Object already exists; obtain a new
795 			 * reference if requested and permitted.
796 			 */
797 			free(path, M_SHMFD);
798 			if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
799 				error = EEXIST;
800 			else {
801 #ifdef MAC
802 				error = mac_posixshm_check_open(td->td_ucred,
803 				    shmfd, FFLAGS(flags & O_ACCMODE));
804 				if (error == 0)
805 #endif
806 				error = shm_access(shmfd, td->td_ucred,
807 				    FFLAGS(flags & O_ACCMODE));
808 			}
809 
810 			/*
811 			 * Truncate the file back to zero length if
812 			 * O_TRUNC was specified and the object was
813 			 * opened with read/write.
814 			 */
815 			if (error == 0 &&
816 			    (flags & (O_ACCMODE | O_TRUNC)) ==
817 			    (O_RDWR | O_TRUNC)) {
818 #ifdef MAC
819 				error = mac_posixshm_check_truncate(
820 					td->td_ucred, fp->f_cred, shmfd);
821 				if (error == 0)
822 #endif
823 					shm_dotruncate(shmfd, 0);
824 			}
825 			if (error == 0)
826 				shm_hold(shmfd);
827 		}
828 		sx_xunlock(&shm_dict_lock);
829 
830 		if (error) {
831 			fdclose(td, fp, fd);
832 			fdrop(fp, td);
833 			return (error);
834 		}
835 	}
836 
837 	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
838 
839 	td->td_retval[0] = fd;
840 	fdrop(fp, td);
841 
842 	return (0);
843 }
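/*
 * Usage sketch for the two entry paths above: SHM_ANON yields a private,
 * unnamed object (no dictionary entry), while a "/name" path is entered
 * into the dictionary after being prefixed with the caller's jail root.
 * Illustrative userland code with a hypothetical "/myobj" name; not part
 * of the kernel build.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>

int
main(void)
{
	int anon, named;

	/* Anonymous: must be opened O_RDWR; O_RDONLY yields EINVAL. */
	anon = shm_open(SHM_ANON, O_RDWR, 0);

	/* Named: path must begin with '/'; O_EXCL detects create races. */
	named = shm_open("/myobj", O_RDWR | O_CREAT | O_EXCL, 0600);

	return (anon == -1 || named == -1);
}
#endif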
844 
845 /* System calls. */
846 int
847 sys_shm_open(struct thread *td, struct shm_open_args *uap)
848 {
849 
850 	return (kern_shm_open(td, uap->path, uap->flags, uap->mode, NULL));
851 }
852 
853 int
854 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
855 {
856 	char *path;
857 	const char *pr_path;
858 	size_t pr_pathlen;
859 	Fnv32_t fnv;
860 	int error;
861 
862 	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
863 	pr_path = td->td_ucred->cr_prison->pr_path;
864 	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
865 	    : strlcpy(path, pr_path, MAXPATHLEN);
866 	error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
867 	    NULL);
868 	if (error) {
869 		free(path, M_TEMP);
870 		return (error);
871 	}
872 #ifdef KTRACE
873 	if (KTRPOINT(curthread, KTR_NAMEI))
874 		ktrnamei(path);
875 #endif
876 	AUDIT_ARG_UPATH1_CANON(path);
877 	fnv = fnv_32_str(path, FNV1_32_INIT);
878 	sx_xlock(&shm_dict_lock);
879 	error = shm_remove(path, fnv, td->td_ucred);
880 	sx_xunlock(&shm_dict_lock);
881 	free(path, M_TEMP);
882 
883 	return (error);
884 }
885 
886 int
887 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
888     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
889     vm_ooffset_t foff, struct thread *td)
890 {
891 	struct shmfd *shmfd;
892 	vm_prot_t maxprot;
893 	int error;
894 
895 	shmfd = fp->f_data;
896 	maxprot = VM_PROT_NONE;
897 
898 	/* FREAD should always be set. */
899 	if ((fp->f_flag & FREAD) != 0)
900 		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
901 	if ((fp->f_flag & FWRITE) != 0)
902 		maxprot |= VM_PROT_WRITE;
903 
904 	/* Don't permit shared writable mappings on read-only descriptors. */
905 	if ((flags & MAP_SHARED) != 0 &&
906 	    (maxprot & VM_PROT_WRITE) == 0 &&
907 	    (prot & VM_PROT_WRITE) != 0)
908 		return (EACCES);
909 	maxprot &= cap_maxprot;
910 
911 	/* See comment in vn_mmap(). */
912 	if (
913 #ifdef _LP64
914 	    objsize > OFF_MAX ||
915 #endif
916 	    foff < 0 || foff > OFF_MAX - objsize)
917 		return (EINVAL);
918 
919 #ifdef MAC
920 	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
921 	if (error != 0)
922 		return (error);
923 #endif
924 
925 	mtx_lock(&shm_timestamp_lock);
926 	vfs_timestamp(&shmfd->shm_atime);
927 	mtx_unlock(&shm_timestamp_lock);
928 	vm_object_reference(shmfd->shm_object);
929 
930 	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
931 	    shmfd->shm_object, foff, FALSE, td);
932 	if (error != 0)
933 		vm_object_deallocate(shmfd->shm_object);
934 	return (error);
935 }
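/*
 * Consequence of the check above: a shared writable mapping requires a
 * descriptor opened with write access.  Hypothetical userland sketch
 * (assumes an existing "/myobj" object; not part of the kernel build):
 */
#if 0
#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>

int
main(void)
{
	int fd = shm_open("/myobj", O_RDONLY, 0);
	void *p;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		warn("mmap");	/* expected: EACCES, fd lacks FWRITE */
	return (0);
}
#endif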
936 
937 static int
938 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
939     struct thread *td)
940 {
941 	struct shmfd *shmfd;
942 	int error;
943 
944 	error = 0;
945 	shmfd = fp->f_data;
946 	mtx_lock(&shm_timestamp_lock);
947 	/*
948 	 * SUSv4 says that x bits of permission need not be affected.
949 	 * Be consistent with our shm_open there.
950 	 */
951 #ifdef MAC
952 	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
953 	if (error != 0)
954 		goto out;
955 #endif
956 	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
957 	    shmfd->shm_gid, VADMIN, active_cred, NULL);
958 	if (error != 0)
959 		goto out;
960 	shmfd->shm_mode = mode & ACCESSPERMS;
961 out:
962 	mtx_unlock(&shm_timestamp_lock);
963 	return (error);
964 }
965 
966 static int
967 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
968     struct thread *td)
969 {
970 	struct shmfd *shmfd;
971 	int error;
972 
973 	error = 0;
974 	shmfd = fp->f_data;
975 	mtx_lock(&shm_timestamp_lock);
976 #ifdef MAC
977 	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
978 	if (error != 0)
979 		goto out;
980 #endif
981 	if (uid == (uid_t)-1)
982 		uid = shmfd->shm_uid;
983 	if (gid == (gid_t)-1)
984 		gid = shmfd->shm_gid;
985 	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
986 	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
987 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
988 		goto out;
989 	shmfd->shm_uid = uid;
990 	shmfd->shm_gid = gid;
991 out:
992 	mtx_unlock(&shm_timestamp_lock);
993 	return (error);
994 }
995 
996 /*
997  * Helper routines to allow the backing object of a shared memory file
998  * descriptor to be mapped in the kernel.
999  */
1000 int
1001 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1002 {
1003 	struct shmfd *shmfd;
1004 	vm_offset_t kva, ofs;
1005 	vm_object_t obj;
1006 	int rv;
1007 
1008 	if (fp->f_type != DTYPE_SHM)
1009 		return (EINVAL);
1010 	shmfd = fp->f_data;
1011 	obj = shmfd->shm_object;
1012 	VM_OBJECT_WLOCK(obj);
1013 	/*
1014 	 * XXXRW: This validation is probably insufficient, and subject to
1015 	 * sign errors.  It should be fixed.
1016 	 */
1017 	if (offset >= shmfd->shm_size ||
1018 	    offset + size > round_page(shmfd->shm_size)) {
1019 		VM_OBJECT_WUNLOCK(obj);
1020 		return (EINVAL);
1021 	}
1022 
1023 	shmfd->shm_kmappings++;
1024 	vm_object_reference_locked(obj);
1025 	VM_OBJECT_WUNLOCK(obj);
1026 
1027 	/* Map the object into the kernel_map and wire it. */
1028 	kva = vm_map_min(kernel_map);
1029 	ofs = offset & PAGE_MASK;
1030 	offset = trunc_page(offset);
1031 	size = round_page(size + ofs);
1032 	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1033 	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1034 	    VM_PROT_READ | VM_PROT_WRITE, 0);
1035 	if (rv == KERN_SUCCESS) {
1036 		rv = vm_map_wire(kernel_map, kva, kva + size,
1037 		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1038 		if (rv == KERN_SUCCESS) {
1039 			*memp = (void *)(kva + ofs);
1040 			return (0);
1041 		}
1042 		vm_map_remove(kernel_map, kva, kva + size);
1043 	} else
1044 		vm_object_deallocate(obj);
1045 
1046 	/* On failure, drop our mapping reference. */
1047 	VM_OBJECT_WLOCK(obj);
1048 	shmfd->shm_kmappings--;
1049 	VM_OBJECT_WUNLOCK(obj);
1050 
1051 	return (vm_mmap_to_errno(rv));
1052 }
1053 
1054 /*
1055  * We require the caller to unmap the entire entry.  This allows us to
1056  * safely decrement shm_kmappings when a mapping is removed.
1057  */
1058 int
1059 shm_unmap(struct file *fp, void *mem, size_t size)
1060 {
1061 	struct shmfd *shmfd;
1062 	vm_map_entry_t entry;
1063 	vm_offset_t kva, ofs;
1064 	vm_object_t obj;
1065 	vm_pindex_t pindex;
1066 	vm_prot_t prot;
1067 	boolean_t wired;
1068 	vm_map_t map;
1069 	int rv;
1070 
1071 	if (fp->f_type != DTYPE_SHM)
1072 		return (EINVAL);
1073 	shmfd = fp->f_data;
1074 	kva = (vm_offset_t)mem;
1075 	ofs = kva & PAGE_MASK;
1076 	kva = trunc_page(kva);
1077 	size = round_page(size + ofs);
1078 	map = kernel_map;
1079 	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1080 	    &obj, &pindex, &prot, &wired);
1081 	if (rv != KERN_SUCCESS)
1082 		return (EINVAL);
1083 	if (entry->start != kva || entry->end != kva + size) {
1084 		vm_map_lookup_done(map, entry);
1085 		return (EINVAL);
1086 	}
1087 	vm_map_lookup_done(map, entry);
1088 	if (obj != shmfd->shm_object)
1089 		return (EINVAL);
1090 	vm_map_remove(map, kva, kva + size);
1091 	VM_OBJECT_WLOCK(obj);
1092 	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1093 	shmfd->shm_kmappings--;
1094 	VM_OBJECT_WUNLOCK(obj);
1095 	return (0);
1096 }
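/*
 * Hypothetical in-kernel consumer of the two helpers above (a sketch,
 * not an existing FreeBSD interface): map a region of a shm descriptor
 * into the kernel, access it directly, then unmap the entire entry.
 * It assumes the object is at least one page long; while mapped,
 * shm_kmappings makes shm_dotruncate() refuse shrinks with EBUSY.
 */
#if 0
static int
example_kernel_use(struct file *fp)
{
	void *mem;
	int error;

	error = shm_map(fp, PAGE_SIZE, 0, &mem);
	if (error != 0)
		return (error);
	memset(mem, 0, PAGE_SIZE);	/* wired, kernel-mapped memory */
	return (shm_unmap(fp, mem, PAGE_SIZE));
}
#endif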
1097 
1098 static int
1099 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1100 {
1101 	const char *path, *pr_path;
1102 	size_t pr_pathlen;
1103 	bool visible;
1104 
1105 	sx_assert(&shm_dict_lock, SA_LOCKED);
1106 	kif->kf_type = KF_TYPE_SHM;
1107 	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1108 	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1109 	if (shmfd->shm_path != NULL) {
1110 		/* shm_path is protected by shm_dict_lock, asserted above. */
1111 		path = shmfd->shm_path;
1112 		pr_path = curthread->td_ucred->cr_prison->pr_path;
1113 		if (strcmp(pr_path, "/") != 0) {
1114 			/* Return the jail-rooted pathname. */
1115 			pr_pathlen = strlen(pr_path);
1116 			visible = strncmp(path, pr_path, pr_pathlen)
1117 			    == 0 && path[pr_pathlen] == '/';
1118 			if (list && !visible)
1119 				return (EPERM);
1120 			if (visible)
1121 				path += pr_pathlen;
1122 		}
1123 		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1124 	}
1125 
1126 	return (0);
1127 }
1128 
1129 static int
1130 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1131     struct filedesc *fdp __unused)
1132 {
1133 	int res;
1134 
1135 	sx_slock(&shm_dict_lock);
1136 	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1137 	sx_sunlock(&shm_dict_lock);
1138 	return (res);
1139 }
1140 
1141 static int
1142 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
1143 {
1144 	struct shm_mapping *shmm;
1145 	struct sbuf sb;
1146 	struct kinfo_file kif;
1147 	u_long i;
1148 	ssize_t curlen;
1149 	int error, error2;
1150 
1151 	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
1152 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
1153 	curlen = 0;
1154 	error = 0;
1155 	sx_slock(&shm_dict_lock);
1156 	for (i = 0; i < shm_hash + 1; i++) {
1157 		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
1158 			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
1159 			    &kif, true);
1160 			if (error == EPERM)
1161 				continue;
1162 			if (error != 0)
1163 				break;
1164 			pack_kinfo(&kif);
1165 			if (req->oldptr != NULL &&
1166 			    kif.kf_structsize + curlen > req->oldlen)
1167 				break;
1168 			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
1169 			    0 : ENOMEM;
1170 			if (error != 0)
1171 				break;
1172 			curlen += kif.kf_structsize;
1173 		}
1174 	}
1175 	sx_sunlock(&shm_dict_lock);
1176 	error2 = sbuf_finish(&sb);
1177 	sbuf_delete(&sb);
1178 	return (error != 0 ? error : error2);
1179 }
1180 
1181 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
1182     CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
1183     NULL, 0, sysctl_posix_shm_list, "",
1184     "POSIX SHM list");
1185