xref: /freebsd/sys/kern/uipc_shm.c (revision cbd30a72ca196976c1c700400ecd424baa1b9c16)
/*-
 * Copyright (c) 2006, 2011 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 */
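
/*
 * Illustrative userland usage (an editor's sketch, not part of this
 * file; the object name "/example" is hypothetical): a consumer
 * typically creates an object with shm_open(2), sizes it with
 * ftruncate(2), and maps it with mmap(2):
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0) {
 *		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *		...
 *	}
 *	shm_unlink("/example");
 */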

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr *shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = invfo_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

/*
 * Transfer data between the uio and at most one page of the backing
 * object, starting at the uio's current offset.
 */
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock may be held by our caller (this
	 * routine is shared with tmpfs), it is nonetheless safe to sleep
	 * waiting for a free page.  The pageout daemon does not need to
	 * acquire the vnode lock to page out the object's pages because
	 * the object is an OBJT_SWAP type object.
	 */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(m);
		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
			if (rv != VM_PAGER_OK) {
				printf(
	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
				    obj, (intmax_t)idx, m->valid, rv);
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
		vm_page_xunbusy(m);
	}
	vm_page_lock(m);
	vm_page_hold(m);
	if (m->queue == PQ_NONE) {
		vm_page_deactivate(m);
	} else {
		/* Requeue to maintain LRU ordering. */
		vm_page_requeue(m);
	}
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		vm_pager_page_unswapped(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);

	return (error);
}

/*
 * Transfer data between the uio and the object, one page at a time,
 * stopping at obj_size so that I/O does not extend past the end of
 * the object.
 */
int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}

	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane-ish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	object = shmfd->shm_object;
	VM_OBJECT_WLOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_WUNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "shmtrc"))
					goto retry;
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_WUNLOCK(object);
					VM_WAIT;
					VM_OBJECT_WLOCK(object);
					goto retry;
				}
				rv = vm_pager_get_pages(object, &m, 1, NULL,
				    NULL);
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					/*
					 * Since the page was not resident,
					 * and therefore not recently
					 * accessed, immediately enqueue it
					 * for asynchronous laundering.  The
					 * current operation is not regarded
					 * as an access.
					 */
					vm_page_launder(m);
					vm_page_unlock(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = ptoa(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for the shm object. */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Attempt to reserve additional swap for the growth. */
		delta = ptoa(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_WUNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
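
/*
 * Reference model (an illustrative editor's sketch, not a new API):
 * shm_alloc() returns an object with shm_refs == 1, shm_hold() takes
 * an additional reference, and shm_drop() releases one, destroying
 * the object on the final release.  The dictionary holds its own
 * reference via shm_insert(), so a named object survives until both
 * the last descriptor is closed and the name is unlinked:
 *
 *	shmfd = shm_alloc(ucred, 0600);		refs == 1 (descriptor)
 *	shm_insert(path, fnv, shmfd);		refs == 2 (dictionary)
 *	shm_drop(shmfd);			refs == 1 (fd closed)
 *	shm_remove(path, fnv, ucred);		refs == 0 (freed)
 */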
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;
	int ino;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
	shmfd->shm_object->pg_color = 0;
	VM_OBJECT_WLOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	ino = alloc_unr(shm_ino_unr);
	if (ino == -1)
		shmfd->shm_ino = 0;
	else
		shmfd->shm_ino = ino;
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		if (shmfd->shm_ino != 0)
			free_unr(shm_ino_unr, shmfd->shm_ino);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
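
/*
 * For illustration (an editor's sketch; the name "/example" is
 * hypothetical): a lookup of "/example" in the routines below computes
 *
 *	fnv = fnv_32_str("/example", FNV1_32_INIT);
 *
 * indexes the chain at SHM_HASH(fnv), and walks it comparing sm_fnv
 * first and then the full path, since distinct paths may hash to the
 * same chain.  All lookups and updates run under shm_dict_lock.
 */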
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
	KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

int
kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
    struct filecaps *fcaps)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc_caps(td, &fp, &fd, O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		pr_path = td->td_ucred->cr_prison->pr_path;

		/* Construct a full pathname for jailed callers. */
		pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
		    : strlcpy(path, pr_path, MAXPATHLEN);
		error = copyinstr(userpath, path + pr_pathlen,
		    MAXPATHLEN - pr_pathlen, NULL);
#ifdef KTRACE
		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(path);
#endif
		/* Require paths to start with a '/' character. */
		if (error == 0 && path[pr_pathlen] != '/')
			error = EINVAL;
		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags, uap->mode, NULL));
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	Fnv32_t fnv;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
	    : strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(uap->path, path + pr_pathlen, MAXPATHLEN - pr_pathlen,
	    NULL);
	if (error) {
		free(path, M_TEMP);
		return (error);
	}
#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_TEMP);

	return (error);
}

int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
	if ((fp->f_flag & FWRITE) != 0)
		maxprot |= VM_PROT_WRITE;

	/* Don't permit shared writable mappings on read-only descriptors. */
	if ((flags & MAP_SHARED) != 0 &&
	    (maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	maxprot &= cap_maxprot;

	/* See comment in vn_mmap(). */
	if (
#ifdef _LP64
	    objsize > OFF_MAX ||
#endif
	    foff < 0 || foff > OFF_MAX - objsize)
		return (EINVAL);

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		return (error);
#endif

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(shmfd->shm_object);
	return (error);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
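
/*
 * Illustrative in-kernel usage (an editor's sketch, assuming the
 * caller already holds a reference on fp): map the first page of the
 * object, use it, then unmap exactly the same range so that
 * shm_kmappings stays balanced.
 *
 *	void *mem;
 *
 *	if (shm_map(fp, PAGE_SIZE, 0, &mem) == 0) {
 *		memset(mem, 0, PAGE_SIZE);
 *		shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */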
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	const char *path, *pr_path;
	struct shmfd *shmfd;
	size_t pr_pathlen;

	kif->kf_type = KF_TYPE_SHM;
	shmfd = fp->f_data;

	mtx_lock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	mtx_unlock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		sx_slock(&shm_dict_lock);
		if (shmfd->shm_path != NULL) {
			path = shmfd->shm_path;
			pr_path = curthread->td_ucred->cr_prison->pr_path;
			if (strcmp(pr_path, "/") != 0) {
				/* Return the jail-rooted pathname. */
				pr_pathlen = strlen(pr_path);
				if (strncmp(path, pr_path, pr_pathlen) == 0 &&
				    path[pr_pathlen] == '/')
					path += pr_pathlen;
			}
			strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
		}
		sx_sunlock(&shm_dict_lock);
	}
	return (0);
}