xref: /freebsd/sys/kern/uipc_shm.c (revision 5b381db8cc67dd85695c49a85893ac7a5a612f42)
/*-
 * Copyright (c) 2006, 2011 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 */
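
/*
 * Illustrative userland sketch (not part of the kernel): a typical
 * consumer creates an object, sizes it with ftruncate(2), and maps it.
 * The name "/example" and the 4096-byte size are arbitrary.
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0) {
 *		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *		...
 *		munmap(p, 4096);
 *	}
 *	close(fd);
 *	shm_unlink("/example");
 */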

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr *shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])
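
/*
 * Example (illustrative): hashinit() in shm_init() below allocates a
 * power-of-two bucket array, storing the bucket mask (1023 for the
 * 1024 entries requested there) in shm_hash, so a path such as
 * "/example" selects its bucket via
 *
 *	Fnv32_t fnv = fnv_32_str("/example", FNV1_32_INIT);
 *	struct shm_mapping *map = LIST_FIRST(SHM_HASH(fnv));
 */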

static int	shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags);
static struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode);
static void	shm_init(void *arg);
static void	shm_drop(struct shmfd *shmfd);
static struct shmfd *shm_hold(struct shmfd *shmfd);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int	shm_dotruncate(struct shmfd *shmfd, off_t length);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;

/* File descriptor operations. */
static struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = invfo_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock is held when this routine is
	 * entered from tmpfs (which does its I/O via uiomove_object()),
	 * it is nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon need not acquire that lock to page out the
	 * object's pages, because the object is an OBJT_SWAP object.
	 */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
	if (m->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &m, 1, 0);
			m = vm_page_lookup(obj, idx);
			if (m == NULL) {
				printf(
		    "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
				    obj, idx, rv);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
			if (rv != VM_PAGER_OK) {
				printf(
	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
				    obj, idx, m->valid, rv);
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
	}
	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_hold(m);
	if (m->queue == PQ_NONE) {
		vm_page_deactivate(m);
	} else {
		/* Requeue to maintain LRU ordering. */
		vm_page_requeue(m);
	}
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		vm_pager_page_unswapped(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}
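
/*
 * Worked example (illustrative, PAGE_SIZE 4096): a 6000-byte read at
 * offset 3000 against an 8192-byte object is clipped to
 * len = MIN(8192 - 3000, 6000) = 5192 and carved into per-page copies
 * by uiomove_object_page(): 1096 bytes from the first page, then a
 * full 4096-byte page, yielding a short transfer of 5192 bytes.
 */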

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	/*
	 * Check the MAC policy before taking the f_offset and range
	 * locks so that an error return cannot leak them.
	 */
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}

	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m, ma[1];
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	object = shmfd->shm_object;
	VM_OBJECT_WLOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_WUNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "shmtrc"))
					goto retry;
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_WUNLOCK(object);
					VM_WAIT;
					VM_OBJECT_WLOCK(object);
					goto retry;
				} else if (m->valid != VM_PAGE_BITS_ALL) {
					ma[0] = m;
					rv = vm_pager_get_pages(object, ma, 1,
					    0);
					m = vm_page_lookup(object, idx);
				} else
					/* A cached page was reactivated. */
					rv = VM_PAGER_OK;
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					vm_page_deactivate(m);
					vm_page_unlock(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = ptoa(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Release the swap space accounted to the shm object. */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Attempt to reserve the swap */
		delta = ptoa(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_WUNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
	return (0);
}
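
/*
 * Worked example (illustrative, PAGE_SIZE 4096): shrinking a 3-page
 * (12288-byte) object to length 6000 gives base = 6000 & PAGE_MASK =
 * 1904, so bytes 1904..4095 of the page at index 1 are zeroed;
 * nobjsize becomes OFF_TO_IDX(6000 + PAGE_MASK) = 2, so the page at
 * index 2 plus its swap space and swap accounting are released.
 */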

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;
	int ino;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
	shmfd->shm_object->pg_color = 0;
	VM_OBJECT_WLOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	ino = alloc_unr(shm_ino_unr);
	if (ino == -1)
		shmfd->shm_ino = 0;
	else
		shmfd->shm_ino = ino;
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

static struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

static void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		if (shmfd->shm_ino != 0)
			free_unr(shm_ino_unr, shmfd->shm_ino);
		free(shmfd, M_SHMFD);
	}
}
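
/*
 * Sketch of the reference protocol (illustrative): every consumer that
 * stores a shmfd pointer takes its own reference and drops it when
 * done, e.g.
 *
 *	struct shmfd *ref = shm_hold(shmfd);
 *	... use ref; the object cannot be freed here ...
 *	shm_drop(ref);
 *
 * The dictionary entry and each open file hold one reference apiece;
 * shm_drop() frees the object on the last release.
 */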

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
static int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}
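
/*
 * Example (illustrative): a descriptor opened O_RDWR carries
 * FREAD | FWRITE, so shm_access() asks vaccess() for VREAD | VWRITE;
 * with shm_mode 0600 that succeeds only for the owning uid or a
 * suitably privileged credential.
 */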

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
	KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
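
/*
 * Sketch of the lookup protocol (illustrative): all dictionary
 * operations run under shm_dict_lock, e.g.
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	...
 *	sx_xunlock(&shm_dict_lock);
 *
 * shm_lookup() does not itself acquire a reference; callers such as
 * sys_shm_open() below call shm_hold() before dropping the lock.
 */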

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON))
		return (ECAPMODE);
#endif

	if ((uap->flags & O_ACCMODE) != O_RDONLY &&
	    (uap->flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC |
	    O_CLOEXEC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc(td, &fp, &fd, O_CLOEXEC);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (uap->path == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((uap->flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
#ifdef KTRACE
		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(path);
#endif
		/* Require paths to start with a '/' character. */
		if (error == 0 && path[0] != '/')
			error = EINVAL;
		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (uap->flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				} else
					/* Not inserted, so free the path. */
					free(path, M_SHMFD);
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((uap->flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(uap->flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(uap->flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (uap->flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
	if (error) {
		free(path, M_TEMP);
		return (error);
	}
#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_TEMP);

	return (error);
}

int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
	if ((fp->f_flag & FWRITE) != 0)
		maxprot |= VM_PROT_WRITE;

	/* Don't permit shared writable mappings on read-only descriptors. */
	if ((flags & MAP_SHARED) != 0 &&
	    (maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	maxprot &= cap_maxprot;

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		return (error);
#endif

	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (foff >= shmfd->shm_size ||
	    foff + objsize > round_page(shmfd->shm_size))
		return (EINVAL);

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(shmfd->shm_object);
	return (error);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}
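
/*
 * Sketch of a kernel consumer (illustrative; "fp" is an assumed
 * DTYPE_SHM file pointer whose object has already been sized): map a
 * page-sized window, use it, then unmap exactly the region handed back.
 *
 *	void *mem;
 *	if (shm_map(fp, PAGE_SIZE, 0, &mem) == 0) {
 *		... access mem; shrinking the object is blocked ...
 *		shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */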

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	struct shmfd *shmfd;

	kif->kf_type = KF_TYPE_SHM;
	shmfd = fp->f_data;

	mtx_lock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	mtx_unlock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		sx_slock(&shm_dict_lock);
		if (shmfd->shm_path != NULL)
			strlcpy(kif->kf_path, shmfd->shm_path,
			    sizeof(kif->kf_path));
		sx_sunlock(&shm_dict_lock);
	}
	return (0);
}
1089