/* xref: /freebsd/sys/kern/uipc_shm.c (revision fb680e16f44be1cbcf58edafe75b85b166f449aa) */
/*-
 * Copyright (c) 2006, 2011 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 *
 * (4) Partial page truncation.  vnode_pager_setsize() will zero any parts
 *     of a partially mapped page as a result of ftruncate(2)/truncate(2).
 *     We can do the same (with the same pmap evil), but do we need to
 *     worry about the bits on disk if the page is swapped out or will the
 *     swapper zero the parts of a page that are invalid if the page is
 *     swapped back in for us?
 */
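
/*
 * For orientation, a minimal userland sketch of the interface implemented
 * below (illustrative only, not part of this file; the object name
 * "/example" and the 4096-byte length are arbitrary assumptions):
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	shm_example(void)
 *	{
 *		char *p;
 *		int fd;
 *
 *		fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *		if (fd == -1)
 *			return (-1);
 *		if (ftruncate(fd, 4096) == -1) {
 *			close(fd);
 *			return (-1);
 *		}
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    fd, 0);
 *		if (p == MAP_FAILED) {
 *			close(fd);
 *			return (-1);
 *		}
 *		p[0] = 'x';
 *		munmap(p, 4096);
 *		close(fd);
 *		shm_unlink("/example");
 *		return (0);
 *	}
 *
 * Passing SHM_ANON as the path instead creates an anonymous object that
 * is not entered into the path dictionary and cannot be shm_unlink()ed.
 */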

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"

#include <sys/param.h>
#include <sys/capability.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static int	shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags);
static struct shmfd *shm_alloc(struct ucred *ucred, mode_t mode);
static void	shm_dict_init(void *arg);
static void	shm_drop(struct shmfd *shmfd);
static struct shmfd *shm_hold(struct shmfd *shmfd);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int	shm_dotruncate(struct shmfd *shmfd, off_t length);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_poll_t	shm_poll;
static fo_kqfilter_t	shm_kqfilter;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;

/* File descriptor operations. */
static struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = shm_poll,
	.fo_kqfilter = shm_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_flags = DFLAG_PASSABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{

	return (EOPNOTSUPP);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{

	return (EOPNOTSUPP);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

static int
shm_ioctl(struct file *fp, u_long com, void *data,
    struct ucred *active_cred, struct thread *td)
{

	return (EOPNOTSUPP);
}

static int
shm_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{

	return (EOPNOTSUPP);
}

static int
shm_kqfilter(struct file *fp, struct knote *kn)
{

	return (EOPNOTSUPP);
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane-ish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;
	vm_ooffset_t delta;

	object = shmfd->shm_object;
	VM_OBJECT_LOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_UNLOCK(object);
			return (EBUSY);
		}
		delta = ptoa(object->size - nobjsize);

		/* Toss in-memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;

		/*
		 * If the last page is partially mapped, then zero out
		 * the garbage at the end of the page.  See comments
		 * in vnode_pager_setsize() for more details.
		 *
		 * XXXJHB: This handles in-memory pages, but what about
		 * a page swapped out to disk?
		 */
		if ((length & PAGE_MASK) &&
		    (m = vm_page_lookup(object, OFF_TO_IDX(length))) != NULL &&
		    m->valid != 0) {
			int base = (int)length & PAGE_MASK;
			int size = PAGE_SIZE - base;

			pmap_zero_page_area(m, base, size);

			/*
			 * Update the valid bits to reflect the blocks that
			 * have been zeroed.  Some of these valid bits may
			 * have already been set.
			 */
			vm_page_set_valid_range(m, base, size);

			/*
			 * Round "base" to the next block boundary so that the
			 * dirty bit for a partially zeroed block is not
			 * cleared.
			 */
			base = roundup2(base, DEV_BSIZE);

			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
		} else if ((length & PAGE_MASK) &&
		    __predict_false(object->cache != NULL)) {
			vm_page_cache_free(object, OFF_TO_IDX(length),
			    nobjsize);
		}
	} else {

		/* Attempt to reserve the swap */
		delta = ptoa(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_UNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_UNLOCK(object);
	return (0);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	VM_OBJECT_LOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	refcount_init(&shmfd->shm_refs, 1);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

static struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

static void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
static int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static void
shm_dict_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
}
SYSINIT(shm_dict_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_dict_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * In capability mode, shm_open(2) is only allowed for anonymous
	 * objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON))
		return (ECAPMODE);
#endif

	if ((uap->flags & O_ACCMODE) != O_RDONLY &&
	    (uap->flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc(td, &fp, &fd, 0);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (uap->path == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((uap->flags & O_ACCMODE) == O_RDONLY) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		error = copyinstr(uap->path, path, MAXPATHLEN, NULL);

		/* Require paths to start with a '/' character. */
		if (error == 0 && path[0] != '/')
			error = EINVAL;
		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (uap->flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((uap->flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(uap->flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(uap->flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (uap->flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	FILEDESC_XLOCK(fdp);
	if (fdp->fd_ofiles[fd] == fp)
		fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
	FILEDESC_XUNLOCK(fdp);
	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
	if (error) {
		free(path, M_TEMP);
		return (error);
	}

	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_TEMP);

	return (error);
}

/*
 * mmap() helper to validate mmap() requests against shm object state
 * and give mmap() the vm_object to use for the mapping.
 */
int
shm_mmap(struct shmfd *shmfd, vm_size_t objsize, vm_ooffset_t foff,
    vm_object_t *obj)
{

	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (foff >= shmfd->shm_size ||
	    foff + objsize > round_page(shmfd->shm_size))
		return (EINVAL);

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);
	*obj = shmfd->shm_object;
	return (0);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_LOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_UNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_UNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size,
	    VMFS_ALIGNED_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_LOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_UNLOCK(obj);

	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_LOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_UNLOCK(obj);
	return (0);
}
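
/*
 * A rough sketch of how a kernel consumer might use shm_map() and
 * shm_unmap() above (illustrative only; "fp" is assumed to be a held
 * struct file pointer of type DTYPE_SHM obtained elsewhere, e.g. via
 * fget(9), and "size"/"offset" are caller-chosen values that fall within
 * the object):
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, size, offset, &mem);
 *	if (error == 0) {
 *		... access "size" bytes at "mem"; the pages stay wired ...
 *		error = shm_unmap(fp, mem, size);
 *	}
 *
 * shm_unmap() must be given the same "mem" and "size" that were used for
 * shm_map(), since the entire kernel map entry is removed at once.
 */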