xref: /freebsd/sys/kern/uipc_mqueue.c (revision ef9ffb8594eee294334ced627755bf5b46b48f9f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
5  * Copyright (c) 2016-2017 Robert N. M. Watson
6  * All rights reserved.
7  *
8  * Portions of this software were developed by BAE Systems, the University of
9  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11  * Computing (TC) research program.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  */
35 
36 /*
37  * POSIX message queue implementation.
38  *
39  * 1) A mqueue filesystem can be mounted. Each message queue appears in
40  *    the mounted directory, and the user can change a queue's permissions
41  *    and ownership, or remove a queue. Manually creating a file in the
42  *    directory causes a message queue to be created in the kernel with
43  *    default message queue attributes and the same name; this method is
44  *    not recommended, since the mq_open syscall lets the user specify
45  *    different attributes. The file system can also be mounted multiple
46  *    times at different mount points, but all mounts show the same contents.
47  *
48  * 2) Standard POSIX message queue API. The syscalls do not use the VFS
49  *    layer but operate directly on the internal data structures; this lets
50  *    a user employ the IPC facility without mounting the mqueue file system.
51  */
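
/*
 * Example of the syscall-backed API as seen from userland (an illustrative
 * sketch only, not part of this file; the mq_*() wrappers live in librt and
 * map onto the kmq_*() syscalls implemented below):
 *
 *	mqd_t mqd = mq_open("/myqueue", O_RDWR | O_CREAT, 0600, NULL);
 *	char buf[1024];	 (* at least the queue's mq_msgsize; default is 1024 *)
 *	unsigned int prio;
 *
 *	mq_send(mqd, "hello", 5, 1);
 *	mq_receive(mqd, buf, sizeof(buf), &prio);
 *	mq_close(mqd);
 *	mq_unlink("/myqueue");
 */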
52 
53 #include "opt_capsicum.h"
54 
55 #include <sys/param.h>
56 #include <sys/kernel.h>
57 #include <sys/systm.h>
58 #include <sys/limits.h>
59 #include <sys/malloc.h>
60 #include <sys/buf.h>
61 #include <sys/capsicum.h>
62 #include <sys/dirent.h>
63 #include <sys/event.h>
64 #include <sys/eventhandler.h>
65 #include <sys/fcntl.h>
66 #include <sys/file.h>
67 #include <sys/filedesc.h>
68 #include <sys/jail.h>
69 #include <sys/lock.h>
70 #include <sys/module.h>
71 #include <sys/mount.h>
72 #include <sys/mqueue.h>
73 #include <sys/mutex.h>
74 #include <sys/namei.h>
75 #include <sys/posix4.h>
76 #include <sys/poll.h>
77 #include <sys/priv.h>
78 #include <sys/proc.h>
79 #include <sys/queue.h>
80 #include <sys/sysproto.h>
81 #include <sys/stat.h>
82 #include <sys/syscall.h>
83 #include <sys/syscallsubr.h>
84 #include <sys/sysent.h>
85 #include <sys/sx.h>
86 #include <sys/sysctl.h>
87 #include <sys/taskqueue.h>
88 #include <sys/unistd.h>
89 #include <sys/user.h>
90 #include <sys/vnode.h>
91 #include <machine/atomic.h>
92 
93 #include <security/audit/audit.h>
94 
95 FEATURE(p1003_1b_mqueue, "POSIX P1003.1B message queues support");
96 
97 /*
98  * Limits and constants
99  */
100 #define	MQFS_NAMELEN		NAME_MAX
101 #define MQFS_DELEN		(8 + MQFS_NAMELEN)
102 
103 /* node types */
104 typedef enum {
105 	mqfstype_none = 0,
106 	mqfstype_root,
107 	mqfstype_dir,
108 	mqfstype_this,
109 	mqfstype_parent,
110 	mqfstype_file,
111 	mqfstype_symlink,
112 } mqfs_type_t;
113 
114 struct mqfs_node;
115 
116 /*
117  * mqfs_info: describes a mqfs instance
118  */
119 struct mqfs_info {
120 	struct sx		mi_lock;
121 	struct mqfs_node	*mi_root;
122 	struct unrhdr		*mi_unrhdr;
123 };
124 
125 struct mqfs_vdata {
126 	LIST_ENTRY(mqfs_vdata)	mv_link;
127 	struct mqfs_node	*mv_node;
128 	struct vnode		*mv_vnode;
129 	struct task		mv_task;
130 };
131 
132 /*
133  * mqfs_node: describes a node (file or directory) within a mqfs
134  */
135 struct mqfs_node {
136 	char			mn_name[MQFS_NAMELEN+1];
137 	struct mqfs_info	*mn_info;
138 	struct mqfs_node	*mn_parent;
139 	LIST_HEAD(,mqfs_node)	mn_children;
140 	LIST_ENTRY(mqfs_node)	mn_sibling;
141 	LIST_HEAD(,mqfs_vdata)	mn_vnodes;
142 	const void		*mn_pr_root;
143 	int			mn_refcount;
144 	mqfs_type_t		mn_type;
145 	int			mn_deleted;
146 	uint32_t		mn_fileno;
147 	void			*mn_data;
148 	struct timespec		mn_birth;
149 	struct timespec		mn_ctime;
150 	struct timespec		mn_atime;
151 	struct timespec		mn_mtime;
152 	uid_t			mn_uid;
153 	gid_t			mn_gid;
154 	int			mn_mode;
155 };
156 
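/* Accessor macros: map a vnode, mount, or file to its mqfs node, mqfs info, or mqueue. */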
157 #define	VTON(vp)	(((struct mqfs_vdata *)((vp)->v_data))->mv_node)
158 #define VTOMQ(vp) 	((struct mqueue *)(VTON(vp)->mn_data))
159 #define	VFSTOMQFS(m)	((struct mqfs_info *)((m)->mnt_data))
160 #define	FPTOMQ(fp)	((struct mqueue *)(((struct mqfs_node *) \
161 				(fp)->f_data)->mn_data))
162 
163 TAILQ_HEAD(msgq, mqueue_msg);
164 
165 struct mqueue;
166 
167 struct mqueue_notifier {
168 	LIST_ENTRY(mqueue_notifier)	nt_link;
169 	struct sigevent			nt_sigev;
170 	ksiginfo_t			nt_ksi;
171 	struct proc			*nt_proc;
172 };
173 
174 struct mqueue {
175 	struct mtx	mq_mutex;
176 	int		mq_flags;
177 	long		mq_maxmsg;
178 	long		mq_msgsize;
179 	long		mq_curmsgs;
180 	long		mq_totalbytes;
181 	struct msgq	mq_msgq;
182 	int		mq_receivers;
183 	int		mq_senders;
184 	struct selinfo	mq_rsel;
185 	struct selinfo	mq_wsel;
186 	struct mqueue_notifier	*mq_notifier;
187 };
188 
189 #define	MQ_RSEL		0x01
190 #define	MQ_WSEL		0x02
191 
192 struct mqueue_msg {
193 	TAILQ_ENTRY(mqueue_msg)	msg_link;
194 	unsigned int	msg_prio;
195 	unsigned int	msg_size;
196 	/* following real data... */
197 };
198 
199 static SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
200 	"POSIX real time message queue");
201 
202 static int	default_maxmsg  = 10;
203 SYSCTL_INT(_kern_mqueue, OID_AUTO, default_maxmsg, CTLFLAG_RD,
204     &default_maxmsg, 0, "Default maximum messages in queue");
205 static int	default_msgsize = 1024;
206 SYSCTL_INT(_kern_mqueue, OID_AUTO, default_msgsize, CTLFLAG_RD,
207     &default_msgsize, 0, "Default maximum message size");
208 
209 static int	maxmsg = 100;
210 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW,
211     &maxmsg, 0, "maximum messages in queue");
212 static int	maxmsgsize = 16384;
213 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW,
214     &maxmsgsize, 0, "maximum message size");
215 static int	maxmq = 100;
216 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW,
217     &maxmq, 0, "maximum message queues");
218 static int	curmq = 0;
219 SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW,
220     &curmq, 0, "current message queue number");
221 static int	unloadable = 0;
222 static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data");
223 
224 static eventhandler_tag exit_tag;
225 
226 /* Only one instance per system */
227 static struct mqfs_info		mqfs_data;
228 static uma_zone_t		mqnode_zone;
229 static uma_zone_t		mqueue_zone;
230 static uma_zone_t		mvdata_zone;
231 static uma_zone_t		mqnoti_zone;
232 static struct vop_vector	mqfs_vnodeops;
233 static const struct fileops	mqueueops;
234 static unsigned			mqfs_osd_jail_slot;
235 
236 /*
237  * Directory structure construction and manipulation
238  */
239 #ifdef notyet
240 static struct mqfs_node	*mqfs_create_dir(struct mqfs_node *parent,
241 	const char *name, int namelen, struct ucred *cred, int mode);
242 static struct mqfs_node	*mqfs_create_link(struct mqfs_node *parent,
243 	const char *name, int namelen, struct ucred *cred, int mode);
244 #endif
245 
246 static struct mqfs_node	*mqfs_create_file(struct mqfs_node *parent,
247 	const char *name, int namelen, struct ucred *cred, int mode);
248 static int	mqfs_destroy(struct mqfs_node *mn);
249 static void	mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn);
250 static void	mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn);
251 static int	mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn);
252 static int	mqfs_prison_remove(void *obj, void *data);
253 
254 /*
255  * Message queue construction and manipulation
256  */
257 static struct mqueue	*mqueue_alloc(const struct mq_attr *attr);
258 static void	mqueue_free(struct mqueue *mq);
259 static int	mqueue_send(struct mqueue *mq, const char *msg_ptr,
260 			size_t msg_len, unsigned msg_prio, int waitok,
261 			const struct timespec *abs_timeout);
262 static int	mqueue_receive(struct mqueue *mq, char *msg_ptr,
263 			size_t msg_len, unsigned *msg_prio, int waitok,
264 			const struct timespec *abs_timeout);
265 static int	_mqueue_send(struct mqueue *mq, struct mqueue_msg *msg,
266 			int timo);
267 static int	_mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg,
268 			int timo);
269 static void	mqueue_send_notification(struct mqueue *mq);
270 static void	mqueue_fdclose(struct thread *td, int fd, struct file *fp);
271 static void	mq_proc_exit(void *arg, struct proc *p);
272 
273 /*
274  * kqueue filters
275  */
276 static void	filt_mqdetach(struct knote *kn);
277 static int	filt_mqread(struct knote *kn, long hint);
278 static int	filt_mqwrite(struct knote *kn, long hint);
279 
280 static const struct filterops mq_rfiltops = {
281 	.f_isfd = 1,
282 	.f_detach = filt_mqdetach,
283 	.f_event = filt_mqread,
284 };
285 static const struct filterops mq_wfiltops = {
286 	.f_isfd = 1,
287 	.f_detach = filt_mqdetach,
288 	.f_event = filt_mqwrite,
289 };
290 
291 /*
292  * Initialize fileno bitmap
293  */
294 static void
295 mqfs_fileno_init(struct mqfs_info *mi)
296 {
297 	struct unrhdr *up;
298 
299 	up = new_unrhdr(1, INT_MAX, NULL);
300 	mi->mi_unrhdr = up;
301 }
302 
303 /*
304  * Tear down fileno bitmap
305  */
306 static void
307 mqfs_fileno_uninit(struct mqfs_info *mi)
308 {
309 	struct unrhdr *up;
310 
311 	up = mi->mi_unrhdr;
312 	mi->mi_unrhdr = NULL;
313 	delete_unrhdr(up);
314 }
315 
316 /*
317  * Allocate a file number
318  */
319 static void
320 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn)
321 {
322 	/* make sure our parent has a file number */
323 	if (mn->mn_parent && !mn->mn_parent->mn_fileno)
324 		mqfs_fileno_alloc(mi, mn->mn_parent);
325 
326 	switch (mn->mn_type) {
327 	case mqfstype_root:
328 	case mqfstype_dir:
329 	case mqfstype_file:
330 	case mqfstype_symlink:
331 		mn->mn_fileno = alloc_unr(mi->mi_unrhdr);
332 		break;
333 	case mqfstype_this:
334 		KASSERT(mn->mn_parent != NULL,
335 		    ("mqfstype_this node has no parent"));
336 		mn->mn_fileno = mn->mn_parent->mn_fileno;
337 		break;
338 	case mqfstype_parent:
339 		KASSERT(mn->mn_parent != NULL,
340 		    ("mqfstype_parent node has no parent"));
341 		if (mn->mn_parent == mi->mi_root) {
342 			mn->mn_fileno = mn->mn_parent->mn_fileno;
343 			break;
344 		}
345 		KASSERT(mn->mn_parent->mn_parent != NULL,
346 		    ("mqfstype_parent node has no grandparent"));
347 		mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno;
348 		break;
349 	default:
350 		KASSERT(0,
351 		    ("mqfs_fileno_alloc() called for unknown type node: %d",
352 			mn->mn_type));
353 		break;
354 	}
355 }
356 
357 /*
358  * Release a file number
359  */
360 static void
361 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn)
362 {
363 	switch (mn->mn_type) {
364 	case mqfstype_root:
365 	case mqfstype_dir:
366 	case mqfstype_file:
367 	case mqfstype_symlink:
368 		free_unr(mi->mi_unrhdr, mn->mn_fileno);
369 		break;
370 	case mqfstype_this:
371 	case mqfstype_parent:
372 		/* ignore these, as they don't "own" their file number */
373 		break;
374 	default:
375 		KASSERT(0,
376 		    ("mqfs_fileno_free() called for unknown type node: %d",
377 			mn->mn_type));
378 		break;
379 	}
380 }
381 
382 static __inline struct mqfs_node *
383 mqnode_alloc(void)
384 {
385 	return (uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO));
386 }
387 
388 static __inline void
389 mqnode_free(struct mqfs_node *node)
390 {
391 	uma_zfree(mqnode_zone, node);
392 }
393 
394 static __inline void
395 mqnode_addref(struct mqfs_node *node)
396 {
397 	atomic_add_int(&node->mn_refcount, 1);
398 }
399 
400 static __inline void
401 mqnode_release(struct mqfs_node *node)
402 {
403 	struct mqfs_info *mqfs;
404 	int old, exp;
405 
406 	mqfs = node->mn_info;
407 	old = atomic_fetchadd_int(&node->mn_refcount, -1);
408 	if (node->mn_type == mqfstype_dir ||
409 	    node->mn_type == mqfstype_root)
410 		exp = 3; /* include . and .. */
411 	else
412 		exp = 1;
413 	if (old == exp) {
414 		int locked = sx_xlocked(&mqfs->mi_lock);
415 		if (!locked)
416 			sx_xlock(&mqfs->mi_lock);
417 		mqfs_destroy(node);
418 		if (!locked)
419 			sx_xunlock(&mqfs->mi_lock);
420 	}
421 }
422 
423 /*
424  * Add a node to a directory
425  */
426 static int
427 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node)
428 {
429 	KASSERT(parent != NULL, ("%s(): parent is NULL", __func__));
430 	KASSERT(parent->mn_info != NULL,
431 	    ("%s(): parent has no mn_info", __func__));
432 	KASSERT(parent->mn_type == mqfstype_dir ||
433 	    parent->mn_type == mqfstype_root,
434 	    ("%s(): parent is not a directory", __func__));
435 
436 	node->mn_info = parent->mn_info;
437 	node->mn_parent = parent;
438 	LIST_INIT(&node->mn_children);
439 	LIST_INIT(&node->mn_vnodes);
440 	LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling);
441 	mqnode_addref(parent);
442 	return (0);
443 }
444 
445 static struct mqfs_node *
446 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode,
447 	int nodetype)
448 {
449 	struct mqfs_node *node;
450 
451 	node = mqnode_alloc();
452 	strncpy(node->mn_name, name, namelen);
453 	node->mn_pr_root = cred->cr_prison->pr_root;
454 	node->mn_type = nodetype;
455 	node->mn_refcount = 1;
456 	vfs_timestamp(&node->mn_birth);
457 	node->mn_ctime = node->mn_atime = node->mn_mtime =
458 	    node->mn_birth;
459 	node->mn_uid = cred->cr_uid;
460 	node->mn_gid = cred->cr_gid;
461 	node->mn_mode = mode;
462 	return (node);
463 }
464 
465 /*
466  * Create a file
467  */
468 static struct mqfs_node *
469 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen,
470 	struct ucred *cred, int mode)
471 {
472 	struct mqfs_node *node;
473 
474 	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file);
475 	if (mqfs_add_node(parent, node) != 0) {
476 		mqnode_free(node);
477 		return (NULL);
478 	}
479 	return (node);
480 }
481 
482 /*
483  * Add . and .. to a directory
484  */
485 static int
486 mqfs_fixup_dir(struct mqfs_node *parent)
487 {
488 	struct mqfs_node *dir;
489 
490 	dir = mqnode_alloc();
491 	dir->mn_name[0] = '.';
492 	dir->mn_type = mqfstype_this;
493 	dir->mn_refcount = 1;
494 	if (mqfs_add_node(parent, dir) != 0) {
495 		mqnode_free(dir);
496 		return (-1);
497 	}
498 
499 	dir = mqnode_alloc();
500 	dir->mn_name[0] = dir->mn_name[1] = '.';
501 	dir->mn_type = mqfstype_parent;
502 	dir->mn_refcount = 1;
503 
504 	if (mqfs_add_node(parent, dir) != 0) {
505 		mqnode_free(dir);
506 		return (-1);
507 	}
508 
509 	return (0);
510 }
511 
512 #ifdef notyet
513 
514 /*
515  * Create a directory
516  */
517 static struct mqfs_node *
518 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen,
519 	struct ucred *cred, int mode)
520 {
521 	struct mqfs_node *node;
522 
523 	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir);
524 	if (mqfs_add_node(parent, node) != 0) {
525 		mqnode_free(node);
526 		return (NULL);
527 	}
528 
529 	if (mqfs_fixup_dir(node) != 0) {
530 		mqfs_destroy(node);
531 		return (NULL);
532 	}
533 	return (node);
534 }
535 
536 /*
537  * Create a symlink
538  */
539 static struct mqfs_node *
540 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen,
541 	struct ucred *cred, int mode)
542 {
543 	struct mqfs_node *node;
544 
545 	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink);
546 	if (mqfs_add_node(parent, node) != 0) {
547 		mqnode_free(node);
548 		return (NULL);
549 	}
550 	return (node);
551 }
552 
553 #endif
554 
555 /*
556  * Destroy a node or a tree of nodes
557  */
558 static int
559 mqfs_destroy(struct mqfs_node *node)
560 {
561 	struct mqfs_node *parent;
562 
563 	KASSERT(node != NULL,
564 	    ("%s(): node is NULL", __func__));
565 	KASSERT(node->mn_info != NULL,
566 	    ("%s(): node has no mn_info", __func__));
567 
568 	/* destroy children */
569 	if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root)
570 		while (! LIST_EMPTY(&node->mn_children))
571 			mqfs_destroy(LIST_FIRST(&node->mn_children));
572 
573 	/* unlink from parent */
574 	if ((parent = node->mn_parent) != NULL) {
575 		KASSERT(parent->mn_info == node->mn_info,
576 		    ("%s(): parent has different mn_info", __func__));
577 		LIST_REMOVE(node, mn_sibling);
578 	}
579 
580 	if (node->mn_fileno != 0)
581 		mqfs_fileno_free(node->mn_info, node);
582 	if (node->mn_data != NULL)
583 		mqueue_free(node->mn_data);
584 	mqnode_free(node);
585 	return (0);
586 }
587 
588 /*
589  * Mount a mqfs instance
590  */
591 static int
592 mqfs_mount(struct mount *mp)
593 {
594 	struct statfs *sbp;
595 
596 	if (mp->mnt_flag & MNT_UPDATE)
597 		return (EOPNOTSUPP);
598 
599 	mp->mnt_data = &mqfs_data;
600 	MNT_ILOCK(mp);
601 	mp->mnt_flag |= MNT_LOCAL;
602 	MNT_IUNLOCK(mp);
603 	vfs_getnewfsid(mp);
604 
605 	sbp = &mp->mnt_stat;
606 	vfs_mountedfrom(mp, "mqueue");
607 	sbp->f_bsize = PAGE_SIZE;
608 	sbp->f_iosize = PAGE_SIZE;
609 	sbp->f_blocks = 1;
610 	sbp->f_bfree = 1;
611 	sbp->f_bavail = 0;
612 	sbp->f_files = 0;
613 	sbp->f_ffree = 0;
614 	return (0);
615 }
616 
617 /*
618  * Unmount a mqfs instance
619  */
620 static int
621 mqfs_unmount(struct mount *mp, int mntflags)
622 {
623 	int error;
624 
625 	error = vflush(mp, 0, (mntflags & MNT_FORCE) ?  FORCECLOSE : 0,
626 	    curthread);
627 	return (error);
628 }
629 
630 /*
631  * Return a root vnode
632  */
633 static int
634 mqfs_root(struct mount *mp, int flags, struct vnode **vpp)
635 {
636 	struct mqfs_info *mqfs;
637 	int ret;
638 
639 	mqfs = VFSTOMQFS(mp);
640 	ret = mqfs_allocv(mp, vpp, mqfs->mi_root);
641 	return (ret);
642 }
643 
644 /*
645  * Return filesystem stats
646  */
647 static int
648 mqfs_statfs(struct mount *mp, struct statfs *sbp)
649 {
650 	/* XXX update statistics */
651 	return (0);
652 }
653 
654 /*
655  * Initialize a mqfs instance
656  */
657 static int
658 mqfs_init(struct vfsconf *vfc)
659 {
660 	struct mqfs_node *root;
661 	struct mqfs_info *mi;
662 	osd_method_t methods[PR_MAXMETHOD] = {
663 	    [PR_METHOD_REMOVE] = mqfs_prison_remove,
664 	};
665 
666 	mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node),
667 		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
668 	mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue),
669 		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
670 	mvdata_zone = uma_zcreate("mvdata",
671 		sizeof(struct mqfs_vdata), NULL, NULL, NULL,
672 		NULL, UMA_ALIGN_PTR, 0);
673 	mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier),
674 		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
675 	mi = &mqfs_data;
676 	sx_init(&mi->mi_lock, "mqfs lock");
677 	/* set up the root directory */
678 	root = mqfs_create_node("/", 1, curthread->td_ucred, 01777,
679 		mqfstype_root);
680 	root->mn_info = mi;
681 	LIST_INIT(&root->mn_children);
682 	LIST_INIT(&root->mn_vnodes);
683 	mi->mi_root = root;
684 	mqfs_fileno_init(mi);
685 	mqfs_fileno_alloc(mi, root);
686 	mqfs_fixup_dir(root);
687 	exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL,
688 	    EVENTHANDLER_PRI_ANY);
689 	mq_fdclose = mqueue_fdclose;
690 	p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING);
691 	mqfs_osd_jail_slot = osd_jail_register(NULL, methods);
692 	return (0);
693 }
694 
695 /*
696  * Destroy a mqfs instance
697  */
698 static int
699 mqfs_uninit(struct vfsconf *vfc)
700 {
701 	struct mqfs_info *mi;
702 
703 	if (!unloadable)
704 		return (EOPNOTSUPP);
705 	osd_jail_deregister(mqfs_osd_jail_slot);
706 	EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
707 	mi = &mqfs_data;
708 	mqfs_destroy(mi->mi_root);
709 	mi->mi_root = NULL;
710 	mqfs_fileno_uninit(mi);
711 	sx_destroy(&mi->mi_lock);
712 	uma_zdestroy(mqnode_zone);
713 	uma_zdestroy(mqueue_zone);
714 	uma_zdestroy(mvdata_zone);
715 	uma_zdestroy(mqnoti_zone);
716 	return (0);
717 }
718 
719 /*
720  * Taskqueue routine that recycles a vnode of an unlinked node
721  */
722 static void
723 do_recycle(void *context, int pending __unused)
724 {
725 	struct vnode *vp = (struct vnode *)context;
726 
727 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
728 	vrecycle(vp);
729 	VOP_UNLOCK(vp);
730 	vdrop(vp);
731 }
732 
733 /*
734  * Allocate a vnode
735  */
736 static int
737 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
738 {
739 	struct mqfs_vdata *vd;
740 	struct mqfs_info  *mqfs;
741 	struct vnode *newvpp;
742 	int error;
743 
744 	mqfs = pn->mn_info;
745 	*vpp = NULL;
746 	sx_xlock(&mqfs->mi_lock);
747 	LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
748 		if (vd->mv_vnode->v_mount == mp) {
749 			vhold(vd->mv_vnode);
750 			break;
751 		}
752 	}
753 
754 	if (vd != NULL) {
755 found:
756 		*vpp = vd->mv_vnode;
757 		sx_xunlock(&mqfs->mi_lock);
758 		error = vget(*vpp, LK_RETRY | LK_EXCLUSIVE);
759 		vdrop(*vpp);
760 		return (error);
761 	}
762 	sx_xunlock(&mqfs->mi_lock);
763 
764 	error = getnewvnode("mqueue", mp, &mqfs_vnodeops, &newvpp);
765 	if (error)
766 		return (error);
767 	vn_lock(newvpp, LK_EXCLUSIVE | LK_RETRY);
768 	error = insmntque(newvpp, mp);
769 	if (error != 0)
770 		return (error);
771 
772 	sx_xlock(&mqfs->mi_lock);
773 	/*
774 	 * Check if it has already been allocated
775 	 * while we were blocked.
776 	 */
777 	LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
778 		if (vd->mv_vnode->v_mount == mp) {
779 			vhold(vd->mv_vnode);
780 			sx_xunlock(&mqfs->mi_lock);
781 
782 			vgone(newvpp);
783 			vput(newvpp);
784 			goto found;
785 		}
786 	}
787 
788 	*vpp = newvpp;
789 
790 	vd = uma_zalloc(mvdata_zone, M_WAITOK);
791 	(*vpp)->v_data = vd;
792 	vd->mv_vnode = *vpp;
793 	vd->mv_node = pn;
794 	TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp);
795 	LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link);
796 	mqnode_addref(pn);
797 	switch (pn->mn_type) {
798 	case mqfstype_root:
799 		(*vpp)->v_vflag = VV_ROOT;
800 		/* fall through */
801 	case mqfstype_dir:
802 	case mqfstype_this:
803 	case mqfstype_parent:
804 		(*vpp)->v_type = VDIR;
805 		break;
806 	case mqfstype_file:
807 		(*vpp)->v_type = VREG;
808 		break;
809 	case mqfstype_symlink:
810 		(*vpp)->v_type = VLNK;
811 		break;
812 	case mqfstype_none:
813 		KASSERT(0, ("mqfs_allocf called for null node\n"));
814 	default:
815 		panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type);
816 	}
817 	sx_xunlock(&mqfs->mi_lock);
818 	vn_set_state(*vpp, VSTATE_CONSTRUCTED);
819 	return (0);
820 }
821 
822 /*
823  * Search a directory entry
824  */
825 static struct mqfs_node *
826 mqfs_search(struct mqfs_node *pd, const char *name, int len, struct ucred *cred)
827 {
828 	struct mqfs_node *pn;
829 	const void *pr_root;
830 
831 	sx_assert(&pd->mn_info->mi_lock, SX_LOCKED);
832 	pr_root = cred->cr_prison->pr_root;
833 	LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
834 		/* Only match names within the same prison root directory */
835 		if ((pn->mn_pr_root == NULL || pn->mn_pr_root == pr_root) &&
836 		    strncmp(pn->mn_name, name, len) == 0 &&
837 		    pn->mn_name[len] == '\0')
838 			return (pn);
839 	}
840 	return (NULL);
841 }
842 
843 /*
844  * Look up a file or directory.
845  */
846 static int
847 mqfs_lookupx(struct vop_cachedlookup_args *ap)
848 {
849 	struct componentname *cnp;
850 	struct vnode *dvp, **vpp;
851 	struct mqfs_node *pd;
852 	struct mqfs_node *pn;
853 	struct mqfs_info *mqfs;
854 	int nameiop, flags, error, namelen;
855 	char *pname;
856 	struct thread *td;
857 
858 	td = curthread;
859 	cnp = ap->a_cnp;
860 	vpp = ap->a_vpp;
861 	dvp = ap->a_dvp;
862 	pname = cnp->cn_nameptr;
863 	namelen = cnp->cn_namelen;
864 	flags = cnp->cn_flags;
865 	nameiop = cnp->cn_nameiop;
866 	pd = VTON(dvp);
867 	pn = NULL;
868 	mqfs = pd->mn_info;
869 	*vpp = NULLVP;
870 
871 	if (dvp->v_type != VDIR)
872 		return (ENOTDIR);
873 
874 	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
875 	if (error)
876 		return (error);
877 
878 	/* shortcut: check if the name is too long */
879 	if (cnp->cn_namelen >= MQFS_NAMELEN)
880 		return (ENOENT);
881 
882 	/* self */
883 	if (namelen == 1 && pname[0] == '.') {
884 		if ((flags & ISLASTCN) && nameiop != LOOKUP)
885 			return (EINVAL);
886 		pn = pd;
887 		*vpp = dvp;
888 		VREF(dvp);
889 		return (0);
890 	}
891 
892 	/* parent */
893 	if (cnp->cn_flags & ISDOTDOT) {
894 		if (dvp->v_vflag & VV_ROOT)
895 			return (EIO);
896 		if ((flags & ISLASTCN) && nameiop != LOOKUP)
897 			return (EINVAL);
898 		VOP_UNLOCK(dvp);
899 		KASSERT(pd->mn_parent, ("non-root directory has no parent"));
900 		pn = pd->mn_parent;
901 		error = mqfs_allocv(dvp->v_mount, vpp, pn);
902 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
903 		return (error);
904 	}
905 
906 	/* named node */
907 	sx_xlock(&mqfs->mi_lock);
908 	pn = mqfs_search(pd, pname, namelen, cnp->cn_cred);
909 	if (pn != NULL)
910 		mqnode_addref(pn);
911 	sx_xunlock(&mqfs->mi_lock);
912 
913 	/* found */
914 	if (pn != NULL) {
915 		/* DELETE */
916 		if (nameiop == DELETE && (flags & ISLASTCN)) {
917 			error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
918 			if (error) {
919 				mqnode_release(pn);
920 				return (error);
921 			}
922 			if (*vpp == dvp) {
923 				VREF(dvp);
924 				*vpp = dvp;
925 				mqnode_release(pn);
926 				return (0);
927 			}
928 		}
929 
930 		/* allocate vnode */
931 		error = mqfs_allocv(dvp->v_mount, vpp, pn);
932 		mqnode_release(pn);
933 		if (error == 0 && cnp->cn_flags & MAKEENTRY)
934 			cache_enter(dvp, *vpp, cnp);
935 		return (error);
936 	}
937 
938 	/* not found */
939 
940 	/* will this create a new entry in the directory? */
941 	if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT)
942 	    && (flags & ISLASTCN)) {
943 		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
944 		if (error)
945 			return (error);
946 		return (EJUSTRETURN);
947 	}
948 	return (ENOENT);
949 }
950 
951 #if 0
952 struct vop_lookup_args {
953 	struct vop_generic_args a_gen;
954 	struct vnode *a_dvp;
955 	struct vnode **a_vpp;
956 	struct componentname *a_cnp;
957 };
958 #endif
959 
960 /*
961  * vnode lookup operation
962  */
963 static int
964 mqfs_lookup(struct vop_cachedlookup_args *ap)
965 {
966 	int rc;
967 
968 	rc = mqfs_lookupx(ap);
969 	return (rc);
970 }
971 
972 #if 0
973 struct vop_create_args {
974 	struct vnode *a_dvp;
975 	struct vnode **a_vpp;
976 	struct componentname *a_cnp;
977 	struct vattr *a_vap;
978 };
979 #endif
980 
981 /*
982  * vnode creation operation
983  */
984 static int
985 mqfs_create(struct vop_create_args *ap)
986 {
987 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
988 	struct componentname *cnp = ap->a_cnp;
989 	struct mqfs_node *pd;
990 	struct mqfs_node *pn;
991 	struct mqueue *mq;
992 	int error;
993 
994 	pd = VTON(ap->a_dvp);
995 	if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
996 		return (ENOTDIR);
997 	mq = mqueue_alloc(NULL);
998 	if (mq == NULL)
999 		return (EAGAIN);
1000 	sx_xlock(&mqfs->mi_lock);
1001 	pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen,
1002 		cnp->cn_cred, ap->a_vap->va_mode);
1003 	if (pn == NULL) {
1004 		sx_xunlock(&mqfs->mi_lock);
1005 		error = ENOSPC;
1006 	} else {
1007 		mqnode_addref(pn);
1008 		sx_xunlock(&mqfs->mi_lock);
1009 		error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1010 		mqnode_release(pn);
1011 		if (error)
1012 			mqfs_destroy(pn);
1013 		else
1014 			pn->mn_data = mq;
1015 	}
1016 	if (error)
1017 		mqueue_free(mq);
1018 	return (error);
1019 }
1020 
1021 /*
1022  * Remove an entry
1023  */
1024 static int
1025 do_unlink(struct mqfs_node *pn, struct ucred *ucred)
1026 {
1027 	struct mqfs_node *parent;
1028 	struct mqfs_vdata *vd;
1029 	int error = 0;
1030 
1031 	sx_assert(&pn->mn_info->mi_lock, SX_LOCKED);
1032 
1033 	if (ucred->cr_uid != pn->mn_uid &&
1034 	    (error = priv_check_cred(ucred, PRIV_MQ_ADMIN)) != 0)
1035 		error = EACCES;
1036 	else if (!pn->mn_deleted) {
1037 		parent = pn->mn_parent;
1038 		pn->mn_parent = NULL;
1039 		pn->mn_deleted = 1;
1040 		LIST_REMOVE(pn, mn_sibling);
1041 		LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
1042 			cache_purge(vd->mv_vnode);
1043 			vhold(vd->mv_vnode);
1044 			taskqueue_enqueue(taskqueue_thread, &vd->mv_task);
1045 		}
1046 		mqnode_release(pn);
1047 		mqnode_release(parent);
1048 	} else
1049 		error = ENOENT;
1050 	return (error);
1051 }
1052 
1053 #if 0
1054 struct vop_remove_args {
1055 	struct vnode *a_dvp;
1056 	struct vnode *a_vp;
1057 	struct componentname *a_cnp;
1058 };
1059 #endif
1060 
1061 /*
1062  * vnode removal operation
1063  */
1064 static int
1065 mqfs_remove(struct vop_remove_args *ap)
1066 {
1067 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1068 	struct mqfs_node *pn;
1069 	int error;
1070 
1071 	if (ap->a_vp->v_type == VDIR)
1072                 return (EPERM);
1073 	pn = VTON(ap->a_vp);
1074 	sx_xlock(&mqfs->mi_lock);
1075 	error = do_unlink(pn, ap->a_cnp->cn_cred);
1076 	sx_xunlock(&mqfs->mi_lock);
1077 	return (error);
1078 }
1079 
1080 #if 0
1081 struct vop_inactive_args {
1082 	struct vnode *a_vp;
1083 	struct thread *a_td;
1084 };
1085 #endif
1086 
1087 static int
1088 mqfs_inactive(struct vop_inactive_args *ap)
1089 {
1090 	struct mqfs_node *pn = VTON(ap->a_vp);
1091 
1092 	if (pn->mn_deleted)
1093 		vrecycle(ap->a_vp);
1094 	return (0);
1095 }
1096 
1097 #if 0
1098 struct vop_reclaim_args {
1099 	struct vop_generic_args a_gen;
1100 	struct vnode *a_vp;
1101 };
1102 #endif
1103 
1104 static int
1105 mqfs_reclaim(struct vop_reclaim_args *ap)
1106 {
1107 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount);
1108 	struct vnode *vp = ap->a_vp;
1109 	struct mqfs_node *pn;
1110 	struct mqfs_vdata *vd;
1111 
1112 	vd = vp->v_data;
1113 	pn = vd->mv_node;
1114 	sx_xlock(&mqfs->mi_lock);
1115 	vp->v_data = NULL;
1116 	LIST_REMOVE(vd, mv_link);
1117 	mqnode_release(pn);
1118 	sx_xunlock(&mqfs->mi_lock);
1119 	uma_zfree(mvdata_zone, vd);
1120 	return (0);
1121 }
1122 
1123 #if 0
1124 struct vop_open_args {
1125 	struct vop_generic_args a_gen;
1126 	struct vnode *a_vp;
1127 	int a_mode;
1128 	struct ucred *a_cred;
1129 	struct thread *a_td;
1130 	struct file *a_fp;
1131 };
1132 #endif
1133 
1134 static int
1135 mqfs_open(struct vop_open_args *ap)
1136 {
1137 	return (0);
1138 }
1139 
1140 #if 0
1141 struct vop_close_args {
1142 	struct vop_generic_args a_gen;
1143 	struct vnode *a_vp;
1144 	int a_fflag;
1145 	struct ucred *a_cred;
1146 	struct thread *a_td;
1147 };
1148 #endif
1149 
1150 static int
1151 mqfs_close(struct vop_close_args *ap)
1152 {
1153 	return (0);
1154 }
1155 
1156 #if 0
1157 struct vop_access_args {
1158 	struct vop_generic_args a_gen;
1159 	struct vnode *a_vp;
1160 	accmode_t a_accmode;
1161 	struct ucred *a_cred;
1162 	struct thread *a_td;
1163 };
1164 #endif
1165 
1166 /*
1167  * Verify permissions
1168  */
1169 static int
1170 mqfs_access(struct vop_access_args *ap)
1171 {
1172 	struct vnode *vp = ap->a_vp;
1173 	struct vattr vattr;
1174 	int error;
1175 
1176 	error = VOP_GETATTR(vp, &vattr, ap->a_cred);
1177 	if (error)
1178 		return (error);
1179 	error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid, vattr.va_gid,
1180 	    ap->a_accmode, ap->a_cred);
1181 	return (error);
1182 }
1183 
1184 #if 0
1185 struct vop_getattr_args {
1186 	struct vop_generic_args a_gen;
1187 	struct vnode *a_vp;
1188 	struct vattr *a_vap;
1189 	struct ucred *a_cred;
1190 };
1191 #endif
1192 
1193 /*
1194  * Get file attributes
1195  */
1196 static int
1197 mqfs_getattr(struct vop_getattr_args *ap)
1198 {
1199 	struct vnode *vp = ap->a_vp;
1200 	struct mqfs_node *pn = VTON(vp);
1201 	struct vattr *vap = ap->a_vap;
1202 	int error = 0;
1203 
1204 	vap->va_type = vp->v_type;
1205 	vap->va_mode = pn->mn_mode;
1206 	vap->va_nlink = 1;
1207 	vap->va_uid = pn->mn_uid;
1208 	vap->va_gid = pn->mn_gid;
1209 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
1210 	vap->va_fileid = pn->mn_fileno;
1211 	vap->va_size = 0;
1212 	vap->va_blocksize = PAGE_SIZE;
1213 	vap->va_bytes = vap->va_size = 0;
1214 	vap->va_atime = pn->mn_atime;
1215 	vap->va_mtime = pn->mn_mtime;
1216 	vap->va_ctime = pn->mn_ctime;
1217 	vap->va_birthtime = pn->mn_birth;
1218 	vap->va_gen = 0;
1219 	vap->va_flags = 0;
1220 	vap->va_rdev = NODEV;
1221 	vap->va_bytes = 0;
1222 	vap->va_filerev = 0;
1223 	return (error);
1224 }
1225 
1226 #if 0
1227 struct vop_setattr_args {
1228 	struct vop_generic_args a_gen;
1229 	struct vnode *a_vp;
1230 	struct vattr *a_vap;
1231 	struct ucred *a_cred;
1232 };
1233 #endif
1234 /*
1235  * Set attributes
1236  */
1237 static int
1238 mqfs_setattr(struct vop_setattr_args *ap)
1239 {
1240 	struct mqfs_node *pn;
1241 	struct vattr *vap;
1242 	struct vnode *vp;
1243 	struct thread *td;
1244 	int c, error;
1245 	uid_t uid;
1246 	gid_t gid;
1247 
1248 	td = curthread;
1249 	vap = ap->a_vap;
1250 	vp = ap->a_vp;
1251 	if (vap->va_type != VNON ||
1252 	    vap->va_nlink != VNOVAL ||
1253 	    vap->va_fsid != VNOVAL ||
1254 	    vap->va_fileid != VNOVAL ||
1255 	    vap->va_blocksize != VNOVAL ||
1256 	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1257 	    vap->va_rdev != VNOVAL ||
1258 	    (int)vap->va_bytes != VNOVAL ||
1259 	    vap->va_gen != VNOVAL) {
1260 		return (EINVAL);
1261 	}
1262 
1263 	pn = VTON(vp);
1264 
1265 	error = c = 0;
1266 	if (vap->va_uid == (uid_t)VNOVAL)
1267 		uid = pn->mn_uid;
1268 	else
1269 		uid = vap->va_uid;
1270 	if (vap->va_gid == (gid_t)VNOVAL)
1271 		gid = pn->mn_gid;
1272 	else
1273 		gid = vap->va_gid;
1274 
1275 	if (uid != pn->mn_uid || gid != pn->mn_gid) {
1276 		/*
1277 		 * To modify the ownership of a file, must possess VADMIN
1278 		 * for that file.
1279 		 */
1280 		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)))
1281 			return (error);
1282 
1283 		/*
1284 		 * XXXRW: Why is there a privilege check here: shouldn't the
1285 		 * check in VOP_ACCESS() be enough?  Also, are the group bits
1286 		 * below definitely right?
1287 		 */
1288 		if ((ap->a_cred->cr_uid != pn->mn_uid || uid != pn->mn_uid ||
1289 		    (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) &&
1290 		    (error = priv_check(td, PRIV_MQ_ADMIN)) != 0)
1291 			return (error);
1292 		pn->mn_uid = uid;
1293 		pn->mn_gid = gid;
1294 		c = 1;
1295 	}
1296 
1297 	if (vap->va_mode != (mode_t)VNOVAL) {
1298 		if (ap->a_cred->cr_uid != pn->mn_uid &&
1299 		    (error = priv_check(td, PRIV_MQ_ADMIN)))
1300 			return (error);
1301 		pn->mn_mode = vap->va_mode;
1302 		c = 1;
1303 	}
1304 
1305 	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1306 		/* See the comment in ufs_vnops::ufs_setattr(). */
1307 		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
1308 		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
1309 		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
1310 			return (error);
1311 		if (vap->va_atime.tv_sec != VNOVAL) {
1312 			pn->mn_atime = vap->va_atime;
1313 		}
1314 		if (vap->va_mtime.tv_sec != VNOVAL) {
1315 			pn->mn_mtime = vap->va_mtime;
1316 		}
1317 		c = 1;
1318 	}
1319 	if (c) {
1320 		vfs_timestamp(&pn->mn_ctime);
1321 	}
1322 	return (0);
1323 }
1324 
1325 #if 0
1326 struct vop_read_args {
1327 	struct vop_generic_args a_gen;
1328 	struct vnode *a_vp;
1329 	struct uio *a_uio;
1330 	int a_ioflag;
1331 	struct ucred *a_cred;
1332 };
1333 #endif
1334 
1335 /*
1336  * Read from a file
1337  */
1338 static int
1339 mqfs_read(struct vop_read_args *ap)
1340 {
1341 	char buf[80];
1342 	struct vnode *vp = ap->a_vp;
1343 	struct uio *uio = ap->a_uio;
1344 	struct mqueue *mq;
1345 	int len, error;
1346 
1347 	if (vp->v_type != VREG)
1348 		return (EINVAL);
1349 
1350 	mq = VTOMQ(vp);
1351 	snprintf(buf, sizeof(buf),
1352 	    "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n",
1353 	    mq->mq_totalbytes,
1354 	    mq->mq_maxmsg,
1355 	    mq->mq_curmsgs,
1356 	    mq->mq_msgsize);
1357 	buf[sizeof(buf)-1] = '\0';
1358 	len = strlen(buf);
1359 	error = uiomove_frombuf(buf, len, uio);
1360 	return (error);
1361 }
1362 
1363 #if 0
1364 struct vop_readdir_args {
1365 	struct vop_generic_args a_gen;
1366 	struct vnode *a_vp;
1367 	struct uio *a_uio;
1368 	struct ucred *a_cred;
1369 	int *a_eofflag;
1370 	int *a_ncookies;
1371 	uint64_t **a_cookies;
1372 };
1373 #endif
1374 
1375 /*
1376  * Return directory entries.
1377  */
1378 static int
1379 mqfs_readdir(struct vop_readdir_args *ap)
1380 {
1381 	struct vnode *vp;
1382 	struct mqfs_info *mi;
1383 	struct mqfs_node *pd;
1384 	struct mqfs_node *pn;
1385 	struct dirent entry;
1386 	struct uio *uio;
1387 	const void *pr_root;
1388 	int *tmp_ncookies = NULL;
1389 	off_t offset;
1390 	int error, i;
1391 
1392 	vp = ap->a_vp;
1393 	mi = VFSTOMQFS(vp->v_mount);
1394 	pd = VTON(vp);
1395 	uio = ap->a_uio;
1396 
1397 	if (vp->v_type != VDIR)
1398 		return (ENOTDIR);
1399 
1400 	if (uio->uio_offset < 0)
1401 		return (EINVAL);
1402 
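	/*
	 * Directory cookies are not supported: hide a_ncookies from
	 * vfs_read_dirent() for the duration of the scan and restore it
	 * before returning, so callers asking for cookies get a count of 0.
	 */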
1403 	if (ap->a_ncookies != NULL) {
1404 		tmp_ncookies = ap->a_ncookies;
1405 		*ap->a_ncookies = 0;
1406 		ap->a_ncookies = NULL;
1407         }
1408 
1409 	error = 0;
1410 	offset = 0;
1411 
1412 	pr_root = ap->a_cred->cr_prison->pr_root;
1413 	sx_xlock(&mi->mi_lock);
1414 
1415 	LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
1416 		entry.d_reclen = sizeof(entry);
1417 
1418 		/*
1419 		 * Only show names within the same prison root directory
1420 		 * (or not associated with a prison, e.g. "." and "..").
1421 		 */
1422 		if (pn->mn_pr_root != NULL && pn->mn_pr_root != pr_root)
1423 			continue;
1424 		if (!pn->mn_fileno)
1425 			mqfs_fileno_alloc(mi, pn);
1426 		entry.d_fileno = pn->mn_fileno;
1427 		entry.d_off = offset + entry.d_reclen;
1428 		for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i)
1429 			entry.d_name[i] = pn->mn_name[i];
1430 		entry.d_namlen = i;
1431 		switch (pn->mn_type) {
1432 		case mqfstype_root:
1433 		case mqfstype_dir:
1434 		case mqfstype_this:
1435 		case mqfstype_parent:
1436 			entry.d_type = DT_DIR;
1437 			break;
1438 		case mqfstype_file:
1439 			entry.d_type = DT_REG;
1440 			break;
1441 		case mqfstype_symlink:
1442 			entry.d_type = DT_LNK;
1443 			break;
1444 		default:
1445 			panic("%s has unexpected node type: %d", pn->mn_name,
1446 				pn->mn_type);
1447 		}
1448 		dirent_terminate(&entry);
1449 		if (entry.d_reclen > uio->uio_resid)
1450                         break;
1451 		if (offset >= uio->uio_offset) {
1452 			error = vfs_read_dirent(ap, &entry, offset);
1453                         if (error)
1454                                 break;
1455                 }
1456                 offset += entry.d_reclen;
1457 	}
1458 	sx_xunlock(&mi->mi_lock);
1459 
1460 	uio->uio_offset = offset;
1461 
1462 	if (tmp_ncookies != NULL)
1463 		ap->a_ncookies = tmp_ncookies;
1464 
1465 	return (error);
1466 }
1467 
1468 #ifdef notyet
1469 
1470 #if 0
1471 struct vop_mkdir_args {
1472 	struct vnode *a_dvp;
1473 	struct vnode **a_vpp;
1474 	struct componentname *a_cnp;
1475 	struct vattr *a_vap;
1476 };
1477 #endif
1478 
1479 /*
1480  * Create a directory.
1481  */
1482 static int
1483 mqfs_mkdir(struct vop_mkdir_args *ap)
1484 {
1485 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1486 	struct componentname *cnp = ap->a_cnp;
1487 	struct mqfs_node *pd = VTON(ap->a_dvp);
1488 	struct mqfs_node *pn;
1489 	int error;
1490 
1491 	if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
1492 		return (ENOTDIR);
1493 	sx_xlock(&mqfs->mi_lock);
1494 	pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen,
1495 		cnp->cn_cred, ap->a_vap->va_mode);
1496 	if (pn != NULL)
1497 		mqnode_addref(pn);
1498 	sx_xunlock(&mqfs->mi_lock);
1499 	if (pn == NULL) {
1500 		error = ENOSPC;
1501 	} else {
1502 		error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1503 		mqnode_release(pn);
1504 	}
1505 	return (error);
1506 }
1507 
1508 #if 0
1509 struct vop_rmdir_args {
1510 	struct vnode *a_dvp;
1511 	struct vnode *a_vp;
1512 	struct componentname *a_cnp;
1513 };
1514 #endif
1515 
1516 /*
1517  * Remove a directory.
1518  */
1519 static int
1520 mqfs_rmdir(struct vop_rmdir_args *ap)
1521 {
1522 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1523 	struct mqfs_node *pn = VTON(ap->a_vp);
1524 	struct mqfs_node *pt;
1525 
1526 	if (pn->mn_type != mqfstype_dir)
1527 		return (ENOTDIR);
1528 
1529 	sx_xlock(&mqfs->mi_lock);
1530 	if (pn->mn_deleted) {
1531 		sx_xunlock(&mqfs->mi_lock);
1532 		return (ENOENT);
1533 	}
1534 
1535 	pt = LIST_FIRST(&pn->mn_children);
1536 	pt = LIST_NEXT(pt, mn_sibling);
1537 	pt = LIST_NEXT(pt, mn_sibling);
1538 	if (pt != NULL) {
1539 		sx_xunlock(&mqfs->mi_lock);
1540 		return (ENOTEMPTY);
1541 	}
1542 	pt = pn->mn_parent;
1543 	pn->mn_parent = NULL;
1544 	pn->mn_deleted = 1;
1545 	LIST_REMOVE(pn, mn_sibling);
1546 	mqnode_release(pn);
1547 	mqnode_release(pt);
1548 	sx_xunlock(&mqfs->mi_lock);
1549 	cache_purge(ap->a_vp);
1550 	return (0);
1551 }
1552 
1553 #endif /* notyet */
1554 
1555 /*
1556  * See if this prison root is obsolete, and clean up associated queues if it is.
1557  */
1558 static int
1559 mqfs_prison_remove(void *obj, void *data __unused)
1560 {
1561 	const struct prison *pr = obj;
1562 	struct prison *tpr;
1563 	struct mqfs_node *pn, *tpn;
1564 	struct vnode *pr_root;
1565 
1566 	pr_root = pr->pr_root;
1567 	if (pr->pr_parent->pr_root == pr_root)
1568 		return (0);
1569 	TAILQ_FOREACH(tpr, &allprison, pr_list) {
1570 		if (tpr != pr && tpr->pr_root == pr_root)
1571 			return (0);
1572 	}
1573 	/*
1574 	 * No jails are rooted in this directory anymore,
1575 	 * so no queues should be either.
1576 	 */
1577 	sx_xlock(&mqfs_data.mi_lock);
1578 	LIST_FOREACH_SAFE(pn, &mqfs_data.mi_root->mn_children,
1579 	    mn_sibling, tpn) {
1580 		if (pn->mn_pr_root == pr_root)
1581 			(void)do_unlink(pn, curthread->td_ucred);
1582 	}
1583 	sx_xunlock(&mqfs_data.mi_lock);
1584 	return (0);
1585 }
1586 
1587 /*
1588  * Allocate a message queue
1589  */
1590 static struct mqueue *
1591 mqueue_alloc(const struct mq_attr *attr)
1592 {
1593 	struct mqueue *mq;
1594 
1595 	if (curmq >= maxmq)
1596 		return (NULL);
1597 	mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO);
1598 	TAILQ_INIT(&mq->mq_msgq);
1599 	if (attr != NULL) {
1600 		mq->mq_maxmsg = attr->mq_maxmsg;
1601 		mq->mq_msgsize = attr->mq_msgsize;
1602 	} else {
1603 		mq->mq_maxmsg = default_maxmsg;
1604 		mq->mq_msgsize = default_msgsize;
1605 	}
1606 	mtx_init(&mq->mq_mutex, "mqueue lock", NULL, MTX_DEF);
1607 	knlist_init_mtx(&mq->mq_rsel.si_note, &mq->mq_mutex);
1608 	knlist_init_mtx(&mq->mq_wsel.si_note, &mq->mq_mutex);
1609 	atomic_add_int(&curmq, 1);
1610 	return (mq);
1611 }
1612 
1613 /*
1614  * Destroy a message queue
1615  */
1616 static void
1617 mqueue_free(struct mqueue *mq)
1618 {
1619 	struct mqueue_msg *msg;
1620 
1621 	while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) {
1622 		TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link);
1623 		free(msg, M_MQUEUEDATA);
1624 	}
1625 
1626 	mtx_destroy(&mq->mq_mutex);
1627 	seldrain(&mq->mq_rsel);
1628 	seldrain(&mq->mq_wsel);
1629 	knlist_destroy(&mq->mq_rsel.si_note);
1630 	knlist_destroy(&mq->mq_wsel.si_note);
1631 	uma_zfree(mqueue_zone, mq);
1632 	atomic_add_int(&curmq, -1);
1633 }
1634 
1635 /*
1636  * Load a message from user space
1637  */
1638 static struct mqueue_msg *
1639 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio)
1640 {
1641 	struct mqueue_msg *msg;
1642 	size_t len;
1643 	int error;
1644 
1645 	len = sizeof(struct mqueue_msg) + msg_size;
1646 	msg = malloc(len, M_MQUEUEDATA, M_WAITOK);
1647 	error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg),
1648 	    msg_size);
1649 	if (error) {
1650 		free(msg, M_MQUEUEDATA);
1651 		msg = NULL;
1652 	} else {
1653 		msg->msg_size = msg_size;
1654 		msg->msg_prio = msg_prio;
1655 	}
1656 	return (msg);
1657 }
1658 
1659 /*
1660  * Save a message to user space
1661  */
1662 static int
1663 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio)
1664 {
1665 	int error;
1666 
1667 	error = copyout(((char *)msg) + sizeof(*msg), msg_ptr,
1668 		msg->msg_size);
1669 	if (error == 0 && msg_prio != NULL)
1670 		error = copyout(&msg->msg_prio, msg_prio, sizeof(int));
1671 	return (error);
1672 }
1673 
1674 /*
1675  * Free a message's memory
1676  */
1677 static __inline void
1678 mqueue_freemsg(struct mqueue_msg *msg)
1679 {
1680 	free(msg, M_MQUEUEDATA);
1681 }
1682 
1683 /*
1684  * Send a message. If waitok is false, the thread will not be
1685  * blocked if the queue is full; otherwise, the absolute
1686  * timeout will be checked.
1687  */
1688 int
1689 mqueue_send(struct mqueue *mq, const char *msg_ptr,
1690 	size_t msg_len, unsigned msg_prio, int waitok,
1691 	const struct timespec *abs_timeout)
1692 {
1693 	struct mqueue_msg *msg;
1694 	struct timespec ts, ts2;
1695 	struct timeval tv;
1696 	int error;
1697 
1698 	if (msg_prio >= MQ_PRIO_MAX)
1699 		return (EINVAL);
1700 	if (msg_len > mq->mq_msgsize)
1701 		return (EMSGSIZE);
1702 	msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio);
1703 	if (msg == NULL)
1704 		return (EFAULT);
1705 
1706 	/* O_NONBLOCK case */
1707 	if (!waitok) {
1708 		error = _mqueue_send(mq, msg, -1);
1709 		if (error)
1710 			goto bad;
1711 		return (0);
1712 	}
1713 
1714 	/* we allow a null timeout (wait forever) */
1715 	if (abs_timeout == NULL) {
1716 		error = _mqueue_send(mq, msg, 0);
1717 		if (error)
1718 			goto bad;
1719 		return (0);
1720 	}
1721 
1722 	/* send it before checking time */
1723 	error = _mqueue_send(mq, msg, -1);
1724 	if (error == 0)
1725 		return (0);
1726 
1727 	if (error != EAGAIN)
1728 		goto bad;
1729 
1730 	if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
1731 		error = EINVAL;
1732 		goto bad;
1733 	}
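	/*
	 * The sleep below takes a relative tick count, so recompute the
	 * remaining time from the absolute deadline on every retry until
	 * the message is queued or the deadline passes.
	 */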
1734 	for (;;) {
1735 		getnanotime(&ts);
1736 		timespecsub(abs_timeout, &ts, &ts2);
1737 		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1738 			error = ETIMEDOUT;
1739 			break;
1740 		}
1741 		TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1742 		error = _mqueue_send(mq, msg, tvtohz(&tv));
1743 		if (error != ETIMEDOUT)
1744 			break;
1745 	}
1746 	if (error == 0)
1747 		return (0);
1748 bad:
1749 	mqueue_freemsg(msg);
1750 	return (error);
1751 }
1752 
1753 /*
1754  * Common routine to send a message
1755  */
1756 static int
1757 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
1758 {
1759 	struct mqueue_msg *msg2;
1760 	int error = 0;
1761 
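	/*
	 * timo < 0 means non-blocking (fail with EAGAIN if the queue is
	 * full), timo == 0 means wait indefinitely, and timo > 0 is a
	 * relative timeout in ticks for the sleep below.
	 */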
1762 	mtx_lock(&mq->mq_mutex);
1763 	while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) {
1764 		if (timo < 0) {
1765 			mtx_unlock(&mq->mq_mutex);
1766 			return (EAGAIN);
1767 		}
1768 		mq->mq_senders++;
1769 		error = msleep(&mq->mq_senders, &mq->mq_mutex,
1770 			    PCATCH, "mqsend", timo);
1771 		mq->mq_senders--;
1772 		if (error == EAGAIN)
1773 			error = ETIMEDOUT;
1774 	}
1775 	if (mq->mq_curmsgs >= mq->mq_maxmsg) {
1776 		mtx_unlock(&mq->mq_mutex);
1777 		return (error);
1778 	}
1779 	error = 0;
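	/*
	 * Keep the queue sorted by descending priority; messages of equal
	 * priority stay in FIFO order, so a new message is inserted after
	 * existing messages of the same priority.
	 */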
1780 	if (TAILQ_EMPTY(&mq->mq_msgq)) {
1781 		TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link);
1782 	} else {
1783 		if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) {
1784 			TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link);
1785 		} else {
1786 			TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) {
1787 				if (msg2->msg_prio < msg->msg_prio)
1788 					break;
1789 			}
1790 			TAILQ_INSERT_BEFORE(msg2, msg, msg_link);
1791 		}
1792 	}
1793 	mq->mq_curmsgs++;
1794 	mq->mq_totalbytes += msg->msg_size;
1795 	if (mq->mq_receivers)
1796 		wakeup_one(&mq->mq_receivers);
1797 	else if (mq->mq_notifier != NULL)
1798 		mqueue_send_notification(mq);
1799 	if (mq->mq_flags & MQ_RSEL) {
1800 		mq->mq_flags &= ~MQ_RSEL;
1801 		selwakeup(&mq->mq_rsel);
1802 	}
1803 	KNOTE_LOCKED(&mq->mq_rsel.si_note, 0);
1804 	mtx_unlock(&mq->mq_mutex);
1805 	return (0);
1806 }
1807 
1808 /*
1809  * Send a realtime signal to the process which registered itself
1810  * successfully via mq_notify.
1811  */
1812 static void
1813 mqueue_send_notification(struct mqueue *mq)
1814 {
1815 	struct mqueue_notifier *nt;
1816 	struct thread *td;
1817 	struct proc *p;
1818 	int error;
1819 
1820 	mtx_assert(&mq->mq_mutex, MA_OWNED);
1821 	nt = mq->mq_notifier;
1822 	if (nt->nt_sigev.sigev_notify != SIGEV_NONE) {
1823 		p = nt->nt_proc;
1824 		error = sigev_findtd(p, &nt->nt_sigev, &td);
1825 		if (error) {
1826 			mq->mq_notifier = NULL;
1827 			return;
1828 		}
1829 		if (!KSI_ONQ(&nt->nt_ksi)) {
1830 			ksiginfo_set_sigev(&nt->nt_ksi, &nt->nt_sigev);
1831 			tdsendsignal(p, td, nt->nt_ksi.ksi_signo, &nt->nt_ksi);
1832 		}
1833 		PROC_UNLOCK(p);
1834 	}
1835 	mq->mq_notifier = NULL;
1836 }
1837 
1838 /*
1839  * Get a message. If waitok is false, the thread will not be
1840  * blocked if there is no data in the queue; otherwise, the
1841  * absolute timeout will be checked.
1842  */
1843 int
1844 mqueue_receive(struct mqueue *mq, char *msg_ptr,
1845 	size_t msg_len, unsigned *msg_prio, int waitok,
1846 	const struct timespec *abs_timeout)
1847 {
1848 	struct mqueue_msg *msg;
1849 	struct timespec ts, ts2;
1850 	struct timeval tv;
1851 	int error;
1852 
1853 	if (msg_len < mq->mq_msgsize)
1854 		return (EMSGSIZE);
1855 
1856 	/* O_NONBLOCK case */
1857 	if (!waitok) {
1858 		error = _mqueue_recv(mq, &msg, -1);
1859 		if (error)
1860 			return (error);
1861 		goto received;
1862 	}
1863 
1864 	/* we allow a null timeout (wait forever). */
1865 	if (abs_timeout == NULL) {
1866 		error = _mqueue_recv(mq, &msg, 0);
1867 		if (error)
1868 			return (error);
1869 		goto received;
1870 	}
1871 
1872 	/* try to get a message before checking time */
1873 	error = _mqueue_recv(mq, &msg, -1);
1874 	if (error == 0)
1875 		goto received;
1876 
1877 	if (error != EAGAIN)
1878 		return (error);
1879 
1880 	if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
1881 		error = EINVAL;
1882 		return (error);
1883 	}
1884 
1885 	for (;;) {
1886 		getnanotime(&ts);
1887 		timespecsub(abs_timeout, &ts, &ts2);
1888 		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1889 			error = ETIMEDOUT;
1890 			return (error);
1891 		}
1892 		TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1893 		error = _mqueue_recv(mq, &msg, tvtohz(&tv));
1894 		if (error == 0)
1895 			break;
1896 		if (error != ETIMEDOUT)
1897 			return (error);
1898 	}
1899 
1900 received:
1901 	error = mqueue_savemsg(msg, msg_ptr, msg_prio);
1902 	if (error == 0) {
1903 		curthread->td_retval[0] = msg->msg_size;
1904 		curthread->td_retval[1] = 0;
1905 	}
1906 	mqueue_freemsg(msg);
1907 	return (error);
1908 }
1909 
1910 /*
1911  * Common routine to receive a message
1912  */
1913 static int
1914 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo)
1915 {
1916 	int error = 0;
1917 
1918 	mtx_lock(&mq->mq_mutex);
1919 	while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) {
1920 		if (timo < 0) {
1921 			mtx_unlock(&mq->mq_mutex);
1922 			return (EAGAIN);
1923 		}
1924 		mq->mq_receivers++;
1925 		error = msleep(&mq->mq_receivers, &mq->mq_mutex,
1926 			    PCATCH, "mqrecv", timo);
1927 		mq->mq_receivers--;
1928 		if (error == EAGAIN)
1929 			error = ETIMEDOUT;
1930 	}
1931 	if (*msg != NULL) {
1932 		error = 0;
1933 		TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link);
1934 		mq->mq_curmsgs--;
1935 		mq->mq_totalbytes -= (*msg)->msg_size;
1936 		if (mq->mq_senders)
1937 			wakeup_one(&mq->mq_senders);
1938 		if (mq->mq_flags & MQ_WSEL) {
1939 			mq->mq_flags &= ~MQ_WSEL;
1940 			selwakeup(&mq->mq_wsel);
1941 		}
1942 		KNOTE_LOCKED(&mq->mq_wsel.si_note, 0);
1943 	}
1944 	if (mq->mq_notifier != NULL && mq->mq_receivers == 0 &&
1945 	    !TAILQ_EMPTY(&mq->mq_msgq)) {
1946 		mqueue_send_notification(mq);
1947 	}
1948 	mtx_unlock(&mq->mq_mutex);
1949 	return (error);
1950 }
1951 
1952 static __inline struct mqueue_notifier *
1953 notifier_alloc(void)
1954 {
1955 	return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO));
1956 }
1957 
1958 static __inline void
1959 notifier_free(struct mqueue_notifier *p)
1960 {
1961 	uma_zfree(mqnoti_zone, p);
1962 }
1963 
1964 static struct mqueue_notifier *
1965 notifier_search(struct proc *p, int fd)
1966 {
1967 	struct mqueue_notifier *nt;
1968 
1969 	LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) {
1970 		if (nt->nt_ksi.ksi_mqd == fd)
1971 			break;
1972 	}
1973 	return (nt);
1974 }
1975 
1976 static __inline void
1977 notifier_insert(struct proc *p, struct mqueue_notifier *nt)
1978 {
1979 	LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link);
1980 }
1981 
1982 static __inline void
1983 notifier_delete(struct proc *p, struct mqueue_notifier *nt)
1984 {
1985 	LIST_REMOVE(nt, nt_link);
1986 	notifier_free(nt);
1987 }
1988 
1989 static void
1990 notifier_remove(struct proc *p, struct mqueue *mq, int fd)
1991 {
1992 	struct mqueue_notifier *nt;
1993 
1994 	mtx_assert(&mq->mq_mutex, MA_OWNED);
1995 	PROC_LOCK(p);
1996 	nt = notifier_search(p, fd);
1997 	if (nt != NULL) {
1998 		if (mq->mq_notifier == nt)
1999 			mq->mq_notifier = NULL;
2000 		sigqueue_take(&nt->nt_ksi);
2001 		notifier_delete(p, nt);
2002 	}
2003 	PROC_UNLOCK(p);
2004 }
2005 
2006 int
2007 kern_kmq_open(struct thread *td, const char *upath, int flags, mode_t mode,
2008     const struct mq_attr *attr)
2009 {
2010 	char *path, pathbuf[MQFS_NAMELEN + 1];
2011 	struct mqfs_node *pn;
2012 	struct pwddesc *pdp;
2013 	struct file *fp;
2014 	struct mqueue *mq;
2015 	int fd, error, len, cmode;
2016 
2017 	AUDIT_ARG_FFLAGS(flags);
2018 	AUDIT_ARG_MODE(mode);
2019 
2020 	pdp = td->td_proc->p_pd;
2021 	cmode = ((mode & ~pdp->pd_cmask) & ALLPERMS) & ~S_ISTXT;
2022 	mq = NULL;
2023 	if ((flags & O_CREAT) != 0 && attr != NULL) {
2024 		if (attr->mq_maxmsg <= 0 || attr->mq_maxmsg > maxmsg)
2025 			return (EINVAL);
2026 		if (attr->mq_msgsize <= 0 || attr->mq_msgsize > maxmsgsize)
2027 			return (EINVAL);
2028 	}
2029 
2030 	path = pathbuf;
2031 	error = copyinstr(upath, path, MQFS_NAMELEN + 1, NULL);
2032 	if (error)
2033 		return (error);
2034 
2035 	/*
2036 	 * The first character of name may be a slash (/) character
2037 	 * and the remaining characters of name cannot include any slash
2038 	 * characters.
2039 	 */
2040 	len = strlen(path);
2041 	if (len < 2 || strchr(path + 1, '/') != NULL)
2042 		return (EINVAL);
2043 	if (path[0] == '/') {
2044 		path++;
2045 		len--;
2046 	}
2047 	/*
2048 	 * "." and ".." are magic directories, populated on the fly, and cannot
2049 	 * be opened as queues.
2050 	 */
2051 	if (strcmp(path, ".") == 0 || strcmp(path, "..") == 0)
2052 		return (EINVAL);
2053 	AUDIT_ARG_UPATH1_CANON(pathbuf);
2054 
2055 	error = falloc(td, &fp, &fd, O_CLOEXEC);
2056 	if (error)
2057 		return (error);
2058 
2059 	sx_xlock(&mqfs_data.mi_lock);
2060 	pn = mqfs_search(mqfs_data.mi_root, path, len, td->td_ucred);
2061 	if (pn == NULL) {
2062 		if (!(flags & O_CREAT)) {
2063 			error = ENOENT;
2064 		} else {
2065 			mq = mqueue_alloc(attr);
2066 			if (mq == NULL) {
2067 				error = ENFILE;
2068 			} else {
2069 				pn = mqfs_create_file(mqfs_data.mi_root,
2070 				         path, len, td->td_ucred,
2071 					 cmode);
2072 				if (pn == NULL) {
2073 					error = ENOSPC;
2074 					mqueue_free(mq);
2075 				}
2076 			}
2077 		}
2078 
2079 		if (error == 0) {
2080 			pn->mn_data = mq;
2081 		}
2082 	} else {
2083 		if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) {
2084 			error = EEXIST;
2085 		} else {
2086 			accmode_t accmode = 0;
2087 
2088 			if (flags & FREAD)
2089 				accmode |= VREAD;
2090 			if (flags & FWRITE)
2091 				accmode |= VWRITE;
2092 			error = vaccess(VREG, pn->mn_mode, pn->mn_uid,
2093 			    pn->mn_gid, accmode, td->td_ucred);
2094 		}
2095 	}
2096 
2097 	if (error) {
2098 		sx_xunlock(&mqfs_data.mi_lock);
2099 		fdclose(td, fp, fd);
2100 		fdrop(fp, td);
2101 		return (error);
2102 	}
2103 
2104 	mqnode_addref(pn);
2105 	sx_xunlock(&mqfs_data.mi_lock);
2106 
2107 	finit(fp, flags & (FREAD | FWRITE | O_NONBLOCK), DTYPE_MQUEUE, pn,
2108 	    &mqueueops);
2109 
2110 	td->td_retval[0] = fd;
2111 	fdrop(fp, td);
2112 	return (0);
2113 }
2114 
2115 /*
2116  * Syscall to open a message queue.
2117  */
2118 int
2119 sys_kmq_open(struct thread *td, struct kmq_open_args *uap)
2120 {
2121 	struct mq_attr attr;
2122 	int flags, error;
2123 
2124 	if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
2125 		return (EINVAL);
2126 	flags = FFLAGS(uap->flags);
2127 	if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
2128 		error = copyin(uap->attr, &attr, sizeof(attr));
2129 		if (error)
2130 			return (error);
2131 	}
2132 	return (kern_kmq_open(td, uap->path, flags, uap->mode,
2133 	    uap->attr != NULL ? &attr : NULL));
2134 }
2135 
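/*
 * kern_kmq_open() above enforces the name rules (an optional leading slash
 * that is stripped, no other slash characters, not "." or "..") and, with
 * O_CREAT, that 0 < mq_maxmsg <= maxmsg and 0 < mq_msgsize <= maxmsgsize.
 * A minimal userland sketch of creating a queue through the standard
 * wrapper (name and limits are illustrative, not defaults):
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr attr = {
 *		.mq_maxmsg  = 8,	// queue depth, bounded by the kernel limit
 *		.mq_msgsize = 256,	// per-message size, likewise bounded
 *	};
 *	mqd_t mqd = mq_open("/example_q", O_CREAT | O_RDWR, 0600, &attr);
 *	if (mqd == (mqd_t)-1)
 *		;			// EINVAL, EEXIST, EACCES, ...
 */
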
2136 /*
2137  * Syscall to unlink a message queue.
2138  */
2139 int
2140 sys_kmq_unlink(struct thread *td, struct kmq_unlink_args *uap)
2141 {
2142 	char *path, pathbuf[MQFS_NAMELEN + 1];
2143 	struct mqfs_node *pn;
2144 	int error, len;
2145 
2146 	path = pathbuf;
2147 	error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL);
2148 	if (error)
2149 		return (error);
2150 
2151 	len = strlen(path);
2152 	if (len < 2 || strchr(path + 1, '/') != NULL)
2153 		return (EINVAL);
2154 	if (path[0] == '/') {
2155 		path++;
2156 		len--;
2157 	}
2158 	if (strcmp(path, ".") == 0 || strcmp(path, "..") == 0)
2159 		return (EINVAL);
2160 	AUDIT_ARG_UPATH1_CANON(pathbuf);
2161 
2162 	sx_xlock(&mqfs_data.mi_lock);
2163 	pn = mqfs_search(mqfs_data.mi_root, path, len, td->td_ucred);
2164 	if (pn != NULL)
2165 		error = do_unlink(pn, td->td_ucred);
2166 	else
2167 		error = ENOENT;
2168 	sx_xunlock(&mqfs_data.mi_lock);
2169 	return (error);
2170 }
2171 
2172 typedef int (*_fgetf)(struct thread *, int, cap_rights_t *, struct file **);
2173 
2174 /*
2175  * Get a message queue from a given file descriptor slot
2176  */
2177 static int
2178 _getmq(struct thread *td, int fd, cap_rights_t *rightsp, _fgetf func,
2179        struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq)
2180 {
2181 	struct mqfs_node *pn;
2182 	int error;
2183 
2184 	error = func(td, fd, rightsp, fpp);
2185 	if (error)
2186 		return (error);
2187 	if (&mqueueops != (*fpp)->f_ops) {
2188 		fdrop(*fpp, td);
2189 		return (EBADF);
2190 	}
2191 	pn = (*fpp)->f_data;
2192 	if (ppn)
2193 		*ppn = pn;
2194 	if (pmq)
2195 		*pmq = pn->mn_data;
2196 	return (0);
2197 }
2198 
2199 static __inline int
2200 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn,
2201 	struct mqueue **pmq)
2202 {
2203 
2204 	return _getmq(td, fd, &cap_event_rights, fget,
2205 	    fpp, ppn, pmq);
2206 }
2207 
2208 static __inline int
2209 getmq_read(struct thread *td, int fd, struct file **fpp,
2210 	 struct mqfs_node **ppn, struct mqueue **pmq)
2211 {
2212 
2213 	return _getmq(td, fd, &cap_read_rights, fget_read,
2214 	    fpp, ppn, pmq);
2215 }
2216 
2217 static __inline int
2218 getmq_write(struct thread *td, int fd, struct file **fpp,
2219 	struct mqfs_node **ppn, struct mqueue **pmq)
2220 {
2221 
2222 	return _getmq(td, fd, &cap_write_rights, fget_write,
2223 	    fpp, ppn, pmq);
2224 }
2225 
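/*
 * The wrappers above pair each syscall with the Capsicum rights it needs:
 * CAP_EVENT for attribute and notification operations, CAP_READ for
 * receives, CAP_WRITE for sends.  A rough sketch of restricting a queue
 * descriptor ahead of entering capability mode (assuming FreeBSD's
 * non-portable mq_getfd_np(3) accessor and an open descriptor mqd):
 *
 *	#include <sys/capsicum.h>
 *	#include <mqueue.h>
 *
 *	cap_rights_t rights;
 *	int fd = mq_getfd_np(mqd);
 *
 *	// Keep only receive and event rights; sends on this descriptor
 *	// will then fail with ENOTCAPABLE.
 *	cap_rights_init(&rights, CAP_READ, CAP_EVENT);
 *	cap_rights_limit(fd, &rights);
 */
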
2226 int
2227 kern_kmq_setattr(struct thread *td, int mqd, const struct mq_attr *attr,
2228     struct mq_attr *oattr)
2229 {
2230 	struct mqueue *mq;
2231 	struct file *fp;
2232 	u_int oflag, flag;
2233 	int error;
2234 
2235 	AUDIT_ARG_FD(mqd);
2236 	if (attr != NULL && (attr->mq_flags & ~O_NONBLOCK) != 0)
2237 		return (EINVAL);
2238 	error = getmq(td, mqd, &fp, NULL, &mq);
2239 	if (error)
2240 		return (error);
2241 	oattr->mq_maxmsg  = mq->mq_maxmsg;
2242 	oattr->mq_msgsize = mq->mq_msgsize;
2243 	oattr->mq_curmsgs = mq->mq_curmsgs;
2244 	if (attr != NULL) {
2245 		do {
2246 			oflag = flag = fp->f_flag;
2247 			flag &= ~O_NONBLOCK;
2248 			flag |= (attr->mq_flags & O_NONBLOCK);
2249 		} while (atomic_cmpset_int(&fp->f_flag, oflag, flag) == 0);
2250 	} else
2251 		oflag = fp->f_flag;
2252 	oattr->mq_flags = (O_NONBLOCK & oflag);
2253 	fdrop(fp, td);
2254 	return (error);
2255 }
2256 
2257 int
2258 sys_kmq_setattr(struct thread *td, struct kmq_setattr_args *uap)
2259 {
2260 	struct mq_attr attr, oattr;
2261 	int error;
2262 
2263 	if (uap->attr != NULL) {
2264 		error = copyin(uap->attr, &attr, sizeof(attr));
2265 		if (error != 0)
2266 			return (error);
2267 	}
2268 	error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
2269 	    &oattr);
2270 	if (error == 0 && uap->oattr != NULL) {
2271 		bzero(oattr.__reserved, sizeof(oattr.__reserved));
2272 		error = copyout(&oattr, uap->oattr, sizeof(oattr));
2273 	}
2274 	return (error);
2275 }
2276 
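/*
 * Only the O_NONBLOCK flag can be changed after creation; mq_maxmsg and
 * mq_msgsize are fixed when the queue is created and are simply reported
 * back through oattr.  A short userland sketch (standard wrappers, open
 * descriptor mqd assumed):
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr cur, old;
 *
 *	mq_getattr(mqd, &cur);		// cur.mq_curmsgs: messages queued now
 *	cur.mq_flags |= O_NONBLOCK;	// the only bit that is honoured
 *	mq_setattr(mqd, &cur, &old);	// old receives the previous settings
 */
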
2277 int
2278 kern_kmq_timedreceive(struct thread *td, int mqd, char *msg_ptr,
2279 	size_t msg_len, unsigned int *msg_prio, const struct timespec *abs_timeout)
2280 {
2281 	struct mqueue *mq;
2282 	struct file *fp;
2283 	int error, waitok;
2284 
2285 	AUDIT_ARG_FD(mqd);
2286 	error = getmq_read(td, mqd, &fp, NULL, &mq);
2287 	if (error != 0)
2288 		return (error);
2289 	waitok = (fp->f_flag & O_NONBLOCK) == 0;
2290 	error = mqueue_receive(mq, msg_ptr, msg_len, msg_prio, waitok,
2291 	    abs_timeout);
2292 	fdrop(fp, td);
2293 	return (error);
2294 }
2295 
2296 int
2297 sys_kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap)
2298 {
2299 	struct timespec *abs_timeout, ets;
2300 	int error;
2301 
2302 	if (uap->abs_timeout != NULL) {
2303 		error = copyin(uap->abs_timeout, &ets, sizeof(ets));
2304 		if (error != 0)
2305 			return (error);
2306 		abs_timeout = &ets;
2307 	} else
2308 		abs_timeout = NULL;
2309 
2310 	return (kern_kmq_timedreceive(td, uap->mqd, uap->msg_ptr, uap->msg_len,
2311 		uap->msg_prio, abs_timeout));
2312 }
2313 
2314 int
2315 kern_kmq_timedsend(struct thread *td, int mqd, const char *msg_ptr,
2316 	size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout)
2317 {
2318 	struct mqueue *mq;
2319 	struct file *fp;
2320 	int error, waitok;
2321 
2322 	AUDIT_ARG_FD(mqd);
2323 	error = getmq_write(td, mqd, &fp, NULL, &mq);
2324 	if (error != 0)
2325 		return (error);
2326 	waitok = (fp->f_flag & O_NONBLOCK) == 0;
2327 	error = mqueue_send(mq, msg_ptr, msg_len, msg_prio, waitok,
2328 		abs_timeout);
2329 	fdrop(fp, td);
2330 	return (error);
2331 }
2332 
2333 int
2334 sys_kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap)
2335 {
2336 	struct timespec *abs_timeout, ets;
2337 	int error;
2338 
2339 	if (uap->abs_timeout != NULL) {
2340 		error = copyin(uap->abs_timeout, &ets, sizeof(ets));
2341 		if (error != 0)
2342 			return (error);
2343 		abs_timeout = &ets;
2344 	} else
2345 		abs_timeout = NULL;
2346 
2347 	return (kern_kmq_timedsend(td, uap->mqd, uap->msg_ptr, uap->msg_len,
2348 		uap->msg_prio, abs_timeout));
2349 }
2350 
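/*
 * Messages are ordered by priority (higher values are delivered first) and
 * a full queue either blocks the sender or, with O_NONBLOCK set, fails with
 * EAGAIN, mirroring the receive path above.  A minimal userland sketch
 * (standard wrappers, open descriptor mqd assumed):
 *
 *	#include <mqueue.h>
 *	#include <string.h>
 *
 *	const char *msg = "ping";
 *
 *	// Priority 5: receivers drain higher-priority messages first.
 *	if (mq_send(mqd, msg, strlen(msg) + 1, 5) == -1)
 *		;	// EAGAIN when non-blocking and the queue is full
 */
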
2351 int
2352 kern_kmq_notify(struct thread *td, int mqd, struct sigevent *sigev)
2353 {
2354 	struct filedesc *fdp;
2355 	struct proc *p;
2356 	struct mqueue *mq;
2357 	struct file *fp, *fp2;
2358 	struct mqueue_notifier *nt, *newnt = NULL;
2359 	int error;
2360 
2361 	AUDIT_ARG_FD(mqd);
2362 	if (sigev != NULL) {
2363 		if (sigev->sigev_notify != SIGEV_SIGNAL &&
2364 		    sigev->sigev_notify != SIGEV_THREAD_ID &&
2365 		    sigev->sigev_notify != SIGEV_NONE)
2366 			return (EINVAL);
2367 		if ((sigev->sigev_notify == SIGEV_SIGNAL ||
2368 		    sigev->sigev_notify == SIGEV_THREAD_ID) &&
2369 		    !_SIG_VALID(sigev->sigev_signo))
2370 			return (EINVAL);
2371 	}
2372 	p = td->td_proc;
2373 	fdp = td->td_proc->p_fd;
2374 	error = getmq(td, mqd, &fp, NULL, &mq);
2375 	if (error)
2376 		return (error);
2377 again:
2378 	FILEDESC_SLOCK(fdp);
2379 	fp2 = fget_noref(fdp, mqd);
2380 	if (fp2 == NULL) {
2381 		FILEDESC_SUNLOCK(fdp);
2382 		error = EBADF;
2383 		goto out;
2384 	}
2385 #ifdef CAPABILITIES
2386 	error = cap_check(cap_rights(fdp, mqd), &cap_event_rights);
2387 	if (error) {
2388 		FILEDESC_SUNLOCK(fdp);
2389 		goto out;
2390 	}
2391 #endif
2392 	if (fp2 != fp) {
2393 		FILEDESC_SUNLOCK(fdp);
2394 		error = EBADF;
2395 		goto out;
2396 	}
2397 	mtx_lock(&mq->mq_mutex);
2398 	FILEDESC_SUNLOCK(fdp);
2399 	if (sigev != NULL) {
2400 		if (mq->mq_notifier != NULL) {
2401 			error = EBUSY;
2402 		} else {
2403 			PROC_LOCK(p);
2404 			nt = notifier_search(p, mqd);
2405 			if (nt == NULL) {
2406 				if (newnt == NULL) {
2407 					PROC_UNLOCK(p);
2408 					mtx_unlock(&mq->mq_mutex);
2409 					newnt = notifier_alloc();
2410 					goto again;
2411 				}
2412 			}
2413 
2414 			if (nt != NULL) {
2415 				sigqueue_take(&nt->nt_ksi);
2416 				if (newnt != NULL) {
2417 					notifier_free(newnt);
2418 					newnt = NULL;
2419 				}
2420 			} else {
2421 				nt = newnt;
2422 				newnt = NULL;
2423 				ksiginfo_init(&nt->nt_ksi);
2424 				nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT;
2425 				nt->nt_ksi.ksi_code = SI_MESGQ;
2426 				nt->nt_proc = p;
2427 				nt->nt_ksi.ksi_mqd = mqd;
2428 				notifier_insert(p, nt);
2429 			}
2430 			nt->nt_sigev = *sigev;
2431 			mq->mq_notifier = nt;
2432 			PROC_UNLOCK(p);
2433 			/*
2434 			 * If there are no receivers and the message queue
2435 			 * is not empty, send a notification as soon
2436 			 * as possible.
2437 			 */
2438 			if (mq->mq_receivers == 0 &&
2439 			    !TAILQ_EMPTY(&mq->mq_msgq))
2440 				mqueue_send_notification(mq);
2441 		}
2442 	} else {
2443 		notifier_remove(p, mq, mqd);
2444 	}
2445 	mtx_unlock(&mq->mq_mutex);
2446 
2447 out:
2448 	fdrop(fp, td);
2449 	if (newnt != NULL)
2450 		notifier_free(newnt);
2451 	return (error);
2452 }
2453 
2454 int
2455 sys_kmq_notify(struct thread *td, struct kmq_notify_args *uap)
2456 {
2457 	struct sigevent ev, *evp;
2458 	int error;
2459 
2460 	if (uap->sigev == NULL) {
2461 		evp = NULL;
2462 	} else {
2463 		error = copyin(uap->sigev, &ev, sizeof(ev));
2464 		if (error != 0)
2465 			return (error);
2466 		evp = &ev;
2467 	}
2468 	return (kern_kmq_notify(td, uap->mqd, evp));
2469 }
2470 
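/*
 * A notification registration is consumed once it fires, and at most one
 * registration may exist per queue at a time (a second registrant gets
 * EBUSY, as enforced above).  A userland sketch of arming SIGUSR1 delivery
 * and re-arming after it fires (standard wrappers, open descriptor mqd
 * assumed, signal handler setup omitted):
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *
 *	mq_notify(mqd, &sev);	// arm; EBUSY if another process already did
 *	// ... after the signal arrives: drain the queue, then re-arm ...
 *	mq_notify(mqd, &sev);
 *	mq_notify(mqd, NULL);	// passing NULL drops our registration
 */
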
2471 static void
2472 mqueue_fdclose(struct thread *td, int fd, struct file *fp)
2473 {
2474 	struct mqueue *mq;
2475 #ifdef INVARIANTS
2476 	struct filedesc *fdp;
2477 
2478 	fdp = td->td_proc->p_fd;
2479 	FILEDESC_LOCK_ASSERT(fdp);
2480 #endif
2481 
2482 	if (fp->f_ops == &mqueueops) {
2483 		mq = FPTOMQ(fp);
2484 		mtx_lock(&mq->mq_mutex);
2485 		notifier_remove(td->td_proc, mq, fd);
2486 
2487 		/* Have to wake up selecting threads in the same process. */
2488 		if (mq->mq_flags & MQ_RSEL) {
2489 			mq->mq_flags &= ~MQ_RSEL;
2490 			selwakeup(&mq->mq_rsel);
2491 		}
2492 		if (mq->mq_flags & MQ_WSEL) {
2493 			mq->mq_flags &= ~MQ_WSEL;
2494 			selwakeup(&mq->mq_wsel);
2495 		}
2496 		mtx_unlock(&mq->mq_mutex);
2497 	}
2498 }
2499 
2500 static void
2501 mq_proc_exit(void *arg __unused, struct proc *p)
2502 {
2503 	struct filedesc *fdp;
2504 	struct file *fp;
2505 	struct mqueue *mq;
2506 	int i;
2507 
2508 	fdp = p->p_fd;
2509 	FILEDESC_SLOCK(fdp);
2510 	for (i = 0; i < fdp->fd_nfiles; ++i) {
2511 		fp = fget_noref(fdp, i);
2512 		if (fp != NULL && fp->f_ops == &mqueueops) {
2513 			mq = FPTOMQ(fp);
2514 			mtx_lock(&mq->mq_mutex);
2515 			notifier_remove(p, FPTOMQ(fp), i);
2516 			mtx_unlock(&mq->mq_mutex);
2517 		}
2518 	}
2519 	FILEDESC_SUNLOCK(fdp);
2520 	KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left"));
2521 }
2522 
2523 static int
2524 mqf_poll(struct file *fp, int events, struct ucred *active_cred,
2525 	struct thread *td)
2526 {
2527 	struct mqueue *mq = FPTOMQ(fp);
2528 	int revents = 0;
2529 
2530 	mtx_lock(&mq->mq_mutex);
2531 	if (events & (POLLIN | POLLRDNORM)) {
2532 		if (mq->mq_curmsgs) {
2533 			revents |= events & (POLLIN | POLLRDNORM);
2534 		} else {
2535 			mq->mq_flags |= MQ_RSEL;
2536 			selrecord(td, &mq->mq_rsel);
2537 		}
2538 	}
2539 	if (events & POLLOUT) {
2540 		if (mq->mq_curmsgs < mq->mq_maxmsg)
2541 			revents |= POLLOUT;
2542 		else {
2543 			mq->mq_flags |= MQ_WSEL;
2544 			selrecord(td, &mq->mq_wsel);
2545 		}
2546 	}
2547 	mtx_unlock(&mq->mq_mutex);
2548 	return (revents);
2549 }
2550 
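/*
 * Since a queue is just a file descriptor, readiness can be watched with
 * poll(2) or kqueue(2): mqf_poll() above reports POLLIN while messages are
 * pending and POLLOUT while there is room, and mqf_kqfilter() below wires
 * up EVFILT_READ/EVFILT_WRITE.  A rough kqueue sketch (assuming FreeBSD's
 * non-portable mq_getfd_np(3) accessor and an open descriptor mqd):
 *
 *	#include <sys/event.h>
 *	#include <mqueue.h>
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, mq_getfd_np(mqd), EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// register the filter
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// block until a message arrives
 */
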
2551 static int
2552 mqf_close(struct file *fp, struct thread *td)
2553 {
2554 	struct mqfs_node *pn;
2555 
2556 	fp->f_ops = &badfileops;
2557 	pn = fp->f_data;
2558 	fp->f_data = NULL;
2559 	sx_xlock(&mqfs_data.mi_lock);
2560 	mqnode_release(pn);
2561 	sx_xunlock(&mqfs_data.mi_lock);
2562 	return (0);
2563 }
2564 
2565 static int
2566 mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred)
2567 {
2568 	struct mqfs_node *pn = fp->f_data;
2569 
2570 	bzero(st, sizeof *st);
2571 	sx_xlock(&mqfs_data.mi_lock);
2572 	st->st_atim = pn->mn_atime;
2573 	st->st_mtim = pn->mn_mtime;
2574 	st->st_ctim = pn->mn_ctime;
2575 	st->st_birthtim = pn->mn_birth;
2576 	st->st_uid = pn->mn_uid;
2577 	st->st_gid = pn->mn_gid;
2578 	st->st_mode = S_IFIFO | pn->mn_mode;
2579 	sx_xunlock(&mqfs_data.mi_lock);
2580 	return (0);
2581 }
2582 
2583 static int
2584 mqf_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
2585     struct thread *td)
2586 {
2587 	struct mqfs_node *pn;
2588 	int error;
2589 
2590 	error = 0;
2591 	pn = fp->f_data;
2592 	sx_xlock(&mqfs_data.mi_lock);
2593 	error = vaccess(VREG, pn->mn_mode, pn->mn_uid, pn->mn_gid, VADMIN,
2594 	    active_cred);
2595 	if (error != 0)
2596 		goto out;
2597 	pn->mn_mode = mode & ACCESSPERMS;
2598 out:
2599 	sx_xunlock(&mqfs_data.mi_lock);
2600 	return (error);
2601 }
2602 
2603 static int
2604 mqf_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
2605     struct thread *td)
2606 {
2607 	struct mqfs_node *pn;
2608 	int error;
2609 
2610 	error = 0;
2611 	pn = fp->f_data;
2612 	sx_xlock(&mqfs_data.mi_lock);
2613 	if (uid == (uid_t)-1)
2614 		uid = pn->mn_uid;
2615 	if (gid == (gid_t)-1)
2616 		gid = pn->mn_gid;
2617 	if (((uid != pn->mn_uid && uid != active_cred->cr_uid) ||
2618 	    (gid != pn->mn_gid && !groupmember(gid, active_cred))) &&
2619 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
2620 		goto out;
2621 	pn->mn_uid = uid;
2622 	pn->mn_gid = gid;
2623 out:
2624 	sx_xunlock(&mqfs_data.mi_lock);
2625 	return (error);
2626 }
2627 
2628 static int
2629 mqf_kqfilter(struct file *fp, struct knote *kn)
2630 {
2631 	struct mqueue *mq = FPTOMQ(fp);
2632 	int error = 0;
2633 
2634 	if (kn->kn_filter == EVFILT_READ) {
2635 		kn->kn_fop = &mq_rfiltops;
2636 		knlist_add(&mq->mq_rsel.si_note, kn, 0);
2637 	} else if (kn->kn_filter == EVFILT_WRITE) {
2638 		kn->kn_fop = &mq_wfiltops;
2639 		knlist_add(&mq->mq_wsel.si_note, kn, 0);
2640 	} else
2641 		error = EINVAL;
2642 	return (error);
2643 }
2644 
2645 static void
2646 filt_mqdetach(struct knote *kn)
2647 {
2648 	struct mqueue *mq = FPTOMQ(kn->kn_fp);
2649 
2650 	if (kn->kn_filter == EVFILT_READ)
2651 		knlist_remove(&mq->mq_rsel.si_note, kn, 0);
2652 	else if (kn->kn_filter == EVFILT_WRITE)
2653 		knlist_remove(&mq->mq_wsel.si_note, kn, 0);
2654 	else
2655 		panic("filt_mqdetach");
2656 }
2657 
2658 static int
2659 filt_mqread(struct knote *kn, long hint)
2660 {
2661 	struct mqueue *mq = FPTOMQ(kn->kn_fp);
2662 
2663 	mtx_assert(&mq->mq_mutex, MA_OWNED);
2664 	return (mq->mq_curmsgs != 0);
2665 }
2666 
2667 static int
2668 filt_mqwrite(struct knote *kn, long hint)
2669 {
2670 	struct mqueue *mq = FPTOMQ(kn->kn_fp);
2671 
2672 	mtx_assert(&mq->mq_mutex, MA_OWNED);
2673 	return (mq->mq_curmsgs < mq->mq_maxmsg);
2674 }
2675 
2676 static int
2677 mqf_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2678 {
2679 
2680 	kif->kf_type = KF_TYPE_MQUEUE;
2681 	return (0);
2682 }
2683 
2684 static const struct fileops mqueueops = {
2685 	.fo_read		= invfo_rdwr,
2686 	.fo_write		= invfo_rdwr,
2687 	.fo_truncate		= invfo_truncate,
2688 	.fo_ioctl		= invfo_ioctl,
2689 	.fo_poll		= mqf_poll,
2690 	.fo_kqfilter		= mqf_kqfilter,
2691 	.fo_stat		= mqf_stat,
2692 	.fo_close		= mqf_close,
2693 	.fo_chmod		= mqf_chmod,
2694 	.fo_chown		= mqf_chown,
2695 	.fo_sendfile		= invfo_sendfile,
2696 	.fo_fill_kinfo		= mqf_fill_kinfo,
2697 	.fo_cmp			= file_kcmp_generic,
2698 	.fo_flags		= DFLAG_PASSABLE,
2699 };
2700 
2701 static struct vop_vector mqfs_vnodeops = {
2702 	.vop_default 		= &default_vnodeops,
2703 	.vop_access		= mqfs_access,
2704 	.vop_cachedlookup	= mqfs_lookup,
2705 	.vop_lookup		= vfs_cache_lookup,
2706 	.vop_reclaim		= mqfs_reclaim,
2707 	.vop_create		= mqfs_create,
2708 	.vop_remove		= mqfs_remove,
2709 	.vop_inactive		= mqfs_inactive,
2710 	.vop_open		= mqfs_open,
2711 	.vop_close		= mqfs_close,
2712 	.vop_getattr		= mqfs_getattr,
2713 	.vop_setattr		= mqfs_setattr,
2714 	.vop_read		= mqfs_read,
2715 	.vop_write		= VOP_EOPNOTSUPP,
2716 	.vop_readdir		= mqfs_readdir,
2717 	.vop_mkdir		= VOP_EOPNOTSUPP,
2718 	.vop_rmdir		= VOP_EOPNOTSUPP
2719 };
2720 VFS_VOP_VECTOR_REGISTER(mqfs_vnodeops);
2721 
2722 static struct vfsops mqfs_vfsops = {
2723 	.vfs_init 		= mqfs_init,
2724 	.vfs_uninit		= mqfs_uninit,
2725 	.vfs_mount		= mqfs_mount,
2726 	.vfs_unmount		= mqfs_unmount,
2727 	.vfs_root		= mqfs_root,
2728 	.vfs_statfs		= mqfs_statfs,
2729 };
2730 
2731 static struct vfsconf mqueuefs_vfsconf = {
2732 	.vfc_version = VFS_VERSION,
2733 	.vfc_name = "mqueuefs",
2734 	.vfc_vfsops = &mqfs_vfsops,
2735 	.vfc_typenum = -1,
2736 	.vfc_flags = VFCF_SYNTHETIC
2737 };
2738 
2739 static struct syscall_helper_data mq_syscalls[] = {
2740 	SYSCALL_INIT_HELPER(kmq_open),
2741 	SYSCALL_INIT_HELPER_F(kmq_setattr, SYF_CAPENABLED),
2742 	SYSCALL_INIT_HELPER_F(kmq_timedsend, SYF_CAPENABLED),
2743 	SYSCALL_INIT_HELPER_F(kmq_timedreceive, SYF_CAPENABLED),
2744 	SYSCALL_INIT_HELPER_F(kmq_notify, SYF_CAPENABLED),
2745 	SYSCALL_INIT_HELPER(kmq_unlink),
2746 	SYSCALL_INIT_LAST
2747 };
2748 
2749 #ifdef COMPAT_FREEBSD32
2750 #include <compat/freebsd32/freebsd32.h>
2751 #include <compat/freebsd32/freebsd32_proto.h>
2752 #include <compat/freebsd32/freebsd32_signal.h>
2753 #include <compat/freebsd32/freebsd32_syscall.h>
2754 #include <compat/freebsd32/freebsd32_util.h>
2755 
2756 static void
2757 mq_attr_from32(const struct mq_attr32 *from, struct mq_attr *to)
2758 {
2759 
2760 	to->mq_flags = from->mq_flags;
2761 	to->mq_maxmsg = from->mq_maxmsg;
2762 	to->mq_msgsize = from->mq_msgsize;
2763 	to->mq_curmsgs = from->mq_curmsgs;
2764 }
2765 
2766 static void
2767 mq_attr_to32(const struct mq_attr *from, struct mq_attr32 *to)
2768 {
2769 
2770 	to->mq_flags = from->mq_flags;
2771 	to->mq_maxmsg = from->mq_maxmsg;
2772 	to->mq_msgsize = from->mq_msgsize;
2773 	to->mq_curmsgs = from->mq_curmsgs;
2774 }
2775 
2776 int
2777 freebsd32_kmq_open(struct thread *td, struct freebsd32_kmq_open_args *uap)
2778 {
2779 	struct mq_attr attr;
2780 	struct mq_attr32 attr32;
2781 	int flags, error;
2782 
2783 	if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
2784 		return (EINVAL);
2785 	flags = FFLAGS(uap->flags);
2786 	if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
2787 		error = copyin(uap->attr, &attr32, sizeof(attr32));
2788 		if (error)
2789 			return (error);
2790 		mq_attr_from32(&attr32, &attr);
2791 	}
2792 	return (kern_kmq_open(td, uap->path, flags, uap->mode,
2793 	    uap->attr != NULL ? &attr : NULL));
2794 }
2795 
2796 int
2797 freebsd32_kmq_setattr(struct thread *td, struct freebsd32_kmq_setattr_args *uap)
2798 {
2799 	struct mq_attr attr, oattr;
2800 	struct mq_attr32 attr32, oattr32;
2801 	int error;
2802 
2803 	if (uap->attr != NULL) {
2804 		error = copyin(uap->attr, &attr32, sizeof(attr32));
2805 		if (error != 0)
2806 			return (error);
2807 		mq_attr_from32(&attr32, &attr);
2808 	}
2809 	error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
2810 	    &oattr);
2811 	if (error == 0 && uap->oattr != NULL) {
2812 		mq_attr_to32(&oattr, &oattr32);
2813 		bzero(oattr32.__reserved, sizeof(oattr32.__reserved));
2814 		error = copyout(&oattr32, uap->oattr, sizeof(oattr32));
2815 	}
2816 	return (error);
2817 }
2818 
2819 int
2820 freebsd32_kmq_timedsend(struct thread *td,
2821     struct freebsd32_kmq_timedsend_args *uap)
2822 {
2823 	struct timespec32 ets32;
2824 	struct timespec *abs_timeout, ets;
2825 	int error;
2826 
2827 	if (uap->abs_timeout != NULL) {
2828 		error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
2829 		if (error != 0)
2830 			return (error);
2831 		CP(ets32, ets, tv_sec);
2832 		CP(ets32, ets, tv_nsec);
2833 		abs_timeout = &ets;
2834 	} else
2835 		abs_timeout = NULL;
2836 
2837 	return (kern_kmq_timedsend(td, uap->mqd, uap->msg_ptr, uap->msg_len,
2838 		uap->msg_prio, abs_timeout));
2839 }
2840 
2841 int
2842 freebsd32_kmq_timedreceive(struct thread *td,
2843     struct freebsd32_kmq_timedreceive_args *uap)
2844 {
2845 	struct timespec32 ets32;
2846 	struct timespec *abs_timeout, ets;
2847 	int error;
2848 
2849 	if (uap->abs_timeout != NULL) {
2850 		error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
2851 		if (error != 0)
2852 			return (error);
2853 		CP(ets32, ets, tv_sec);
2854 		CP(ets32, ets, tv_nsec);
2855 		abs_timeout = &ets;
2856 	} else
2857 		abs_timeout = NULL;
2858 
2859 	return (kern_kmq_timedreceive(td, uap->mqd, uap->msg_ptr, uap->msg_len,
2860 		uap->msg_prio, abs_timeout));
2861 }
2862 
2863 int
2864 freebsd32_kmq_notify(struct thread *td, struct freebsd32_kmq_notify_args *uap)
2865 {
2866 	struct sigevent ev, *evp;
2867 	struct sigevent32 ev32;
2868 	int error;
2869 
2870 	if (uap->sigev == NULL) {
2871 		evp = NULL;
2872 	} else {
2873 		error = copyin(uap->sigev, &ev32, sizeof(ev32));
2874 		if (error != 0)
2875 			return (error);
2876 		error = convert_sigevent32(&ev32, &ev);
2877 		if (error != 0)
2878 			return (error);
2879 		evp = &ev;
2880 	}
2881 	return (kern_kmq_notify(td, uap->mqd, evp));
2882 }
2883 
2884 static struct syscall_helper_data mq32_syscalls[] = {
2885 	SYSCALL32_INIT_HELPER(freebsd32_kmq_open),
2886 	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_setattr, SYF_CAPENABLED),
2887 	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedsend, SYF_CAPENABLED),
2888 	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedreceive, SYF_CAPENABLED),
2889 	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_notify, SYF_CAPENABLED),
2890 	SYSCALL32_INIT_HELPER_COMPAT(kmq_unlink),
2891 	SYSCALL_INIT_LAST
2892 };
2893 #endif
2894 
2895 static int
2896 mqinit(void)
2897 {
2898 	int error;
2899 
2900 	error = syscall_helper_register(mq_syscalls, SY_THR_STATIC_KLD);
2901 	if (error != 0)
2902 		return (error);
2903 #ifdef COMPAT_FREEBSD32
2904 	error = syscall32_helper_register(mq32_syscalls, SY_THR_STATIC_KLD);
2905 	if (error != 0)
2906 		return (error);
2907 #endif
2908 	return (0);
2909 }
2910 
2911 static int
2912 mqunload(void)
2913 {
2914 
2915 #ifdef COMPAT_FREEBSD32
2916 	syscall32_helper_unregister(mq32_syscalls);
2917 #endif
2918 	syscall_helper_unregister(mq_syscalls);
2919 	return (0);
2920 }
2921 
2922 static int
2923 mq_modload(struct module *module, int cmd, void *arg)
2924 {
2925 	int error = 0;
2926 
2927 	error = vfs_modevent(module, cmd, arg);
2928 	if (error != 0)
2929 		return (error);
2930 
2931 	switch (cmd) {
2932 	case MOD_LOAD:
2933 		error = mqinit();
2934 		if (error != 0)
2935 			mqunload();
2936 		break;
2937 	case MOD_UNLOAD:
2938 		error = mqunload();
2939 		break;
2940 	default:
2941 		break;
2942 	}
2943 	return (error);
2944 }
2945 
2946 static moduledata_t mqueuefs_mod = {
2947 	"mqueuefs",
2948 	mq_modload,
2949 	&mqueuefs_vfsconf
2950 };
2951 DECLARE_MODULE(mqueuefs, mqueuefs_mod, SI_SUB_VFS, SI_ORDER_MIDDLE);
2952 MODULE_VERSION(mqueuefs, 1);
2953