xref: /freebsd/sys/kern/uipc_mqueue.c (revision 273c26a3c3bea87a241d6879abd4f991db180bf0)
1 /*-
2  * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 /*
29  * POSIX message queue implementation.
30  *
31  * 1) A mqueue filesystem can be mounted; each message queue then appears
32  *    in the mounted directory, where users may change a queue's permissions
33  *    and ownership or remove a queue.  Manually creating a file in the
34  *    directory creates a message queue in the kernel with default message
35  *    queue attributes and the same name; this method is not recommended,
36  *    since the mq_open syscall lets the user specify different attributes.
37  *    The file system can also be mounted multiple times at different
38  *    mount points, but every mount shows the same contents.
39  *
40  * 2) Standard POSIX message queue API.  The syscalls do not go through
41  *    the VFS layer but operate directly on the internal data structures,
42  *    which lets a user employ the IPC facility without mounting mqueuefs.
43  */
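
/*
 * For illustration only, a minimal userspace sketch of the standard POSIX
 * API these syscalls back (hedged example, not part of this file; the
 * queue name "/myq" and the sizes are arbitrary, and the mq_* wrappers
 * come from <mqueue.h>, librt on FreeBSD).  mq_send() enqueues at the
 * given priority and mq_receive() always returns the oldest message of
 * the highest priority:
 *
 *	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 64 };
 *	mqd_t mqd = mq_open("/myq", O_CREAT | O_RDWR, 0600, &attr);
 *	char buf[64] = "hello";
 *	unsigned int prio;
 *
 *	mq_send(mqd, buf, sizeof(buf), 0);
 *	mq_receive(mqd, buf, sizeof(buf), &prio);
 *	mq_close(mqd);
 *	mq_unlink("/myq");
 */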
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include "opt_capsicum.h"
49 #include "opt_compat.h"
50 
51 #include <sys/param.h>
52 #include <sys/kernel.h>
53 #include <sys/systm.h>
54 #include <sys/limits.h>
55 #include <sys/malloc.h>
56 #include <sys/buf.h>
57 #include <sys/capsicum.h>
58 #include <sys/dirent.h>
59 #include <sys/event.h>
60 #include <sys/eventhandler.h>
61 #include <sys/fcntl.h>
62 #include <sys/file.h>
63 #include <sys/filedesc.h>
64 #include <sys/jail.h>
65 #include <sys/lock.h>
66 #include <sys/module.h>
67 #include <sys/mount.h>
68 #include <sys/mqueue.h>
69 #include <sys/mutex.h>
70 #include <sys/namei.h>
71 #include <sys/posix4.h>
72 #include <sys/poll.h>
73 #include <sys/priv.h>
74 #include <sys/proc.h>
75 #include <sys/queue.h>
76 #include <sys/sysproto.h>
77 #include <sys/stat.h>
78 #include <sys/syscall.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/sysent.h>
81 #include <sys/sx.h>
82 #include <sys/sysctl.h>
83 #include <sys/taskqueue.h>
84 #include <sys/unistd.h>
85 #include <sys/user.h>
86 #include <sys/vnode.h>
87 #include <machine/atomic.h>
88 
89 FEATURE(p1003_1b_mqueue, "POSIX P1003.1B message queues support");
90 
91 /*
92  * Limits and constants
93  */
94 #define	MQFS_NAMELEN		NAME_MAX
95 #define	MQFS_DELEN		(8 + MQFS_NAMELEN)
96 
97 /* node types */
98 typedef enum {
99 	mqfstype_none = 0,
100 	mqfstype_root,
101 	mqfstype_dir,
102 	mqfstype_this,
103 	mqfstype_parent,
104 	mqfstype_file,
105 	mqfstype_symlink,
106 } mqfs_type_t;
107 
108 struct mqfs_node;
109 
110 /*
111  * mqfs_info: describes a mqfs instance
112  */
113 struct mqfs_info {
114 	struct sx		mi_lock;
115 	struct mqfs_node	*mi_root;
116 	struct unrhdr		*mi_unrhdr;
117 };
118 
119 struct mqfs_vdata {
120 	LIST_ENTRY(mqfs_vdata)	mv_link;
121 	struct mqfs_node	*mv_node;
122 	struct vnode		*mv_vnode;
123 	struct task		mv_task;
124 };
125 
126 /*
127  * mqfs_node: describes a node (file or directory) within a mqfs
128  */
129 struct mqfs_node {
130 	char			mn_name[MQFS_NAMELEN+1];
131 	struct mqfs_info	*mn_info;
132 	struct mqfs_node	*mn_parent;
133 	LIST_HEAD(,mqfs_node)	mn_children;
134 	LIST_ENTRY(mqfs_node)	mn_sibling;
135 	LIST_HEAD(,mqfs_vdata)	mn_vnodes;
136 	const void		*mn_pr_root;
137 	int			mn_refcount;
138 	mqfs_type_t		mn_type;
139 	int			mn_deleted;
140 	uint32_t		mn_fileno;
141 	void			*mn_data;
142 	struct timespec		mn_birth;
143 	struct timespec		mn_ctime;
144 	struct timespec		mn_atime;
145 	struct timespec		mn_mtime;
146 	uid_t			mn_uid;
147 	gid_t			mn_gid;
148 	int			mn_mode;
149 };
150 
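/*
 * Conversion macros: a vnode's v_data points to a struct mqfs_vdata whose
 * mv_node is the backing mqfs_node; an open file's f_data points directly
 * to the mqfs_node; for queue files the node's mn_data is the struct mqueue.
 */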
151 #define	VTON(vp)	(((struct mqfs_vdata *)((vp)->v_data))->mv_node)
152 #define VTOMQ(vp) 	((struct mqueue *)(VTON(vp)->mn_data))
153 #define	VFSTOMQFS(m)	((struct mqfs_info *)((m)->mnt_data))
154 #define	FPTOMQ(fp)	((struct mqueue *)(((struct mqfs_node *) \
155 				(fp)->f_data)->mn_data))
156 
157 TAILQ_HEAD(msgq, mqueue_msg);
158 
159 struct mqueue;
160 
161 struct mqueue_notifier {
162 	LIST_ENTRY(mqueue_notifier)	nt_link;
163 	struct sigevent			nt_sigev;
164 	ksiginfo_t			nt_ksi;
165 	struct proc			*nt_proc;
166 };
167 
168 struct mqueue {
169 	struct mtx	mq_mutex;
170 	int		mq_flags;
171 	long		mq_maxmsg;
172 	long		mq_msgsize;
173 	long		mq_curmsgs;
174 	long		mq_totalbytes;
175 	struct msgq	mq_msgq;
176 	int		mq_receivers;
177 	int		mq_senders;
178 	struct selinfo	mq_rsel;
179 	struct selinfo	mq_wsel;
180 	struct mqueue_notifier	*mq_notifier;
181 };
182 
183 #define	MQ_RSEL		0x01
184 #define	MQ_WSEL		0x02
185 
186 struct mqueue_msg {
187 	TAILQ_ENTRY(mqueue_msg)	msg_link;
188 	unsigned int	msg_prio;
189 	unsigned int	msg_size;
190 	/* message data follows this header in the same allocation */
191 };
192 
193 static SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW, 0,
194 	"POSIX real-time message queues");
195 
196 static int	default_maxmsg  = 10;
197 static int	default_msgsize = 1024;
198 
199 static int	maxmsg = 100;
200 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW,
201     &maxmsg, 0, "Default maximum messages in queue");
202 static int	maxmsgsize = 16384;
203 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW,
204     &maxmsgsize, 0, "Default maximum message size");
205 static int	maxmq = 100;
206 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW,
207     &maxmq, 0, "Maximum number of message queues");
208 static int	curmq = 0;
209 SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW,
210     &curmq, 0, "Current number of message queues");
211 static int	unloadable = 0;
212 static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data");
213 
214 static eventhandler_tag exit_tag;
215 
216 /* Only one instance per system */
217 static struct mqfs_info		mqfs_data;
218 static uma_zone_t		mqnode_zone;
219 static uma_zone_t		mqueue_zone;
220 static uma_zone_t		mvdata_zone;
221 static uma_zone_t		mqnoti_zone;
222 static struct vop_vector	mqfs_vnodeops;
223 static struct fileops		mqueueops;
224 static unsigned			mqfs_osd_jail_slot;
225 
226 /*
227  * Directory structure construction and manipulation
228  */
229 #ifdef notyet
230 static struct mqfs_node	*mqfs_create_dir(struct mqfs_node *parent,
231 	const char *name, int namelen, struct ucred *cred, int mode);
232 static struct mqfs_node	*mqfs_create_link(struct mqfs_node *parent,
233 	const char *name, int namelen, struct ucred *cred, int mode);
234 #endif
235 
236 static struct mqfs_node	*mqfs_create_file(struct mqfs_node *parent,
237 	const char *name, int namelen, struct ucred *cred, int mode);
238 static int	mqfs_destroy(struct mqfs_node *mn);
239 static void	mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn);
240 static void	mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn);
241 static int	mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn);
242 static int	mqfs_prison_remove(void *obj, void *data);
243 
244 /*
245  * Message queue construction and manipulation
246  */
247 static struct mqueue	*mqueue_alloc(const struct mq_attr *attr);
248 static void	mqueue_free(struct mqueue *mq);
249 static int	mqueue_send(struct mqueue *mq, const char *msg_ptr,
250 			size_t msg_len, unsigned msg_prio, int waitok,
251 			const struct timespec *abs_timeout);
252 static int	mqueue_receive(struct mqueue *mq, char *msg_ptr,
253 			size_t msg_len, unsigned *msg_prio, int waitok,
254 			const struct timespec *abs_timeout);
255 static int	_mqueue_send(struct mqueue *mq, struct mqueue_msg *msg,
256 			int timo);
257 static int	_mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg,
258 			int timo);
259 static void	mqueue_send_notification(struct mqueue *mq);
260 static void	mqueue_fdclose(struct thread *td, int fd, struct file *fp);
261 static void	mq_proc_exit(void *arg, struct proc *p);
262 
263 /*
264  * kqueue filters
265  */
266 static void	filt_mqdetach(struct knote *kn);
267 static int	filt_mqread(struct knote *kn, long hint);
268 static int	filt_mqwrite(struct knote *kn, long hint);
269 
270 struct filterops mq_rfiltops = {
271 	.f_isfd = 1,
272 	.f_detach = filt_mqdetach,
273 	.f_event = filt_mqread,
274 };
275 struct filterops mq_wfiltops = {
276 	.f_isfd = 1,
277 	.f_detach = filt_mqdetach,
278 	.f_event = filt_mqwrite,
279 };
280 
281 /*
282  * Initialize the fileno allocator
283  */
284 static void
285 mqfs_fileno_init(struct mqfs_info *mi)
286 {
287 	struct unrhdr *up;
288 
289 	up = new_unrhdr(1, INT_MAX, NULL);
290 	mi->mi_unrhdr = up;
291 }
292 
293 /*
294  * Tear down fileno bitmap
295  */
296 static void
297 mqfs_fileno_uninit(struct mqfs_info *mi)
298 {
299 	struct unrhdr *up;
300 
301 	up = mi->mi_unrhdr;
302 	mi->mi_unrhdr = NULL;
303 	delete_unrhdr(up);
304 }
305 
306 /*
307  * Allocate a file number
308  */
309 static void
310 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn)
311 {
312 	/* make sure our parent has a file number */
313 	if (mn->mn_parent && !mn->mn_parent->mn_fileno)
314 		mqfs_fileno_alloc(mi, mn->mn_parent);
315 
316 	switch (mn->mn_type) {
317 	case mqfstype_root:
318 	case mqfstype_dir:
319 	case mqfstype_file:
320 	case mqfstype_symlink:
321 		mn->mn_fileno = alloc_unr(mi->mi_unrhdr);
322 		break;
323 	case mqfstype_this:
324 		KASSERT(mn->mn_parent != NULL,
325 		    ("mqfstype_this node has no parent"));
326 		mn->mn_fileno = mn->mn_parent->mn_fileno;
327 		break;
328 	case mqfstype_parent:
329 		KASSERT(mn->mn_parent != NULL,
330 		    ("mqfstype_parent node has no parent"));
331 		if (mn->mn_parent == mi->mi_root) {
332 			mn->mn_fileno = mn->mn_parent->mn_fileno;
333 			break;
334 		}
335 		KASSERT(mn->mn_parent->mn_parent != NULL,
336 		    ("mqfstype_parent node has no grandparent"));
337 		mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno;
338 		break;
339 	default:
340 		KASSERT(0,
341 		    ("mqfs_fileno_alloc() called for unknown type node: %d",
342 			mn->mn_type));
343 		break;
344 	}
345 }
346 
347 /*
348  * Release a file number
349  */
350 static void
351 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn)
352 {
353 	switch (mn->mn_type) {
354 	case mqfstype_root:
355 	case mqfstype_dir:
356 	case mqfstype_file:
357 	case mqfstype_symlink:
358 		free_unr(mi->mi_unrhdr, mn->mn_fileno);
359 		break;
360 	case mqfstype_this:
361 	case mqfstype_parent:
362 		/* ignore these, as they don't "own" their file number */
363 		break;
364 	default:
365 		KASSERT(0,
366 		    ("mqfs_fileno_free() called for unknown type node: %d",
367 			mn->mn_type));
368 		break;
369 	}
370 }
371 
372 static __inline struct mqfs_node *
373 mqnode_alloc(void)
374 {
375 	return uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO);
376 }
377 
378 static __inline void
379 mqnode_free(struct mqfs_node *node)
380 {
381 	uma_zfree(mqnode_zone, node);
382 }
383 
384 static __inline void
385 mqnode_addref(struct mqfs_node *node)
386 {
387 	atomic_fetchadd_int(&node->mn_refcount, 1);
388 }
389 
390 static __inline void
391 mqnode_release(struct mqfs_node *node)
392 {
393 	struct mqfs_info *mqfs;
394 	int old, exp;
395 
396 	mqfs = node->mn_info;
397 	old = atomic_fetchadd_int(&node->mn_refcount, -1);
398 	if (node->mn_type == mqfstype_dir ||
399 	    node->mn_type == mqfstype_root)
400 		exp = 3; /* include . and .. */
401 	else
402 		exp = 1;
403 	if (old == exp) {
404 		int locked = sx_xlocked(&mqfs->mi_lock);
405 		if (!locked)
406 			sx_xlock(&mqfs->mi_lock);
407 		mqfs_destroy(node);
408 		if (!locked)
409 			sx_xunlock(&mqfs->mi_lock);
410 	}
411 }
412 
413 /*
414  * Add a node to a directory
415  */
416 static int
417 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node)
418 {
419 	KASSERT(parent != NULL, ("%s(): parent is NULL", __func__));
420 	KASSERT(parent->mn_info != NULL,
421 	    ("%s(): parent has no mn_info", __func__));
422 	KASSERT(parent->mn_type == mqfstype_dir ||
423 	    parent->mn_type == mqfstype_root,
424 	    ("%s(): parent is not a directory", __func__));
425 
426 	node->mn_info = parent->mn_info;
427 	node->mn_parent = parent;
428 	LIST_INIT(&node->mn_children);
429 	LIST_INIT(&node->mn_vnodes);
430 	LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling);
431 	mqnode_addref(parent);
432 	return (0);
433 }
434 
435 static struct mqfs_node *
436 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode,
437 	int nodetype)
438 {
439 	struct mqfs_node *node;
440 
441 	node = mqnode_alloc();
442 	strncpy(node->mn_name, name, namelen);
443 	node->mn_pr_root = cred->cr_prison->pr_root;
444 	node->mn_type = nodetype;
445 	node->mn_refcount = 1;
446 	vfs_timestamp(&node->mn_birth);
447 	node->mn_ctime = node->mn_atime = node->mn_mtime
448 		= node->mn_birth;
449 	node->mn_uid = cred->cr_uid;
450 	node->mn_gid = cred->cr_gid;
451 	node->mn_mode = mode;
452 	return (node);
453 }
454 
455 /*
456  * Create a file
457  */
458 static struct mqfs_node *
459 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen,
460 	struct ucred *cred, int mode)
461 {
462 	struct mqfs_node *node;
463 
464 	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file);
465 	if (mqfs_add_node(parent, node) != 0) {
466 		mqnode_free(node);
467 		return (NULL);
468 	}
469 	return (node);
470 }
471 
472 /*
473  * Add . and .. to a directory
474  */
475 static int
476 mqfs_fixup_dir(struct mqfs_node *parent)
477 {
478 	struct mqfs_node *dir;
479 
480 	dir = mqnode_alloc();
481 	dir->mn_name[0] = '.';
482 	dir->mn_type = mqfstype_this;
483 	dir->mn_refcount = 1;
484 	if (mqfs_add_node(parent, dir) != 0) {
485 		mqnode_free(dir);
486 		return (-1);
487 	}
488 
489 	dir = mqnode_alloc();
490 	dir->mn_name[0] = dir->mn_name[1] = '.';
491 	dir->mn_type = mqfstype_parent;
492 	dir->mn_refcount = 1;
493 
494 	if (mqfs_add_node(parent, dir) != 0) {
495 		mqnode_free(dir);
496 		return (-1);
497 	}
498 
499 	return (0);
500 }
501 
502 #ifdef notyet
503 
504 /*
505  * Create a directory
506  */
507 static struct mqfs_node *
508 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen,
509 	struct ucred *cred, int mode)
510 {
511 	struct mqfs_node *node;
512 
513 	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir);
514 	if (mqfs_add_node(parent, node) != 0) {
515 		mqnode_free(node);
516 		return (NULL);
517 	}
518 
519 	if (mqfs_fixup_dir(node) != 0) {
520 		mqfs_destroy(node);
521 		return (NULL);
522 	}
523 	return (node);
524 }
525 
526 /*
527  * Create a symlink
528  */
529 static struct mqfs_node *
530 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen,
531 	struct ucred *cred, int mode)
532 {
533 	struct mqfs_node *node;
534 
535 	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink);
536 	if (mqfs_add_node(parent, node) != 0) {
537 		mqnode_free(node);
538 		return (NULL);
539 	}
540 	return (node);
541 }
542 
543 #endif
544 
545 /*
546  * Destroy a node or a tree of nodes
547  */
548 static int
549 mqfs_destroy(struct mqfs_node *node)
550 {
551 	struct mqfs_node *parent;
552 
553 	KASSERT(node != NULL,
554 	    ("%s(): node is NULL", __func__));
555 	KASSERT(node->mn_info != NULL,
556 	    ("%s(): node has no mn_info", __func__));
557 
558 	/* destroy children */
559 	if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root)
560 		while (! LIST_EMPTY(&node->mn_children))
561 			mqfs_destroy(LIST_FIRST(&node->mn_children));
562 
563 	/* unlink from parent */
564 	if ((parent = node->mn_parent) != NULL) {
565 		KASSERT(parent->mn_info == node->mn_info,
566 		    ("%s(): parent has different mn_info", __func__));
567 		LIST_REMOVE(node, mn_sibling);
568 	}
569 
570 	if (node->mn_fileno != 0)
571 		mqfs_fileno_free(node->mn_info, node);
572 	if (node->mn_data != NULL)
573 		mqueue_free(node->mn_data);
574 	mqnode_free(node);
575 	return (0);
576 }
577 
578 /*
579  * Mount a mqfs instance
580  */
581 static int
582 mqfs_mount(struct mount *mp)
583 {
584 	struct statfs *sbp;
585 
586 	if (mp->mnt_flag & MNT_UPDATE)
587 		return (EOPNOTSUPP);
588 
589 	mp->mnt_data = &mqfs_data;
590 	MNT_ILOCK(mp);
591 	mp->mnt_flag |= MNT_LOCAL;
592 	MNT_IUNLOCK(mp);
593 	vfs_getnewfsid(mp);
594 
595 	sbp = &mp->mnt_stat;
596 	vfs_mountedfrom(mp, "mqueue");
597 	sbp->f_bsize = PAGE_SIZE;
598 	sbp->f_iosize = PAGE_SIZE;
599 	sbp->f_blocks = 1;
600 	sbp->f_bfree = 0;
601 	sbp->f_bavail = 0;
602 	sbp->f_files = 1;
603 	sbp->f_ffree = 0;
604 	return (0);
605 }
606 
607 /*
608  * Unmount a mqfs instance
609  */
610 static int
611 mqfs_unmount(struct mount *mp, int mntflags)
612 {
613 	int error;
614 
615 	error = vflush(mp, 0, (mntflags & MNT_FORCE) ?  FORCECLOSE : 0,
616 	    curthread);
617 	return (error);
618 }
619 
620 /*
621  * Return a root vnode
622  */
623 static int
624 mqfs_root(struct mount *mp, int flags, struct vnode **vpp)
625 {
626 	struct mqfs_info *mqfs;
627 	int ret;
628 
629 	mqfs = VFSTOMQFS(mp);
630 	ret = mqfs_allocv(mp, vpp, mqfs->mi_root);
631 	return (ret);
632 }
633 
634 /*
635  * Return filesystem stats
636  */
637 static int
638 mqfs_statfs(struct mount *mp, struct statfs *sbp)
639 {
640 	/* XXX update statistics */
641 	return (0);
642 }
643 
644 /*
645  * Initialize a mqfs instance
646  */
647 static int
648 mqfs_init(struct vfsconf *vfc)
649 {
650 	struct mqfs_node *root;
651 	struct mqfs_info *mi;
652 	osd_method_t methods[PR_MAXMETHOD] = {
653 	    [PR_METHOD_REMOVE] = mqfs_prison_remove,
654 	};
655 
656 	mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node),
657 		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
658 	mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue),
659 		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
660 	mvdata_zone = uma_zcreate("mvdata",
661 		sizeof(struct mqfs_vdata), NULL, NULL, NULL,
662 		NULL, UMA_ALIGN_PTR, 0);
663 	mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier),
664 		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
665 	mi = &mqfs_data;
666 	sx_init(&mi->mi_lock, "mqfs lock");
667 	/* set up the root directory */
668 	root = mqfs_create_node("/", 1, curthread->td_ucred, 01777,
669 		mqfstype_root);
670 	root->mn_info = mi;
671 	LIST_INIT(&root->mn_children);
672 	LIST_INIT(&root->mn_vnodes);
673 	mi->mi_root = root;
674 	mqfs_fileno_init(mi);
675 	mqfs_fileno_alloc(mi, root);
676 	mqfs_fixup_dir(root);
677 	exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL,
678 	    EVENTHANDLER_PRI_ANY);
679 	mq_fdclose = mqueue_fdclose;
680 	p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING);
681 	mqfs_osd_jail_slot = osd_jail_register(NULL, methods);
682 	return (0);
683 }
684 
685 /*
686  * Destroy a mqfs instance
687  */
688 static int
689 mqfs_uninit(struct vfsconf *vfc)
690 {
691 	struct mqfs_info *mi;
692 
693 	if (!unloadable)
694 		return (EOPNOTSUPP);
695 	osd_jail_deregister(mqfs_osd_jail_slot);
696 	EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
697 	mi = &mqfs_data;
698 	mqfs_destroy(mi->mi_root);
699 	mi->mi_root = NULL;
700 	mqfs_fileno_uninit(mi);
701 	sx_destroy(&mi->mi_lock);
702 	uma_zdestroy(mqnode_zone);
703 	uma_zdestroy(mqueue_zone);
704 	uma_zdestroy(mvdata_zone);
705 	uma_zdestroy(mqnoti_zone);
706 	return (0);
707 }
708 
709 /*
710  * task routine
711  */
712 static void
713 do_recycle(void *context, int pending __unused)
714 {
715 	struct vnode *vp = (struct vnode *)context;
716 
717 	vrecycle(vp);
718 	vdrop(vp);
719 }
720 
721 /*
722  * Allocate a vnode
723  */
724 static int
725 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
726 {
727 	struct mqfs_vdata *vd;
728 	struct mqfs_info  *mqfs;
729 	struct vnode *newvpp;
730 	int error;
731 
732 	mqfs = pn->mn_info;
733 	*vpp = NULL;
734 	sx_xlock(&mqfs->mi_lock);
735 	LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
736 		if (vd->mv_vnode->v_mount == mp) {
737 			vhold(vd->mv_vnode);
738 			break;
739 		}
740 	}
741 
742 	if (vd != NULL) {
743 found:
744 		*vpp = vd->mv_vnode;
745 		sx_xunlock(&mqfs->mi_lock);
746 		error = vget(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
747 		vdrop(*vpp);
748 		return (error);
749 	}
750 	sx_xunlock(&mqfs->mi_lock);
751 
752 	error = getnewvnode("mqueue", mp, &mqfs_vnodeops, &newvpp);
753 	if (error)
754 		return (error);
755 	vn_lock(newvpp, LK_EXCLUSIVE | LK_RETRY);
756 	error = insmntque(newvpp, mp);
757 	if (error != 0)
758 		return (error);
759 
760 	sx_xlock(&mqfs->mi_lock);
761 	/*
762 	 * Check if it has already been allocated
763 	 * while we were blocked.
764 	 */
765 	LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
766 		if (vd->mv_vnode->v_mount == mp) {
767 			vhold(vd->mv_vnode);
768 			sx_xunlock(&mqfs->mi_lock);
769 
770 			vgone(newvpp);
771 			vput(newvpp);
772 			goto found;
773 		}
774 	}
775 
776 	*vpp = newvpp;
777 
778 	vd = uma_zalloc(mvdata_zone, M_WAITOK);
779 	(*vpp)->v_data = vd;
780 	vd->mv_vnode = *vpp;
781 	vd->mv_node = pn;
782 	TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp);
783 	LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link);
784 	mqnode_addref(pn);
785 	switch (pn->mn_type) {
786 	case mqfstype_root:
787 		(*vpp)->v_vflag = VV_ROOT;
788 		/* fall through */
789 	case mqfstype_dir:
790 	case mqfstype_this:
791 	case mqfstype_parent:
792 		(*vpp)->v_type = VDIR;
793 		break;
794 	case mqfstype_file:
795 		(*vpp)->v_type = VREG;
796 		break;
797 	case mqfstype_symlink:
798 		(*vpp)->v_type = VLNK;
799 		break;
800 	case mqfstype_none:
801 		KASSERT(0, ("mqfs_allocv called for null node"));
802 	default:
803 		panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type);
804 	}
805 	sx_xunlock(&mqfs->mi_lock);
806 	return (0);
807 }
808 
809 /*
810  * Search a directory for an entry
811  */
812 static struct mqfs_node *
813 mqfs_search(struct mqfs_node *pd, const char *name, int len, struct ucred *cred)
814 {
815 	struct mqfs_node *pn;
816 	const void *pr_root;
817 
818 	sx_assert(&pd->mn_info->mi_lock, SX_LOCKED);
819 	pr_root = cred->cr_prison->pr_root;
820 	LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
821 		/* Only match names within the same prison root directory */
822 		if ((pn->mn_pr_root == NULL || pn->mn_pr_root == pr_root) &&
823 		    strncmp(pn->mn_name, name, len) == 0 &&
824 		    pn->mn_name[len] == '\0')
825 			return (pn);
826 	}
827 	return (NULL);
828 }
829 
830 /*
831  * Look up a file or directory.
832  */
833 static int
834 mqfs_lookupx(struct vop_cachedlookup_args *ap)
835 {
836 	struct componentname *cnp;
837 	struct vnode *dvp, **vpp;
838 	struct mqfs_node *pd;
839 	struct mqfs_node *pn;
840 	struct mqfs_info *mqfs;
841 	int nameiop, flags, error, namelen;
842 	char *pname;
843 	struct thread *td;
844 
845 	cnp = ap->a_cnp;
846 	vpp = ap->a_vpp;
847 	dvp = ap->a_dvp;
848 	pname = cnp->cn_nameptr;
849 	namelen = cnp->cn_namelen;
850 	td = cnp->cn_thread;
851 	flags = cnp->cn_flags;
852 	nameiop = cnp->cn_nameiop;
853 	pd = VTON(dvp);
854 	pn = NULL;
855 	mqfs = pd->mn_info;
856 	*vpp = NULLVP;
857 
858 	if (dvp->v_type != VDIR)
859 		return (ENOTDIR);
860 
861 	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread);
862 	if (error)
863 		return (error);
864 
865 	/* shortcut: check if the name is too long */
866 	if (cnp->cn_namelen >= MQFS_NAMELEN)
867 		return (ENOENT);
868 
869 	/* self */
870 	if (namelen == 1 && pname[0] == '.') {
871 		if ((flags & ISLASTCN) && nameiop != LOOKUP)
872 			return (EINVAL);
873 		pn = pd;
874 		*vpp = dvp;
875 		VREF(dvp);
876 		return (0);
877 	}
878 
879 	/* parent */
880 	if (cnp->cn_flags & ISDOTDOT) {
881 		if (dvp->v_vflag & VV_ROOT)
882 			return (EIO);
883 		if ((flags & ISLASTCN) && nameiop != LOOKUP)
884 			return (EINVAL);
885 		VOP_UNLOCK(dvp, 0);
886 		KASSERT(pd->mn_parent, ("non-root directory has no parent"));
887 		pn = pd->mn_parent;
888 		error = mqfs_allocv(dvp->v_mount, vpp, pn);
889 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
890 		return (error);
891 	}
892 
893 	/* named node */
894 	sx_xlock(&mqfs->mi_lock);
895 	pn = mqfs_search(pd, pname, namelen, cnp->cn_cred);
896 	if (pn != NULL)
897 		mqnode_addref(pn);
898 	sx_xunlock(&mqfs->mi_lock);
899 
900 	/* found */
901 	if (pn != NULL) {
902 		/* DELETE */
903 		if (nameiop == DELETE && (flags & ISLASTCN)) {
904 			error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
905 			if (error) {
906 				mqnode_release(pn);
907 				return (error);
908 			}
909 			if (*vpp == dvp) {
910 				VREF(dvp);
911 				*vpp = dvp;
912 				mqnode_release(pn);
913 				return (0);
914 			}
915 		}
916 
917 		/* allocate vnode */
918 		error = mqfs_allocv(dvp->v_mount, vpp, pn);
919 		mqnode_release(pn);
920 		if (error == 0 && cnp->cn_flags & MAKEENTRY)
921 			cache_enter(dvp, *vpp, cnp);
922 		return (error);
923 	}
924 
925 	/* not found */
926 
927 	/* Will we be creating a new entry in the directory? */
928 	if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT)
929 	    && (flags & ISLASTCN)) {
930 		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
931 		if (error)
932 			return (error);
933 		cnp->cn_flags |= SAVENAME;
934 		return (EJUSTRETURN);
935 	}
936 	return (ENOENT);
937 }
938 
939 #if 0
940 struct vop_lookup_args {
941 	struct vop_generic_args a_gen;
942 	struct vnode *a_dvp;
943 	struct vnode **a_vpp;
944 	struct componentname *a_cnp;
945 };
946 #endif
947 
948 /*
949  * vnode lookup operation
950  */
951 static int
952 mqfs_lookup(struct vop_cachedlookup_args *ap)
953 {
954 	int rc;
955 
956 	rc = mqfs_lookupx(ap);
957 	return (rc);
958 }
959 
960 #if 0
961 struct vop_create_args {
962 	struct vnode *a_dvp;
963 	struct vnode **a_vpp;
964 	struct componentname *a_cnp;
965 	struct vattr *a_vap;
966 };
967 #endif
968 
969 /*
970  * vnode creation operation
971  */
972 static int
973 mqfs_create(struct vop_create_args *ap)
974 {
975 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
976 	struct componentname *cnp = ap->a_cnp;
977 	struct mqfs_node *pd;
978 	struct mqfs_node *pn;
979 	struct mqueue *mq;
980 	int error;
981 
982 	pd = VTON(ap->a_dvp);
983 	if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
984 		return (ENOTDIR);
985 	mq = mqueue_alloc(NULL);
986 	if (mq == NULL)
987 		return (EAGAIN);
988 	sx_xlock(&mqfs->mi_lock);
989 	if ((cnp->cn_flags & HASBUF) == 0)
990 		panic("%s: no name", __func__);
991 	pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen,
992 		cnp->cn_cred, ap->a_vap->va_mode);
993 	if (pn == NULL) {
994 		sx_xunlock(&mqfs->mi_lock);
995 		error = ENOSPC;
996 	} else {
997 		mqnode_addref(pn);
998 		sx_xunlock(&mqfs->mi_lock);
999 		error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1000 		mqnode_release(pn);
1001 		if (error)
1002 			mqfs_destroy(pn);
1003 		else
1004 			pn->mn_data = mq;
1005 	}
1006 	if (error)
1007 		mqueue_free(mq);
1008 	return (error);
1009 }
1010 
1011 /*
1012  * Remove an entry
1013  */
1014 static int
1015 do_unlink(struct mqfs_node *pn, struct ucred *ucred)
1016 {
1017 	struct mqfs_node *parent;
1018 	struct mqfs_vdata *vd;
1019 	int error = 0;
1020 
1021 	sx_assert(&pn->mn_info->mi_lock, SX_LOCKED);
1022 
1023 	if (ucred->cr_uid != pn->mn_uid &&
1024 	    (error = priv_check_cred(ucred, PRIV_MQ_ADMIN, 0)) != 0)
1025 		error = EACCES;
1026 	else if (!pn->mn_deleted) {
1027 		parent = pn->mn_parent;
1028 		pn->mn_parent = NULL;
1029 		pn->mn_deleted = 1;
1030 		LIST_REMOVE(pn, mn_sibling);
1031 		LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
1032 			cache_purge(vd->mv_vnode);
1033 			vhold(vd->mv_vnode);
1034 			taskqueue_enqueue(taskqueue_thread, &vd->mv_task);
1035 		}
1036 		mqnode_release(pn);
1037 		mqnode_release(parent);
1038 	} else
1039 		error = ENOENT;
1040 	return (error);
1041 }
1042 
1043 #if 0
1044 struct vop_remove_args {
1045 	struct vnode *a_dvp;
1046 	struct vnode *a_vp;
1047 	struct componentname *a_cnp;
1048 };
1049 #endif
1050 
1051 /*
1052  * vnode removal operation
1053  */
1054 static int
1055 mqfs_remove(struct vop_remove_args *ap)
1056 {
1057 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1058 	struct mqfs_node *pn;
1059 	int error;
1060 
1061 	if (ap->a_vp->v_type == VDIR)
1062                 return (EPERM);
1063 	pn = VTON(ap->a_vp);
1064 	sx_xlock(&mqfs->mi_lock);
1065 	error = do_unlink(pn, ap->a_cnp->cn_cred);
1066 	sx_xunlock(&mqfs->mi_lock);
1067 	return (error);
1068 }
1069 
1070 #if 0
1071 struct vop_inactive_args {
1072 	struct vnode *a_vp;
1073 	struct thread *a_td;
1074 };
1075 #endif
1076 
1077 static int
1078 mqfs_inactive(struct vop_inactive_args *ap)
1079 {
1080 	struct mqfs_node *pn = VTON(ap->a_vp);
1081 
1082 	if (pn->mn_deleted)
1083 		vrecycle(ap->a_vp);
1084 	return (0);
1085 }
1086 
1087 #if 0
1088 struct vop_reclaim_args {
1089 	struct vop_generic_args a_gen;
1090 	struct vnode *a_vp;
1091 	struct thread *a_td;
1092 };
1093 #endif
1094 
1095 static int
1096 mqfs_reclaim(struct vop_reclaim_args *ap)
1097 {
1098 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount);
1099 	struct vnode *vp = ap->a_vp;
1100 	struct mqfs_node *pn;
1101 	struct mqfs_vdata *vd;
1102 
1103 	vd = vp->v_data;
1104 	pn = vd->mv_node;
1105 	sx_xlock(&mqfs->mi_lock);
1106 	vp->v_data = NULL;
1107 	LIST_REMOVE(vd, mv_link);
1108 	uma_zfree(mvdata_zone, vd);
1109 	mqnode_release(pn);
1110 	sx_xunlock(&mqfs->mi_lock);
1111 	return (0);
1112 }
1113 
1114 #if 0
1115 struct vop_open_args {
1116 	struct vop_generic_args a_gen;
1117 	struct vnode *a_vp;
1118 	int a_mode;
1119 	struct ucred *a_cred;
1120 	struct thread *a_td;
1121 	struct file *a_fp;
1122 };
1123 #endif
1124 
1125 static int
1126 mqfs_open(struct vop_open_args *ap)
1127 {
1128 	return (0);
1129 }
1130 
1131 #if 0
1132 struct vop_close_args {
1133 	struct vop_generic_args a_gen;
1134 	struct vnode *a_vp;
1135 	int a_fflag;
1136 	struct ucred *a_cred;
1137 	struct thread *a_td;
1138 };
1139 #endif
1140 
1141 static int
1142 mqfs_close(struct vop_close_args *ap)
1143 {
1144 	return (0);
1145 }
1146 
1147 #if 0
1148 struct vop_access_args {
1149 	struct vop_generic_args a_gen;
1150 	struct vnode *a_vp;
1151 	accmode_t a_accmode;
1152 	struct ucred *a_cred;
1153 	struct thread *a_td;
1154 };
1155 #endif
1156 
1157 /*
1158  * Verify permissions
1159  */
1160 static int
1161 mqfs_access(struct vop_access_args *ap)
1162 {
1163 	struct vnode *vp = ap->a_vp;
1164 	struct vattr vattr;
1165 	int error;
1166 
1167 	error = VOP_GETATTR(vp, &vattr, ap->a_cred);
1168 	if (error)
1169 		return (error);
1170 	error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid,
1171 	    vattr.va_gid, ap->a_accmode, ap->a_cred, NULL);
1172 	return (error);
1173 }
1174 
1175 #if 0
1176 struct vop_getattr_args {
1177 	struct vop_generic_args a_gen;
1178 	struct vnode *a_vp;
1179 	struct vattr *a_vap;
1180 	struct ucred *a_cred;
1181 };
1182 #endif
1183 
1184 /*
1185  * Get file attributes
1186  */
1187 static int
1188 mqfs_getattr(struct vop_getattr_args *ap)
1189 {
1190 	struct vnode *vp = ap->a_vp;
1191 	struct mqfs_node *pn = VTON(vp);
1192 	struct vattr *vap = ap->a_vap;
1193 	int error = 0;
1194 
1195 	vap->va_type = vp->v_type;
1196 	vap->va_mode = pn->mn_mode;
1197 	vap->va_nlink = 1;
1198 	vap->va_uid = pn->mn_uid;
1199 	vap->va_gid = pn->mn_gid;
1200 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
1201 	vap->va_fileid = pn->mn_fileno;
1202 	vap->va_size = 0;
1203 	vap->va_blocksize = PAGE_SIZE;
1204 	vap->va_bytes = vap->va_size = 0;
1205 	vap->va_atime = pn->mn_atime;
1206 	vap->va_mtime = pn->mn_mtime;
1207 	vap->va_ctime = pn->mn_ctime;
1208 	vap->va_birthtime = pn->mn_birth;
1209 	vap->va_gen = 0;
1210 	vap->va_flags = 0;
1211 	vap->va_rdev = NODEV;
1212 	vap->va_bytes = 0;
1213 	vap->va_filerev = 0;
1214 	return (error);
1215 }
1216 
1217 #if 0
1218 struct vop_setattr_args {
1219 	struct vop_generic_args a_gen;
1220 	struct vnode *a_vp;
1221 	struct vattr *a_vap;
1222 	struct ucred *a_cred;
1223 };
1224 #endif
1225 /*
1226  * Set attributes
1227  */
1228 static int
1229 mqfs_setattr(struct vop_setattr_args *ap)
1230 {
1231 	struct mqfs_node *pn;
1232 	struct vattr *vap;
1233 	struct vnode *vp;
1234 	struct thread *td;
1235 	int c, error;
1236 	uid_t uid;
1237 	gid_t gid;
1238 
1239 	td = curthread;
1240 	vap = ap->a_vap;
1241 	vp = ap->a_vp;
1242 	if ((vap->va_type != VNON) ||
1243 	    (vap->va_nlink != VNOVAL) ||
1244 	    (vap->va_fsid != VNOVAL) ||
1245 	    (vap->va_fileid != VNOVAL) ||
1246 	    (vap->va_blocksize != VNOVAL) ||
1247 	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1248 	    (vap->va_rdev != VNOVAL) ||
1249 	    ((int)vap->va_bytes != VNOVAL) ||
1250 	    (vap->va_gen != VNOVAL)) {
1251 		return (EINVAL);
1252 	}
1253 
1254 	pn = VTON(vp);
1255 
1256 	error = c = 0;
1257 	if (vap->va_uid == (uid_t)VNOVAL)
1258 		uid = pn->mn_uid;
1259 	else
1260 		uid = vap->va_uid;
1261 	if (vap->va_gid == (gid_t)VNOVAL)
1262 		gid = pn->mn_gid;
1263 	else
1264 		gid = vap->va_gid;
1265 
1266 	if (uid != pn->mn_uid || gid != pn->mn_gid) {
1267 		/*
1268 		 * To modify the ownership of a file, must possess VADMIN
1269 		 * for that file.
1270 		 */
1271 		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)))
1272 			return (error);
1273 
1274 		/*
1275 		 * XXXRW: Why is there a privilege check here: shouldn't the
1276 		 * check in VOP_ACCESS() be enough?  Also, are the group bits
1277 		 * below definitely right?
1278 		 */
1279 		if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid ||
1280 		    (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) &&
1281 		    (error = priv_check(td, PRIV_MQ_ADMIN)) != 0)
1282 			return (error);
1283 		pn->mn_uid = uid;
1284 		pn->mn_gid = gid;
1285 		c = 1;
1286 	}
1287 
1288 	if (vap->va_mode != (mode_t)VNOVAL) {
1289 		if ((ap->a_cred->cr_uid != pn->mn_uid) &&
1290 		    (error = priv_check(td, PRIV_MQ_ADMIN)))
1291 			return (error);
1292 		pn->mn_mode = vap->va_mode;
1293 		c = 1;
1294 	}
1295 
1296 	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1297 		/* See the comment in ufs_vnops::ufs_setattr(). */
1298 		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
1299 		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
1300 		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
1301 			return (error);
1302 		if (vap->va_atime.tv_sec != VNOVAL) {
1303 			pn->mn_atime = vap->va_atime;
1304 		}
1305 		if (vap->va_mtime.tv_sec != VNOVAL) {
1306 			pn->mn_mtime = vap->va_mtime;
1307 		}
1308 		c = 1;
1309 	}
1310 	if (c) {
1311 		vfs_timestamp(&pn->mn_ctime);
1312 	}
1313 	return (0);
1314 }
1315 
1316 #if 0
1317 struct vop_read_args {
1318 	struct vop_generic_args a_gen;
1319 	struct vnode *a_vp;
1320 	struct uio *a_uio;
1321 	int a_ioflag;
1322 	struct ucred *a_cred;
1323 };
1324 #endif
1325 
1326 /*
1327  * Read from a file
1328  */
1329 static int
1330 mqfs_read(struct vop_read_args *ap)
1331 {
1332 	char buf[80];
1333 	struct vnode *vp = ap->a_vp;
1334 	struct uio *uio = ap->a_uio;
1335 	struct mqfs_node *pn;
1336 	struct mqueue *mq;
1337 	int len, error;
1338 
1339 	if (vp->v_type != VREG)
1340 		return (EINVAL);
1341 
1342 	pn = VTON(vp);
1343 	mq = VTOMQ(vp);
1344 	snprintf(buf, sizeof(buf),
1345 		"QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n",
1346 		mq->mq_totalbytes,
1347 		mq->mq_maxmsg,
1348 		mq->mq_curmsgs,
1349 		mq->mq_msgsize);
1350 	buf[sizeof(buf)-1] = '\0';
1351 	len = strlen(buf);
1352 	error = uiomove_frombuf(buf, len, uio);
1353 	return (error);
1354 }
1355 
1356 #if 0
1357 struct vop_readdir_args {
1358 	struct vop_generic_args a_gen;
1359 	struct vnode *a_vp;
1360 	struct uio *a_uio;
1361 	struct ucred *a_cred;
1362 	int *a_eofflag;
1363 	int *a_ncookies;
1364 	u_long **a_cookies;
1365 };
1366 #endif
1367 
1368 /*
1369  * Return directory entries.
1370  */
1371 static int
1372 mqfs_readdir(struct vop_readdir_args *ap)
1373 {
1374 	struct vnode *vp;
1375 	struct mqfs_info *mi;
1376 	struct mqfs_node *pd;
1377 	struct mqfs_node *pn;
1378 	struct dirent entry;
1379 	struct uio *uio;
1380 	const void *pr_root;
1381 	int *tmp_ncookies = NULL;
1382 	off_t offset;
1383 	int error, i;
1384 
1385 	vp = ap->a_vp;
1386 	mi = VFSTOMQFS(vp->v_mount);
1387 	pd = VTON(vp);
1388 	uio = ap->a_uio;
1389 
1390 	if (vp->v_type != VDIR)
1391 		return (ENOTDIR);
1392 
1393 	if (uio->uio_offset < 0)
1394 		return (EINVAL);
1395 
1396 	if (ap->a_ncookies != NULL) {
1397 		tmp_ncookies = ap->a_ncookies;
1398 		*ap->a_ncookies = 0;
1399 		ap->a_ncookies = NULL;
1400         }
1401 
1402 	error = 0;
1403 	offset = 0;
1404 
1405 	pr_root = ap->a_cred->cr_prison->pr_root;
1406 	sx_xlock(&mi->mi_lock);
1407 
1408 	LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
1409 		entry.d_reclen = sizeof(entry);
1410 
1411 		/*
1412 		 * Only show names within the same prison root directory
1413 		 * (or not associated with a prison, e.g. "." and "..").
1414 		 */
1415 		if (pn->mn_pr_root != NULL && pn->mn_pr_root != pr_root)
1416 			continue;
1417 		if (!pn->mn_fileno)
1418 			mqfs_fileno_alloc(mi, pn);
1419 		entry.d_fileno = pn->mn_fileno;
1420 		for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i)
1421 			entry.d_name[i] = pn->mn_name[i];
1422 		entry.d_name[i] = 0;
1423 		entry.d_namlen = i;
1424 		switch (pn->mn_type) {
1425 		case mqfstype_root:
1426 		case mqfstype_dir:
1427 		case mqfstype_this:
1428 		case mqfstype_parent:
1429 			entry.d_type = DT_DIR;
1430 			break;
1431 		case mqfstype_file:
1432 			entry.d_type = DT_REG;
1433 			break;
1434 		case mqfstype_symlink:
1435 			entry.d_type = DT_LNK;
1436 			break;
1437 		default:
1438 			panic("%s has unexpected node type: %d", pn->mn_name,
1439 				pn->mn_type);
1440 		}
1441 		if (entry.d_reclen > uio->uio_resid)
1442                         break;
1443 		if (offset >= uio->uio_offset) {
1444 			error = vfs_read_dirent(ap, &entry, offset);
1445                         if (error)
1446                                 break;
1447                 }
1448                 offset += entry.d_reclen;
1449 	}
1450 	sx_xunlock(&mi->mi_lock);
1451 
1452 	uio->uio_offset = offset;
1453 
1454 	if (tmp_ncookies != NULL)
1455 		ap->a_ncookies = tmp_ncookies;
1456 
1457 	return (error);
1458 }
1459 
1460 #ifdef notyet
1461 
1462 #if 0
1463 struct vop_mkdir_args {
1464 	struct vnode *a_dvp;
1465 	struvt vnode **a_vpp;
1466 	struvt componentname *a_cnp;
1467 	struct vattr *a_vap;
1468 };
1469 #endif
1470 
1471 /*
1472  * Create a directory.
1473  */
1474 static int
1475 mqfs_mkdir(struct vop_mkdir_args *ap)
1476 {
1477 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1478 	struct componentname *cnp = ap->a_cnp;
1479 	struct mqfs_node *pd = VTON(ap->a_dvp);
1480 	struct mqfs_node *pn;
1481 	int error;
1482 
1483 	if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
1484 		return (ENOTDIR);
1485 	sx_xlock(&mqfs->mi_lock);
1486 	if ((cnp->cn_flags & HASBUF) == 0)
1487 		panic("%s: no name", __func__);
1488 	pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen,
1489 		ap->a_vap->cn_cred, ap->a_vap->va_mode);
1490 	if (pn != NULL)
1491 		mqnode_addref(pn);
1492 	sx_xunlock(&mqfs->mi_lock);
1493 	if (pn == NULL) {
1494 		error = ENOSPC;
1495 	} else {
1496 		error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1497 		mqnode_release(pn);
1498 	}
1499 	return (error);
1500 }
1501 
1502 #if 0
1503 struct vop_rmdir_args {
1504 	struct vnode *a_dvp;
1505 	struct vnode *a_vp;
1506 	struct componentname *a_cnp;
1507 };
1508 #endif
1509 
1510 /*
1511  * Remove a directory.
1512  */
1513 static int
1514 mqfs_rmdir(struct vop_rmdir_args *ap)
1515 {
1516 	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1517 	struct mqfs_node *pn = VTON(ap->a_vp);
1518 	struct mqfs_node *pt;
1519 
1520 	if (pn->mn_type != mqfstype_dir)
1521 		return (ENOTDIR);
1522 
1523 	sx_xlock(&mqfs->mi_lock);
1524 	if (pn->mn_deleted) {
1525 		sx_xunlock(&mqfs->mi_lock);
1526 		return (ENOENT);
1527 	}
1528 
1529 	pt = LIST_FIRST(&pn->mn_children);
1530 	pt = LIST_NEXT(pt, mn_sibling);
1531 	pt = LIST_NEXT(pt, mn_sibling);
1532 	if (pt != NULL) {
1533 		sx_xunlock(&mqfs->mi_lock);
1534 		return (ENOTEMPTY);
1535 	}
1536 	pt = pn->mn_parent;
1537 	pn->mn_parent = NULL;
1538 	pn->mn_deleted = 1;
1539 	LIST_REMOVE(pn, mn_sibling);
1540 	mqnode_release(pn);
1541 	mqnode_release(pt);
1542 	sx_xunlock(&mqfs->mi_lock);
1543 	cache_purge(ap->a_vp);
1544 	return (0);
1545 }
1546 
1547 #endif /* notyet */
1548 
1549 /*
1550  * See if this prison root is obsolete, and clean up associated queues if it is.
1551  */
1552 static int
1553 mqfs_prison_remove(void *obj, void *data __unused)
1554 {
1555 	const struct prison *pr = obj;
1556 	const struct prison *tpr;
1557 	struct mqfs_node *pn, *tpn;
1558 	int found;
1559 
1560 	found = 0;
1561 	TAILQ_FOREACH(tpr, &allprison, pr_list) {
1562 		if (tpr->pr_root == pr->pr_root && tpr != pr && tpr->pr_ref > 0)
1563 			found = 1;
1564 	}
1565 	if (!found) {
1566 		/*
1567 		 * No jails are rooted in this directory anymore,
1568 		 * so no queues should be either.
1569 		 */
1570 		sx_xlock(&mqfs_data.mi_lock);
1571 		LIST_FOREACH_SAFE(pn, &mqfs_data.mi_root->mn_children,
1572 		    mn_sibling, tpn) {
1573 			if (pn->mn_pr_root == pr->pr_root)
1574 				(void)do_unlink(pn, curthread->td_ucred);
1575 		}
1576 		sx_xunlock(&mqfs_data.mi_lock);
1577 	}
1578 	return (0);
1579 }
1580 
1581 /*
1582  * Allocate a message queue
1583  */
1584 static struct mqueue *
1585 mqueue_alloc(const struct mq_attr *attr)
1586 {
1587 	struct mqueue *mq;
1588 
1589 	if (curmq >= maxmq)
1590 		return (NULL);
1591 	mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO);
1592 	TAILQ_INIT(&mq->mq_msgq);
1593 	if (attr != NULL) {
1594 		mq->mq_maxmsg = attr->mq_maxmsg;
1595 		mq->mq_msgsize = attr->mq_msgsize;
1596 	} else {
1597 		mq->mq_maxmsg = default_maxmsg;
1598 		mq->mq_msgsize = default_msgsize;
1599 	}
1600 	mtx_init(&mq->mq_mutex, "mqueue lock", NULL, MTX_DEF);
1601 	knlist_init_mtx(&mq->mq_rsel.si_note, &mq->mq_mutex);
1602 	knlist_init_mtx(&mq->mq_wsel.si_note, &mq->mq_mutex);
1603 	atomic_add_int(&curmq, 1);
1604 	return (mq);
1605 }
1606 
1607 /*
1608  * Destroy a message queue
1609  */
1610 static void
1611 mqueue_free(struct mqueue *mq)
1612 {
1613 	struct mqueue_msg *msg;
1614 
1615 	while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) {
1616 		TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link);
1617 		free(msg, M_MQUEUEDATA);
1618 	}
1619 
1620 	mtx_destroy(&mq->mq_mutex);
1621 	seldrain(&mq->mq_rsel);
1622 	seldrain(&mq->mq_wsel);
1623 	knlist_destroy(&mq->mq_rsel.si_note);
1624 	knlist_destroy(&mq->mq_wsel.si_note);
1625 	uma_zfree(mqueue_zone, mq);
1626 	atomic_add_int(&curmq, -1);
1627 }
1628 
1629 /*
1630  * Load a message from user space
1631  */
1632 static struct mqueue_msg *
1633 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio)
1634 {
1635 	struct mqueue_msg *msg;
1636 	size_t len;
1637 	int error;
1638 
1639 	len = sizeof(struct mqueue_msg) + msg_size;
1640 	msg = malloc(len, M_MQUEUEDATA, M_WAITOK);
1641 	error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg),
1642 	    msg_size);
1643 	if (error) {
1644 		free(msg, M_MQUEUEDATA);
1645 		msg = NULL;
1646 	} else {
1647 		msg->msg_size = msg_size;
1648 		msg->msg_prio = msg_prio;
1649 	}
1650 	return (msg);
1651 }
1652 
1653 /*
1654  * Save a message to user space
1655  */
1656 static int
1657 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio)
1658 {
1659 	int error;
1660 
1661 	error = copyout(((char *)msg) + sizeof(*msg), msg_ptr,
1662 		msg->msg_size);
1663 	if (error == 0 && msg_prio != NULL)
1664 		error = copyout(&msg->msg_prio, msg_prio, sizeof(int));
1665 	return (error);
1666 }
1667 
1668 /*
1669  * Free a message's memory
1670  */
1671 static __inline void
1672 mqueue_freemsg(struct mqueue_msg *msg)
1673 {
1674 	free(msg, M_MQUEUEDATA);
1675 }
1676 
1677 /*
1678  * Send a message.  If waitok is false, the thread will not be
1679  * blocked when the queue is full; otherwise the absolute timeout
1680  * is honored.
1681  */
1682 int
1683 mqueue_send(struct mqueue *mq, const char *msg_ptr,
1684 	size_t msg_len, unsigned msg_prio, int waitok,
1685 	const struct timespec *abs_timeout)
1686 {
1687 	struct mqueue_msg *msg;
1688 	struct timespec ts, ts2;
1689 	struct timeval tv;
1690 	int error;
1691 
1692 	if (msg_prio >= MQ_PRIO_MAX)
1693 		return (EINVAL);
1694 	if (msg_len > mq->mq_msgsize)
1695 		return (EMSGSIZE);
1696 	msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio);
1697 	if (msg == NULL)
1698 		return (EFAULT);
1699 
1700 	/* O_NONBLOCK case */
1701 	if (!waitok) {
1702 		error = _mqueue_send(mq, msg, -1);
1703 		if (error)
1704 			goto bad;
1705 		return (0);
1706 	}
1707 
1708 	/* we allow a null timeout (wait forever) */
1709 	if (abs_timeout == NULL) {
1710 		error = _mqueue_send(mq, msg, 0);
1711 		if (error)
1712 			goto bad;
1713 		return (0);
1714 	}
1715 
1716 	/* send it before checking time */
1717 	error = _mqueue_send(mq, msg, -1);
1718 	if (error == 0)
1719 		return (0);
1720 
1721 	if (error != EAGAIN)
1722 		goto bad;
1723 
1724 	if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
1725 		error = EINVAL;
1726 		goto bad;
1727 	}
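	/*
	 * Convert the absolute timeout into a relative tick count for each
	 * attempt; if the timed send reports ETIMEDOUT, re-check against
	 * the absolute deadline and retry until it has really expired.
	 */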
1728 	for (;;) {
1729 		ts2 = *abs_timeout;
1730 		getnanotime(&ts);
1731 		timespecsub(&ts2, &ts);
1732 		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1733 			error = ETIMEDOUT;
1734 			break;
1735 		}
1736 		TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1737 		error = _mqueue_send(mq, msg, tvtohz(&tv));
1738 		if (error != ETIMEDOUT)
1739 			break;
1740 	}
1741 	if (error == 0)
1742 		return (0);
1743 bad:
1744 	mqueue_freemsg(msg);
1745 	return (error);
1746 }
1747 
1748 /*
1749  * Common routine to send a message
1750  */
1751 static int
1752 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
1753 {
1754 	struct mqueue_msg *msg2;
1755 	int error = 0;
1756 
1757 	mtx_lock(&mq->mq_mutex);
1758 	while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) {
1759 		if (timo < 0) {
1760 			mtx_unlock(&mq->mq_mutex);
1761 			return (EAGAIN);
1762 		}
1763 		mq->mq_senders++;
1764 		error = msleep(&mq->mq_senders, &mq->mq_mutex,
1765 			    PCATCH, "mqsend", timo);
1766 		mq->mq_senders--;
1767 		if (error == EAGAIN)
1768 			error = ETIMEDOUT;
1769 	}
1770 	if (mq->mq_curmsgs >= mq->mq_maxmsg) {
1771 		mtx_unlock(&mq->mq_mutex);
1772 		return (error);
1773 	}
1774 	error = 0;
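	/*
	 * Insert in priority order: the queue is kept sorted with the
	 * highest priority at the head, and messages of equal priority
	 * remain in FIFO order.
	 */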
1775 	if (TAILQ_EMPTY(&mq->mq_msgq)) {
1776 		TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link);
1777 	} else {
1778 		if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) {
1779 			TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link);
1780 		} else {
1781 			TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) {
1782 				if (msg2->msg_prio < msg->msg_prio)
1783 					break;
1784 			}
1785 			TAILQ_INSERT_BEFORE(msg2, msg, msg_link);
1786 		}
1787 	}
1788 	mq->mq_curmsgs++;
1789 	mq->mq_totalbytes += msg->msg_size;
1790 	if (mq->mq_receivers)
1791 		wakeup_one(&mq->mq_receivers);
1792 	else if (mq->mq_notifier != NULL)
1793 		mqueue_send_notification(mq);
1794 	if (mq->mq_flags & MQ_RSEL) {
1795 		mq->mq_flags &= ~MQ_RSEL;
1796 		selwakeup(&mq->mq_rsel);
1797 	}
1798 	KNOTE_LOCKED(&mq->mq_rsel.si_note, 0);
1799 	mtx_unlock(&mq->mq_mutex);
1800 	return (0);
1801 }
1802 
1803 /*
1804  * Send a realtime signal to the process that successfully registered
1805  * itself with mq_notify.
1806  */
1807 static void
1808 mqueue_send_notification(struct mqueue *mq)
1809 {
1810 	struct mqueue_notifier *nt;
1811 	struct thread *td;
1812 	struct proc *p;
1813 	int error;
1814 
1815 	mtx_assert(&mq->mq_mutex, MA_OWNED);
1816 	nt = mq->mq_notifier;
1817 	if (nt->nt_sigev.sigev_notify != SIGEV_NONE) {
1818 		p = nt->nt_proc;
1819 		error = sigev_findtd(p, &nt->nt_sigev, &td);
1820 		if (error) {
1821 			mq->mq_notifier = NULL;
1822 			return;
1823 		}
1824 		if (!KSI_ONQ(&nt->nt_ksi)) {
1825 			ksiginfo_set_sigev(&nt->nt_ksi, &nt->nt_sigev);
1826 			tdsendsignal(p, td, nt->nt_ksi.ksi_signo, &nt->nt_ksi);
1827 		}
1828 		PROC_UNLOCK(p);
1829 	}
1830 	mq->mq_notifier = NULL;
1831 }
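
/*
 * For illustration only (hedged userspace sketch, not part of this file):
 * a process arms this notification roughly as follows, and must re-register
 * after each delivery because the kernel clears mq_notifier once it fires:
 *
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *	    .sigev_signo = SIGUSR1 };
 *	mq_notify(mqd, &sev);
 */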
1832 
1833 /*
1834  * Receive a message.  If waitok is false, the thread will not be
1835  * blocked when the queue is empty; otherwise the absolute timeout
1836  * is honored.
1837  */
1838 int
1839 mqueue_receive(struct mqueue *mq, char *msg_ptr,
1840 	size_t msg_len, unsigned *msg_prio, int waitok,
1841 	const struct timespec *abs_timeout)
1842 {
1843 	struct mqueue_msg *msg;
1844 	struct timespec ts, ts2;
1845 	struct timeval tv;
1846 	int error;
1847 
1848 	if (msg_len < mq->mq_msgsize)
1849 		return (EMSGSIZE);
1850 
1851 	/* O_NONBLOCK case */
1852 	if (!waitok) {
1853 		error = _mqueue_recv(mq, &msg, -1);
1854 		if (error)
1855 			return (error);
1856 		goto received;
1857 	}
1858 
1859 	/* we allow a null timeout (wait forever). */
1860 	if (abs_timeout == NULL) {
1861 		error = _mqueue_recv(mq, &msg, 0);
1862 		if (error)
1863 			return (error);
1864 		goto received;
1865 	}
1866 
1867 	/* try to get a message before checking time */
1868 	error = _mqueue_recv(mq, &msg, -1);
1869 	if (error == 0)
1870 		goto received;
1871 
1872 	if (error != EAGAIN)
1873 		return (error);
1874 
1875 	if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
1876 		error = EINVAL;
1877 		return (error);
1878 	}
1879 
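	/* Same absolute-to-relative timeout retry loop as in mqueue_send(). */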
1880 	for (;;) {
1881 		ts2 = *abs_timeout;
1882 		getnanotime(&ts);
1883 		timespecsub(&ts2, &ts);
1884 		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1885 			error = ETIMEDOUT;
1886 			return (error);
1887 		}
1888 		TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1889 		error = _mqueue_recv(mq, &msg, tvtohz(&tv));
1890 		if (error == 0)
1891 			break;
1892 		if (error != ETIMEDOUT)
1893 			return (error);
1894 	}
1895 
1896 received:
1897 	error = mqueue_savemsg(msg, msg_ptr, msg_prio);
1898 	if (error == 0) {
1899 		curthread->td_retval[0] = msg->msg_size;
1900 		curthread->td_retval[1] = 0;
1901 	}
1902 	mqueue_freemsg(msg);
1903 	return (error);
1904 }
1905 
1906 /*
1907  * Common routine to receive a message
1908  */
1909 static int
1910 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo)
1911 {
1912 	int error = 0;
1913 
1914 	mtx_lock(&mq->mq_mutex);
1915 	while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) {
1916 		if (timo < 0) {
1917 			mtx_unlock(&mq->mq_mutex);
1918 			return (EAGAIN);
1919 		}
1920 		mq->mq_receivers++;
1921 		error = msleep(&mq->mq_receivers, &mq->mq_mutex,
1922 			    PCATCH, "mqrecv", timo);
1923 		mq->mq_receivers--;
1924 		if (error == EAGAIN)
1925 			error = ETIMEDOUT;
1926 	}
1927 	if (*msg != NULL) {
1928 		error = 0;
1929 		TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link);
1930 		mq->mq_curmsgs--;
1931 		mq->mq_totalbytes -= (*msg)->msg_size;
1932 		if (mq->mq_senders)
1933 			wakeup_one(&mq->mq_senders);
1934 		if (mq->mq_flags & MQ_WSEL) {
1935 			mq->mq_flags &= ~MQ_WSEL;
1936 			selwakeup(&mq->mq_wsel);
1937 		}
1938 		KNOTE_LOCKED(&mq->mq_wsel.si_note, 0);
1939 	}
1940 	if (mq->mq_notifier != NULL && mq->mq_receivers == 0 &&
1941 	    !TAILQ_EMPTY(&mq->mq_msgq)) {
1942 		mqueue_send_notification(mq);
1943 	}
1944 	mtx_unlock(&mq->mq_mutex);
1945 	return (error);
1946 }
1947 
1948 static __inline struct mqueue_notifier *
1949 notifier_alloc(void)
1950 {
1951 	return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO));
1952 }
1953 
1954 static __inline void
1955 notifier_free(struct mqueue_notifier *p)
1956 {
1957 	uma_zfree(mqnoti_zone, p);
1958 }
1959 
1960 static struct mqueue_notifier *
1961 notifier_search(struct proc *p, int fd)
1962 {
1963 	struct mqueue_notifier *nt;
1964 
1965 	LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) {
1966 		if (nt->nt_ksi.ksi_mqd == fd)
1967 			break;
1968 	}
1969 	return (nt);
1970 }
1971 
1972 static __inline void
1973 notifier_insert(struct proc *p, struct mqueue_notifier *nt)
1974 {
1975 	LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link);
1976 }
1977 
1978 static __inline void
1979 notifier_delete(struct proc *p, struct mqueue_notifier *nt)
1980 {
1981 	LIST_REMOVE(nt, nt_link);
1982 	notifier_free(nt);
1983 }
1984 
1985 static void
1986 notifier_remove(struct proc *p, struct mqueue *mq, int fd)
1987 {
1988 	struct mqueue_notifier *nt;
1989 
1990 	mtx_assert(&mq->mq_mutex, MA_OWNED);
1991 	PROC_LOCK(p);
1992 	nt = notifier_search(p, fd);
1993 	if (nt != NULL) {
1994 		if (mq->mq_notifier == nt)
1995 			mq->mq_notifier = NULL;
1996 		sigqueue_take(&nt->nt_ksi);
1997 		notifier_delete(p, nt);
1998 	}
1999 	PROC_UNLOCK(p);
2000 }
2001 
2002 static int
2003 kern_kmq_open(struct thread *td, const char *upath, int flags, mode_t mode,
2004     const struct mq_attr *attr)
2005 {
2006 	char path[MQFS_NAMELEN + 1];
2007 	struct mqfs_node *pn;
2008 	struct filedesc *fdp;
2009 	struct file *fp;
2010 	struct mqueue *mq;
2011 	int fd, error, len, cmode;
2012 
2013 	fdp = td->td_proc->p_fd;
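	/* Apply the file creation mask; the sticky bit is never allowed. */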
2014 	cmode = (((mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT);
2015 	mq = NULL;
2016 	if ((flags & O_CREAT) != 0 && attr != NULL) {
2017 		if (attr->mq_maxmsg <= 0 || attr->mq_maxmsg > maxmsg)
2018 			return (EINVAL);
2019 		if (attr->mq_msgsize <= 0 || attr->mq_msgsize > maxmsgsize)
2020 			return (EINVAL);
2021 	}
2022 
2023 	error = copyinstr(upath, path, MQFS_NAMELEN + 1, NULL);
2024         if (error)
2025 		return (error);
2026 
2027 	/*
2028 	 * The first character of name must be a slash  (/) character
2029 	 * and the remaining characters of name cannot include any slash
2030 	 * characters.
2031 	 */
2032 	len = strlen(path);
2033 	if (len < 2 || path[0] != '/' || strchr(path + 1, '/') != NULL)
2034 		return (EINVAL);
2035 
2036 	error = falloc(td, &fp, &fd, O_CLOEXEC);
2037 	if (error)
2038 		return (error);
2039 
2040 	sx_xlock(&mqfs_data.mi_lock);
2041 	pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1, td->td_ucred);
2042 	if (pn == NULL) {
2043 		if (!(flags & O_CREAT)) {
2044 			error = ENOENT;
2045 		} else {
2046 			mq = mqueue_alloc(attr);
2047 			if (mq == NULL) {
2048 				error = ENFILE;
2049 			} else {
2050 				pn = mqfs_create_file(mqfs_data.mi_root,
2051 				         path + 1, len - 1, td->td_ucred,
2052 					 cmode);
2053 				if (pn == NULL) {
2054 					error = ENOSPC;
2055 					mqueue_free(mq);
2056 				}
2057 			}
2058 		}
2059 
2060 		if (error == 0) {
2061 			pn->mn_data = mq;
2062 		}
2063 	} else {
2064 		if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) {
2065 			error = EEXIST;
2066 		} else {
2067 			accmode_t accmode = 0;
2068 
2069 			if (flags & FREAD)
2070 				accmode |= VREAD;
2071 			if (flags & FWRITE)
2072 				accmode |= VWRITE;
2073 			error = vaccess(VREG, pn->mn_mode, pn->mn_uid,
2074 				    pn->mn_gid, accmode, td->td_ucred, NULL);
2075 		}
2076 	}
2077 
2078 	if (error) {
2079 		sx_xunlock(&mqfs_data.mi_lock);
2080 		fdclose(td, fp, fd);
2081 		fdrop(fp, td);
2082 		return (error);
2083 	}
2084 
2085 	mqnode_addref(pn);
2086 	sx_xunlock(&mqfs_data.mi_lock);
2087 
2088 	finit(fp, flags & (FREAD | FWRITE | O_NONBLOCK), DTYPE_MQUEUE, pn,
2089 	    &mqueueops);
2090 
2091 	td->td_retval[0] = fd;
2092 	fdrop(fp, td);
2093 	return (0);
2094 }
2095 
2096 /*
2097  * Syscall to open a message queue.
2098  */
2099 int
2100 sys_kmq_open(struct thread *td, struct kmq_open_args *uap)
2101 {
2102 	struct mq_attr attr;
2103 	int flags, error;
2104 
2105 	if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
2106 		return (EINVAL);
2107 	flags = FFLAGS(uap->flags);
2108 	if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
2109 		error = copyin(uap->attr, &attr, sizeof(attr));
2110 		if (error)
2111 			return (error);
2112 	}
2113 	return (kern_kmq_open(td, uap->path, flags, uap->mode,
2114 	    uap->attr != NULL ? &attr : NULL));
2115 }
2116 
2117 /*
2118  * Syscall to unlink a message queue.
2119  */
2120 int
2121 sys_kmq_unlink(struct thread *td, struct kmq_unlink_args *uap)
2122 {
2123 	char path[MQFS_NAMELEN+1];
2124 	struct mqfs_node *pn;
2125 	int error, len;
2126 
2127 	error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL);
2128 	if (error)
2129 		return (error);
2130 
2131 	len = strlen(path);
2132 	if (len < 2 || path[0] != '/' || strchr(path + 1, '/') != NULL)
2133 		return (EINVAL);
2134 
2135 	sx_xlock(&mqfs_data.mi_lock);
2136 	pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1, td->td_ucred);
2137 	if (pn != NULL)
2138 		error = do_unlink(pn, td->td_ucred);
2139 	else
2140 		error = ENOENT;
2141 	sx_xunlock(&mqfs_data.mi_lock);
2142 	return (error);
2143 }
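
/*
 * Userland reaches this through mq_unlink(3); a minimal sketch (the
 * queue name is a placeholder):
 *
 *	#include <mqueue.h>
 *
 *	(void)mq_unlink("/myqueue");
 *
 * The same "/name" form required by kern_kmq_open() applies here.
 */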
2144 
2145 typedef int (*_fgetf)(struct thread *, int, cap_rights_t *, struct file **);
2146 
2147 /*
2148  * Get the message queue associated with the given file descriptor.
2149  */
2150 static int
2151 _getmq(struct thread *td, int fd, cap_rights_t *rightsp, _fgetf func,
2152        struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq)
2153 {
2154 	struct mqfs_node *pn;
2155 	int error;
2156 
2157 	error = func(td, fd, rightsp, fpp);
2158 	if (error)
2159 		return (error);
2160 	if (&mqueueops != (*fpp)->f_ops) {
2161 		fdrop(*fpp, td);
2162 		return (EBADF);
2163 	}
2164 	pn = (*fpp)->f_data;
2165 	if (ppn)
2166 		*ppn = pn;
2167 	if (pmq)
2168 		*pmq = pn->mn_data;
2169 	return (0);
2170 }
2171 
2172 static __inline int
2173 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn,
2174 	struct mqueue **pmq)
2175 {
2176 	cap_rights_t rights;
2177 
2178 	return _getmq(td, fd, cap_rights_init(&rights, CAP_EVENT), fget,
2179 	    fpp, ppn, pmq);
2180 }
2181 
2182 static __inline int
2183 getmq_read(struct thread *td, int fd, struct file **fpp,
2184 	 struct mqfs_node **ppn, struct mqueue **pmq)
2185 {
2186 	cap_rights_t rights;
2187 
2188 	return _getmq(td, fd, cap_rights_init(&rights, CAP_READ), fget_read,
2189 	    fpp, ppn, pmq);
2190 }
2191 
2192 static __inline int
2193 getmq_write(struct thread *td, int fd, struct file **fpp,
2194 	struct mqfs_node **ppn, struct mqueue **pmq)
2195 {
2196 	cap_rights_t rights;
2197 
2198 	return _getmq(td, fd, cap_rights_init(&rights, CAP_WRITE), fget_write,
2199 	    fpp, ppn, pmq);
2200 }
2201 
2202 static int
2203 kern_kmq_setattr(struct thread *td, int mqd, const struct mq_attr *attr,
2204     struct mq_attr *oattr)
2205 {
2206 	struct mqueue *mq;
2207 	struct file *fp;
2208 	u_int oflag, flag;
2209 	int error;
2210 
2211 	if (attr != NULL && (attr->mq_flags & ~O_NONBLOCK) != 0)
2212 		return (EINVAL);
2213 	error = getmq(td, mqd, &fp, NULL, &mq);
2214 	if (error)
2215 		return (error);
2216 	oattr->mq_maxmsg  = mq->mq_maxmsg;
2217 	oattr->mq_msgsize = mq->mq_msgsize;
2218 	oattr->mq_curmsgs = mq->mq_curmsgs;
2219 	if (attr != NULL) {
2220 		do {
2221 			oflag = flag = fp->f_flag;
2222 			flag &= ~O_NONBLOCK;
2223 			flag |= (attr->mq_flags & O_NONBLOCK);
2224 		} while (atomic_cmpset_int(&fp->f_flag, oflag, flag) == 0);
2225 	} else
2226 		oflag = fp->f_flag;
2227 	oattr->mq_flags = (O_NONBLOCK & oflag);
2228 	fdrop(fp, td);
2229 	return (error);
2230 }
2231 
2232 int
2233 sys_kmq_setattr(struct thread *td, struct kmq_setattr_args *uap)
2234 {
2235 	struct mq_attr attr, oattr;
2236 	int error;
2237 
2238 	if (uap->attr != NULL) {
2239 		error = copyin(uap->attr, &attr, sizeof(attr));
2240 		if (error != 0)
2241 			return (error);
2242 	}
2243 	error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
2244 	    &oattr);
2245 	if (error != 0)
2246 		return (error);
2247 	if (uap->oattr != NULL)
2248 		error = copyout(&oattr, uap->oattr, sizeof(oattr));
2249 	return (error);
2250 }
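
/*
 * The only attribute kern_kmq_setattr() changes is the O_NONBLOCK flag;
 * mq_maxmsg and mq_msgsize are fixed when the queue is created.  A
 * hedged sketch of toggling non-blocking mode through mq_setattr(3)
 * (mqd is assumed to be a descriptor obtained from mq_open()):
 *
 *	#include <mqueue.h>
 *	#include <string.h>
 *
 *	struct mq_attr newattr, oldattr;
 *
 *	memset(&newattr, 0, sizeof(newattr));
 *	newattr.mq_flags = O_NONBLOCK;
 *	mq_setattr(mqd, &newattr, &oldattr);
 *
 * oldattr then carries the previous flags along with the queue limits
 * and current message count copied out above.
 */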
2251 
2252 int
2253 sys_kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap)
2254 {
2255 	struct mqueue *mq;
2256 	struct file *fp;
2257 	struct timespec *abs_timeout, ets;
2258 	int error;
2259 	int waitok;
2260 
2261 	if (uap->abs_timeout != NULL) {
2262 		error = copyin(uap->abs_timeout, &ets, sizeof(ets));
2263 		if (error != 0)
2264 			return (error);
2265 		abs_timeout = &ets;
2266 	} else
2267 		abs_timeout = NULL;
2268 	error = getmq_read(td, uap->mqd, &fp, NULL, &mq);
2269 	if (error)
2270 		return (error);
2271 	waitok = !(fp->f_flag & O_NONBLOCK);
2272 	error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len,
2273 		uap->msg_prio, waitok, abs_timeout);
2274 	fdrop(fp, td);
2275 	return (error);
2276 }
2277 
2278 int
2279 sys_kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap)
2280 {
2281 	struct mqueue *mq;
2282 	struct file *fp;
2283 	struct timespec *abs_timeout, ets;
2284 	int error, waitok;
2285 
2286 	if (uap->abs_timeout != NULL) {
2287 		error = copyin(uap->abs_timeout, &ets, sizeof(ets));
2288 		if (error != 0)
2289 			return (error);
2290 		abs_timeout = &ets;
2291 	} else
2292 		abs_timeout = NULL;
2293 	error = getmq_write(td, uap->mqd, &fp, NULL, &mq);
2294 	if (error)
2295 		return (error);
2296 	waitok = !(fp->f_flag & O_NONBLOCK);
2297 	error = mqueue_send(mq, uap->msg_ptr, uap->msg_len,
2298 		uap->msg_prio, waitok, abs_timeout);
2299 	fdrop(fp, td);
2300 	return (error);
2301 }
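
/*
 * Both timed syscalls take an absolute timeout, which the POSIX
 * mq_timedsend(3)/mq_timedreceive(3) wrappers express against
 * CLOCK_REALTIME.  A hedged sketch (mqd, buf, len and prio are
 * placeholders):
 *
 *	#include <mqueue.h>
 *	#include <time.h>
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;
 *	mq_timedsend(mqd, buf, len, prio, &abstime);
 *	mq_timedreceive(mqd, buf, sizeof(buf), &prio, &abstime);
 *
 * With O_NONBLOCK set on the descriptor, waitok is false above and the
 * calls return immediately instead of sleeping.
 */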
2302 
2303 static int
2304 kern_kmq_notify(struct thread *td, int mqd, struct sigevent *sigev)
2305 {
2306 #ifdef CAPABILITIES
2307 	cap_rights_t rights;
2308 #endif
2309 	struct filedesc *fdp;
2310 	struct proc *p;
2311 	struct mqueue *mq;
2312 	struct file *fp, *fp2;
2313 	struct mqueue_notifier *nt, *newnt = NULL;
2314 	int error;
2315 
2316 	if (sigev != NULL) {
2317 		if (sigev->sigev_notify != SIGEV_SIGNAL &&
2318 		    sigev->sigev_notify != SIGEV_THREAD_ID &&
2319 		    sigev->sigev_notify != SIGEV_NONE)
2320 			return (EINVAL);
2321 		if ((sigev->sigev_notify == SIGEV_SIGNAL ||
2322 		    sigev->sigev_notify == SIGEV_THREAD_ID) &&
2323 		    !_SIG_VALID(sigev->sigev_signo))
2324 			return (EINVAL);
2325 	}
2326 	p = td->td_proc;
2327 	fdp = td->td_proc->p_fd;
2328 	error = getmq(td, mqd, &fp, NULL, &mq);
2329 	if (error)
2330 		return (error);
2331 again:
2332 	FILEDESC_SLOCK(fdp);
2333 	fp2 = fget_locked(fdp, mqd);
2334 	if (fp2 == NULL) {
2335 		FILEDESC_SUNLOCK(fdp);
2336 		error = EBADF;
2337 		goto out;
2338 	}
2339 #ifdef CAPABILITIES
2340 	error = cap_check(cap_rights(fdp, mqd),
2341 	    cap_rights_init(&rights, CAP_EVENT));
2342 	if (error) {
2343 		FILEDESC_SUNLOCK(fdp);
2344 		goto out;
2345 	}
2346 #endif
2347 	if (fp2 != fp) {
2348 		FILEDESC_SUNLOCK(fdp);
2349 		error = EBADF;
2350 		goto out;
2351 	}
2352 	mtx_lock(&mq->mq_mutex);
2353 	FILEDESC_SUNLOCK(fdp);
2354 	if (sigev != NULL) {
2355 		if (mq->mq_notifier != NULL) {
2356 			error = EBUSY;
2357 		} else {
2358 			PROC_LOCK(p);
2359 			nt = notifier_search(p, mqd);
2360 			if (nt == NULL) {
2361 				if (newnt == NULL) {
2362 					PROC_UNLOCK(p);
2363 					mtx_unlock(&mq->mq_mutex);
2364 					newnt = notifier_alloc();
2365 					goto again;
2366 				}
2367 			}
2368 
2369 			if (nt != NULL) {
2370 				sigqueue_take(&nt->nt_ksi);
2371 				if (newnt != NULL) {
2372 					notifier_free(newnt);
2373 					newnt = NULL;
2374 				}
2375 			} else {
2376 				nt = newnt;
2377 				newnt = NULL;
2378 				ksiginfo_init(&nt->nt_ksi);
2379 				nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT;
2380 				nt->nt_ksi.ksi_code = SI_MESGQ;
2381 				nt->nt_proc = p;
2382 				nt->nt_ksi.ksi_mqd = mqd;
2383 				notifier_insert(p, nt);
2384 			}
2385 			nt->nt_sigev = *sigev;
2386 			mq->mq_notifier = nt;
2387 			PROC_UNLOCK(p);
2388 			/*
2389 			 * If there are no receivers and the message
2390 			 * queue is not empty, send the notification
2391 			 * as soon as possible.
2392 			 */
2393 			if (mq->mq_receivers == 0 &&
2394 			    !TAILQ_EMPTY(&mq->mq_msgq))
2395 				mqueue_send_notification(mq);
2396 		}
2397 	} else {
2398 		notifier_remove(p, mq, mqd);
2399 	}
2400 	mtx_unlock(&mq->mq_mutex);
2401 
2402 out:
2403 	fdrop(fp, td);
2404 	if (newnt != NULL)
2405 		notifier_free(newnt);
2406 	return (error);
2407 }
2408 
2409 int
2410 sys_kmq_notify(struct thread *td, struct kmq_notify_args *uap)
2411 {
2412 	struct sigevent ev, *evp;
2413 	int error;
2414 
2415 	if (uap->sigev == NULL) {
2416 		evp = NULL;
2417 	} else {
2418 		error = copyin(uap->sigev, &ev, sizeof(ev));
2419 		if (error != 0)
2420 			return (error);
2421 		evp = &ev;
2422 	}
2423 	return (kern_kmq_notify(td, uap->mqd, evp));
2424 }
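
/*
 * A hedged userland sketch of registering for notification through
 * mq_notify(3), asking for SIGUSR1 when a message arrives on an empty
 * queue that has no blocked receivers (mqd is a placeholder):
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	struct sigevent sev;
 *
 *	memset(&sev, 0, sizeof(sev));
 *	sev.sigev_notify = SIGEV_SIGNAL;
 *	sev.sigev_signo = SIGUSR1;
 *	mq_notify(mqd, &sev);
 *
 * Passing a NULL sigevent takes the notifier_remove() branch above and
 * cancels the registration.
 */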
2425 
2426 static void
2427 mqueue_fdclose(struct thread *td, int fd, struct file *fp)
2428 {
2429 	struct filedesc *fdp;
2430 	struct mqueue *mq;
2431 
2432 	fdp = td->td_proc->p_fd;
2433 	FILEDESC_LOCK_ASSERT(fdp);
2434 
2435 	if (fp->f_ops == &mqueueops) {
2436 		mq = FPTOMQ(fp);
2437 		mtx_lock(&mq->mq_mutex);
2438 		notifier_remove(td->td_proc, mq, fd);
2439 
2440 		/* Have to wake up threads in the same process. */
2441 		if (mq->mq_flags & MQ_RSEL) {
2442 			mq->mq_flags &= ~MQ_RSEL;
2443 			selwakeup(&mq->mq_rsel);
2444 		}
2445 		if (mq->mq_flags & MQ_WSEL) {
2446 			mq->mq_flags &= ~MQ_WSEL;
2447 			selwakeup(&mq->mq_wsel);
2448 		}
2449 		mtx_unlock(&mq->mq_mutex);
2450 	}
2451 }
2452 
2453 static void
2454 mq_proc_exit(void *arg __unused, struct proc *p)
2455 {
2456 	struct filedesc *fdp;
2457 	struct file *fp;
2458 	struct mqueue *mq;
2459 	int i;
2460 
2461 	fdp = p->p_fd;
2462 	FILEDESC_SLOCK(fdp);
2463 	for (i = 0; i < fdp->fd_nfiles; ++i) {
2464 		fp = fget_locked(fdp, i);
2465 		if (fp != NULL && fp->f_ops == &mqueueops) {
2466 			mq = FPTOMQ(fp);
2467 			mtx_lock(&mq->mq_mutex);
2468 			notifier_remove(p, FPTOMQ(fp), i);
2469 			mtx_unlock(&mq->mq_mutex);
2470 		}
2471 	}
2472 	FILEDESC_SUNLOCK(fdp);
2473 	KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left"));
2474 }
2475 
2476 static int
2477 mqf_poll(struct file *fp, int events, struct ucred *active_cred,
2478 	struct thread *td)
2479 {
2480 	struct mqueue *mq = FPTOMQ(fp);
2481 	int revents = 0;
2482 
2483 	mtx_lock(&mq->mq_mutex);
2484 	if (events & (POLLIN | POLLRDNORM)) {
2485 		if (mq->mq_curmsgs) {
2486 			revents |= events & (POLLIN | POLLRDNORM);
2487 		} else {
2488 			mq->mq_flags |= MQ_RSEL;
2489 			selrecord(td, &mq->mq_rsel);
2490 		}
2491 	}
2492 	if (events & POLLOUT) {
2493 		if (mq->mq_curmsgs < mq->mq_maxmsg)
2494 			revents |= POLLOUT;
2495 		else {
2496 			mq->mq_flags |= MQ_WSEL;
2497 			selrecord(td, &mq->mq_wsel);
2498 		}
2499 	}
2500 	mtx_unlock(&mq->mq_mutex);
2501 	return (revents);
2502 }
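
/*
 * Message queue descriptors are ordinary file descriptors here, so
 * mqf_poll() lets userland wait on them with poll(2) or select(2).
 * A hedged sketch, assuming the raw descriptor is obtained with
 * FreeBSD's mq_getfd_np(3):
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = mq_getfd_np(mqd), .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *
 * POLLIN is reported while the queue holds messages and POLLOUT while
 * it has room, per the mq_curmsgs checks above.
 */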
2503 
2504 static int
2505 mqf_close(struct file *fp, struct thread *td)
2506 {
2507 	struct mqfs_node *pn;
2508 
2509 	fp->f_ops = &badfileops;
2510 	pn = fp->f_data;
2511 	fp->f_data = NULL;
2512 	sx_xlock(&mqfs_data.mi_lock);
2513 	mqnode_release(pn);
2514 	sx_xunlock(&mqfs_data.mi_lock);
2515 	return (0);
2516 }
2517 
2518 static int
2519 mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
2520 	struct thread *td)
2521 {
2522 	struct mqfs_node *pn = fp->f_data;
2523 
2524 	bzero(st, sizeof *st);
2525 	sx_xlock(&mqfs_data.mi_lock);
2526 	st->st_atim = pn->mn_atime;
2527 	st->st_mtim = pn->mn_mtime;
2528 	st->st_ctim = pn->mn_ctime;
2529 	st->st_birthtim = pn->mn_birth;
2530 	st->st_uid = pn->mn_uid;
2531 	st->st_gid = pn->mn_gid;
2532 	st->st_mode = S_IFIFO | pn->mn_mode;
2533 	sx_xunlock(&mqfs_data.mi_lock);
2534 	return (0);
2535 }
2536 
2537 static int
2538 mqf_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
2539     struct thread *td)
2540 {
2541 	struct mqfs_node *pn;
2542 	int error;
2543 
2544 	error = 0;
2545 	pn = fp->f_data;
2546 	sx_xlock(&mqfs_data.mi_lock);
2547 	error = vaccess(VREG, pn->mn_mode, pn->mn_uid, pn->mn_gid, VADMIN,
2548 	    active_cred, NULL);
2549 	if (error != 0)
2550 		goto out;
2551 	pn->mn_mode = mode & ACCESSPERMS;
2552 out:
2553 	sx_xunlock(&mqfs_data.mi_lock);
2554 	return (error);
2555 }
2556 
2557 static int
2558 mqf_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
2559     struct thread *td)
2560 {
2561 	struct mqfs_node *pn;
2562 	int error;
2563 
2564 	error = 0;
2565 	pn = fp->f_data;
2566 	sx_xlock(&mqfs_data.mi_lock);
2567 	if (uid == (uid_t)-1)
2568 		uid = pn->mn_uid;
2569 	if (gid == (gid_t)-1)
2570 		gid = pn->mn_gid;
2571 	if (((uid != pn->mn_uid && uid != active_cred->cr_uid) ||
2572 	    (gid != pn->mn_gid && !groupmember(gid, active_cred))) &&
2573 	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
2574 		goto out;
2575 	pn->mn_uid = uid;
2576 	pn->mn_gid = gid;
2577 out:
2578 	sx_xunlock(&mqfs_data.mi_lock);
2579 	return (error);
2580 }
2581 
2582 static int
2583 mqf_kqfilter(struct file *fp, struct knote *kn)
2584 {
2585 	struct mqueue *mq = FPTOMQ(fp);
2586 	int error = 0;
2587 
2588 	if (kn->kn_filter == EVFILT_READ) {
2589 		kn->kn_fop = &mq_rfiltops;
2590 		knlist_add(&mq->mq_rsel.si_note, kn, 0);
2591 	} else if (kn->kn_filter == EVFILT_WRITE) {
2592 		kn->kn_fop = &mq_wfiltops;
2593 		knlist_add(&mq->mq_wsel.si_note, kn, 0);
2594 	} else
2595 		error = EINVAL;
2596 	return (error);
2597 }
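
/*
 * The same descriptor can be registered with kqueue(2): EVFILT_READ
 * fires while the queue is non-empty and EVFILT_WRITE while it is not
 * full, per filt_mqread()/filt_mqwrite() below.  A hedged sketch (kq
 * and mqd are placeholders):
 *
 *	#include <sys/event.h>
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, mq_getfd_np(mqd), EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */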
2598 
2599 static void
2600 filt_mqdetach(struct knote *kn)
2601 {
2602 	struct mqueue *mq = FPTOMQ(kn->kn_fp);
2603 
2604 	if (kn->kn_filter == EVFILT_READ)
2605 		knlist_remove(&mq->mq_rsel.si_note, kn, 0);
2606 	else if (kn->kn_filter == EVFILT_WRITE)
2607 		knlist_remove(&mq->mq_wsel.si_note, kn, 0);
2608 	else
2609 		panic("filt_mqdetach");
2610 }
2611 
2612 static int
2613 filt_mqread(struct knote *kn, long hint)
2614 {
2615 	struct mqueue *mq = FPTOMQ(kn->kn_fp);
2616 
2617 	mtx_assert(&mq->mq_mutex, MA_OWNED);
2618 	return (mq->mq_curmsgs != 0);
2619 }
2620 
2621 static int
2622 filt_mqwrite(struct knote *kn, long hint)
2623 {
2624 	struct mqueue *mq = FPTOMQ(kn->kn_fp);
2625 
2626 	mtx_assert(&mq->mq_mutex, MA_OWNED);
2627 	return (mq->mq_curmsgs < mq->mq_maxmsg);
2628 }
2629 
2630 static int
2631 mqf_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2632 {
2633 
2634 	kif->kf_type = KF_TYPE_MQUEUE;
2635 	return (0);
2636 }
2637 
2638 static struct fileops mqueueops = {
2639 	.fo_read		= invfo_rdwr,
2640 	.fo_write		= invfo_rdwr,
2641 	.fo_truncate		= invfo_truncate,
2642 	.fo_ioctl		= invfo_ioctl,
2643 	.fo_poll		= mqf_poll,
2644 	.fo_kqfilter		= mqf_kqfilter,
2645 	.fo_stat		= mqf_stat,
2646 	.fo_close		= mqf_close,
2647 	.fo_chmod		= mqf_chmod,
2648 	.fo_chown		= mqf_chown,
2649 	.fo_sendfile		= invfo_sendfile,
2650 	.fo_fill_kinfo		= mqf_fill_kinfo,
2651 };
2652 
2653 static struct vop_vector mqfs_vnodeops = {
2654 	.vop_default 		= &default_vnodeops,
2655 	.vop_access		= mqfs_access,
2656 	.vop_cachedlookup	= mqfs_lookup,
2657 	.vop_lookup		= vfs_cache_lookup,
2658 	.vop_reclaim		= mqfs_reclaim,
2659 	.vop_create		= mqfs_create,
2660 	.vop_remove		= mqfs_remove,
2661 	.vop_inactive		= mqfs_inactive,
2662 	.vop_open		= mqfs_open,
2663 	.vop_close		= mqfs_close,
2664 	.vop_getattr		= mqfs_getattr,
2665 	.vop_setattr		= mqfs_setattr,
2666 	.vop_read		= mqfs_read,
2667 	.vop_write		= VOP_EOPNOTSUPP,
2668 	.vop_readdir		= mqfs_readdir,
2669 	.vop_mkdir		= VOP_EOPNOTSUPP,
2670 	.vop_rmdir		= VOP_EOPNOTSUPP
2671 };
2672 
2673 static struct vfsops mqfs_vfsops = {
2674 	.vfs_init 		= mqfs_init,
2675 	.vfs_uninit		= mqfs_uninit,
2676 	.vfs_mount		= mqfs_mount,
2677 	.vfs_unmount		= mqfs_unmount,
2678 	.vfs_root		= mqfs_root,
2679 	.vfs_statfs		= mqfs_statfs,
2680 };
2681 
2682 static struct vfsconf mqueuefs_vfsconf = {
2683 	.vfc_version = VFS_VERSION,
2684 	.vfc_name = "mqueuefs",
2685 	.vfc_vfsops = &mqfs_vfsops,
2686 	.vfc_typenum = -1,
2687 	.vfc_flags = VFCF_SYNTHETIC
2688 };
2689 
2690 static struct syscall_helper_data mq_syscalls[] = {
2691 	SYSCALL_INIT_HELPER(kmq_open),
2692 	SYSCALL_INIT_HELPER(kmq_setattr),
2693 	SYSCALL_INIT_HELPER(kmq_timedsend),
2694 	SYSCALL_INIT_HELPER(kmq_timedreceive),
2695 	SYSCALL_INIT_HELPER(kmq_notify),
2696 	SYSCALL_INIT_HELPER(kmq_unlink),
2697 	SYSCALL_INIT_LAST
2698 };
2699 
2700 #ifdef COMPAT_FREEBSD32
2701 #include <compat/freebsd32/freebsd32.h>
2702 #include <compat/freebsd32/freebsd32_proto.h>
2703 #include <compat/freebsd32/freebsd32_signal.h>
2704 #include <compat/freebsd32/freebsd32_syscall.h>
2705 #include <compat/freebsd32/freebsd32_util.h>
2706 
2707 static void
2708 mq_attr_from32(const struct mq_attr32 *from, struct mq_attr *to)
2709 {
2710 
2711 	to->mq_flags = from->mq_flags;
2712 	to->mq_maxmsg = from->mq_maxmsg;
2713 	to->mq_msgsize = from->mq_msgsize;
2714 	to->mq_curmsgs = from->mq_curmsgs;
2715 }
2716 
2717 static void
2718 mq_attr_to32(const struct mq_attr *from, struct mq_attr32 *to)
2719 {
2720 
2721 	to->mq_flags = from->mq_flags;
2722 	to->mq_maxmsg = from->mq_maxmsg;
2723 	to->mq_msgsize = from->mq_msgsize;
2724 	to->mq_curmsgs = from->mq_curmsgs;
2725 }
2726 
2727 int
2728 freebsd32_kmq_open(struct thread *td, struct freebsd32_kmq_open_args *uap)
2729 {
2730 	struct mq_attr attr;
2731 	struct mq_attr32 attr32;
2732 	int flags, error;
2733 
2734 	if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
2735 		return (EINVAL);
2736 	flags = FFLAGS(uap->flags);
2737 	if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
2738 		error = copyin(uap->attr, &attr32, sizeof(attr32));
2739 		if (error)
2740 			return (error);
2741 		mq_attr_from32(&attr32, &attr);
2742 	}
2743 	return (kern_kmq_open(td, uap->path, flags, uap->mode,
2744 	    uap->attr != NULL ? &attr : NULL));
2745 }
2746 
2747 int
2748 freebsd32_kmq_setattr(struct thread *td, struct freebsd32_kmq_setattr_args *uap)
2749 {
2750 	struct mq_attr attr, oattr;
2751 	struct mq_attr32 attr32, oattr32;
2752 	int error;
2753 
2754 	if (uap->attr != NULL) {
2755 		error = copyin(uap->attr, &attr32, sizeof(attr32));
2756 		if (error != 0)
2757 			return (error);
2758 		mq_attr_from32(&attr32, &attr);
2759 	}
2760 	error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
2761 	    &oattr);
2762 	if (error != 0)
2763 		return (error);
2764 	if (uap->oattr != NULL) {
2765 		mq_attr_to32(&oattr, &oattr32);
2766 		error = copyout(&oattr32, uap->oattr, sizeof(oattr32));
2767 	}
2768 	return (error);
2769 }
2770 
2771 int
2772 freebsd32_kmq_timedsend(struct thread *td,
2773     struct freebsd32_kmq_timedsend_args *uap)
2774 {
2775 	struct mqueue *mq;
2776 	struct file *fp;
2777 	struct timespec32 ets32;
2778 	struct timespec *abs_timeout, ets;
2779 	int error;
2780 	int waitok;
2781 
2782 	if (uap->abs_timeout != NULL) {
2783 		error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
2784 		if (error != 0)
2785 			return (error);
2786 		CP(ets32, ets, tv_sec);
2787 		CP(ets32, ets, tv_nsec);
2788 		abs_timeout = &ets;
2789 	} else
2790 		abs_timeout = NULL;
2791 	error = getmq_write(td, uap->mqd, &fp, NULL, &mq);
2792 	if (error)
2793 		return (error);
2794 	waitok = !(fp->f_flag & O_NONBLOCK);
2795 	error = mqueue_send(mq, uap->msg_ptr, uap->msg_len,
2796 		uap->msg_prio, waitok, abs_timeout);
2797 	fdrop(fp, td);
2798 	return (error);
2799 }
2800 
2801 int
2802 freebsd32_kmq_timedreceive(struct thread *td,
2803     struct freebsd32_kmq_timedreceive_args *uap)
2804 {
2805 	struct mqueue *mq;
2806 	struct file *fp;
2807 	struct timespec32 ets32;
2808 	struct timespec *abs_timeout, ets;
2809 	int error, waitok;
2810 
2811 	if (uap->abs_timeout != NULL) {
2812 		error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
2813 		if (error != 0)
2814 			return (error);
2815 		CP(ets32, ets, tv_sec);
2816 		CP(ets32, ets, tv_nsec);
2817 		abs_timeout = &ets;
2818 	} else
2819 		abs_timeout = NULL;
2820 	error = getmq_read(td, uap->mqd, &fp, NULL, &mq);
2821 	if (error)
2822 		return (error);
2823 	waitok = !(fp->f_flag & O_NONBLOCK);
2824 	error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len,
2825 		uap->msg_prio, waitok, abs_timeout);
2826 	fdrop(fp, td);
2827 	return (error);
2828 }
2829 
2830 int
2831 freebsd32_kmq_notify(struct thread *td, struct freebsd32_kmq_notify_args *uap)
2832 {
2833 	struct sigevent ev, *evp;
2834 	struct sigevent32 ev32;
2835 	int error;
2836 
2837 	if (uap->sigev == NULL) {
2838 		evp = NULL;
2839 	} else {
2840 		error = copyin(uap->sigev, &ev32, sizeof(ev32));
2841 		if (error != 0)
2842 			return (error);
2843 		error = convert_sigevent32(&ev32, &ev);
2844 		if (error != 0)
2845 			return (error);
2846 		evp = &ev;
2847 	}
2848 	return (kern_kmq_notify(td, uap->mqd, evp));
2849 }
2850 
2851 static struct syscall_helper_data mq32_syscalls[] = {
2852 	SYSCALL32_INIT_HELPER(freebsd32_kmq_open),
2853 	SYSCALL32_INIT_HELPER(freebsd32_kmq_setattr),
2854 	SYSCALL32_INIT_HELPER(freebsd32_kmq_timedsend),
2855 	SYSCALL32_INIT_HELPER(freebsd32_kmq_timedreceive),
2856 	SYSCALL32_INIT_HELPER(freebsd32_kmq_notify),
2857 	SYSCALL32_INIT_HELPER_COMPAT(kmq_unlink),
2858 	SYSCALL_INIT_LAST
2859 };
2860 #endif
2861 
2862 static int
2863 mqinit(void)
2864 {
2865 	int error;
2866 
2867 	error = syscall_helper_register(mq_syscalls, SY_THR_STATIC_KLD);
2868 	if (error != 0)
2869 		return (error);
2870 #ifdef COMPAT_FREEBSD32
2871 	error = syscall32_helper_register(mq32_syscalls, SY_THR_STATIC_KLD);
2872 	if (error != 0)
2873 		return (error);
2874 #endif
2875 	return (0);
2876 }
2877 
2878 static int
2879 mqunload(void)
2880 {
2881 
2882 #ifdef COMPAT_FREEBSD32
2883 	syscall32_helper_unregister(mq32_syscalls);
2884 #endif
2885 	syscall_helper_unregister(mq_syscalls);
2886 	return (0);
2887 }
2888 
2889 static int
2890 mq_modload(struct module *module, int cmd, void *arg)
2891 {
2892 	int error = 0;
2893 
2894 	error = vfs_modevent(module, cmd, arg);
2895 	if (error != 0)
2896 		return (error);
2897 
2898 	switch (cmd) {
2899 	case MOD_LOAD:
2900 		error = mqinit();
2901 		if (error != 0)
2902 			mqunload();
2903 		break;
2904 	case MOD_UNLOAD:
2905 		error = mqunload();
2906 		break;
2907 	default:
2908 		break;
2909 	}
2910 	return (error);
2911 }
2912 
2913 static moduledata_t mqueuefs_mod = {
2914 	"mqueuefs",
2915 	mq_modload,
2916 	&mqueuefs_vfsconf
2917 };
2918 DECLARE_MODULE(mqueuefs, mqueuefs_mod, SI_SUB_VFS, SI_ORDER_MIDDLE);
2919 MODULE_VERSION(mqueuefs, 1);
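
/*
 * Usage sketch (not part of this file): the module can be loaded with
 * "kldload mqueuefs", and the queue namespace can be exposed with
 * something like
 *
 *	mount -t mqueuefs null /mnt/mqueue
 *
 * after which each queue appears as a file under the mount point.
 */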
2920