1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
5 * Copyright (c) 2016-2017 Robert N. M. Watson
6 * All rights reserved.
7 *
8 * Portions of this software were developed by BAE Systems, the University of
9 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11 * Computing (TC) research program.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 */
35
36 /*
37 * POSIX message queue implementation.
38 *
39 * 1) A mqueue filesystem can be mounted; each message queue then appears
40 * in the mounted directory, where a user can change a queue's permissions
41 * and ownership, or remove a queue. Manually creating a file in the
42 * directory creates a message queue in the kernel with the same name and
43 * the default message queue attributes; this method is discouraged since
44 * the mq_open syscall allows the user to specify different attributes.
45 * The filesystem can also be mounted multiple times at different mount
46 * points, but every mount shows the same contents.
47 *
48 * 2) Standard POSIX message queue API. The syscalls do not go through the
49 * VFS layer but operate directly on the internal data structures; this
50 * lets a user use the IPC facility without mounting the mqueue filesystem.
51 */
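/*
 * Illustrative userspace usage (a sketch, not part of this file): the
 * facility is normally reached through the POSIX mq_*() interfaces; the
 * queue name "/myqueue" and the attribute values below are arbitrary
 * examples.
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *
 *	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 128 };
 *	mqd_t mqd = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *	mq_send(mqd, "hello", sizeof("hello"), 1);	(priority 1)
 *	char buf[128];
 *	unsigned int prio;
 *	mq_receive(mqd, buf, sizeof(buf), &prio);
 *	mq_close(mqd);
 *	mq_unlink("/myqueue");
 */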
52
53 #include "opt_capsicum.h"
54
55 #include <sys/param.h>
56 #include <sys/kernel.h>
57 #include <sys/systm.h>
58 #include <sys/limits.h>
59 #include <sys/malloc.h>
60 #include <sys/buf.h>
61 #include <sys/capsicum.h>
62 #include <sys/dirent.h>
63 #include <sys/event.h>
64 #include <sys/eventhandler.h>
65 #include <sys/fcntl.h>
66 #include <sys/file.h>
67 #include <sys/filedesc.h>
68 #include <sys/jail.h>
69 #include <sys/lock.h>
70 #include <sys/module.h>
71 #include <sys/mount.h>
72 #include <sys/mqueue.h>
73 #include <sys/mutex.h>
74 #include <sys/namei.h>
75 #include <sys/posix4.h>
76 #include <sys/poll.h>
77 #include <sys/priv.h>
78 #include <sys/proc.h>
79 #include <sys/queue.h>
80 #include <sys/sysproto.h>
81 #include <sys/stat.h>
82 #include <sys/syscall.h>
83 #include <sys/syscallsubr.h>
84 #include <sys/sysent.h>
85 #include <sys/sx.h>
86 #include <sys/sysctl.h>
87 #include <sys/taskqueue.h>
88 #include <sys/unistd.h>
89 #include <sys/user.h>
90 #include <sys/vnode.h>
91 #include <machine/atomic.h>
92
93 #include <security/audit/audit.h>
94
95 FEATURE(p1003_1b_mqueue, "POSIX P1003.1B message queues support");
96
97 /*
98 * Limits and constants
99 */
100 #define MQFS_NAMELEN NAME_MAX
101 #define MQFS_DELEN (8 + MQFS_NAMELEN)
102
103 /* node types */
104 typedef enum {
105 mqfstype_none = 0,
106 mqfstype_root,
107 mqfstype_dir,
108 mqfstype_this,
109 mqfstype_parent,
110 mqfstype_file,
111 mqfstype_symlink,
112 } mqfs_type_t;
113
114 struct mqfs_node;
115
116 /*
117 * mqfs_info: describes a mqfs instance
118 */
119 struct mqfs_info {
120 struct sx mi_lock;
121 struct mqfs_node *mi_root;
122 struct unrhdr *mi_unrhdr;
123 };
124
125 struct mqfs_vdata {
126 LIST_ENTRY(mqfs_vdata) mv_link;
127 struct mqfs_node *mv_node;
128 struct vnode *mv_vnode;
129 struct task mv_task;
130 };
131
132 /*
133 * mqfs_node: describes a node (file or directory) within a mqfs
134 */
135 struct mqfs_node {
136 char mn_name[MQFS_NAMELEN+1];
137 struct mqfs_info *mn_info;
138 struct mqfs_node *mn_parent;
139 LIST_HEAD(,mqfs_node) mn_children;
140 LIST_ENTRY(mqfs_node) mn_sibling;
141 LIST_HEAD(,mqfs_vdata) mn_vnodes;
142 const void *mn_pr_root;
143 int mn_refcount;
144 mqfs_type_t mn_type;
145 int mn_deleted;
146 uint32_t mn_fileno;
147 void *mn_data;
148 struct timespec mn_birth;
149 struct timespec mn_ctime;
150 struct timespec mn_atime;
151 struct timespec mn_mtime;
152 uid_t mn_uid;
153 gid_t mn_gid;
154 int mn_mode;
155 };
156
157 #define VTON(vp) (((struct mqfs_vdata *)((vp)->v_data))->mv_node)
158 #define VTOMQ(vp) ((struct mqueue *)(VTON(vp)->mn_data))
159 #define VFSTOMQFS(m) ((struct mqfs_info *)((m)->mnt_data))
160 #define FPTOMQ(fp) ((struct mqueue *)(((struct mqfs_node *) \
161 (fp)->f_data)->mn_data))
162
163 TAILQ_HEAD(msgq, mqueue_msg);
164
165 struct mqueue;
166
167 struct mqueue_notifier {
168 LIST_ENTRY(mqueue_notifier) nt_link;
169 struct sigevent nt_sigev;
170 ksiginfo_t nt_ksi;
171 struct proc *nt_proc;
172 };
173
174 struct mqueue {
175 struct mtx mq_mutex;
176 int mq_flags;
177 long mq_maxmsg;
178 long mq_msgsize;
179 long mq_curmsgs;
180 long mq_totalbytes;
181 struct msgq mq_msgq;
182 int mq_receivers;
183 int mq_senders;
184 struct selinfo mq_rsel;
185 struct selinfo mq_wsel;
186 struct mqueue_notifier *mq_notifier;
187 };
188
189 #define MQ_RSEL 0x01
190 #define MQ_WSEL 0x02
191
192 struct mqueue_msg {
193 TAILQ_ENTRY(mqueue_msg) msg_link;
194 unsigned int msg_prio;
195 unsigned int msg_size;
196 /* the message payload immediately follows this header */
197 };
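/*
 * A message is a single allocation: the mqueue_msg header is immediately
 * followed by msg_size bytes of payload (see mqueue_loadmsg()), so the
 * payload of a message is reached as:
 *
 *	char *payload = (char *)msg + sizeof(struct mqueue_msg);
 */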
198
199 static SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
200 "POSIX real time message queue");
201
202 static int default_maxmsg = 10;
203 SYSCTL_INT(_kern_mqueue, OID_AUTO, default_maxmsg, CTLFLAG_RD,
204 &default_maxmsg, 0, "Default maximum messages in queue");
205 static int default_msgsize = 1024;
206 SYSCTL_INT(_kern_mqueue, OID_AUTO, default_msgsize, CTLFLAG_RD,
207 &default_msgsize, 0, "Default maximum message size");
208
209 static int maxmsg = 100;
210 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW,
211 &maxmsg, 0, "maximum messages in queue");
212 static int maxmsgsize = 16384;
213 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW,
214 &maxmsgsize, 0, "maximum message size");
215 static int maxmq = 100;
216 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW,
217 &maxmq, 0, "maximum message queues");
218 static int curmq = 0;
219 SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW,
220 &curmq, 0, "current message queue number");
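/*
 * Example (illustrative): the limits above can be inspected and, where
 * marked CTLFLAG_RW, tuned at run time with sysctl(8):
 *
 *	sysctl kern.mqueue.maxmsg
 *	sysctl kern.mqueue.maxmsgsize=65536
 *	sysctl kern.mqueue.maxmq=200
 *
 * default_maxmsg and default_msgsize are read-only and report the
 * attributes applied when a queue is created without an explicit mq_attr.
 */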
221 static int unloadable = 0;
222 static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data");
223
224 static eventhandler_tag exit_tag;
225
226 /* Only one instance per system */
227 static struct mqfs_info mqfs_data;
228 static uma_zone_t mqnode_zone;
229 static uma_zone_t mqueue_zone;
230 static uma_zone_t mvdata_zone;
231 static uma_zone_t mqnoti_zone;
232 static struct vop_vector mqfs_vnodeops;
233 static const struct fileops mqueueops;
234 static unsigned mqfs_osd_jail_slot;
235
236 /*
237 * Directory structure construction and manipulation
238 */
239 #ifdef notyet
240 static struct mqfs_node *mqfs_create_dir(struct mqfs_node *parent,
241 const char *name, int namelen, struct ucred *cred, int mode);
242 static struct mqfs_node *mqfs_create_link(struct mqfs_node *parent,
243 const char *name, int namelen, struct ucred *cred, int mode);
244 #endif
245
246 static struct mqfs_node *mqfs_create_file(struct mqfs_node *parent,
247 const char *name, int namelen, struct ucred *cred, int mode);
248 static int mqfs_destroy(struct mqfs_node *mn);
249 static void mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn);
250 static void mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn);
251 static int mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn);
252 static int mqfs_prison_remove(void *obj, void *data);
253
254 /*
255 * Message queue construction and manipulation
256 */
257 static struct mqueue *mqueue_alloc(const struct mq_attr *attr);
258 static void mqueue_free(struct mqueue *mq);
259 static int mqueue_send(struct mqueue *mq, const char *msg_ptr,
260 size_t msg_len, unsigned msg_prio, int waitok,
261 const struct timespec *abs_timeout);
262 static int mqueue_receive(struct mqueue *mq, char *msg_ptr,
263 size_t msg_len, unsigned *msg_prio, int waitok,
264 const struct timespec *abs_timeout);
265 static int _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg,
266 int timo);
267 static int _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg,
268 int timo);
269 static void mqueue_send_notification(struct mqueue *mq);
270 static void mq_proc_exit(void *arg, struct proc *p);
271
272 /*
273 * kqueue filters
274 */
275 static void filt_mqdetach(struct knote *kn);
276 static int filt_mqread(struct knote *kn, long hint);
277 static int filt_mqwrite(struct knote *kn, long hint);
278
279 static const struct filterops mq_rfiltops = {
280 .f_isfd = 1,
281 .f_detach = filt_mqdetach,
282 .f_event = filt_mqread,
283 .f_copy = knote_triv_copy,
284 };
285 static const struct filterops mq_wfiltops = {
286 .f_isfd = 1,
287 .f_detach = filt_mqdetach,
288 .f_event = filt_mqwrite,
289 .f_copy = knote_triv_copy,
290 };
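/*
 * These filters allow a message queue descriptor to be monitored with
 * kevent(2).  A minimal userspace sketch (FreeBSD libc exposes the
 * descriptor behind an mqd_t via mq_getfd_np(3)):
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *	EV_SET(&ev, mq_getfd_np(mqd), EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	returns once a message can
 *						be received (filt_mqread)
 */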
291
292 /*
293 * Initialize fileno bitmap
294 */
295 static void
296 mqfs_fileno_init(struct mqfs_info *mi)
297 {
298 struct unrhdr *up;
299
300 up = new_unrhdr(1, INT_MAX, NULL);
301 mi->mi_unrhdr = up;
302 }
303
304 /*
305 * Tear down fileno bitmap
306 */
307 static void
308 mqfs_fileno_uninit(struct mqfs_info *mi)
309 {
310 struct unrhdr *up;
311
312 up = mi->mi_unrhdr;
313 mi->mi_unrhdr = NULL;
314 delete_unrhdr(up);
315 }
316
317 /*
318 * Allocate a file number
319 */
320 static void
321 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn)
322 {
323 /* make sure our parent has a file number */
324 if (mn->mn_parent && !mn->mn_parent->mn_fileno)
325 mqfs_fileno_alloc(mi, mn->mn_parent);
326
327 switch (mn->mn_type) {
328 case mqfstype_root:
329 case mqfstype_dir:
330 case mqfstype_file:
331 case mqfstype_symlink:
332 mn->mn_fileno = alloc_unr(mi->mi_unrhdr);
333 break;
334 case mqfstype_this:
335 KASSERT(mn->mn_parent != NULL,
336 ("mqfstype_this node has no parent"));
337 mn->mn_fileno = mn->mn_parent->mn_fileno;
338 break;
339 case mqfstype_parent:
340 KASSERT(mn->mn_parent != NULL,
341 ("mqfstype_parent node has no parent"));
342 if (mn->mn_parent == mi->mi_root) {
343 mn->mn_fileno = mn->mn_parent->mn_fileno;
344 break;
345 }
346 KASSERT(mn->mn_parent->mn_parent != NULL,
347 ("mqfstype_parent node has no grandparent"));
348 mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno;
349 break;
350 default:
351 KASSERT(0,
352 ("mqfs_fileno_alloc() called for unknown type node: %d",
353 mn->mn_type));
354 break;
355 }
356 }
357
358 /*
359 * Release a file number
360 */
361 static void
362 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn)
363 {
364 switch (mn->mn_type) {
365 case mqfstype_root:
366 case mqfstype_dir:
367 case mqfstype_file:
368 case mqfstype_symlink:
369 free_unr(mi->mi_unrhdr, mn->mn_fileno);
370 break;
371 case mqfstype_this:
372 case mqfstype_parent:
373 /* ignore these, as they don't "own" their file number */
374 break;
375 default:
376 KASSERT(0,
377 ("mqfs_fileno_free() called for unknown type node: %d",
378 mn->mn_type));
379 break;
380 }
381 }
382
383 static __inline struct mqfs_node *
384 mqnode_alloc(void)
385 {
386 return (uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO));
387 }
388
389 static __inline void
390 mqnode_free(struct mqfs_node *node)
391 {
392 uma_zfree(mqnode_zone, node);
393 }
394
395 static __inline void
396 mqnode_addref(struct mqfs_node *node)
397 {
398 atomic_add_int(&node->mn_refcount, 1);
399 }
400
401 static __inline void
402 mqnode_release(struct mqfs_node *node)
403 {
404 struct mqfs_info *mqfs;
405 int old, exp;
406
407 mqfs = node->mn_info;
408 old = atomic_fetchadd_int(&node->mn_refcount, -1);
409 if (node->mn_type == mqfstype_dir ||
410 node->mn_type == mqfstype_root)
411 exp = 3; /* include . and .. */
412 else
413 exp = 1;
414 if (old == exp) {
415 int locked = sx_xlocked(&mqfs->mi_lock);
416 if (!locked)
417 sx_xlock(&mqfs->mi_lock);
418 mqfs_destroy(node);
419 if (!locked)
420 sx_xunlock(&mqfs->mi_lock);
421 }
422 }
423
424 /*
425 * Add a node to a directory
426 */
427 static int
428 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node)
429 {
430 KASSERT(parent != NULL, ("%s(): parent is NULL", __func__));
431 KASSERT(parent->mn_info != NULL,
432 ("%s(): parent has no mn_info", __func__));
433 KASSERT(parent->mn_type == mqfstype_dir ||
434 parent->mn_type == mqfstype_root,
435 ("%s(): parent is not a directory", __func__));
436
437 node->mn_info = parent->mn_info;
438 node->mn_parent = parent;
439 LIST_INIT(&node->mn_children);
440 LIST_INIT(&node->mn_vnodes);
441 LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling);
442 mqnode_addref(parent);
443 return (0);
444 }
445
446 static struct mqfs_node *
447 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode,
448 int nodetype)
449 {
450 struct mqfs_node *node;
451
452 node = mqnode_alloc();
453 strncpy(node->mn_name, name, namelen);
454 node->mn_pr_root = cred->cr_prison->pr_root;
455 node->mn_type = nodetype;
456 node->mn_refcount = 1;
457 vfs_timestamp(&node->mn_birth);
458 node->mn_ctime = node->mn_atime = node->mn_mtime =
459 node->mn_birth;
460 node->mn_uid = cred->cr_uid;
461 node->mn_gid = cred->cr_gid;
462 node->mn_mode = mode;
463 return (node);
464 }
465
466 /*
467 * Create a file
468 */
469 static struct mqfs_node *
470 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen,
471 struct ucred *cred, int mode)
472 {
473 struct mqfs_node *node;
474
475 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file);
476 if (mqfs_add_node(parent, node) != 0) {
477 mqnode_free(node);
478 return (NULL);
479 }
480 return (node);
481 }
482
483 /*
484 * Add . and .. to a directory
485 */
486 static int
487 mqfs_fixup_dir(struct mqfs_node *parent)
488 {
489 struct mqfs_node *dir;
490
491 dir = mqnode_alloc();
492 dir->mn_name[0] = '.';
493 dir->mn_type = mqfstype_this;
494 dir->mn_refcount = 1;
495 if (mqfs_add_node(parent, dir) != 0) {
496 mqnode_free(dir);
497 return (-1);
498 }
499
500 dir = mqnode_alloc();
501 dir->mn_name[0] = dir->mn_name[1] = '.';
502 dir->mn_type = mqfstype_parent;
503 dir->mn_refcount = 1;
504
505 if (mqfs_add_node(parent, dir) != 0) {
506 mqnode_free(dir);
507 return (-1);
508 }
509
510 return (0);
511 }
512
513 #ifdef notyet
514
515 /*
516 * Create a directory
517 */
518 static struct mqfs_node *
519 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen,
520 struct ucred *cred, int mode)
521 {
522 struct mqfs_node *node;
523
524 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir);
525 if (mqfs_add_node(parent, node) != 0) {
526 mqnode_free(node);
527 return (NULL);
528 }
529
530 if (mqfs_fixup_dir(node) != 0) {
531 mqfs_destroy(node);
532 return (NULL);
533 }
534 return (node);
535 }
536
537 /*
538 * Create a symlink
539 */
540 static struct mqfs_node *
541 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen,
542 struct ucred *cred, int mode)
543 {
544 struct mqfs_node *node;
545
546 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink);
547 if (mqfs_add_node(parent, node) != 0) {
548 mqnode_free(node);
549 return (NULL);
550 }
551 return (node);
552 }
553
554 #endif
555
556 /*
557 * Destroy a node or a tree of nodes
558 */
559 static int
560 mqfs_destroy(struct mqfs_node *node)
561 {
562 struct mqfs_node *parent;
563
564 KASSERT(node != NULL,
565 ("%s(): node is NULL", __func__));
566 KASSERT(node->mn_info != NULL,
567 ("%s(): node has no mn_info", __func__));
568
569 /* destroy children */
570 if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root)
571 while (! LIST_EMPTY(&node->mn_children))
572 mqfs_destroy(LIST_FIRST(&node->mn_children));
573
574 /* unlink from parent */
575 if ((parent = node->mn_parent) != NULL) {
576 KASSERT(parent->mn_info == node->mn_info,
577 ("%s(): parent has different mn_info", __func__));
578 LIST_REMOVE(node, mn_sibling);
579 }
580
581 if (node->mn_fileno != 0)
582 mqfs_fileno_free(node->mn_info, node);
583 if (node->mn_data != NULL)
584 mqueue_free(node->mn_data);
585 mqnode_free(node);
586 return (0);
587 }
588
589 /*
590 * Mount a mqfs instance
591 */
592 static int
593 mqfs_mount(struct mount *mp)
594 {
595 struct statfs *sbp;
596
597 if (mp->mnt_flag & MNT_UPDATE)
598 return (EOPNOTSUPP);
599
600 mp->mnt_data = &mqfs_data;
601 MNT_ILOCK(mp);
602 mp->mnt_flag |= MNT_LOCAL;
603 MNT_IUNLOCK(mp);
604 vfs_getnewfsid(mp);
605
606 sbp = &mp->mnt_stat;
607 vfs_mountedfrom(mp, "mqueue");
608 sbp->f_bsize = PAGE_SIZE;
609 sbp->f_iosize = PAGE_SIZE;
610 sbp->f_blocks = 1;
611 sbp->f_bfree = 1;
612 sbp->f_bavail = 0;
613 sbp->f_files = 0;
614 sbp->f_ffree = 0;
615 return (0);
616 }
617
618 /*
619 * Unmount a mqfs instance
620 */
621 static int
622 mqfs_unmount(struct mount *mp, int mntflags)
623 {
624 int error;
625
626 error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0,
627 curthread);
628 return (error);
629 }
630
631 /*
632 * Return a root vnode
633 */
634 static int
635 mqfs_root(struct mount *mp, int flags, struct vnode **vpp)
636 {
637 struct mqfs_info *mqfs;
638 int ret;
639
640 mqfs = VFSTOMQFS(mp);
641 ret = mqfs_allocv(mp, vpp, mqfs->mi_root);
642 return (ret);
643 }
644
645 /*
646 * Return filesystem stats
647 */
648 static int
649 mqfs_statfs(struct mount *mp, struct statfs *sbp)
650 {
651 /* XXX update statistics */
652 return (0);
653 }
654
655 /*
656 * Initialize a mqfs instance
657 */
658 static int
659 mqfs_init(struct vfsconf *vfc)
660 {
661 struct mqfs_node *root;
662 struct mqfs_info *mi;
663 osd_method_t methods[PR_MAXMETHOD] = {
664 [PR_METHOD_REMOVE] = mqfs_prison_remove,
665 };
666
667 mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node),
668 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
669 mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue),
670 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
671 mvdata_zone = uma_zcreate("mvdata",
672 sizeof(struct mqfs_vdata), NULL, NULL, NULL,
673 NULL, UMA_ALIGN_PTR, 0);
674 mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier),
675 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
676 mi = &mqfs_data;
677 sx_init(&mi->mi_lock, "mqfs lock");
678 /* set up the root directory */
679 root = mqfs_create_node("/", 1, curthread->td_ucred, 01777,
680 mqfstype_root);
681 root->mn_info = mi;
682 LIST_INIT(&root->mn_children);
683 LIST_INIT(&root->mn_vnodes);
684 mi->mi_root = root;
685 mqfs_fileno_init(mi);
686 mqfs_fileno_alloc(mi, root);
687 mqfs_fixup_dir(root);
688 exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL,
689 EVENTHANDLER_PRI_ANY);
690 p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING);
691 mqfs_osd_jail_slot = osd_jail_register(NULL, methods);
692 return (0);
693 }
694
695 /*
696 * Destroy a mqfs instance
697 */
698 static int
699 mqfs_uninit(struct vfsconf *vfc)
700 {
701 struct mqfs_info *mi;
702
703 if (!unloadable)
704 return (EOPNOTSUPP);
705 osd_jail_deregister(mqfs_osd_jail_slot);
706 EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
707 mi = &mqfs_data;
708 mqfs_destroy(mi->mi_root);
709 mi->mi_root = NULL;
710 mqfs_fileno_uninit(mi);
711 sx_destroy(&mi->mi_lock);
712 uma_zdestroy(mqnode_zone);
713 uma_zdestroy(mqueue_zone);
714 uma_zdestroy(mvdata_zone);
715 uma_zdestroy(mqnoti_zone);
716 return (0);
717 }
718
719 /*
720 * Taskqueue routine: recycle an mqfs vnode whose node was unlinked
721 */
722 static void
723 do_recycle(void *context, int pending __unused)
724 {
725 struct vnode *vp = (struct vnode *)context;
726
727 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
728 vrecycle(vp);
729 VOP_UNLOCK(vp);
730 vdrop(vp);
731 }
732
733 /*
734 * Allocate a vnode
735 */
736 static int
737 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
738 {
739 struct mqfs_vdata *vd;
740 struct mqfs_info *mqfs;
741 struct vnode *newvpp;
742 int error;
743
744 mqfs = pn->mn_info;
745 *vpp = NULL;
746 sx_xlock(&mqfs->mi_lock);
747 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
748 if (vd->mv_vnode->v_mount == mp) {
749 vhold(vd->mv_vnode);
750 break;
751 }
752 }
753
754 if (vd != NULL) {
755 found:
756 *vpp = vd->mv_vnode;
757 sx_xunlock(&mqfs->mi_lock);
758 error = vget(*vpp, LK_RETRY | LK_EXCLUSIVE);
759 vdrop(*vpp);
760 return (error);
761 }
762 sx_xunlock(&mqfs->mi_lock);
763
764 error = getnewvnode("mqueue", mp, &mqfs_vnodeops, &newvpp);
765 if (error)
766 return (error);
767 vn_lock(newvpp, LK_EXCLUSIVE | LK_RETRY);
768 error = insmntque(newvpp, mp);
769 if (error != 0)
770 return (error);
771
772 sx_xlock(&mqfs->mi_lock);
773 /*
774 * Check if it has already been allocated
775 * while we were blocked.
776 */
777 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
778 if (vd->mv_vnode->v_mount == mp) {
779 vhold(vd->mv_vnode);
780 sx_xunlock(&mqfs->mi_lock);
781
782 vgone(newvpp);
783 vput(newvpp);
784 goto found;
785 }
786 }
787
788 *vpp = newvpp;
789
790 vd = uma_zalloc(mvdata_zone, M_WAITOK);
791 (*vpp)->v_data = vd;
792 vd->mv_vnode = *vpp;
793 vd->mv_node = pn;
794 TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp);
795 LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link);
796 mqnode_addref(pn);
797 switch (pn->mn_type) {
798 case mqfstype_root:
799 (*vpp)->v_vflag = VV_ROOT;
800 /* fall through */
801 case mqfstype_dir:
802 case mqfstype_this:
803 case mqfstype_parent:
804 (*vpp)->v_type = VDIR;
805 break;
806 case mqfstype_file:
807 (*vpp)->v_type = VREG;
808 break;
809 case mqfstype_symlink:
810 (*vpp)->v_type = VLNK;
811 break;
812 case mqfstype_none:
813 KASSERT(0, ("mqfs_allocv called for null node\n"));
814 default:
815 panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type);
816 }
817 sx_xunlock(&mqfs->mi_lock);
818 vn_set_state(*vpp, VSTATE_CONSTRUCTED);
819 return (0);
820 }
821
822 /*
823 * Search a directory entry
824 */
825 static struct mqfs_node *
826 mqfs_search(struct mqfs_node *pd, const char *name, int len, struct ucred *cred)
827 {
828 struct mqfs_node *pn;
829 const void *pr_root;
830
831 sx_assert(&pd->mn_info->mi_lock, SX_LOCKED);
832 pr_root = cred->cr_prison->pr_root;
833 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
834 /* Only match names within the same prison root directory */
835 if ((pn->mn_pr_root == NULL || pn->mn_pr_root == pr_root) &&
836 strncmp(pn->mn_name, name, len) == 0 &&
837 pn->mn_name[len] == '\0')
838 return (pn);
839 }
840 return (NULL);
841 }
842
843 /*
844 * Look up a file or directory.
845 */
846 static int
847 mqfs_lookupx(struct vop_cachedlookup_args *ap)
848 {
849 struct componentname *cnp;
850 struct vnode *dvp, **vpp;
851 struct mqfs_node *pd;
852 struct mqfs_node *pn;
853 struct mqfs_info *mqfs;
854 uint64_t flags;
855 int nameiop, error, namelen;
856 char *pname;
857 struct thread *td;
858
859 td = curthread;
860 cnp = ap->a_cnp;
861 vpp = ap->a_vpp;
862 dvp = ap->a_dvp;
863 pname = cnp->cn_nameptr;
864 namelen = cnp->cn_namelen;
865 flags = cnp->cn_flags;
866 nameiop = cnp->cn_nameiop;
867 pd = VTON(dvp);
868 pn = NULL;
869 mqfs = pd->mn_info;
870 *vpp = NULL;
871
872 if (dvp->v_type != VDIR)
873 return (ENOTDIR);
874
875 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
876 if (error)
877 return (error);
878
879 /* shortcut: check if the name is too long */
880 if (cnp->cn_namelen >= MQFS_NAMELEN)
881 return (ENOENT);
882
883 /* self */
884 if (namelen == 1 && pname[0] == '.') {
885 if ((flags & ISLASTCN) && nameiop != LOOKUP)
886 return (EINVAL);
887 pn = pd;
888 *vpp = dvp;
889 vref(dvp);
890 return (0);
891 }
892
893 /* parent */
894 if (cnp->cn_flags & ISDOTDOT) {
895 if (dvp->v_vflag & VV_ROOT)
896 return (EIO);
897 if ((flags & ISLASTCN) && nameiop != LOOKUP)
898 return (EINVAL);
899 VOP_UNLOCK(dvp);
900 KASSERT(pd->mn_parent, ("non-root directory has no parent"));
901 pn = pd->mn_parent;
902 error = mqfs_allocv(dvp->v_mount, vpp, pn);
903 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
904 return (error);
905 }
906
907 /* named node */
908 sx_xlock(&mqfs->mi_lock);
909 pn = mqfs_search(pd, pname, namelen, cnp->cn_cred);
910 if (pn != NULL)
911 mqnode_addref(pn);
912 sx_xunlock(&mqfs->mi_lock);
913
914 /* found */
915 if (pn != NULL) {
916 /* DELETE */
917 if (nameiop == DELETE && (flags & ISLASTCN)) {
918 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
919 if (error) {
920 mqnode_release(pn);
921 return (error);
922 }
923 if (*vpp == dvp) {
924 vref(dvp);
925 *vpp = dvp;
926 mqnode_release(pn);
927 return (0);
928 }
929 }
930
931 /* allocate vnode */
932 error = mqfs_allocv(dvp->v_mount, vpp, pn);
933 mqnode_release(pn);
934 if (error == 0 && cnp->cn_flags & MAKEENTRY)
935 cache_enter(dvp, *vpp, cnp);
936 return (error);
937 }
938
939 /* not found */
940
941 /* should we create a new entry in this directory? */
942 if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT)
943 && (flags & ISLASTCN)) {
944 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
945 if (error)
946 return (error);
947 return (EJUSTRETURN);
948 }
949 return (ENOENT);
950 }
951
952 #if 0
953 struct vop_lookup_args {
954 struct vop_generic_args a_gen;
955 struct vnode *a_dvp;
956 struct vnode **a_vpp;
957 struct componentname *a_cnp;
958 };
959 #endif
960
961 /*
962 * vnode lookup operation
963 */
964 static int
965 mqfs_lookup(struct vop_cachedlookup_args *ap)
966 {
967 int rc;
968
969 rc = mqfs_lookupx(ap);
970 return (rc);
971 }
972
973 #if 0
974 struct vop_create_args {
975 struct vnode *a_dvp;
976 struct vnode **a_vpp;
977 struct componentname *a_cnp;
978 struct vattr *a_vap;
979 };
980 #endif
981
982 /*
983 * vnode creation operation
984 */
985 static int
986 mqfs_create(struct vop_create_args *ap)
987 {
988 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
989 struct componentname *cnp = ap->a_cnp;
990 struct mqfs_node *pd;
991 struct mqfs_node *pn;
992 struct mqueue *mq;
993 int error;
994
995 pd = VTON(ap->a_dvp);
996 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
997 return (ENOTDIR);
998 mq = mqueue_alloc(NULL);
999 if (mq == NULL)
1000 return (EAGAIN);
1001 sx_xlock(&mqfs->mi_lock);
1002 pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen,
1003 cnp->cn_cred, ap->a_vap->va_mode);
1004 if (pn == NULL) {
1005 sx_xunlock(&mqfs->mi_lock);
1006 error = ENOSPC;
1007 } else {
1008 mqnode_addref(pn);
1009 sx_xunlock(&mqfs->mi_lock);
1010 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1011 mqnode_release(pn);
1012 if (error)
1013 mqfs_destroy(pn);
1014 else
1015 pn->mn_data = mq;
1016 }
1017 if (error)
1018 mqueue_free(mq);
1019 return (error);
1020 }
1021
1022 /*
1023 * Remove an entry
1024 */
1025 static int
1026 do_unlink(struct mqfs_node *pn, struct ucred *ucred)
1027 {
1028 struct mqfs_node *parent;
1029 struct mqfs_vdata *vd;
1030 int error = 0;
1031
1032 sx_assert(&pn->mn_info->mi_lock, SX_LOCKED);
1033
1034 if (ucred->cr_uid != pn->mn_uid &&
1035 (error = priv_check_cred(ucred, PRIV_MQ_ADMIN)) != 0)
1036 error = EACCES;
1037 else if (!pn->mn_deleted) {
1038 parent = pn->mn_parent;
1039 pn->mn_parent = NULL;
1040 pn->mn_deleted = 1;
1041 LIST_REMOVE(pn, mn_sibling);
1042 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
1043 cache_purge(vd->mv_vnode);
1044 vhold(vd->mv_vnode);
1045 taskqueue_enqueue(taskqueue_thread, &vd->mv_task);
1046 }
1047 mqnode_release(pn);
1048 mqnode_release(parent);
1049 } else
1050 error = ENOENT;
1051 return (error);
1052 }
1053
1054 #if 0
1055 struct vop_remove_args {
1056 struct vnode *a_dvp;
1057 struct vnode *a_vp;
1058 struct componentname *a_cnp;
1059 };
1060 #endif
1061
1062 /*
1063 * vnode removal operation
1064 */
1065 static int
1066 mqfs_remove(struct vop_remove_args *ap)
1067 {
1068 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1069 struct mqfs_node *pn;
1070 int error;
1071
1072 if (ap->a_vp->v_type == VDIR)
1073 return (EPERM);
1074 pn = VTON(ap->a_vp);
1075 sx_xlock(&mqfs->mi_lock);
1076 error = do_unlink(pn, ap->a_cnp->cn_cred);
1077 sx_xunlock(&mqfs->mi_lock);
1078 return (error);
1079 }
1080
1081 #if 0
1082 struct vop_inactive_args {
1083 struct vnode *a_vp;
1084 struct thread *a_td;
1085 };
1086 #endif
1087
1088 static int
1089 mqfs_inactive(struct vop_inactive_args *ap)
1090 {
1091 struct mqfs_node *pn = VTON(ap->a_vp);
1092
1093 if (pn->mn_deleted)
1094 vrecycle(ap->a_vp);
1095 return (0);
1096 }
1097
1098 #if 0
1099 struct vop_reclaim_args {
1100 struct vop_generic_args a_gen;
1101 struct vnode *a_vp;
1102 };
1103 #endif
1104
1105 static int
1106 mqfs_reclaim(struct vop_reclaim_args *ap)
1107 {
1108 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount);
1109 struct vnode *vp = ap->a_vp;
1110 struct mqfs_node *pn;
1111 struct mqfs_vdata *vd;
1112
1113 vd = vp->v_data;
1114 pn = vd->mv_node;
1115 sx_xlock(&mqfs->mi_lock);
1116 vp->v_data = NULL;
1117 LIST_REMOVE(vd, mv_link);
1118 mqnode_release(pn);
1119 sx_xunlock(&mqfs->mi_lock);
1120 uma_zfree(mvdata_zone, vd);
1121 return (0);
1122 }
1123
1124 #if 0
1125 struct vop_open_args {
1126 struct vop_generic_args a_gen;
1127 struct vnode *a_vp;
1128 int a_mode;
1129 struct ucred *a_cred;
1130 struct thread *a_td;
1131 struct file *a_fp;
1132 };
1133 #endif
1134
1135 static int
1136 mqfs_open(struct vop_open_args *ap)
1137 {
1138 return (0);
1139 }
1140
1141 #if 0
1142 struct vop_close_args {
1143 struct vop_generic_args a_gen;
1144 struct vnode *a_vp;
1145 int a_fflag;
1146 struct ucred *a_cred;
1147 struct thread *a_td;
1148 };
1149 #endif
1150
1151 static int
1152 mqfs_close(struct vop_close_args *ap)
1153 {
1154 return (0);
1155 }
1156
1157 #if 0
1158 struct vop_access_args {
1159 struct vop_generic_args a_gen;
1160 struct vnode *a_vp;
1161 accmode_t a_accmode;
1162 struct ucred *a_cred;
1163 struct thread *a_td;
1164 };
1165 #endif
1166
1167 /*
1168 * Verify permissions
1169 */
1170 static int
1171 mqfs_access(struct vop_access_args *ap)
1172 {
1173 struct vnode *vp = ap->a_vp;
1174 struct vattr vattr;
1175 int error;
1176
1177 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
1178 if (error)
1179 return (error);
1180 error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid, vattr.va_gid,
1181 ap->a_accmode, ap->a_cred);
1182 return (error);
1183 }
1184
1185 #if 0
1186 struct vop_getattr_args {
1187 struct vop_generic_args a_gen;
1188 struct vnode *a_vp;
1189 struct vattr *a_vap;
1190 struct ucred *a_cred;
1191 };
1192 #endif
1193
1194 /*
1195 * Get file attributes
1196 */
1197 static int
1198 mqfs_getattr(struct vop_getattr_args *ap)
1199 {
1200 struct vnode *vp = ap->a_vp;
1201 struct mqfs_node *pn = VTON(vp);
1202 struct vattr *vap = ap->a_vap;
1203 int error = 0;
1204
1205 vap->va_type = vp->v_type;
1206 vap->va_mode = pn->mn_mode;
1207 vap->va_nlink = 1;
1208 vap->va_uid = pn->mn_uid;
1209 vap->va_gid = pn->mn_gid;
1210 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
1211 vap->va_fileid = pn->mn_fileno;
1212 vap->va_size = 0;
1213 vap->va_blocksize = PAGE_SIZE;
1214 vap->va_bytes = vap->va_size = 0;
1215 vap->va_atime = pn->mn_atime;
1216 vap->va_mtime = pn->mn_mtime;
1217 vap->va_ctime = pn->mn_ctime;
1218 vap->va_birthtime = pn->mn_birth;
1219 vap->va_gen = 0;
1220 vap->va_flags = 0;
1221 vap->va_rdev = NODEV;
1222 vap->va_bytes = 0;
1223 vap->va_filerev = 0;
1224 return (error);
1225 }
1226
1227 #if 0
1228 struct vop_setattr_args {
1229 struct vop_generic_args a_gen;
1230 struct vnode *a_vp;
1231 struct vattr *a_vap;
1232 struct ucred *a_cred;
1233 };
1234 #endif
1235 /*
1236 * Set attributes
1237 */
1238 static int
1239 mqfs_setattr(struct vop_setattr_args *ap)
1240 {
1241 struct mqfs_node *pn;
1242 struct vattr *vap;
1243 struct vnode *vp;
1244 struct thread *td;
1245 int c, error;
1246 uid_t uid;
1247 gid_t gid;
1248
1249 td = curthread;
1250 vap = ap->a_vap;
1251 vp = ap->a_vp;
1252 if (vap->va_type != VNON ||
1253 vap->va_nlink != VNOVAL ||
1254 vap->va_fsid != VNOVAL ||
1255 vap->va_fileid != VNOVAL ||
1256 vap->va_blocksize != VNOVAL ||
1257 (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1258 vap->va_rdev != VNOVAL ||
1259 (int)vap->va_bytes != VNOVAL ||
1260 vap->va_gen != VNOVAL) {
1261 return (EINVAL);
1262 }
1263
1264 pn = VTON(vp);
1265
1266 error = c = 0;
1267 if (vap->va_uid == (uid_t)VNOVAL)
1268 uid = pn->mn_uid;
1269 else
1270 uid = vap->va_uid;
1271 if (vap->va_gid == (gid_t)VNOVAL)
1272 gid = pn->mn_gid;
1273 else
1274 gid = vap->va_gid;
1275
1276 if (uid != pn->mn_uid || gid != pn->mn_gid) {
1277 /*
1278 * To modify the ownership of a file, must possess VADMIN
1279 * for that file.
1280 */
1281 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)))
1282 return (error);
1283
1284 /*
1285 * XXXRW: Why is there a privilege check here: shouldn't the
1286 * check in VOP_ACCESS() be enough? Also, are the group bits
1287 * below definitely right?
1288 */
1289 if ((ap->a_cred->cr_uid != pn->mn_uid || uid != pn->mn_uid ||
1290 (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) &&
1291 (error = priv_check(td, PRIV_MQ_ADMIN)) != 0)
1292 return (error);
1293 pn->mn_uid = uid;
1294 pn->mn_gid = gid;
1295 c = 1;
1296 }
1297
1298 if (vap->va_mode != (mode_t)VNOVAL) {
1299 if (ap->a_cred->cr_uid != pn->mn_uid &&
1300 (error = priv_check(td, PRIV_MQ_ADMIN)))
1301 return (error);
1302 pn->mn_mode = vap->va_mode;
1303 c = 1;
1304 }
1305
1306 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1307 /* See the comment in ufs_vnops::ufs_setattr(). */
1308 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
1309 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
1310 (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
1311 return (error);
1312 if (vap->va_atime.tv_sec != VNOVAL) {
1313 pn->mn_atime = vap->va_atime;
1314 }
1315 if (vap->va_mtime.tv_sec != VNOVAL) {
1316 pn->mn_mtime = vap->va_mtime;
1317 }
1318 c = 1;
1319 }
1320 if (c) {
1321 vfs_timestamp(&pn->mn_ctime);
1322 }
1323 return (0);
1324 }
1325
1326 #if 0
1327 struct vop_read_args {
1328 struct vop_generic_args a_gen;
1329 struct vnode *a_vp;
1330 struct uio *a_uio;
1331 int a_ioflag;
1332 struct ucred *a_cred;
1333 };
1334 #endif
1335
1336 /*
1337 * Read from a file
1338 */
1339 static int
1340 mqfs_read(struct vop_read_args *ap)
1341 {
1342 char buf[80];
1343 struct vnode *vp = ap->a_vp;
1344 struct uio *uio = ap->a_uio;
1345 struct mqueue *mq;
1346 int len, error;
1347
1348 if (vp->v_type != VREG)
1349 return (EINVAL);
1350
1351 mq = VTOMQ(vp);
1352 snprintf(buf, sizeof(buf),
1353 "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n",
1354 mq->mq_totalbytes,
1355 mq->mq_maxmsg,
1356 mq->mq_curmsgs,
1357 mq->mq_msgsize);
1358 buf[sizeof(buf)-1] = '\0';
1359 len = strlen(buf);
1360 error = uiomove_frombuf(buf, len, uio);
1361 return (error);
1362 }
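/*
 * With mqueuefs mounted (e.g. "mount -t mqueuefs null /mnt/mqueue"),
 * reading a queue node therefore yields a single status line such as
 * (values are an arbitrary example):
 *
 *	QSIZE:130        MAXMSG:10        CURMSG:2         MSGSIZE:1024
 */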
1363
1364 #if 0
1365 struct vop_readdir_args {
1366 struct vop_generic_args a_gen;
1367 struct vnode *a_vp;
1368 struct uio *a_uio;
1369 struct ucred *a_cred;
1370 int *a_eofflag;
1371 int *a_ncookies;
1372 uint64_t **a_cookies;
1373 };
1374 #endif
1375
1376 /*
1377 * Return directory entries.
1378 */
1379 static int
1380 mqfs_readdir(struct vop_readdir_args *ap)
1381 {
1382 struct vnode *vp;
1383 struct mqfs_info *mi;
1384 struct mqfs_node *pd;
1385 struct mqfs_node *pn;
1386 struct dirent entry;
1387 struct uio *uio;
1388 const void *pr_root;
1389 int *tmp_ncookies = NULL;
1390 off_t offset;
1391 int error, i;
1392
1393 vp = ap->a_vp;
1394 mi = VFSTOMQFS(vp->v_mount);
1395 pd = VTON(vp);
1396 uio = ap->a_uio;
1397
1398 if (vp->v_type != VDIR)
1399 return (ENOTDIR);
1400
1401 if (uio->uio_offset < 0)
1402 return (EINVAL);
1403
1404 if (ap->a_ncookies != NULL) {
1405 tmp_ncookies = ap->a_ncookies;
1406 *ap->a_ncookies = 0;
1407 ap->a_ncookies = NULL;
1408 }
1409
1410 error = 0;
1411 offset = 0;
1412
1413 pr_root = ap->a_cred->cr_prison->pr_root;
1414 sx_xlock(&mi->mi_lock);
1415
1416 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
1417 entry.d_reclen = sizeof(entry);
1418
1419 /*
1420 * Only show names within the same prison root directory
1421 * (or not associated with a prison, e.g. "." and "..").
1422 */
1423 if (pn->mn_pr_root != NULL && pn->mn_pr_root != pr_root)
1424 continue;
1425 if (!pn->mn_fileno)
1426 mqfs_fileno_alloc(mi, pn);
1427 entry.d_fileno = pn->mn_fileno;
1428 entry.d_off = offset + entry.d_reclen;
1429 for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i)
1430 entry.d_name[i] = pn->mn_name[i];
1431 entry.d_namlen = i;
1432 switch (pn->mn_type) {
1433 case mqfstype_root:
1434 case mqfstype_dir:
1435 case mqfstype_this:
1436 case mqfstype_parent:
1437 entry.d_type = DT_DIR;
1438 break;
1439 case mqfstype_file:
1440 entry.d_type = DT_REG;
1441 break;
1442 case mqfstype_symlink:
1443 entry.d_type = DT_LNK;
1444 break;
1445 default:
1446 panic("%s has unexpected node type: %d", pn->mn_name,
1447 pn->mn_type);
1448 }
1449 dirent_terminate(&entry);
1450 if (entry.d_reclen > uio->uio_resid)
1451 break;
1452 if (offset >= uio->uio_offset) {
1453 error = vfs_read_dirent(ap, &entry, offset);
1454 if (error)
1455 break;
1456 }
1457 offset += entry.d_reclen;
1458 }
1459 sx_xunlock(&mi->mi_lock);
1460
1461 uio->uio_offset = offset;
1462
1463 if (tmp_ncookies != NULL)
1464 ap->a_ncookies = tmp_ncookies;
1465
1466 return (error);
1467 }
1468
1469 #ifdef notyet
1470
1471 #if 0
1472 struct vop_mkdir_args {
1473 struct vnode *a_dvp;
1474 struct vnode **a_vpp;
1475 struct componentname *a_cnp;
1476 struct vattr *a_vap;
1477 };
1478 #endif
1479
1480 /*
1481 * Create a directory.
1482 */
1483 static int
1484 mqfs_mkdir(struct vop_mkdir_args *ap)
1485 {
1486 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1487 struct componentname *cnp = ap->a_cnp;
1488 struct mqfs_node *pd = VTON(ap->a_dvp);
1489 struct mqfs_node *pn;
1490 int error;
1491
1492 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
1493 return (ENOTDIR);
1494 sx_xlock(&mqfs->mi_lock);
1495 pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen,
1496 ap->a_cnp->cn_cred, ap->a_vap->va_mode);
1497 if (pn != NULL)
1498 mqnode_addref(pn);
1499 sx_xunlock(&mqfs->mi_lock);
1500 if (pn == NULL) {
1501 error = ENOSPC;
1502 } else {
1503 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1504 mqnode_release(pn);
1505 }
1506 return (error);
1507 }
1508
1509 #if 0
1510 struct vop_rmdir_args {
1511 struct vnode *a_dvp;
1512 struct vnode *a_vp;
1513 struct componentname *a_cnp;
1514 };
1515 #endif
1516
1517 /*
1518 * Remove a directory.
1519 */
1520 static int
1521 mqfs_rmdir(struct vop_rmdir_args *ap)
1522 {
1523 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1524 struct mqfs_node *pn = VTON(ap->a_vp);
1525 struct mqfs_node *pt;
1526
1527 if (pn->mn_type != mqfstype_dir)
1528 return (ENOTDIR);
1529
1530 sx_xlock(&mqfs->mi_lock);
1531 if (pn->mn_deleted) {
1532 sx_xunlock(&mqfs->mi_lock);
1533 return (ENOENT);
1534 }
1535
1536 pt = LIST_FIRST(&pn->mn_children);
1537 pt = LIST_NEXT(pt, mn_sibling);
1538 pt = LIST_NEXT(pt, mn_sibling);
1539 if (pt != NULL) {
1540 sx_xunlock(&mqfs->mi_lock);
1541 return (ENOTEMPTY);
1542 }
1543 pt = pn->mn_parent;
1544 pn->mn_parent = NULL;
1545 pn->mn_deleted = 1;
1546 LIST_REMOVE(pn, mn_sibling);
1547 mqnode_release(pn);
1548 mqnode_release(pt);
1549 sx_xunlock(&mqfs->mi_lock);
1550 cache_purge(ap->a_vp);
1551 return (0);
1552 }
1553
1554 #endif /* notyet */
1555
1556 /*
1557 * See if this prison root is obsolete, and clean up associated queues if it is.
1558 */
1559 static int
1560 mqfs_prison_remove(void *obj, void *data __unused)
1561 {
1562 const struct prison *pr = obj;
1563 struct prison *tpr;
1564 struct mqfs_node *pn, *tpn;
1565 struct vnode *pr_root;
1566
1567 pr_root = pr->pr_root;
1568 if (pr->pr_parent->pr_root == pr_root)
1569 return (0);
1570 TAILQ_FOREACH(tpr, &allprison, pr_list) {
1571 if (tpr != pr && tpr->pr_root == pr_root)
1572 return (0);
1573 }
1574 /*
1575 * No jails are rooted in this directory anymore,
1576 * so no queues should be either.
1577 */
1578 sx_xlock(&mqfs_data.mi_lock);
1579 LIST_FOREACH_SAFE(pn, &mqfs_data.mi_root->mn_children,
1580 mn_sibling, tpn) {
1581 if (pn->mn_pr_root == pr_root)
1582 (void)do_unlink(pn, curthread->td_ucred);
1583 }
1584 sx_xunlock(&mqfs_data.mi_lock);
1585 return (0);
1586 }
1587
1588 /*
1589 * Allocate a message queue
1590 */
1591 static struct mqueue *
1592 mqueue_alloc(const struct mq_attr *attr)
1593 {
1594 struct mqueue *mq;
1595
1596 if (curmq >= maxmq)
1597 return (NULL);
1598 mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO);
1599 TAILQ_INIT(&mq->mq_msgq);
1600 if (attr != NULL) {
1601 mq->mq_maxmsg = attr->mq_maxmsg;
1602 mq->mq_msgsize = attr->mq_msgsize;
1603 } else {
1604 mq->mq_maxmsg = default_maxmsg;
1605 mq->mq_msgsize = default_msgsize;
1606 }
1607 mtx_init(&mq->mq_mutex, "mqueue lock", NULL, MTX_DEF);
1608 knlist_init_mtx(&mq->mq_rsel.si_note, &mq->mq_mutex);
1609 knlist_init_mtx(&mq->mq_wsel.si_note, &mq->mq_mutex);
1610 atomic_add_int(&curmq, 1);
1611 return (mq);
1612 }
1613
1614 /*
1615 * Destroy a message queue
1616 */
1617 static void
1618 mqueue_free(struct mqueue *mq)
1619 {
1620 struct mqueue_msg *msg;
1621
1622 while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) {
1623 TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link);
1624 free(msg, M_MQUEUEDATA);
1625 }
1626
1627 mtx_destroy(&mq->mq_mutex);
1628 seldrain(&mq->mq_rsel);
1629 seldrain(&mq->mq_wsel);
1630 knlist_destroy(&mq->mq_rsel.si_note);
1631 knlist_destroy(&mq->mq_wsel.si_note);
1632 uma_zfree(mqueue_zone, mq);
1633 atomic_add_int(&curmq, -1);
1634 }
1635
1636 /*
1637 * Load a message from user space
1638 */
1639 static struct mqueue_msg *
1640 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio)
1641 {
1642 struct mqueue_msg *msg;
1643 size_t len;
1644 int error;
1645
1646 len = sizeof(struct mqueue_msg) + msg_size;
1647 msg = malloc(len, M_MQUEUEDATA, M_WAITOK);
1648 error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg),
1649 msg_size);
1650 if (error) {
1651 free(msg, M_MQUEUEDATA);
1652 msg = NULL;
1653 } else {
1654 msg->msg_size = msg_size;
1655 msg->msg_prio = msg_prio;
1656 }
1657 return (msg);
1658 }
1659
1660 /*
1661 * Save a message to user space
1662 */
1663 static int
1664 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio)
1665 {
1666 int error;
1667
1668 error = copyout(((char *)msg) + sizeof(*msg), msg_ptr,
1669 msg->msg_size);
1670 if (error == 0 && msg_prio != NULL)
1671 error = copyout(&msg->msg_prio, msg_prio, sizeof(int));
1672 return (error);
1673 }
1674
1675 /*
1676 * Free a message's memory
1677 */
1678 static __inline void
1679 mqueue_freemsg(struct mqueue_msg *msg)
1680 {
1681 free(msg, M_MQUEUEDATA);
1682 }
1683
1684 /*
1685 * Send a message. If waitok is false, the thread will not be
1686 * blocked when the queue is full; otherwise the absolute
1687 * timeout is honored.
1688 */
1689 int
1690 mqueue_send(struct mqueue *mq, const char *msg_ptr,
1691 size_t msg_len, unsigned msg_prio, int waitok,
1692 const struct timespec *abs_timeout)
1693 {
1694 struct mqueue_msg *msg;
1695 struct timespec ts, ts2;
1696 struct timeval tv;
1697 int error;
1698
1699 if (msg_prio >= MQ_PRIO_MAX)
1700 return (EINVAL);
1701 if (msg_len > mq->mq_msgsize)
1702 return (EMSGSIZE);
1703 msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio);
1704 if (msg == NULL)
1705 return (EFAULT);
1706
1707 /* O_NONBLOCK case */
1708 if (!waitok) {
1709 error = _mqueue_send(mq, msg, -1);
1710 if (error)
1711 goto bad;
1712 return (0);
1713 }
1714
1715 /* we allow a null timeout (wait forever) */
1716 if (abs_timeout == NULL) {
1717 error = _mqueue_send(mq, msg, 0);
1718 if (error)
1719 goto bad;
1720 return (0);
1721 }
1722
1723 /* try to send once before checking the timeout */
1724 error = _mqueue_send(mq, msg, -1);
1725 if (error == 0)
1726 return (0);
1727
1728 if (error != EAGAIN)
1729 goto bad;
1730
1731 if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
1732 error = EINVAL;
1733 goto bad;
1734 }
1735 for (;;) {
1736 getnanotime(&ts);
1737 timespecsub(abs_timeout, &ts, &ts2);
1738 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1739 error = ETIMEDOUT;
1740 break;
1741 }
1742 TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1743 error = _mqueue_send(mq, msg, tvtohz(&tv));
1744 if (error != ETIMEDOUT)
1745 break;
1746 }
1747 if (error == 0)
1748 return (0);
1749 bad:
1750 mqueue_freemsg(msg);
1751 return (error);
1752 }
1753
1754 /*
1755 * Common routine to send a message
1756 */
1757 static int
1758 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
1759 {
1760 struct mqueue_msg *msg2;
1761 int error = 0;
1762
1763 mtx_lock(&mq->mq_mutex);
1764 while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) {
1765 if (timo < 0) {
1766 mtx_unlock(&mq->mq_mutex);
1767 return (EAGAIN);
1768 }
1769 mq->mq_senders++;
1770 error = msleep(&mq->mq_senders, &mq->mq_mutex,
1771 PCATCH, "mqsend", timo);
1772 mq->mq_senders--;
1773 if (error == EAGAIN)
1774 error = ETIMEDOUT;
1775 }
1776 if (mq->mq_curmsgs >= mq->mq_maxmsg) {
1777 mtx_unlock(&mq->mq_mutex);
1778 return (error);
1779 }
1780 error = 0;
1781 if (TAILQ_EMPTY(&mq->mq_msgq)) {
1782 TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link);
1783 } else {
1784 if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) {
1785 TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link);
1786 } else {
1787 TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) {
1788 if (msg2->msg_prio < msg->msg_prio)
1789 break;
1790 }
1791 TAILQ_INSERT_BEFORE(msg2, msg, msg_link);
1792 }
1793 }
1794 mq->mq_curmsgs++;
1795 mq->mq_totalbytes += msg->msg_size;
1796 if (mq->mq_receivers)
1797 wakeup_one(&mq->mq_receivers);
1798 else if (mq->mq_notifier != NULL)
1799 mqueue_send_notification(mq);
1800 if (mq->mq_flags & MQ_RSEL) {
1801 mq->mq_flags &= ~MQ_RSEL;
1802 selwakeup(&mq->mq_rsel);
1803 }
1804 KNOTE_LOCKED(&mq->mq_rsel.si_note, 0);
1805 mtx_unlock(&mq->mq_mutex);
1806 return (0);
1807 }
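/*
 * Worked example of the ordering maintained above: the queue is kept in
 * descending priority order, FIFO among messages of equal priority.
 * Sending messages with priorities 1, 5, 5, 3 (in that order) leaves
 * mq_msgq as 5 (sent first), 5 (sent second), 3, 1, which is the order
 * in which _mqueue_recv() will return them.
 */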
1808
1809 /*
1810 * Send a realtime signal to the process that successfully
1811 * registered itself via mq_notify.
1812 */
1813 static void
1814 mqueue_send_notification(struct mqueue *mq)
1815 {
1816 struct mqueue_notifier *nt;
1817 struct thread *td;
1818 struct proc *p;
1819 int error;
1820
1821 mtx_assert(&mq->mq_mutex, MA_OWNED);
1822 nt = mq->mq_notifier;
1823 if (nt->nt_sigev.sigev_notify != SIGEV_NONE) {
1824 p = nt->nt_proc;
1825 error = sigev_findtd(p, &nt->nt_sigev, &td);
1826 if (error) {
1827 mq->mq_notifier = NULL;
1828 return;
1829 }
1830 if (!KSI_ONQ(&nt->nt_ksi)) {
1831 ksiginfo_set_sigev(&nt->nt_ksi, &nt->nt_sigev);
1832 tdsendsignal(p, td, nt->nt_ksi.ksi_signo, &nt->nt_ksi);
1833 }
1834 PROC_UNLOCK(p);
1835 }
1836 mq->mq_notifier = NULL;
1837 }
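/*
 * Illustrative userspace counterpart (a sketch): a process registers for
 * a signal that the code above delivers when a message arrives on an
 * empty queue with no blocked receivers; the registration is one-shot
 * and must be re-armed after each notification.
 *
 *	struct sigevent sev;
 *	memset(&sev, 0, sizeof(sev));
 *	sev.sigev_notify = SIGEV_SIGNAL;
 *	sev.sigev_signo = SIGUSR1;
 *	mq_notify(mqd, &sev);
 */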
1838
1839 /*
1840 * Receive a message. If waitok is false, the thread will not be
1841 * blocked when the queue is empty; otherwise the absolute
1842 * timeout is honored.
1843 */
1844 int
1845 mqueue_receive(struct mqueue *mq, char *msg_ptr,
1846 size_t msg_len, unsigned *msg_prio, int waitok,
1847 const struct timespec *abs_timeout)
1848 {
1849 struct mqueue_msg *msg;
1850 struct timespec ts, ts2;
1851 struct timeval tv;
1852 int error;
1853
1854 if (msg_len < mq->mq_msgsize)
1855 return (EMSGSIZE);
1856
1857 /* O_NONBLOCK case */
1858 if (!waitok) {
1859 error = _mqueue_recv(mq, &msg, -1);
1860 if (error)
1861 return (error);
1862 goto received;
1863 }
1864
1865 /* we allow a null timeout (wait forever). */
1866 if (abs_timeout == NULL) {
1867 error = _mqueue_recv(mq, &msg, 0);
1868 if (error)
1869 return (error);
1870 goto received;
1871 }
1872
1873 /* try to get a message before checking time */
1874 error = _mqueue_recv(mq, &msg, -1);
1875 if (error == 0)
1876 goto received;
1877
1878 if (error != EAGAIN)
1879 return (error);
1880
1881 if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
1882 error = EINVAL;
1883 return (error);
1884 }
1885
1886 for (;;) {
1887 getnanotime(&ts);
1888 timespecsub(abs_timeout, &ts, &ts2);
1889 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1890 error = ETIMEDOUT;
1891 return (error);
1892 }
1893 TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1894 error = _mqueue_recv(mq, &msg, tvtohz(&tv));
1895 if (error == 0)
1896 break;
1897 if (error != ETIMEDOUT)
1898 return (error);
1899 }
1900
1901 received:
1902 error = mqueue_savemsg(msg, msg_ptr, msg_prio);
1903 if (error == 0) {
1904 curthread->td_retval[0] = msg->msg_size;
1905 curthread->td_retval[1] = 0;
1906 }
1907 mqueue_freemsg(msg);
1908 return (error);
1909 }
1910
1911 /*
1912 * Common routine to receive a message
1913 */
1914 static int
1915 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo)
1916 {
1917 int error = 0;
1918
1919 mtx_lock(&mq->mq_mutex);
1920 while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) {
1921 if (timo < 0) {
1922 mtx_unlock(&mq->mq_mutex);
1923 return (EAGAIN);
1924 }
1925 mq->mq_receivers++;
1926 error = msleep(&mq->mq_receivers, &mq->mq_mutex,
1927 PCATCH, "mqrecv", timo);
1928 mq->mq_receivers--;
1929 if (error == EAGAIN)
1930 error = ETIMEDOUT;
1931 }
1932 if (*msg != NULL) {
1933 error = 0;
1934 TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link);
1935 mq->mq_curmsgs--;
1936 mq->mq_totalbytes -= (*msg)->msg_size;
1937 if (mq->mq_senders)
1938 wakeup_one(&mq->mq_senders);
1939 if (mq->mq_flags & MQ_WSEL) {
1940 mq->mq_flags &= ~MQ_WSEL;
1941 selwakeup(&mq->mq_wsel);
1942 }
1943 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0);
1944 }
1945 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 &&
1946 !TAILQ_EMPTY(&mq->mq_msgq)) {
1947 mqueue_send_notification(mq);
1948 }
1949 mtx_unlock(&mq->mq_mutex);
1950 return (error);
1951 }
1952
1953 static __inline struct mqueue_notifier *
1954 notifier_alloc(void)
1955 {
1956 return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO));
1957 }
1958
1959 static __inline void
1960 notifier_free(struct mqueue_notifier *p)
1961 {
1962 uma_zfree(mqnoti_zone, p);
1963 }
1964
1965 static struct mqueue_notifier *
1966 notifier_search(struct proc *p, int fd)
1967 {
1968 struct mqueue_notifier *nt;
1969
1970 LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) {
1971 if (nt->nt_ksi.ksi_mqd == fd)
1972 break;
1973 }
1974 return (nt);
1975 }
1976
1977 static __inline void
1978 notifier_insert(struct proc *p, struct mqueue_notifier *nt)
1979 {
1980 LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link);
1981 }
1982
1983 static __inline void
1984 notifier_delete(struct proc *p, struct mqueue_notifier *nt)
1985 {
1986 LIST_REMOVE(nt, nt_link);
1987 notifier_free(nt);
1988 }
1989
1990 static void
1991 notifier_remove(struct proc *p, struct mqueue *mq, int fd)
1992 {
1993 struct mqueue_notifier *nt;
1994
1995 mtx_assert(&mq->mq_mutex, MA_OWNED);
1996 PROC_LOCK(p);
1997 nt = notifier_search(p, fd);
1998 if (nt != NULL) {
1999 if (mq->mq_notifier == nt)
2000 mq->mq_notifier = NULL;
2001 sigqueue_take(&nt->nt_ksi);
2002 notifier_delete(p, nt);
2003 }
2004 PROC_UNLOCK(p);
2005 }
2006
2007 int
2008 kern_kmq_open(struct thread *td, const char *upath, int flags, mode_t mode,
2009 const struct mq_attr *attr)
2010 {
2011 char *path, pathbuf[MQFS_NAMELEN + 1];
2012 struct mqfs_node *pn;
2013 struct pwddesc *pdp;
2014 struct file *fp;
2015 struct mqueue *mq;
2016 int fd, error, len, cmode;
2017
2018 AUDIT_ARG_FFLAGS(flags);
2019 AUDIT_ARG_MODE(mode);
2020
2021 pdp = td->td_proc->p_pd;
2022 cmode = ((mode & ~pdp->pd_cmask) & ALLPERMS) & ~S_ISTXT;
2023 mq = NULL;
2024 if ((flags & O_CREAT) != 0 && attr != NULL) {
2025 if (attr->mq_maxmsg <= 0 || attr->mq_maxmsg > maxmsg)
2026 return (EINVAL);
2027 if (attr->mq_msgsize <= 0 || attr->mq_msgsize > maxmsgsize)
2028 return (EINVAL);
2029 }
2030
2031 path = pathbuf;
2032 error = copyinstr(upath, path, MQFS_NAMELEN + 1, NULL);
2033 if (error)
2034 return (error);
2035
2036 /*
2037 * The first character of name may be a slash (/) character
2038 * and the remaining characters of name cannot include any slash
2039 * characters.
2040 */
2041 len = strlen(path);
2042 if (len < 2 || strchr(path + 1, '/') != NULL)
2043 return (EINVAL);
2044 if (path[0] == '/') {
2045 path++;
2046 len--;
2047 }
2048 /*
2049 * "." and ".." are magic directories, populated on the fly, and cannot
2050 * be opened as queues.
2051 */
2052 if (strcmp(path, ".") == 0 || strcmp(path, "..") == 0)
2053 return (EINVAL);
2054 AUDIT_ARG_UPATH1_CANON(pathbuf);
2055
2056 error = falloc(td, &fp, &fd, O_CLOEXEC);
2057 if (error)
2058 return (error);
2059
2060 sx_xlock(&mqfs_data.mi_lock);
2061 pn = mqfs_search(mqfs_data.mi_root, path, len, td->td_ucred);
2062 if (pn == NULL) {
2063 if (!(flags & O_CREAT)) {
2064 error = ENOENT;
2065 } else {
2066 mq = mqueue_alloc(attr);
2067 if (mq == NULL) {
2068 error = ENFILE;
2069 } else {
2070 pn = mqfs_create_file(mqfs_data.mi_root,
2071 path, len, td->td_ucred,
2072 cmode);
2073 if (pn == NULL) {
2074 error = ENOSPC;
2075 mqueue_free(mq);
2076 }
2077 }
2078 }
2079
2080 if (error == 0) {
2081 pn->mn_data = mq;
2082 }
2083 } else {
2084 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) {
2085 error = EEXIST;
2086 } else {
2087 accmode_t accmode = 0;
2088
2089 if (flags & FREAD)
2090 accmode |= VREAD;
2091 if (flags & FWRITE)
2092 accmode |= VWRITE;
2093 error = vaccess(VREG, pn->mn_mode, pn->mn_uid,
2094 pn->mn_gid, accmode, td->td_ucred);
2095 }
2096 }
2097
2098 if (error) {
2099 sx_xunlock(&mqfs_data.mi_lock);
2100 fdclose(td, fp, fd);
2101 fdrop(fp, td);
2102 return (error);
2103 }
2104
2105 mqnode_addref(pn);
2106 sx_xunlock(&mqfs_data.mi_lock);
2107
2108 finit(fp, flags & (FREAD | FWRITE | O_NONBLOCK), DTYPE_MQUEUE, pn,
2109 &mqueueops);
2110
2111 td->td_retval[0] = fd;
2112 fdrop(fp, td);
2113 return (0);
2114 }
2115
2116 /*
2117 * Syscall to open a message queue.
2118 */
2119 int
2120 sys_kmq_open(struct thread *td, struct kmq_open_args *uap)
2121 {
2122 struct mq_attr attr;
2123 int flags, error;
2124
2125 if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
2126 return (EINVAL);
2127 flags = FFLAGS(uap->flags);
2128 if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
2129 error = copyin(uap->attr, &attr, sizeof(attr));
2130 if (error)
2131 return (error);
2132 }
2133 return (kern_kmq_open(td, uap->path, flags, uap->mode,
2134 uap->attr != NULL ? &attr : NULL));
2135 }
2136
2137 /*
2138 * Syscall to unlink a message queue.
2139 */
2140 int
2141 sys_kmq_unlink(struct thread *td, struct kmq_unlink_args *uap)
2142 {
2143 char *path, pathbuf[MQFS_NAMELEN + 1];
2144 struct mqfs_node *pn;
2145 int error, len;
2146
2147 path = pathbuf;
2148 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL);
2149 if (error)
2150 return (error);
2151
2152 len = strlen(path);
2153 if (len < 2 || strchr(path + 1, '/') != NULL)
2154 return (EINVAL);
2155 if (path[0] == '/') {
2156 path++;
2157 len--;
2158 }
2159 if (strcmp(path, ".") == 0 || strcmp(path, "..") == 0)
2160 return (EINVAL);
2161 AUDIT_ARG_UPATH1_CANON(pathbuf);
2162
2163 sx_xlock(&mqfs_data.mi_lock);
2164 pn = mqfs_search(mqfs_data.mi_root, path, len, td->td_ucred);
2165 if (pn != NULL)
2166 error = do_unlink(pn, td->td_ucred);
2167 else
2168 error = ENOENT;
2169 sx_xunlock(&mqfs_data.mi_lock);
2170 return (error);
2171 }
2172
2173 typedef int (*_fgetf)(struct thread *, int, const cap_rights_t *,
2174 struct file **);
2175
2176 /*
2177 * Get the message queue referenced by a file descriptor.
2178 */
2179 static int
2180 _getmq(struct thread *td, int fd, const cap_rights_t *rightsp, _fgetf func,
2181 struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq)
2182 {
2183 struct mqfs_node *pn;
2184 int error;
2185
2186 error = func(td, fd, rightsp, fpp);
2187 if (error)
2188 return (error);
2189 if (&mqueueops != (*fpp)->f_ops) {
2190 fdrop(*fpp, td);
2191 return (EBADF);
2192 }
2193 pn = (*fpp)->f_data;
2194 if (ppn)
2195 *ppn = pn;
2196 if (pmq)
2197 *pmq = pn->mn_data;
2198 return (0);
2199 }
2200
2201 static __inline int
2202 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn,
2203 struct mqueue **pmq)
2204 {
2205
2206 return _getmq(td, fd, &cap_event_rights, fget,
2207 fpp, ppn, pmq);
2208 }
2209
2210 static __inline int
2211 getmq_read(struct thread *td, int fd, struct file **fpp,
2212 struct mqfs_node **ppn, struct mqueue **pmq)
2213 {
2214
2215 return _getmq(td, fd, &cap_read_rights, fget_read,
2216 fpp, ppn, pmq);
2217 }
2218
2219 static __inline int
2220 getmq_write(struct thread *td, int fd, struct file **fpp,
2221 struct mqfs_node **ppn, struct mqueue **pmq)
2222 {
2223
2224 return _getmq(td, fd, &cap_write_rights, fget_write,
2225 fpp, ppn, pmq);
2226 }
2227
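/*
 * Common code for kmq_setattr(2): return the current queue attributes in
 * *oattr and, if attr is non-NULL, update the O_NONBLOCK flag on the
 * descriptor.
 */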
2228 int
2229 kern_kmq_setattr(struct thread *td, int mqd, const struct mq_attr *attr,
2230 struct mq_attr *oattr)
2231 {
2232 struct mqueue *mq;
2233 struct file *fp;
2234 u_int oflag, flag;
2235 int error;
2236
2237 AUDIT_ARG_FD(mqd);
2238 if (attr != NULL && (attr->mq_flags & ~O_NONBLOCK) != 0)
2239 return (EINVAL);
2240 error = getmq(td, mqd, &fp, NULL, &mq);
2241 if (error)
2242 return (error);
2243 oattr->mq_maxmsg = mq->mq_maxmsg;
2244 oattr->mq_msgsize = mq->mq_msgsize;
2245 oattr->mq_curmsgs = mq->mq_curmsgs;
2246 if (attr != NULL) {
2247 do {
2248 oflag = flag = fp->f_flag;
2249 flag &= ~O_NONBLOCK;
2250 flag |= (attr->mq_flags & O_NONBLOCK);
2251 } while (atomic_cmpset_int(&fp->f_flag, oflag, flag) == 0);
2252 } else
2253 oflag = fp->f_flag;
2254 oattr->mq_flags = (O_NONBLOCK & oflag);
2255 fdrop(fp, td);
2256 return (error);
2257 }
2258
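/*
 * Syscall to get and set message queue attributes.
 */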
2259 int
2260 sys_kmq_setattr(struct thread *td, struct kmq_setattr_args *uap)
2261 {
2262 struct mq_attr attr, oattr;
2263 int error;
2264
2265 if (uap->attr != NULL) {
2266 error = copyin(uap->attr, &attr, sizeof(attr));
2267 if (error != 0)
2268 return (error);
2269 }
2270 error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
2271 &oattr);
2272 if (error == 0 && uap->oattr != NULL) {
2273 bzero(oattr.__reserved, sizeof(oattr.__reserved));
2274 error = copyout(&oattr, uap->oattr, sizeof(oattr));
2275 }
2276 return (error);
2277 }
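/*
 * Common code for kmq_timedreceive(2): resolve the descriptor with read
 * rights and receive a message, blocking unless O_NONBLOCK is set.
 */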
2278
2279 int
2280 kern_kmq_timedreceive(struct thread *td, int mqd, char *msg_ptr,
2281 size_t msg_len, unsigned int *msg_prio, const struct timespec *abs_timeout)
2282 {
2283 struct mqueue *mq;
2284 struct file *fp;
2285 int error, waitok;
2286
2287 AUDIT_ARG_FD(mqd);
2288 error = getmq_read(td, mqd, &fp, NULL, &mq);
2289 if (error != 0)
2290 return (error);
2291 waitok = (fp->f_flag & O_NONBLOCK) == 0;
2292 error = mqueue_receive(mq, msg_ptr, msg_len, msg_prio, waitok,
2293 abs_timeout);
2294 fdrop(fp, td);
2295 return (error);
2296 }
2297
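/*
 * Syscall to receive a message from a message queue.
 */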
2298 int
2299 sys_kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap)
2300 {
2301 struct timespec *abs_timeout, ets;
2302 int error;
2303
2304 if (uap->abs_timeout != NULL) {
2305 error = copyin(uap->abs_timeout, &ets, sizeof(ets));
2306 if (error != 0)
2307 return (error);
2308 abs_timeout = &ets;
2309 } else
2310 abs_timeout = NULL;
2311
2312 return (kern_kmq_timedreceive(td, uap->mqd, uap->msg_ptr, uap->msg_len,
2313 uap->msg_prio, abs_timeout));
2314 }
2315
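/*
 * Common code for kmq_timedsend(2): resolve the descriptor with write
 * rights and send a message, blocking unless O_NONBLOCK is set.
 */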
2316 int
2317 kern_kmq_timedsend(struct thread *td, int mqd, const char *msg_ptr,
2318 size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout)
2319 {
2320 struct mqueue *mq;
2321 struct file *fp;
2322 int error, waitok;
2323
2324 AUDIT_ARG_FD(mqd);
2325 error = getmq_write(td, mqd, &fp, NULL, &mq);
2326 if (error != 0)
2327 return (error);
2328 waitok = (fp->f_flag & O_NONBLOCK) == 0;
2329 error = mqueue_send(mq, msg_ptr, msg_len, msg_prio, waitok,
2330 abs_timeout);
2331 fdrop(fp, td);
2332 return (error);
2333 }
2334
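/*
 * Syscall to send a message to a message queue.
 */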
2335 int
2336 sys_kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap)
2337 {
2338 struct timespec *abs_timeout, ets;
2339 int error;
2340
2341 if (uap->abs_timeout != NULL) {
2342 error = copyin(uap->abs_timeout, &ets, sizeof(ets));
2343 if (error != 0)
2344 return (error);
2345 abs_timeout = &ets;
2346 } else
2347 abs_timeout = NULL;
2348
2349 return (kern_kmq_timedsend(td, uap->mqd, uap->msg_ptr, uap->msg_len,
2350 uap->msg_prio, abs_timeout));
2351 }
2352
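/*
 * Common code for kmq_notify(2): register asynchronous notification for the
 * queue, or remove an existing registration when sigev is NULL.
 */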
2353 int
2354 kern_kmq_notify(struct thread *td, int mqd, struct sigevent *sigev)
2355 {
2356 struct filedesc *fdp;
2357 struct proc *p;
2358 struct mqueue *mq;
2359 struct file *fp, *fp2;
2360 struct mqueue_notifier *nt, *newnt = NULL;
2361 int error;
2362
2363 AUDIT_ARG_FD(mqd);
2364 if (sigev != NULL) {
2365 if (sigev->sigev_notify != SIGEV_SIGNAL &&
2366 sigev->sigev_notify != SIGEV_THREAD_ID &&
2367 sigev->sigev_notify != SIGEV_NONE)
2368 return (EINVAL);
2369 if ((sigev->sigev_notify == SIGEV_SIGNAL ||
2370 sigev->sigev_notify == SIGEV_THREAD_ID) &&
2371 !_SIG_VALID(sigev->sigev_signo))
2372 return (EINVAL);
2373 }
2374 p = td->td_proc;
2375 fdp = td->td_proc->p_fd;
2376 error = getmq(td, mqd, &fp, NULL, &mq);
2377 if (error)
2378 return (error);
2379 again:
2380 FILEDESC_SLOCK(fdp);
2381 fp2 = fget_noref(fdp, mqd);
2382 if (fp2 == NULL) {
2383 FILEDESC_SUNLOCK(fdp);
2384 error = EBADF;
2385 goto out;
2386 }
2387 #ifdef CAPABILITIES
2388 error = cap_check(cap_rights(fdp, mqd), &cap_event_rights);
2389 if (error) {
2390 FILEDESC_SUNLOCK(fdp);
2391 goto out;
2392 }
2393 #endif
2394 if (fp2 != fp) {
2395 FILEDESC_SUNLOCK(fdp);
2396 error = EBADF;
2397 goto out;
2398 }
2399 mtx_lock(&mq->mq_mutex);
2400 FILEDESC_SUNLOCK(fdp);
2401 if (sigev != NULL) {
2402 if (mq->mq_notifier != NULL) {
2403 error = EBUSY;
2404 } else {
2405 PROC_LOCK(p);
2406 nt = notifier_search(p, mqd);
2407 if (nt == NULL) {
2408 if (newnt == NULL) {
2409 PROC_UNLOCK(p);
2410 mtx_unlock(&mq->mq_mutex);
2411 newnt = notifier_alloc();
2412 goto again;
2413 }
2414 }
2415
2416 if (nt != NULL) {
2417 sigqueue_take(&nt->nt_ksi);
2418 if (newnt != NULL) {
2419 notifier_free(newnt);
2420 newnt = NULL;
2421 }
2422 } else {
2423 nt = newnt;
2424 newnt = NULL;
2425 ksiginfo_init(&nt->nt_ksi);
2426 nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT;
2427 nt->nt_ksi.ksi_code = SI_MESGQ;
2428 nt->nt_proc = p;
2429 nt->nt_ksi.ksi_mqd = mqd;
2430 notifier_insert(p, nt);
2431 }
2432 nt->nt_sigev = *sigev;
2433 mq->mq_notifier = nt;
2434 PROC_UNLOCK(p);
2435 /*
2436 * If there are no receivers and the message
2437 * queue is not empty, send the notification
2438 * as soon as possible.
2439 */
2440 if (mq->mq_receivers == 0 &&
2441 !TAILQ_EMPTY(&mq->mq_msgq))
2442 mqueue_send_notification(mq);
2443 }
2444 } else {
2445 notifier_remove(p, mq, mqd);
2446 }
2447 mtx_unlock(&mq->mq_mutex);
2448
2449 out:
2450 fdrop(fp, td);
2451 if (newnt != NULL)
2452 notifier_free(newnt);
2453 return (error);
2454 }
2455
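/*
 * Syscall to register or remove asynchronous notification for a message
 * queue.
 */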
2456 int
2457 sys_kmq_notify(struct thread *td, struct kmq_notify_args *uap)
2458 {
2459 struct sigevent ev, *evp;
2460 int error;
2461
2462 if (uap->sigev == NULL) {
2463 evp = NULL;
2464 } else {
2465 error = copyin(uap->sigev, &ev, sizeof(ev));
2466 if (error != 0)
2467 return (error);
2468 evp = &ev;
2469 }
2470 return (kern_kmq_notify(td, uap->mqd, evp));
2471 }
2472
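/*
 * Remove any notifiers the exiting process registered on its message queue
 * descriptors; by the time this runs, none may be left behind.
 */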
2473 static void
2474 mq_proc_exit(void *arg __unused, struct proc *p)
2475 {
2476 struct filedesc *fdp;
2477 struct file *fp;
2478 struct mqueue *mq;
2479 int i;
2480
2481 fdp = p->p_fd;
2482 FILEDESC_SLOCK(fdp);
2483 for (i = 0; i < fdp->fd_nfiles; ++i) {
2484 fp = fget_noref(fdp, i);
2485 if (fp != NULL && fp->f_ops == &mqueueops) {
2486 mq = FPTOMQ(fp);
2487 mtx_lock(&mq->mq_mutex);
2488 notifier_remove(p, FPTOMQ(fp), i);
2489 mtx_unlock(&mq->mq_mutex);
2490 }
2491 }
2492 FILEDESC_SUNLOCK(fdp);
2493 KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left"));
2494 }
2495
2496 static int
2497 mqf_poll(struct file *fp, int events, struct ucred *active_cred,
2498 struct thread *td)
2499 {
2500 struct mqueue *mq = FPTOMQ(fp);
2501 int revents = 0;
2502
2503 mtx_lock(&mq->mq_mutex);
2504 if (events & (POLLIN | POLLRDNORM)) {
2505 if (mq->mq_curmsgs) {
2506 revents |= events & (POLLIN | POLLRDNORM);
2507 } else {
2508 mq->mq_flags |= MQ_RSEL;
2509 selrecord(td, &mq->mq_rsel);
2510 }
2511 }
2512 if (events & POLLOUT) {
2513 if (mq->mq_curmsgs < mq->mq_maxmsg)
2514 revents |= POLLOUT;
2515 else {
2516 mq->mq_flags |= MQ_WSEL;
2517 selrecord(td, &mq->mq_wsel);
2518 }
2519 }
2520 mtx_unlock(&mq->mq_mutex);
2521 return (revents);
2522 }
2523
2524 static int
2525 mqf_close(struct file *fp, struct thread *td)
2526 {
2527 struct mqfs_node *pn;
2528
2529 fp->f_ops = &badfileops;
2530 pn = fp->f_data;
2531 fp->f_data = NULL;
2532 sx_xlock(&mqfs_data.mi_lock);
2533 mqnode_release(pn);
2534 sx_xunlock(&mqfs_data.mi_lock);
2535 return (0);
2536 }
2537
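/*
 * fo_fdclose method: called with the filedesc lock held when a descriptor
 * referencing the queue is closed; drop the notifier registered through it
 * and wake up any selecting threads.
 */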
2538 static void
2539 mqf_fdclose(struct file *fp, int fd, struct thread *td)
2540 {
2541 struct mqueue *mq;
2542 #ifdef INVARIANTS
2543 struct filedesc *fdp;
2544
2545 fdp = td->td_proc->p_fd;
2546 FILEDESC_LOCK_ASSERT(fdp);
2547 #endif
2548
2549 mq = FPTOMQ(fp);
2550 mtx_lock(&mq->mq_mutex);
2551 notifier_remove(td->td_proc, mq, fd);
2552
2553 /* Have to wake up threads in the same process. */
2554 if (mq->mq_flags & MQ_RSEL) {
2555 mq->mq_flags &= ~MQ_RSEL;
2556 selwakeup(&mq->mq_rsel);
2557 }
2558 if (mq->mq_flags & MQ_WSEL) {
2559 mq->mq_flags &= ~MQ_WSEL;
2560 selwakeup(&mq->mq_wsel);
2561 }
2562 mtx_unlock(&mq->mq_mutex);
2563 }
2564
2565 static int
2566 mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred)
2567 {
2568 struct mqfs_node *pn = fp->f_data;
2569
2570 bzero(st, sizeof *st);
2571 sx_xlock(&mqfs_data.mi_lock);
2572 st->st_atim = pn->mn_atime;
2573 st->st_mtim = pn->mn_mtime;
2574 st->st_ctim = pn->mn_ctime;
2575 st->st_birthtim = pn->mn_birth;
2576 st->st_uid = pn->mn_uid;
2577 st->st_gid = pn->mn_gid;
2578 st->st_mode = S_IFIFO | pn->mn_mode;
2579 sx_xunlock(&mqfs_data.mi_lock);
2580 return (0);
2581 }
2582
2583 static int
2584 mqf_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
2585 struct thread *td)
2586 {
2587 struct mqfs_node *pn;
2588 int error;
2589
2590 error = 0;
2591 pn = fp->f_data;
2592 sx_xlock(&mqfs_data.mi_lock);
2593 error = vaccess(VREG, pn->mn_mode, pn->mn_uid, pn->mn_gid, VADMIN,
2594 active_cred);
2595 if (error != 0)
2596 goto out;
2597 pn->mn_mode = mode & ACCESSPERMS;
2598 out:
2599 sx_xunlock(&mqfs_data.mi_lock);
2600 return (error);
2601 }
2602
2603 static int
2604 mqf_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
2605 struct thread *td)
2606 {
2607 struct mqfs_node *pn;
2608 int error;
2609
2610 error = 0;
2611 pn = fp->f_data;
2612 sx_xlock(&mqfs_data.mi_lock);
2613 if (uid == (uid_t)-1)
2614 uid = pn->mn_uid;
2615 if (gid == (gid_t)-1)
2616 gid = pn->mn_gid;
2617 if (((uid != pn->mn_uid && uid != active_cred->cr_uid) ||
2618 (gid != pn->mn_gid && !groupmember(gid, active_cred))) &&
2619 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
2620 goto out;
2621 pn->mn_uid = uid;
2622 pn->mn_gid = gid;
2623 out:
2624 sx_xunlock(&mqfs_data.mi_lock);
2625 return (error);
2626 }
2627
2628 static int
2629 mqf_kqfilter(struct file *fp, struct knote *kn)
2630 {
2631 struct mqueue *mq = FPTOMQ(fp);
2632 int error = 0;
2633
2634 if (kn->kn_filter == EVFILT_READ) {
2635 kn->kn_fop = &mq_rfiltops;
2636 knlist_add(&mq->mq_rsel.si_note, kn, 0);
2637 } else if (kn->kn_filter == EVFILT_WRITE) {
2638 kn->kn_fop = &mq_wfiltops;
2639 knlist_add(&mq->mq_wsel.si_note, kn, 0);
2640 } else
2641 error = EINVAL;
2642 return (error);
2643 }
2644
2645 static void
2646 filt_mqdetach(struct knote *kn)
2647 {
2648 struct mqueue *mq = FPTOMQ(kn->kn_fp);
2649
2650 if (kn->kn_filter == EVFILT_READ)
2651 knlist_remove(&mq->mq_rsel.si_note, kn, 0);
2652 else if (kn->kn_filter == EVFILT_WRITE)
2653 knlist_remove(&mq->mq_wsel.si_note, kn, 0);
2654 else
2655 panic("filt_mqdetach");
2656 }
2657
2658 static int
2659 filt_mqread(struct knote *kn, long hint)
2660 {
2661 struct mqueue *mq = FPTOMQ(kn->kn_fp);
2662
2663 mtx_assert(&mq->mq_mutex, MA_OWNED);
2664 return (mq->mq_curmsgs != 0);
2665 }
2666
2667 static int
2668 filt_mqwrite(struct knote *kn, long hint)
2669 {
2670 struct mqueue *mq = FPTOMQ(kn->kn_fp);
2671
2672 mtx_assert(&mq->mq_mutex, MA_OWNED);
2673 return (mq->mq_curmsgs < mq->mq_maxmsg);
2674 }
2675
2676 static int
2677 mqf_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2678 {
2679
2680 kif->kf_type = KF_TYPE_MQUEUE;
2681 return (0);
2682 }
2683
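/*
 * File operations for message queue descriptors.  Plain read/write and
 * ioctl are invalid; the descriptor may be passed between processes
 * (DFLAG_PASSABLE).
 */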
2684 static const struct fileops mqueueops = {
2685 .fo_read = invfo_rdwr,
2686 .fo_write = invfo_rdwr,
2687 .fo_truncate = invfo_truncate,
2688 .fo_ioctl = invfo_ioctl,
2689 .fo_poll = mqf_poll,
2690 .fo_kqfilter = mqf_kqfilter,
2691 .fo_stat = mqf_stat,
2692 .fo_close = mqf_close,
2693 .fo_fdclose = mqf_fdclose,
2694 .fo_chmod = mqf_chmod,
2695 .fo_chown = mqf_chown,
2696 .fo_sendfile = invfo_sendfile,
2697 .fo_fill_kinfo = mqf_fill_kinfo,
2698 .fo_cmp = file_kcmp_generic,
2699 .fo_flags = DFLAG_PASSABLE,
2700 };
2701
2702 static struct vop_vector mqfs_vnodeops = {
2703 .vop_default = &default_vnodeops,
2704 .vop_access = mqfs_access,
2705 .vop_cachedlookup = mqfs_lookup,
2706 .vop_lookup = vfs_cache_lookup,
2707 .vop_reclaim = mqfs_reclaim,
2708 .vop_create = mqfs_create,
2709 .vop_remove = mqfs_remove,
2710 .vop_inactive = mqfs_inactive,
2711 .vop_open = mqfs_open,
2712 .vop_close = mqfs_close,
2713 .vop_getattr = mqfs_getattr,
2714 .vop_setattr = mqfs_setattr,
2715 .vop_read = mqfs_read,
2716 .vop_write = VOP_EOPNOTSUPP,
2717 .vop_readdir = mqfs_readdir,
2718 .vop_mkdir = VOP_EOPNOTSUPP,
2719 .vop_rmdir = VOP_EOPNOTSUPP
2720 };
2721 VFS_VOP_VECTOR_REGISTER(mqfs_vnodeops);
2722
2723 static struct vfsops mqfs_vfsops = {
2724 .vfs_init = mqfs_init,
2725 .vfs_uninit = mqfs_uninit,
2726 .vfs_mount = mqfs_mount,
2727 .vfs_unmount = mqfs_unmount,
2728 .vfs_root = mqfs_root,
2729 .vfs_statfs = mqfs_statfs,
2730 };
2731
2732 static struct vfsconf mqueuefs_vfsconf = {
2733 .vfc_version = VFS_VERSION,
2734 .vfc_name = "mqueuefs",
2735 .vfc_vfsops = &mqfs_vfsops,
2736 .vfc_typenum = -1,
2737 .vfc_flags = VFCF_SYNTHETIC
2738 };
2739
2740 static struct syscall_helper_data mq_syscalls[] = {
2741 SYSCALL_INIT_HELPER(kmq_open),
2742 SYSCALL_INIT_HELPER_F(kmq_setattr, SYF_CAPENABLED),
2743 SYSCALL_INIT_HELPER_F(kmq_timedsend, SYF_CAPENABLED),
2744 SYSCALL_INIT_HELPER_F(kmq_timedreceive, SYF_CAPENABLED),
2745 SYSCALL_INIT_HELPER_F(kmq_notify, SYF_CAPENABLED),
2746 SYSCALL_INIT_HELPER(kmq_unlink),
2747 SYSCALL_INIT_LAST
2748 };
2749
2750 #ifdef COMPAT_FREEBSD32
2751 #include <compat/freebsd32/freebsd32.h>
2752 #include <compat/freebsd32/freebsd32_proto.h>
2753 #include <compat/freebsd32/freebsd32_signal.h>
2754 #include <compat/freebsd32/freebsd32_syscall.h>
2755 #include <compat/freebsd32/freebsd32_util.h>
2756
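/*
 * Converters between the 32-bit compat and native mq_attr layouts.
 */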
2757 static void
2758 mq_attr_from32(const struct mq_attr32 *from, struct mq_attr *to)
2759 {
2760
2761 to->mq_flags = from->mq_flags;
2762 to->mq_maxmsg = from->mq_maxmsg;
2763 to->mq_msgsize = from->mq_msgsize;
2764 to->mq_curmsgs = from->mq_curmsgs;
2765 }
2766
2767 static void
2768 mq_attr_to32(const struct mq_attr *from, struct mq_attr32 *to)
2769 {
2770
2771 to->mq_flags = from->mq_flags;
2772 to->mq_maxmsg = from->mq_maxmsg;
2773 to->mq_msgsize = from->mq_msgsize;
2774 to->mq_curmsgs = from->mq_curmsgs;
2775 }
2776
2777 int
2778 freebsd32_kmq_open(struct thread *td, struct freebsd32_kmq_open_args *uap)
2779 {
2780 struct mq_attr attr;
2781 struct mq_attr32 attr32;
2782 int flags, error;
2783
2784 if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
2785 return (EINVAL);
2786 flags = FFLAGS(uap->flags);
2787 if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
2788 error = copyin(uap->attr, &attr32, sizeof(attr32));
2789 if (error)
2790 return (error);
2791 mq_attr_from32(&attr32, &attr);
2792 }
2793 return (kern_kmq_open(td, uap->path, flags, uap->mode,
2794 uap->attr != NULL ? &attr : NULL));
2795 }
2796
2797 int
2798 freebsd32_kmq_setattr(struct thread *td, struct freebsd32_kmq_setattr_args *uap)
2799 {
2800 struct mq_attr attr, oattr;
2801 struct mq_attr32 attr32, oattr32;
2802 int error;
2803
2804 if (uap->attr != NULL) {
2805 error = copyin(uap->attr, &attr32, sizeof(attr32));
2806 if (error != 0)
2807 return (error);
2808 mq_attr_from32(&attr32, &attr);
2809 }
2810 error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
2811 &oattr);
2812 if (error == 0 && uap->oattr != NULL) {
2813 mq_attr_to32(&oattr, &oattr32);
2814 bzero(oattr32.__reserved, sizeof(oattr32.__reserved));
2815 error = copyout(&oattr32, uap->oattr, sizeof(oattr32));
2816 }
2817 return (error);
2818 }
2819
2820 int
2821 freebsd32_kmq_timedsend(struct thread *td,
2822 struct freebsd32_kmq_timedsend_args *uap)
2823 {
2824 struct timespec32 ets32;
2825 struct timespec *abs_timeout, ets;
2826 int error;
2827
2828 if (uap->abs_timeout != NULL) {
2829 error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
2830 if (error != 0)
2831 return (error);
2832 CP(ets32, ets, tv_sec);
2833 CP(ets32, ets, tv_nsec);
2834 abs_timeout = &ets;
2835 } else
2836 abs_timeout = NULL;
2837
2838 return (kern_kmq_timedsend(td, uap->mqd, uap->msg_ptr, uap->msg_len,
2839 uap->msg_prio, abs_timeout));
2840 }
2841
2842 int
2843 freebsd32_kmq_timedreceive(struct thread *td,
2844 struct freebsd32_kmq_timedreceive_args *uap)
2845 {
2846 struct timespec32 ets32;
2847 struct timespec *abs_timeout, ets;
2848 int error;
2849
2850 if (uap->abs_timeout != NULL) {
2851 error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
2852 if (error != 0)
2853 return (error);
2854 CP(ets32, ets, tv_sec);
2855 CP(ets32, ets, tv_nsec);
2856 abs_timeout = &ets;
2857 } else
2858 abs_timeout = NULL;
2859
2860 return (kern_kmq_timedreceive(td, uap->mqd, uap->msg_ptr, uap->msg_len,
2861 uap->msg_prio, abs_timeout));
2862 }
2863
2864 int
2865 freebsd32_kmq_notify(struct thread *td, struct freebsd32_kmq_notify_args *uap)
2866 {
2867 struct sigevent ev, *evp;
2868 struct sigevent32 ev32;
2869 int error;
2870
2871 if (uap->sigev == NULL) {
2872 evp = NULL;
2873 } else {
2874 error = copyin(uap->sigev, &ev32, sizeof(ev32));
2875 if (error != 0)
2876 return (error);
2877 error = convert_sigevent32(&ev32, &ev);
2878 if (error != 0)
2879 return (error);
2880 evp = &ev;
2881 }
2882 return (kern_kmq_notify(td, uap->mqd, evp));
2883 }
2884
2885 static struct syscall_helper_data mq32_syscalls[] = {
2886 SYSCALL32_INIT_HELPER(freebsd32_kmq_open),
2887 SYSCALL32_INIT_HELPER_F(freebsd32_kmq_setattr, SYF_CAPENABLED),
2888 SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedsend, SYF_CAPENABLED),
2889 SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedreceive, SYF_CAPENABLED),
2890 SYSCALL32_INIT_HELPER_F(freebsd32_kmq_notify, SYF_CAPENABLED),
2891 SYSCALL32_INIT_HELPER_COMPAT(kmq_unlink),
2892 SYSCALL_INIT_LAST
2893 };
2894 #endif
2895
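/*
 * Register the message queue syscalls (and their 32-bit compat variants
 * when COMPAT_FREEBSD32 is configured).
 */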
2896 static int
2897 mqinit(void)
2898 {
2899 int error;
2900
2901 error = syscall_helper_register(mq_syscalls, SY_THR_STATIC_KLD);
2902 if (error != 0)
2903 return (error);
2904 #ifdef COMPAT_FREEBSD32
2905 error = syscall32_helper_register(mq32_syscalls, SY_THR_STATIC_KLD);
2906 if (error != 0)
2907 return (error);
2908 #endif
2909 return (0);
2910 }
2911
2912 static int
2913 mqunload(void)
2914 {
2915
2916 #ifdef COMPAT_FREEBSD32
2917 syscall32_helper_unregister(mq32_syscalls);
2918 #endif
2919 syscall_helper_unregister(mq_syscalls);
2920 return (0);
2921 }
2922
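/*
 * Module event handler: run the generic VFS module event first, then
 * register or unregister the message queue syscalls.
 */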
2923 static int
2924 mq_modload(struct module *module, int cmd, void *arg)
2925 {
2926 int error = 0;
2927
2928 error = vfs_modevent(module, cmd, arg);
2929 if (error != 0)
2930 return (error);
2931
2932 switch (cmd) {
2933 case MOD_LOAD:
2934 error = mqinit();
2935 if (error != 0)
2936 mqunload();
2937 break;
2938 case MOD_UNLOAD:
2939 error = mqunload();
2940 break;
2941 default:
2942 break;
2943 }
2944 return (error);
2945 }
2946
2947 static moduledata_t mqueuefs_mod = {
2948 "mqueuefs",
2949 mq_modload,
2950 &mqueuefs_vfsconf
2951 };
2952 DECLARE_MODULE(mqueuefs, mqueuefs_mod, SI_SUB_VFS, SI_ORDER_MIDDLE);
2953 MODULE_VERSION(mqueuefs, 1);
2954