1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
5 * Copyright (c) 2016-2017 Robert N. M. Watson
6 * All rights reserved.
7 *
8 * Portions of this software were developed by BAE Systems, the University of
9 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11 * Computing (TC) research program.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 */
35
36 /*
37 * POSIX message queue implementation.
38 *
39 * 1) A mqueue filesystem can be mounted; each message queue then appears
40 * in the mounted directory, where the user can change a queue's
41 * permissions and ownership or remove a queue. Manually creating a file
42 * in the directory creates a message queue in the kernel with the same
43 * name and the default attributes; this method is not recommended,
44 * since the mq_open syscall allows the user to specify different
45 * attributes. The filesystem can also be mounted multiple times at
46 * different mount points, but every mount shows the same contents.
47 *
48 * 2) Standard POSIX message queue API. The syscalls do not use the VFS
49 * layer but operate directly on the internal data structures, so the
50 * IPC facility can be used without mounting the mqueue filesystem.
51 */
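/*
 * Illustrative userland usage (not part of this file; the queue name,
 * sizes, and mode below are made up for the example).  The libc mq_*()
 * wrappers map onto the kmq_*() syscalls implemented in this file, e.g.
 * mq_open() -> kern_kmq_open() and mq_unlink() -> sys_kmq_unlink():
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t mqd = mq_open("/myqueue", O_RDWR | O_CREAT, 0600, &attr);
 *	mq_send(mqd, buf, len, prio);
 *	mq_receive(mqd, buf, sizeof(buf), &prio);
 *	mq_close(mqd);
 *	mq_unlink("/myqueue");
 */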
52
53 #include "opt_capsicum.h"
54
55 #include <sys/param.h>
56 #include <sys/kernel.h>
57 #include <sys/systm.h>
58 #include <sys/limits.h>
59 #include <sys/malloc.h>
60 #include <sys/buf.h>
61 #include <sys/capsicum.h>
62 #include <sys/dirent.h>
63 #include <sys/event.h>
64 #include <sys/eventhandler.h>
65 #include <sys/fcntl.h>
66 #include <sys/file.h>
67 #include <sys/filedesc.h>
68 #include <sys/jail.h>
69 #include <sys/lock.h>
70 #include <sys/module.h>
71 #include <sys/mount.h>
72 #include <sys/mqueue.h>
73 #include <sys/mutex.h>
74 #include <sys/namei.h>
75 #include <sys/posix4.h>
76 #include <sys/poll.h>
77 #include <sys/priv.h>
78 #include <sys/proc.h>
79 #include <sys/queue.h>
80 #include <sys/sysproto.h>
81 #include <sys/stat.h>
82 #include <sys/syscall.h>
83 #include <sys/syscallsubr.h>
84 #include <sys/sysent.h>
85 #include <sys/sx.h>
86 #include <sys/sysctl.h>
87 #include <sys/taskqueue.h>
88 #include <sys/unistd.h>
89 #include <sys/user.h>
90 #include <sys/vnode.h>
91 #include <machine/atomic.h>
92
93 #include <security/audit/audit.h>
94
95 FEATURE(p1003_1b_mqueue, "POSIX P1003.1B message queues support");
96
97 /*
98 * Limits and constants
99 */
100 #define MQFS_NAMELEN NAME_MAX
101 #define MQFS_DELEN (8 + MQFS_NAMELEN)
102
103 /* node types */
104 typedef enum {
105 mqfstype_none = 0,
106 mqfstype_root,
107 mqfstype_dir,
108 mqfstype_this,
109 mqfstype_parent,
110 mqfstype_file,
111 mqfstype_symlink,
112 } mqfs_type_t;
113
114 struct mqfs_node;
115
116 /*
117 * mqfs_info: describes a mqfs instance
118 */
119 struct mqfs_info {
120 struct sx mi_lock;
121 struct mqfs_node *mi_root;
122 struct unrhdr *mi_unrhdr;
123 };
124
125 struct mqfs_vdata {
126 LIST_ENTRY(mqfs_vdata) mv_link;
127 struct mqfs_node *mv_node;
128 struct vnode *mv_vnode;
129 struct task mv_task;
130 };
131
132 /*
133 * mqfs_node: describes a node (file or directory) within a mqfs
134 */
135 struct mqfs_node {
136 char mn_name[MQFS_NAMELEN+1];
137 struct mqfs_info *mn_info;
138 struct mqfs_node *mn_parent;
139 LIST_HEAD(,mqfs_node) mn_children;
140 LIST_ENTRY(mqfs_node) mn_sibling;
141 LIST_HEAD(,mqfs_vdata) mn_vnodes;
142 const void *mn_pr_root;
143 int mn_refcount;
144 mqfs_type_t mn_type;
145 int mn_deleted;
146 uint32_t mn_fileno;
147 void *mn_data;
148 struct timespec mn_birth;
149 struct timespec mn_ctime;
150 struct timespec mn_atime;
151 struct timespec mn_mtime;
152 uid_t mn_uid;
153 gid_t mn_gid;
154 int mn_mode;
155 };
156
157 #define VTON(vp) (((struct mqfs_vdata *)((vp)->v_data))->mv_node)
158 #define VTOMQ(vp) ((struct mqueue *)(VTON(vp)->mn_data))
159 #define VFSTOMQFS(m) ((struct mqfs_info *)((m)->mnt_data))
160 #define FPTOMQ(fp) ((struct mqueue *)(((struct mqfs_node *) \
161 (fp)->f_data)->mn_data))
162
163 TAILQ_HEAD(msgq, mqueue_msg);
164
165 struct mqueue;
166
167 struct mqueue_notifier {
168 LIST_ENTRY(mqueue_notifier) nt_link;
169 struct sigevent nt_sigev;
170 ksiginfo_t nt_ksi;
171 struct proc *nt_proc;
172 };
173
174 struct mqueue {
175 struct mtx mq_mutex;
176 int mq_flags;
177 long mq_maxmsg;
178 long mq_msgsize;
179 long mq_curmsgs;
180 long mq_totalbytes;
181 struct msgq mq_msgq;
182 int mq_receivers;
183 int mq_senders;
184 struct selinfo mq_rsel;
185 struct selinfo mq_wsel;
186 struct mqueue_notifier *mq_notifier;
187 };
188
189 #define MQ_RSEL 0x01
190 #define MQ_WSEL 0x02
191
192 struct mqueue_msg {
193 TAILQ_ENTRY(mqueue_msg) msg_link;
194 unsigned int msg_prio;
195 unsigned int msg_size;
196 /* following real data... */
197 };
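/*
 * The payload is stored inline immediately after this header: a message is
 * a single allocation of sizeof(struct mqueue_msg) + msg_size bytes, and
 * mqueue_loadmsg()/mqueue_savemsg() address the data as
 * (char *)msg + sizeof(struct mqueue_msg).
 */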
198
199 static SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
200 "POSIX real time message queue");
201
202 static int default_maxmsg = 10;
203 SYSCTL_INT(_kern_mqueue, OID_AUTO, default_maxmsg, CTLFLAG_RD,
204 &default_maxmsg, 0, "Default maximum messages in queue");
205 static int default_msgsize = 1024;
206 SYSCTL_INT(_kern_mqueue, OID_AUTO, default_msgsize, CTLFLAG_RD,
207 &default_msgsize, 0, "Default maximum message size");
208
209 static int maxmsg = 100;
210 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW,
211 &maxmsg, 0, "maximum messages in queue");
212 static int maxmsgsize = 16384;
213 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW,
214 &maxmsgsize, 0, "maximum message size");
215 static int maxmq = 100;
216 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW,
217 &maxmq, 0, "maximum message queues");
218 static int curmq = 0;
219 SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW,
220 &curmq, 0, "current message queue number");
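/*
 * The limits above are runtime-tunable, for example (values are purely
 * illustrative):
 *
 *	sysctl kern.mqueue.maxmsg=200
 *	sysctl kern.mqueue.maxmsgsize=65536
 *	sysctl kern.mqueue.maxmq=256
 */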
221 static int unloadable = 0;
222 static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data");
223
224 static eventhandler_tag exit_tag;
225
226 /* Only one instance per system */
227 static struct mqfs_info mqfs_data;
228 static uma_zone_t mqnode_zone;
229 static uma_zone_t mqueue_zone;
230 static uma_zone_t mvdata_zone;
231 static uma_zone_t mqnoti_zone;
232 static struct vop_vector mqfs_vnodeops;
233 static const struct fileops mqueueops;
234 static unsigned mqfs_osd_jail_slot;
235
236 /*
237 * Directory structure construction and manipulation
238 */
239 #ifdef notyet
240 static struct mqfs_node *mqfs_create_dir(struct mqfs_node *parent,
241 const char *name, int namelen, struct ucred *cred, int mode);
242 static struct mqfs_node *mqfs_create_link(struct mqfs_node *parent,
243 const char *name, int namelen, struct ucred *cred, int mode);
244 #endif
245
246 static struct mqfs_node *mqfs_create_file(struct mqfs_node *parent,
247 const char *name, int namelen, struct ucred *cred, int mode);
248 static int mqfs_destroy(struct mqfs_node *mn);
249 static void mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn);
250 static void mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn);
251 static int mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn);
252 static int mqfs_prison_remove(void *obj, void *data);
253
254 /*
255 * Message queue construction and manipulation
256 */
257 static struct mqueue *mqueue_alloc(const struct mq_attr *attr);
258 static void mqueue_free(struct mqueue *mq);
259 static int mqueue_send(struct mqueue *mq, const char *msg_ptr,
260 size_t msg_len, unsigned msg_prio, int waitok,
261 const struct timespec *abs_timeout);
262 static int mqueue_receive(struct mqueue *mq, char *msg_ptr,
263 size_t msg_len, unsigned *msg_prio, int waitok,
264 const struct timespec *abs_timeout);
265 static int _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg,
266 int timo);
267 static int _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg,
268 int timo);
269 static void mqueue_send_notification(struct mqueue *mq);
270 static void mqueue_fdclose(struct thread *td, int fd, struct file *fp);
271 static void mq_proc_exit(void *arg, struct proc *p);
272
273 /*
274 * kqueue filters
275 */
276 static void filt_mqdetach(struct knote *kn);
277 static int filt_mqread(struct knote *kn, long hint);
278 static int filt_mqwrite(struct knote *kn, long hint);
279
280 static const struct filterops mq_rfiltops = {
281 .f_isfd = 1,
282 .f_detach = filt_mqdetach,
283 .f_event = filt_mqread,
284 .f_copy = knote_triv_copy,
285 };
286 static const struct filterops mq_wfiltops = {
287 .f_isfd = 1,
288 .f_detach = filt_mqdetach,
289 .f_event = filt_mqwrite,
290 .f_copy = knote_triv_copy,
291 };
292
293 /*
294 * Initialize fileno bitmap
295 */
296 static void
297 mqfs_fileno_init(struct mqfs_info *mi)
298 {
299 struct unrhdr *up;
300
301 up = new_unrhdr(1, INT_MAX, NULL);
302 mi->mi_unrhdr = up;
303 }
304
305 /*
306 * Tear down fileno bitmap
307 */
308 static void
309 mqfs_fileno_uninit(struct mqfs_info *mi)
310 {
311 struct unrhdr *up;
312
313 up = mi->mi_unrhdr;
314 mi->mi_unrhdr = NULL;
315 delete_unrhdr(up);
316 }
317
318 /*
319 * Allocate a file number
320 */
321 static void
322 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn)
323 {
324 /* make sure our parent has a file number */
325 if (mn->mn_parent && !mn->mn_parent->mn_fileno)
326 mqfs_fileno_alloc(mi, mn->mn_parent);
327
328 switch (mn->mn_type) {
329 case mqfstype_root:
330 case mqfstype_dir:
331 case mqfstype_file:
332 case mqfstype_symlink:
333 mn->mn_fileno = alloc_unr(mi->mi_unrhdr);
334 break;
335 case mqfstype_this:
336 KASSERT(mn->mn_parent != NULL,
337 ("mqfstype_this node has no parent"));
338 mn->mn_fileno = mn->mn_parent->mn_fileno;
339 break;
340 case mqfstype_parent:
341 KASSERT(mn->mn_parent != NULL,
342 ("mqfstype_parent node has no parent"));
343 if (mn->mn_parent == mi->mi_root) {
344 mn->mn_fileno = mn->mn_parent->mn_fileno;
345 break;
346 }
347 KASSERT(mn->mn_parent->mn_parent != NULL,
348 ("mqfstype_parent node has no grandparent"));
349 mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno;
350 break;
351 default:
352 KASSERT(0,
353 ("mqfs_fileno_alloc() called for unknown type node: %d",
354 mn->mn_type));
355 break;
356 }
357 }
358
359 /*
360 * Release a file number
361 */
362 static void
363 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn)
364 {
365 switch (mn->mn_type) {
366 case mqfstype_root:
367 case mqfstype_dir:
368 case mqfstype_file:
369 case mqfstype_symlink:
370 free_unr(mi->mi_unrhdr, mn->mn_fileno);
371 break;
372 case mqfstype_this:
373 case mqfstype_parent:
374 /* ignore these, as they don't "own" their file number */
375 break;
376 default:
377 KASSERT(0,
378 ("mqfs_fileno_free() called for unknown type node: %d",
379 mn->mn_type));
380 break;
381 }
382 }
383
384 static __inline struct mqfs_node *
385 mqnode_alloc(void)
386 {
387 return (uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO));
388 }
389
390 static __inline void
391 mqnode_free(struct mqfs_node *node)
392 {
393 uma_zfree(mqnode_zone, node);
394 }
395
396 static __inline void
397 mqnode_addref(struct mqfs_node *node)
398 {
399 atomic_add_int(&node->mn_refcount, 1);
400 }
401
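/*
 * Drop a reference to a node.  The expected residual count is 3 for a
 * directory (its own reference plus the references mqfs_add_node() took on
 * it when the "." and ".." children were added) and 1 for any other node;
 * a release that finds the count at that floor tears the node down under
 * the mqfs lock.
 */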
402 static __inline void
403 mqnode_release(struct mqfs_node *node)
404 {
405 struct mqfs_info *mqfs;
406 int old, exp;
407
408 mqfs = node->mn_info;
409 old = atomic_fetchadd_int(&node->mn_refcount, -1);
410 if (node->mn_type == mqfstype_dir ||
411 node->mn_type == mqfstype_root)
412 exp = 3; /* include . and .. */
413 else
414 exp = 1;
415 if (old == exp) {
416 int locked = sx_xlocked(&mqfs->mi_lock);
417 if (!locked)
418 sx_xlock(&mqfs->mi_lock);
419 mqfs_destroy(node);
420 if (!locked)
421 sx_xunlock(&mqfs->mi_lock);
422 }
423 }
424
425 /*
426 * Add a node to a directory
427 */
428 static int
429 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node)
430 {
431 KASSERT(parent != NULL, ("%s(): parent is NULL", __func__));
432 KASSERT(parent->mn_info != NULL,
433 ("%s(): parent has no mn_info", __func__));
434 KASSERT(parent->mn_type == mqfstype_dir ||
435 parent->mn_type == mqfstype_root,
436 ("%s(): parent is not a directory", __func__));
437
438 node->mn_info = parent->mn_info;
439 node->mn_parent = parent;
440 LIST_INIT(&node->mn_children);
441 LIST_INIT(&node->mn_vnodes);
442 LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling);
443 mqnode_addref(parent);
444 return (0);
445 }
446
447 static struct mqfs_node *
448 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode,
449 int nodetype)
450 {
451 struct mqfs_node *node;
452
453 node = mqnode_alloc();
454 strncpy(node->mn_name, name, namelen);
455 node->mn_pr_root = cred->cr_prison->pr_root;
456 node->mn_type = nodetype;
457 node->mn_refcount = 1;
458 vfs_timestamp(&node->mn_birth);
459 node->mn_ctime = node->mn_atime = node->mn_mtime =
460 node->mn_birth;
461 node->mn_uid = cred->cr_uid;
462 node->mn_gid = cred->cr_gid;
463 node->mn_mode = mode;
464 return (node);
465 }
466
467 /*
468 * Create a file
469 */
470 static struct mqfs_node *
471 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen,
472 struct ucred *cred, int mode)
473 {
474 struct mqfs_node *node;
475
476 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file);
477 if (mqfs_add_node(parent, node) != 0) {
478 mqnode_free(node);
479 return (NULL);
480 }
481 return (node);
482 }
483
484 /*
485 * Add . and .. to a directory
486 */
487 static int
488 mqfs_fixup_dir(struct mqfs_node *parent)
489 {
490 struct mqfs_node *dir;
491
492 dir = mqnode_alloc();
493 dir->mn_name[0] = '.';
494 dir->mn_type = mqfstype_this;
495 dir->mn_refcount = 1;
496 if (mqfs_add_node(parent, dir) != 0) {
497 mqnode_free(dir);
498 return (-1);
499 }
500
501 dir = mqnode_alloc();
502 dir->mn_name[0] = dir->mn_name[1] = '.';
503 dir->mn_type = mqfstype_parent;
504 dir->mn_refcount = 1;
505
506 if (mqfs_add_node(parent, dir) != 0) {
507 mqnode_free(dir);
508 return (-1);
509 }
510
511 return (0);
512 }
513
514 #ifdef notyet
515
516 /*
517 * Create a directory
518 */
519 static struct mqfs_node *
520 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen,
521 struct ucred *cred, int mode)
522 {
523 struct mqfs_node *node;
524
525 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir);
526 if (mqfs_add_node(parent, node) != 0) {
527 mqnode_free(node);
528 return (NULL);
529 }
530
531 if (mqfs_fixup_dir(node) != 0) {
532 mqfs_destroy(node);
533 return (NULL);
534 }
535 return (node);
536 }
537
538 /*
539 * Create a symlink
540 */
541 static struct mqfs_node *
542 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen,
543 struct ucred *cred, int mode)
544 {
545 struct mqfs_node *node;
546
547 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink);
548 if (mqfs_add_node(parent, node) != 0) {
549 mqnode_free(node);
550 return (NULL);
551 }
552 return (node);
553 }
554
555 #endif
556
557 /*
558 * Destroy a node or a tree of nodes
559 */
560 static int
561 mqfs_destroy(struct mqfs_node *node)
562 {
563 struct mqfs_node *parent;
564
565 KASSERT(node != NULL,
566 ("%s(): node is NULL", __func__));
567 KASSERT(node->mn_info != NULL,
568 ("%s(): node has no mn_info", __func__));
569
570 /* destroy children */
571 if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root)
572 while (! LIST_EMPTY(&node->mn_children))
573 mqfs_destroy(LIST_FIRST(&node->mn_children));
574
575 /* unlink from parent */
576 if ((parent = node->mn_parent) != NULL) {
577 KASSERT(parent->mn_info == node->mn_info,
578 ("%s(): parent has different mn_info", __func__));
579 LIST_REMOVE(node, mn_sibling);
580 }
581
582 if (node->mn_fileno != 0)
583 mqfs_fileno_free(node->mn_info, node);
584 if (node->mn_data != NULL)
585 mqueue_free(node->mn_data);
586 mqnode_free(node);
587 return (0);
588 }
589
590 /*
591 * Mount a mqfs instance
592 */
593 static int
594 mqfs_mount(struct mount *mp)
595 {
596 struct statfs *sbp;
597
598 if (mp->mnt_flag & MNT_UPDATE)
599 return (EOPNOTSUPP);
600
601 mp->mnt_data = &mqfs_data;
602 MNT_ILOCK(mp);
603 mp->mnt_flag |= MNT_LOCAL;
604 MNT_IUNLOCK(mp);
605 vfs_getnewfsid(mp);
606
607 sbp = &mp->mnt_stat;
608 vfs_mountedfrom(mp, "mqueue");
609 sbp->f_bsize = PAGE_SIZE;
610 sbp->f_iosize = PAGE_SIZE;
611 sbp->f_blocks = 1;
612 sbp->f_bfree = 1;
613 sbp->f_bavail = 0;
614 sbp->f_files = 0;
615 sbp->f_ffree = 0;
616 return (0);
617 }
618
619 /*
620 * Unmount a mqfs instance
621 */
622 static int
623 mqfs_unmount(struct mount *mp, int mntflags)
624 {
625 int error;
626
627 error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0,
628 curthread);
629 return (error);
630 }
631
632 /*
633 * Return a root vnode
634 */
635 static int
636 mqfs_root(struct mount *mp, int flags, struct vnode **vpp)
637 {
638 struct mqfs_info *mqfs;
639 int ret;
640
641 mqfs = VFSTOMQFS(mp);
642 ret = mqfs_allocv(mp, vpp, mqfs->mi_root);
643 return (ret);
644 }
645
646 /*
647 * Return filesystem stats
648 */
649 static int
650 mqfs_statfs(struct mount *mp, struct statfs *sbp)
651 {
652 /* XXX update statistics */
653 return (0);
654 }
655
656 /*
657 * Initialize a mqfs instance
658 */
659 static int
660 mqfs_init(struct vfsconf *vfc)
661 {
662 struct mqfs_node *root;
663 struct mqfs_info *mi;
664 osd_method_t methods[PR_MAXMETHOD] = {
665 [PR_METHOD_REMOVE] = mqfs_prison_remove,
666 };
667
668 mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node),
669 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
670 mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue),
671 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
672 mvdata_zone = uma_zcreate("mvdata",
673 sizeof(struct mqfs_vdata), NULL, NULL, NULL,
674 NULL, UMA_ALIGN_PTR, 0);
675 mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier),
676 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
677 mi = &mqfs_data;
678 sx_init(&mi->mi_lock, "mqfs lock");
679 /* set up the root directory */
680 root = mqfs_create_node("/", 1, curthread->td_ucred, 01777,
681 mqfstype_root);
682 root->mn_info = mi;
683 LIST_INIT(&root->mn_children);
684 LIST_INIT(&root->mn_vnodes);
685 mi->mi_root = root;
686 mqfs_fileno_init(mi);
687 mqfs_fileno_alloc(mi, root);
688 mqfs_fixup_dir(root);
689 exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL,
690 EVENTHANDLER_PRI_ANY);
691 mq_fdclose = mqueue_fdclose;
692 p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING);
693 mqfs_osd_jail_slot = osd_jail_register(NULL, methods);
694 return (0);
695 }
696
697 /*
698 * Destroy a mqfs instance
699 */
700 static int
701 mqfs_uninit(struct vfsconf *vfc)
702 {
703 struct mqfs_info *mi;
704
705 if (!unloadable)
706 return (EOPNOTSUPP);
707 osd_jail_deregister(mqfs_osd_jail_slot);
708 EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
709 mi = &mqfs_data;
710 mqfs_destroy(mi->mi_root);
711 mi->mi_root = NULL;
712 mqfs_fileno_uninit(mi);
713 sx_destroy(&mi->mi_lock);
714 uma_zdestroy(mqnode_zone);
715 uma_zdestroy(mqueue_zone);
716 uma_zdestroy(mvdata_zone);
717 uma_zdestroy(mqnoti_zone);
718 return (0);
719 }
720
721 /*
722 * task routine
723 */
724 static void
725 do_recycle(void *context, int pending __unused)
726 {
727 struct vnode *vp = (struct vnode *)context;
728
729 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
730 vrecycle(vp);
731 VOP_UNLOCK(vp);
732 vdrop(vp);
733 }
734
735 /*
736 * Allocate a vnode
737 */
738 static int
739 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
740 {
741 struct mqfs_vdata *vd;
742 struct mqfs_info *mqfs;
743 struct vnode *newvpp;
744 int error;
745
746 mqfs = pn->mn_info;
747 *vpp = NULL;
748 sx_xlock(&mqfs->mi_lock);
749 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
750 if (vd->mv_vnode->v_mount == mp) {
751 vhold(vd->mv_vnode);
752 break;
753 }
754 }
755
756 if (vd != NULL) {
757 found:
758 *vpp = vd->mv_vnode;
759 sx_xunlock(&mqfs->mi_lock);
760 error = vget(*vpp, LK_RETRY | LK_EXCLUSIVE);
761 vdrop(*vpp);
762 return (error);
763 }
764 sx_xunlock(&mqfs->mi_lock);
765
766 error = getnewvnode("mqueue", mp, &mqfs_vnodeops, &newvpp);
767 if (error)
768 return (error);
769 vn_lock(newvpp, LK_EXCLUSIVE | LK_RETRY);
770 error = insmntque(newvpp, mp);
771 if (error != 0)
772 return (error);
773
774 sx_xlock(&mqfs->mi_lock);
775 /*
776 * Check if it has already been allocated
777 * while we were blocked.
778 */
779 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
780 if (vd->mv_vnode->v_mount == mp) {
781 vhold(vd->mv_vnode);
782 sx_xunlock(&mqfs->mi_lock);
783
784 vgone(newvpp);
785 vput(newvpp);
786 goto found;
787 }
788 }
789
790 *vpp = newvpp;
791
792 vd = uma_zalloc(mvdata_zone, M_WAITOK);
793 (*vpp)->v_data = vd;
794 vd->mv_vnode = *vpp;
795 vd->mv_node = pn;
796 TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp);
797 LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link);
798 mqnode_addref(pn);
799 switch (pn->mn_type) {
800 case mqfstype_root:
801 (*vpp)->v_vflag = VV_ROOT;
802 /* fall through */
803 case mqfstype_dir:
804 case mqfstype_this:
805 case mqfstype_parent:
806 (*vpp)->v_type = VDIR;
807 break;
808 case mqfstype_file:
809 (*vpp)->v_type = VREG;
810 break;
811 case mqfstype_symlink:
812 (*vpp)->v_type = VLNK;
813 break;
814 case mqfstype_none:
815 KASSERT(0, ("mqfs_allocv called for null node\n"));
816 default:
817 panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type);
818 }
819 sx_xunlock(&mqfs->mi_lock);
820 vn_set_state(*vpp, VSTATE_CONSTRUCTED);
821 return (0);
822 }
823
824 /*
825 * Search a directory for an entry
826 */
827 static struct mqfs_node *
828 mqfs_search(struct mqfs_node *pd, const char *name, int len, struct ucred *cred)
829 {
830 struct mqfs_node *pn;
831 const void *pr_root;
832
833 sx_assert(&pd->mn_info->mi_lock, SX_LOCKED);
834 pr_root = cred->cr_prison->pr_root;
835 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
836 /* Only match names within the same prison root directory */
837 if ((pn->mn_pr_root == NULL || pn->mn_pr_root == pr_root) &&
838 strncmp(pn->mn_name, name, len) == 0 &&
839 pn->mn_name[len] == '\0')
840 return (pn);
841 }
842 return (NULL);
843 }
844
845 /*
846 * Look up a file or directory.
847 */
848 static int
849 mqfs_lookupx(struct vop_cachedlookup_args *ap)
850 {
851 struct componentname *cnp;
852 struct vnode *dvp, **vpp;
853 struct mqfs_node *pd;
854 struct mqfs_node *pn;
855 struct mqfs_info *mqfs;
856 uint64_t flags;
857 int nameiop, error, namelen;
858 char *pname;
859 struct thread *td;
860
861 td = curthread;
862 cnp = ap->a_cnp;
863 vpp = ap->a_vpp;
864 dvp = ap->a_dvp;
865 pname = cnp->cn_nameptr;
866 namelen = cnp->cn_namelen;
867 flags = cnp->cn_flags;
868 nameiop = cnp->cn_nameiop;
869 pd = VTON(dvp);
870 pn = NULL;
871 mqfs = pd->mn_info;
872 *vpp = NULL;
873
874 if (dvp->v_type != VDIR)
875 return (ENOTDIR);
876
877 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
878 if (error)
879 return (error);
880
881 /* shortcut: check if the name is too long */
882 if (cnp->cn_namelen >= MQFS_NAMELEN)
883 return (ENOENT);
884
885 /* self */
886 if (namelen == 1 && pname[0] == '.') {
887 if ((flags & ISLASTCN) && nameiop != LOOKUP)
888 return (EINVAL);
889 pn = pd;
890 *vpp = dvp;
891 vref(dvp);
892 return (0);
893 }
894
895 /* parent */
896 if (cnp->cn_flags & ISDOTDOT) {
897 if (dvp->v_vflag & VV_ROOT)
898 return (EIO);
899 if ((flags & ISLASTCN) && nameiop != LOOKUP)
900 return (EINVAL);
901 VOP_UNLOCK(dvp);
902 KASSERT(pd->mn_parent, ("non-root directory has no parent"));
903 pn = pd->mn_parent;
904 error = mqfs_allocv(dvp->v_mount, vpp, pn);
905 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
906 return (error);
907 }
908
909 /* named node */
910 sx_xlock(&mqfs->mi_lock);
911 pn = mqfs_search(pd, pname, namelen, cnp->cn_cred);
912 if (pn != NULL)
913 mqnode_addref(pn);
914 sx_xunlock(&mqfs->mi_lock);
915
916 /* found */
917 if (pn != NULL) {
918 /* DELETE */
919 if (nameiop == DELETE && (flags & ISLASTCN)) {
920 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
921 if (error) {
922 mqnode_release(pn);
923 return (error);
924 }
925 if (*vpp == dvp) {
926 vref(dvp);
927 *vpp = dvp;
928 mqnode_release(pn);
929 return (0);
930 }
931 }
932
933 /* allocate vnode */
934 error = mqfs_allocv(dvp->v_mount, vpp, pn);
935 mqnode_release(pn);
936 if (error == 0 && cnp->cn_flags & MAKEENTRY)
937 cache_enter(dvp, *vpp, cnp);
938 return (error);
939 }
940
941 /* not found */
942
943 /* will we create a new entry in this directory? */
944 if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT)
945 && (flags & ISLASTCN)) {
946 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
947 if (error)
948 return (error);
949 return (EJUSTRETURN);
950 }
951 return (ENOENT);
952 }
953
954 #if 0
955 struct vop_lookup_args {
956 struct vop_generic_args a_gen;
957 struct vnode *a_dvp;
958 struct vnode **a_vpp;
959 struct componentname *a_cnp;
960 };
961 #endif
962
963 /*
964 * vnode lookup operation
965 */
966 static int
967 mqfs_lookup(struct vop_cachedlookup_args *ap)
968 {
969 int rc;
970
971 rc = mqfs_lookupx(ap);
972 return (rc);
973 }
974
975 #if 0
976 struct vop_create_args {
977 struct vnode *a_dvp;
978 struct vnode **a_vpp;
979 struct componentname *a_cnp;
980 struct vattr *a_vap;
981 };
982 #endif
983
984 /*
985 * vnode creation operation
986 */
987 static int
988 mqfs_create(struct vop_create_args *ap)
989 {
990 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
991 struct componentname *cnp = ap->a_cnp;
992 struct mqfs_node *pd;
993 struct mqfs_node *pn;
994 struct mqueue *mq;
995 int error;
996
997 pd = VTON(ap->a_dvp);
998 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
999 return (ENOTDIR);
1000 mq = mqueue_alloc(NULL);
1001 if (mq == NULL)
1002 return (EAGAIN);
1003 sx_xlock(&mqfs->mi_lock);
1004 pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen,
1005 cnp->cn_cred, ap->a_vap->va_mode);
1006 if (pn == NULL) {
1007 sx_xunlock(&mqfs->mi_lock);
1008 error = ENOSPC;
1009 } else {
1010 mqnode_addref(pn);
1011 sx_xunlock(&mqfs->mi_lock);
1012 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1013 mqnode_release(pn);
1014 if (error)
1015 mqfs_destroy(pn);
1016 else
1017 pn->mn_data = mq;
1018 }
1019 if (error)
1020 mqueue_free(mq);
1021 return (error);
1022 }
1023
1024 /*
1025 * Remove an entry
1026 */
1027 static int
1028 do_unlink(struct mqfs_node *pn, struct ucred *ucred)
1029 {
1030 struct mqfs_node *parent;
1031 struct mqfs_vdata *vd;
1032 int error = 0;
1033
1034 sx_assert(&pn->mn_info->mi_lock, SX_LOCKED);
1035
1036 if (ucred->cr_uid != pn->mn_uid &&
1037 (error = priv_check_cred(ucred, PRIV_MQ_ADMIN)) != 0)
1038 error = EACCES;
1039 else if (!pn->mn_deleted) {
1040 parent = pn->mn_parent;
1041 pn->mn_parent = NULL;
1042 pn->mn_deleted = 1;
1043 LIST_REMOVE(pn, mn_sibling);
1044 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
1045 cache_purge(vd->mv_vnode);
1046 vhold(vd->mv_vnode);
1047 taskqueue_enqueue(taskqueue_thread, &vd->mv_task);
1048 }
1049 mqnode_release(pn);
1050 mqnode_release(parent);
1051 } else
1052 error = ENOENT;
1053 return (error);
1054 }
1055
1056 #if 0
1057 struct vop_remove_args {
1058 struct vnode *a_dvp;
1059 struct vnode *a_vp;
1060 struct componentname *a_cnp;
1061 };
1062 #endif
1063
1064 /*
1065 * vnode removal operation
1066 */
1067 static int
1068 mqfs_remove(struct vop_remove_args *ap)
1069 {
1070 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1071 struct mqfs_node *pn;
1072 int error;
1073
1074 if (ap->a_vp->v_type == VDIR)
1075 return (EPERM);
1076 pn = VTON(ap->a_vp);
1077 sx_xlock(&mqfs->mi_lock);
1078 error = do_unlink(pn, ap->a_cnp->cn_cred);
1079 sx_xunlock(&mqfs->mi_lock);
1080 return (error);
1081 }
1082
1083 #if 0
1084 struct vop_inactive_args {
1085 struct vnode *a_vp;
1086 struct thread *a_td;
1087 };
1088 #endif
1089
1090 static int
1091 mqfs_inactive(struct vop_inactive_args *ap)
1092 {
1093 struct mqfs_node *pn = VTON(ap->a_vp);
1094
1095 if (pn->mn_deleted)
1096 vrecycle(ap->a_vp);
1097 return (0);
1098 }
1099
1100 #if 0
1101 struct vop_reclaim_args {
1102 struct vop_generic_args a_gen;
1103 struct vnode *a_vp;
1104 };
1105 #endif
1106
1107 static int
1108 mqfs_reclaim(struct vop_reclaim_args *ap)
1109 {
1110 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount);
1111 struct vnode *vp = ap->a_vp;
1112 struct mqfs_node *pn;
1113 struct mqfs_vdata *vd;
1114
1115 vd = vp->v_data;
1116 pn = vd->mv_node;
1117 sx_xlock(&mqfs->mi_lock);
1118 vp->v_data = NULL;
1119 LIST_REMOVE(vd, mv_link);
1120 mqnode_release(pn);
1121 sx_xunlock(&mqfs->mi_lock);
1122 uma_zfree(mvdata_zone, vd);
1123 return (0);
1124 }
1125
1126 #if 0
1127 struct vop_open_args {
1128 struct vop_generic_args a_gen;
1129 struct vnode *a_vp;
1130 int a_mode;
1131 struct ucred *a_cred;
1132 struct thread *a_td;
1133 struct file *a_fp;
1134 };
1135 #endif
1136
1137 static int
1138 mqfs_open(struct vop_open_args *ap)
1139 {
1140 return (0);
1141 }
1142
1143 #if 0
1144 struct vop_close_args {
1145 struct vop_generic_args a_gen;
1146 struct vnode *a_vp;
1147 int a_fflag;
1148 struct ucred *a_cred;
1149 struct thread *a_td;
1150 };
1151 #endif
1152
1153 static int
1154 mqfs_close(struct vop_close_args *ap)
1155 {
1156 return (0);
1157 }
1158
1159 #if 0
1160 struct vop_access_args {
1161 struct vop_generic_args a_gen;
1162 struct vnode *a_vp;
1163 accmode_t a_accmode;
1164 struct ucred *a_cred;
1165 struct thread *a_td;
1166 };
1167 #endif
1168
1169 /*
1170 * Verify permissions
1171 */
1172 static int
1173 mqfs_access(struct vop_access_args *ap)
1174 {
1175 struct vnode *vp = ap->a_vp;
1176 struct vattr vattr;
1177 int error;
1178
1179 error = VOP_GETATTR(vp, &vattr, ap->a_cred);
1180 if (error)
1181 return (error);
1182 error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid, vattr.va_gid,
1183 ap->a_accmode, ap->a_cred);
1184 return (error);
1185 }
1186
1187 #if 0
1188 struct vop_getattr_args {
1189 struct vop_generic_args a_gen;
1190 struct vnode *a_vp;
1191 struct vattr *a_vap;
1192 struct ucred *a_cred;
1193 };
1194 #endif
1195
1196 /*
1197 * Get file attributes
1198 */
1199 static int
1200 mqfs_getattr(struct vop_getattr_args *ap)
1201 {
1202 struct vnode *vp = ap->a_vp;
1203 struct mqfs_node *pn = VTON(vp);
1204 struct vattr *vap = ap->a_vap;
1205 int error = 0;
1206
1207 vap->va_type = vp->v_type;
1208 vap->va_mode = pn->mn_mode;
1209 vap->va_nlink = 1;
1210 vap->va_uid = pn->mn_uid;
1211 vap->va_gid = pn->mn_gid;
1212 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
1213 vap->va_fileid = pn->mn_fileno;
1214 vap->va_size = 0;
1215 vap->va_blocksize = PAGE_SIZE;
1216 vap->va_bytes = vap->va_size = 0;
1217 vap->va_atime = pn->mn_atime;
1218 vap->va_mtime = pn->mn_mtime;
1219 vap->va_ctime = pn->mn_ctime;
1220 vap->va_birthtime = pn->mn_birth;
1221 vap->va_gen = 0;
1222 vap->va_flags = 0;
1223 vap->va_rdev = NODEV;
1224 vap->va_bytes = 0;
1225 vap->va_filerev = 0;
1226 return (error);
1227 }
1228
1229 #if 0
1230 struct vop_setattr_args {
1231 struct vop_generic_args a_gen;
1232 struct vnode *a_vp;
1233 struct vattr *a_vap;
1234 struct ucred *a_cred;
1235 };
1236 #endif
1237 /*
1238 * Set attributes
1239 */
1240 static int
1241 mqfs_setattr(struct vop_setattr_args *ap)
1242 {
1243 struct mqfs_node *pn;
1244 struct vattr *vap;
1245 struct vnode *vp;
1246 struct thread *td;
1247 int c, error;
1248 uid_t uid;
1249 gid_t gid;
1250
1251 td = curthread;
1252 vap = ap->a_vap;
1253 vp = ap->a_vp;
1254 if (vap->va_type != VNON ||
1255 vap->va_nlink != VNOVAL ||
1256 vap->va_fsid != VNOVAL ||
1257 vap->va_fileid != VNOVAL ||
1258 vap->va_blocksize != VNOVAL ||
1259 (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1260 vap->va_rdev != VNOVAL ||
1261 (int)vap->va_bytes != VNOVAL ||
1262 vap->va_gen != VNOVAL) {
1263 return (EINVAL);
1264 }
1265
1266 pn = VTON(vp);
1267
1268 error = c = 0;
1269 if (vap->va_uid == (uid_t)VNOVAL)
1270 uid = pn->mn_uid;
1271 else
1272 uid = vap->va_uid;
1273 if (vap->va_gid == (gid_t)VNOVAL)
1274 gid = pn->mn_gid;
1275 else
1276 gid = vap->va_gid;
1277
1278 if (uid != pn->mn_uid || gid != pn->mn_gid) {
1279 /*
1280 * To modify the ownership of a file, must possess VADMIN
1281 * for that file.
1282 */
1283 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)))
1284 return (error);
1285
1286 /*
1287 * XXXRW: Why is there a privilege check here: shouldn't the
1288 * check in VOP_ACCESS() be enough? Also, are the group bits
1289 * below definitely right?
1290 */
1291 if ((ap->a_cred->cr_uid != pn->mn_uid || uid != pn->mn_uid ||
1292 (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) &&
1293 (error = priv_check(td, PRIV_MQ_ADMIN)) != 0)
1294 return (error);
1295 pn->mn_uid = uid;
1296 pn->mn_gid = gid;
1297 c = 1;
1298 }
1299
1300 if (vap->va_mode != (mode_t)VNOVAL) {
1301 if (ap->a_cred->cr_uid != pn->mn_uid &&
1302 (error = priv_check(td, PRIV_MQ_ADMIN)))
1303 return (error);
1304 pn->mn_mode = vap->va_mode;
1305 c = 1;
1306 }
1307
1308 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1309 /* See the comment in ufs_vnops::ufs_setattr(). */
1310 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
1311 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
1312 (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
1313 return (error);
1314 if (vap->va_atime.tv_sec != VNOVAL) {
1315 pn->mn_atime = vap->va_atime;
1316 }
1317 if (vap->va_mtime.tv_sec != VNOVAL) {
1318 pn->mn_mtime = vap->va_mtime;
1319 }
1320 c = 1;
1321 }
1322 if (c) {
1323 vfs_timestamp(&pn->mn_ctime);
1324 }
1325 return (0);
1326 }
1327
1328 #if 0
1329 struct vop_read_args {
1330 struct vop_generic_args a_gen;
1331 struct vnode *a_vp;
1332 struct uio *a_uio;
1333 int a_ioflag;
1334 struct ucred *a_cred;
1335 };
1336 #endif
1337
1338 /*
1339 * Read from a file
1340 */
1341 static int
1342 mqfs_read(struct vop_read_args *ap)
1343 {
1344 char buf[80];
1345 struct vnode *vp = ap->a_vp;
1346 struct uio *uio = ap->a_uio;
1347 struct mqueue *mq;
1348 int len, error;
1349
1350 if (vp->v_type != VREG)
1351 return (EINVAL);
1352
1353 mq = VTOMQ(vp);
1354 snprintf(buf, sizeof(buf),
1355 "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n",
1356 mq->mq_totalbytes,
1357 mq->mq_maxmsg,
1358 mq->mq_curmsgs,
1359 mq->mq_msgsize);
1360 buf[sizeof(buf)-1] = '\0';
1361 len = strlen(buf);
1362 error = uiomove_frombuf(buf, len, uio);
1363 return (error);
1364 }
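/*
 * With the filesystem mounted, reading a queue node (e.g.
 * "cat /mnt/mqueue/myqueue"; the mount point is illustrative) returns a
 * single status line in the format above, such as:
 *
 *	QSIZE:0          MAXMSG:10         CURMSG:0          MSGSIZE:1024
 */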
1365
1366 #if 0
1367 struct vop_readdir_args {
1368 struct vop_generic_args a_gen;
1369 struct vnode *a_vp;
1370 struct uio *a_uio;
1371 struct ucred *a_cred;
1372 int *a_eofflag;
1373 int *a_ncookies;
1374 uint64_t **a_cookies;
1375 };
1376 #endif
1377
1378 /*
1379 * Return directory entries.
1380 */
1381 static int
1382 mqfs_readdir(struct vop_readdir_args *ap)
1383 {
1384 struct vnode *vp;
1385 struct mqfs_info *mi;
1386 struct mqfs_node *pd;
1387 struct mqfs_node *pn;
1388 struct dirent entry;
1389 struct uio *uio;
1390 const void *pr_root;
1391 int *tmp_ncookies = NULL;
1392 off_t offset;
1393 int error, i;
1394
1395 vp = ap->a_vp;
1396 mi = VFSTOMQFS(vp->v_mount);
1397 pd = VTON(vp);
1398 uio = ap->a_uio;
1399
1400 if (vp->v_type != VDIR)
1401 return (ENOTDIR);
1402
1403 if (uio->uio_offset < 0)
1404 return (EINVAL);
1405
1406 if (ap->a_ncookies != NULL) {
1407 tmp_ncookies = ap->a_ncookies;
1408 *ap->a_ncookies = 0;
1409 ap->a_ncookies = NULL;
1410 }
1411
1412 error = 0;
1413 offset = 0;
1414
1415 pr_root = ap->a_cred->cr_prison->pr_root;
1416 sx_xlock(&mi->mi_lock);
1417
1418 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
1419 entry.d_reclen = sizeof(entry);
1420
1421 /*
1422 * Only show names within the same prison root directory
1423 * (or not associated with a prison, e.g. "." and "..").
1424 */
1425 if (pn->mn_pr_root != NULL && pn->mn_pr_root != pr_root)
1426 continue;
1427 if (!pn->mn_fileno)
1428 mqfs_fileno_alloc(mi, pn);
1429 entry.d_fileno = pn->mn_fileno;
1430 entry.d_off = offset + entry.d_reclen;
1431 for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i)
1432 entry.d_name[i] = pn->mn_name[i];
1433 entry.d_namlen = i;
1434 switch (pn->mn_type) {
1435 case mqfstype_root:
1436 case mqfstype_dir:
1437 case mqfstype_this:
1438 case mqfstype_parent:
1439 entry.d_type = DT_DIR;
1440 break;
1441 case mqfstype_file:
1442 entry.d_type = DT_REG;
1443 break;
1444 case mqfstype_symlink:
1445 entry.d_type = DT_LNK;
1446 break;
1447 default:
1448 panic("%s has unexpected node type: %d", pn->mn_name,
1449 pn->mn_type);
1450 }
1451 dirent_terminate(&entry);
1452 if (entry.d_reclen > uio->uio_resid)
1453 break;
1454 if (offset >= uio->uio_offset) {
1455 error = vfs_read_dirent(ap, &entry, offset);
1456 if (error)
1457 break;
1458 }
1459 offset += entry.d_reclen;
1460 }
1461 sx_xunlock(&mi->mi_lock);
1462
1463 uio->uio_offset = offset;
1464
1465 if (tmp_ncookies != NULL)
1466 ap->a_ncookies = tmp_ncookies;
1467
1468 return (error);
1469 }
1470
1471 #ifdef notyet
1472
1473 #if 0
1474 struct vop_mkdir_args {
1475 struct vnode *a_dvp;
1476 struvt vnode **a_vpp;
1477 struvt componentname *a_cnp;
1478 struct vattr *a_vap;
1479 };
1480 #endif
1481
1482 /*
1483 * Create a directory.
1484 */
1485 static int
1486 mqfs_mkdir(struct vop_mkdir_args *ap)
1487 {
1488 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1489 struct componentname *cnp = ap->a_cnp;
1490 struct mqfs_node *pd = VTON(ap->a_dvp);
1491 struct mqfs_node *pn;
1492 int error;
1493
1494 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
1495 return (ENOTDIR);
1496 sx_xlock(&mqfs->mi_lock);
1497 pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen,
1498 ap->a_vap->cn_cred, ap->a_vap->va_mode);
1499 if (pn != NULL)
1500 mqnode_addref(pn);
1501 sx_xunlock(&mqfs->mi_lock);
1502 if (pn == NULL) {
1503 error = ENOSPC;
1504 } else {
1505 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1506 mqnode_release(pn);
1507 }
1508 return (error);
1509 }
1510
1511 #if 0
1512 struct vop_rmdir_args {
1513 struct vnode *a_dvp;
1514 struct vnode *a_vp;
1515 struct componentname *a_cnp;
1516 };
1517 #endif
1518
1519 /*
1520 * Remove a directory.
1521 */
1522 static int
1523 mqfs_rmdir(struct vop_rmdir_args *ap)
1524 {
1525 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1526 struct mqfs_node *pn = VTON(ap->a_vp);
1527 struct mqfs_node *pt;
1528
1529 if (pn->mn_type != mqfstype_dir)
1530 return (ENOTDIR);
1531
1532 sx_xlock(&mqfs->mi_lock);
1533 if (pn->mn_deleted) {
1534 sx_xunlock(&mqfs->mi_lock);
1535 return (ENOENT);
1536 }
1537
1538 pt = LIST_FIRST(&pn->mn_children);
1539 pt = LIST_NEXT(pt, mn_sibling);
1540 pt = LIST_NEXT(pt, mn_sibling);
1541 if (pt != NULL) {
1542 sx_xunlock(&mqfs->mi_lock);
1543 return (ENOTEMPTY);
1544 }
1545 pt = pn->mn_parent;
1546 pn->mn_parent = NULL;
1547 pn->mn_deleted = 1;
1548 LIST_REMOVE(pn, mn_sibling);
1549 mqnode_release(pn);
1550 mqnode_release(pt);
1551 sx_xunlock(&mqfs->mi_lock);
1552 cache_purge(ap->a_vp);
1553 return (0);
1554 }
1555
1556 #endif /* notyet */
1557
1558 /*
1559 * See if this prison root is obsolete, and clean up associated queues if it is.
1560 */
1561 static int
1562 mqfs_prison_remove(void *obj, void *data __unused)
1563 {
1564 const struct prison *pr = obj;
1565 struct prison *tpr;
1566 struct mqfs_node *pn, *tpn;
1567 struct vnode *pr_root;
1568
1569 pr_root = pr->pr_root;
1570 if (pr->pr_parent->pr_root == pr_root)
1571 return (0);
1572 TAILQ_FOREACH(tpr, &allprison, pr_list) {
1573 if (tpr != pr && tpr->pr_root == pr_root)
1574 return (0);
1575 }
1576 /*
1577 * No jails are rooted in this directory anymore,
1578 * so no queues should be either.
1579 */
1580 sx_xlock(&mqfs_data.mi_lock);
1581 LIST_FOREACH_SAFE(pn, &mqfs_data.mi_root->mn_children,
1582 mn_sibling, tpn) {
1583 if (pn->mn_pr_root == pr_root)
1584 (void)do_unlink(pn, curthread->td_ucred);
1585 }
1586 sx_xunlock(&mqfs_data.mi_lock);
1587 return (0);
1588 }
1589
1590 /*
1591 * Allocate a message queue
1592 */
1593 static struct mqueue *
1594 mqueue_alloc(const struct mq_attr *attr)
1595 {
1596 struct mqueue *mq;
1597
1598 if (curmq >= maxmq)
1599 return (NULL);
1600 mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO);
1601 TAILQ_INIT(&mq->mq_msgq);
1602 if (attr != NULL) {
1603 mq->mq_maxmsg = attr->mq_maxmsg;
1604 mq->mq_msgsize = attr->mq_msgsize;
1605 } else {
1606 mq->mq_maxmsg = default_maxmsg;
1607 mq->mq_msgsize = default_msgsize;
1608 }
1609 mtx_init(&mq->mq_mutex, "mqueue lock", NULL, MTX_DEF);
1610 knlist_init_mtx(&mq->mq_rsel.si_note, &mq->mq_mutex);
1611 knlist_init_mtx(&mq->mq_wsel.si_note, &mq->mq_mutex);
1612 atomic_add_int(&curmq, 1);
1613 return (mq);
1614 }
1615
1616 /*
1617 * Destroy a message queue
1618 */
1619 static void
1620 mqueue_free(struct mqueue *mq)
1621 {
1622 struct mqueue_msg *msg;
1623
1624 while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) {
1625 TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link);
1626 free(msg, M_MQUEUEDATA);
1627 }
1628
1629 mtx_destroy(&mq->mq_mutex);
1630 seldrain(&mq->mq_rsel);
1631 seldrain(&mq->mq_wsel);
1632 knlist_destroy(&mq->mq_rsel.si_note);
1633 knlist_destroy(&mq->mq_wsel.si_note);
1634 uma_zfree(mqueue_zone, mq);
1635 atomic_add_int(&curmq, -1);
1636 }
1637
1638 /*
1639 * Load a message from user space
1640 */
1641 static struct mqueue_msg *
1642 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio)
1643 {
1644 struct mqueue_msg *msg;
1645 size_t len;
1646 int error;
1647
1648 len = sizeof(struct mqueue_msg) + msg_size;
1649 msg = malloc(len, M_MQUEUEDATA, M_WAITOK);
1650 error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg),
1651 msg_size);
1652 if (error) {
1653 free(msg, M_MQUEUEDATA);
1654 msg = NULL;
1655 } else {
1656 msg->msg_size = msg_size;
1657 msg->msg_prio = msg_prio;
1658 }
1659 return (msg);
1660 }
1661
1662 /*
1663 * Save a message to user space
1664 */
1665 static int
1666 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio)
1667 {
1668 int error;
1669
1670 error = copyout(((char *)msg) + sizeof(*msg), msg_ptr,
1671 msg->msg_size);
1672 if (error == 0 && msg_prio != NULL)
1673 error = copyout(&msg->msg_prio, msg_prio, sizeof(int));
1674 return (error);
1675 }
1676
1677 /*
1678 * Free a message's memory
1679 */
1680 static __inline void
1681 mqueue_freemsg(struct mqueue_msg *msg)
1682 {
1683 free(msg, M_MQUEUEDATA);
1684 }
1685
1686 /*
1687 * Send a message.  If waitok is false, the thread will not block
1688 * when the queue is full; otherwise the absolute timeout, if any,
1689 * is honored.
1690 */
1691 int
1692 mqueue_send(struct mqueue *mq, const char *msg_ptr,
1693 size_t msg_len, unsigned msg_prio, int waitok,
1694 const struct timespec *abs_timeout)
1695 {
1696 struct mqueue_msg *msg;
1697 struct timespec ts, ts2;
1698 struct timeval tv;
1699 int error;
1700
1701 if (msg_prio >= MQ_PRIO_MAX)
1702 return (EINVAL);
1703 if (msg_len > mq->mq_msgsize)
1704 return (EMSGSIZE);
1705 msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio);
1706 if (msg == NULL)
1707 return (EFAULT);
1708
1709 /* O_NONBLOCK case */
1710 if (!waitok) {
1711 error = _mqueue_send(mq, msg, -1);
1712 if (error)
1713 goto bad;
1714 return (0);
1715 }
1716
1717 /* we allow a null timeout (wait forever) */
1718 if (abs_timeout == NULL) {
1719 error = _mqueue_send(mq, msg, 0);
1720 if (error)
1721 goto bad;
1722 return (0);
1723 }
1724
1725 /* send it before checking time */
1726 error = _mqueue_send(mq, msg, -1);
1727 if (error == 0)
1728 return (0);
1729
1730 if (error != EAGAIN)
1731 goto bad;
1732
1733 if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
1734 error = EINVAL;
1735 goto bad;
1736 }
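	/*
	 * Wait loop: the remaining time to the absolute deadline is
	 * recomputed on each pass and converted to ticks with tvtohz(); an
	 * ETIMEDOUT from _mqueue_send() (tick-granularity sleep expiring)
	 * simply triggers another pass until the deadline has truly passed.
	 */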
1737 for (;;) {
1738 getnanotime(&ts);
1739 timespecsub(abs_timeout, &ts, &ts2);
1740 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1741 error = ETIMEDOUT;
1742 break;
1743 }
1744 TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1745 error = _mqueue_send(mq, msg, tvtohz(&tv));
1746 if (error != ETIMEDOUT)
1747 break;
1748 }
1749 if (error == 0)
1750 return (0);
1751 bad:
1752 mqueue_freemsg(msg);
1753 return (error);
1754 }
1755
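/*
 * Messages are kept sorted by descending priority, FIFO within equal
 * priority: _mqueue_send() appends at the tail when the new priority does
 * not exceed the tail's, and otherwise inserts before the first
 * lower-priority message, so _mqueue_recv() can always dequeue the head.
 */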
1756 /*
1757 * Common routine to send a message
1758 */
1759 static int
1760 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
1761 {
1762 struct mqueue_msg *msg2;
1763 int error = 0;
1764
1765 mtx_lock(&mq->mq_mutex);
1766 while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) {
1767 if (timo < 0) {
1768 mtx_unlock(&mq->mq_mutex);
1769 return (EAGAIN);
1770 }
1771 mq->mq_senders++;
1772 error = msleep(&mq->mq_senders, &mq->mq_mutex,
1773 PCATCH, "mqsend", timo);
1774 mq->mq_senders--;
1775 if (error == EAGAIN)
1776 error = ETIMEDOUT;
1777 }
1778 if (mq->mq_curmsgs >= mq->mq_maxmsg) {
1779 mtx_unlock(&mq->mq_mutex);
1780 return (error);
1781 }
1782 error = 0;
1783 if (TAILQ_EMPTY(&mq->mq_msgq)) {
1784 TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link);
1785 } else {
1786 if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) {
1787 TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link);
1788 } else {
1789 TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) {
1790 if (msg2->msg_prio < msg->msg_prio)
1791 break;
1792 }
1793 TAILQ_INSERT_BEFORE(msg2, msg, msg_link);
1794 }
1795 }
1796 mq->mq_curmsgs++;
1797 mq->mq_totalbytes += msg->msg_size;
1798 if (mq->mq_receivers)
1799 wakeup_one(&mq->mq_receivers);
1800 else if (mq->mq_notifier != NULL)
1801 mqueue_send_notification(mq);
1802 if (mq->mq_flags & MQ_RSEL) {
1803 mq->mq_flags &= ~MQ_RSEL;
1804 selwakeup(&mq->mq_rsel);
1805 }
1806 KNOTE_LOCKED(&mq->mq_rsel.si_note, 0);
1807 mtx_unlock(&mq->mq_mutex);
1808 return (0);
1809 }
1810
1811 /*
1812 * Send a realtime signal to the process that successfully registered
1813 * itself with mq_notify.
1814 */
1815 static void
1816 mqueue_send_notification(struct mqueue *mq)
1817 {
1818 struct mqueue_notifier *nt;
1819 struct thread *td;
1820 struct proc *p;
1821 int error;
1822
1823 mtx_assert(&mq->mq_mutex, MA_OWNED);
1824 nt = mq->mq_notifier;
1825 if (nt->nt_sigev.sigev_notify != SIGEV_NONE) {
1826 p = nt->nt_proc;
1827 error = sigev_findtd(p, &nt->nt_sigev, &td);
1828 if (error) {
1829 mq->mq_notifier = NULL;
1830 return;
1831 }
1832 if (!KSI_ONQ(&nt->nt_ksi)) {
1833 ksiginfo_set_sigev(&nt->nt_ksi, &nt->nt_sigev);
1834 tdsendsignal(p, td, nt->nt_ksi.ksi_signo, &nt->nt_ksi);
1835 }
1836 PROC_UNLOCK(p);
1837 }
1838 mq->mq_notifier = NULL;
1839 }
1840
1841 /*
1842 * Receive a message.  If waitok is false, the thread will not block
1843 * when the queue is empty; otherwise the absolute timeout, if any,
1844 * is honored.
1845 */
1846 int
1847 mqueue_receive(struct mqueue *mq, char *msg_ptr,
1848 size_t msg_len, unsigned *msg_prio, int waitok,
1849 const struct timespec *abs_timeout)
1850 {
1851 struct mqueue_msg *msg;
1852 struct timespec ts, ts2;
1853 struct timeval tv;
1854 int error;
1855
1856 if (msg_len < mq->mq_msgsize)
1857 return (EMSGSIZE);
1858
1859 /* O_NONBLOCK case */
1860 if (!waitok) {
1861 error = _mqueue_recv(mq, &msg, -1);
1862 if (error)
1863 return (error);
1864 goto received;
1865 }
1866
1867 /* we allow a null timeout (wait forever). */
1868 if (abs_timeout == NULL) {
1869 error = _mqueue_recv(mq, &msg, 0);
1870 if (error)
1871 return (error);
1872 goto received;
1873 }
1874
1875 /* try to get a message before checking time */
1876 error = _mqueue_recv(mq, &msg, -1);
1877 if (error == 0)
1878 goto received;
1879
1880 if (error != EAGAIN)
1881 return (error);
1882
1883 if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
1884 error = EINVAL;
1885 return (error);
1886 }
1887
1888 for (;;) {
1889 getnanotime(&ts);
1890 timespecsub(abs_timeout, &ts, &ts2);
1891 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1892 error = ETIMEDOUT;
1893 return (error);
1894 }
1895 TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1896 error = _mqueue_recv(mq, &msg, tvtohz(&tv));
1897 if (error == 0)
1898 break;
1899 if (error != ETIMEDOUT)
1900 return (error);
1901 }
1902
1903 received:
1904 error = mqueue_savemsg(msg, msg_ptr, msg_prio);
1905 if (error == 0) {
1906 curthread->td_retval[0] = msg->msg_size;
1907 curthread->td_retval[1] = 0;
1908 }
1909 mqueue_freemsg(msg);
1910 return (error);
1911 }
1912
1913 /*
1914 * Common routine to receive a message
1915 */
1916 static int
1917 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo)
1918 {
1919 int error = 0;
1920
1921 mtx_lock(&mq->mq_mutex);
1922 while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) {
1923 if (timo < 0) {
1924 mtx_unlock(&mq->mq_mutex);
1925 return (EAGAIN);
1926 }
1927 mq->mq_receivers++;
1928 error = msleep(&mq->mq_receivers, &mq->mq_mutex,
1929 PCATCH, "mqrecv", timo);
1930 mq->mq_receivers--;
1931 if (error == EAGAIN)
1932 error = ETIMEDOUT;
1933 }
1934 if (*msg != NULL) {
1935 error = 0;
1936 TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link);
1937 mq->mq_curmsgs--;
1938 mq->mq_totalbytes -= (*msg)->msg_size;
1939 if (mq->mq_senders)
1940 wakeup_one(&mq->mq_senders);
1941 if (mq->mq_flags & MQ_WSEL) {
1942 mq->mq_flags &= ~MQ_WSEL;
1943 selwakeup(&mq->mq_wsel);
1944 }
1945 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0);
1946 }
1947 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 &&
1948 !TAILQ_EMPTY(&mq->mq_msgq)) {
1949 mqueue_send_notification(mq);
1950 }
1951 mtx_unlock(&mq->mq_mutex);
1952 return (error);
1953 }
1954
1955 static __inline struct mqueue_notifier *
1956 notifier_alloc(void)
1957 {
1958 return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO));
1959 }
1960
1961 static __inline void
1962 notifier_free(struct mqueue_notifier *p)
1963 {
1964 uma_zfree(mqnoti_zone, p);
1965 }
1966
1967 static struct mqueue_notifier *
1968 notifier_search(struct proc *p, int fd)
1969 {
1970 struct mqueue_notifier *nt;
1971
1972 LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) {
1973 if (nt->nt_ksi.ksi_mqd == fd)
1974 break;
1975 }
1976 return (nt);
1977 }
1978
1979 static __inline void
1980 notifier_insert(struct proc *p, struct mqueue_notifier *nt)
1981 {
1982 LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link);
1983 }
1984
1985 static __inline void
1986 notifier_delete(struct proc *p, struct mqueue_notifier *nt)
1987 {
1988 LIST_REMOVE(nt, nt_link);
1989 notifier_free(nt);
1990 }
1991
1992 static void
1993 notifier_remove(struct proc *p, struct mqueue *mq, int fd)
1994 {
1995 struct mqueue_notifier *nt;
1996
1997 mtx_assert(&mq->mq_mutex, MA_OWNED);
1998 PROC_LOCK(p);
1999 nt = notifier_search(p, fd);
2000 if (nt != NULL) {
2001 if (mq->mq_notifier == nt)
2002 mq->mq_notifier = NULL;
2003 sigqueue_take(&nt->nt_ksi);
2004 notifier_delete(p, nt);
2005 }
2006 PROC_UNLOCK(p);
2007 }
2008
2009 int
2010 kern_kmq_open(struct thread *td, const char *upath, int flags, mode_t mode,
2011 const struct mq_attr *attr)
2012 {
2013 char *path, pathbuf[MQFS_NAMELEN + 1];
2014 struct mqfs_node *pn;
2015 struct pwddesc *pdp;
2016 struct file *fp;
2017 struct mqueue *mq;
2018 int fd, error, len, cmode;
2019
2020 AUDIT_ARG_FFLAGS(flags);
2021 AUDIT_ARG_MODE(mode);
2022
2023 pdp = td->td_proc->p_pd;
2024 cmode = ((mode & ~pdp->pd_cmask) & ALLPERMS) & ~S_ISTXT;
2025 mq = NULL;
2026 if ((flags & O_CREAT) != 0 && attr != NULL) {
2027 if (attr->mq_maxmsg <= 0 || attr->mq_maxmsg > maxmsg)
2028 return (EINVAL);
2029 if (attr->mq_msgsize <= 0 || attr->mq_msgsize > maxmsgsize)
2030 return (EINVAL);
2031 }
2032
2033 path = pathbuf;
2034 error = copyinstr(upath, path, MQFS_NAMELEN + 1, NULL);
2035 if (error)
2036 return (error);
2037
2038 /*
2039 * The first character of name may be a slash (/) character
2040 * and the remaining characters of name cannot include any slash
2041 * characters.
2042 */
2043 len = strlen(path);
2044 if (len < 2 || strchr(path + 1, '/') != NULL)
2045 return (EINVAL);
	if (path[0] == '/') {
		path++;
		len--;
	}
	/*
	 * "." and ".." are magic directories, populated on the fly, and cannot
	 * be opened as queues.
	 */
	if (strcmp(path, ".") == 0 || strcmp(path, "..") == 0)
		return (EINVAL);
	AUDIT_ARG_UPATH1_CANON(pathbuf);

	error = falloc(td, &fp, &fd, O_CLOEXEC);
	if (error)
		return (error);

	sx_xlock(&mqfs_data.mi_lock);
	pn = mqfs_search(mqfs_data.mi_root, path, len, td->td_ucred);
	if (pn == NULL) {
		if (!(flags & O_CREAT)) {
			error = ENOENT;
		} else {
			mq = mqueue_alloc(attr);
			if (mq == NULL) {
				error = ENFILE;
			} else {
				pn = mqfs_create_file(mqfs_data.mi_root,
				    path, len, td->td_ucred,
				    cmode);
				if (pn == NULL) {
					error = ENOSPC;
					mqueue_free(mq);
				}
			}
		}

		if (error == 0) {
			pn->mn_data = mq;
		}
	} else {
		if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) {
			error = EEXIST;
		} else {
			accmode_t accmode = 0;

			if (flags & FREAD)
				accmode |= VREAD;
			if (flags & FWRITE)
				accmode |= VWRITE;
			error = vaccess(VREG, pn->mn_mode, pn->mn_uid,
			    pn->mn_gid, accmode, td->td_ucred);
		}
	}

	if (error) {
		sx_xunlock(&mqfs_data.mi_lock);
		fdclose(td, fp, fd);
		fdrop(fp, td);
		return (error);
	}

	mqnode_addref(pn);
	sx_xunlock(&mqfs_data.mi_lock);

	finit(fp, flags & (FREAD | FWRITE | O_NONBLOCK), DTYPE_MQUEUE, pn,
	    &mqueueops);

	td->td_retval[0] = fd;
	fdrop(fp, td);
	return (0);
}
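
/*
 * Illustrative userspace sketch (not part of this file): the libc mq_open(3)
 * wrapper ends up in kern_kmq_open() above.  The queue name is expected to
 * begin with a single slash and contain no further slashes, "." and ".." are
 * rejected, and creation attributes are checked against the maxmsg and
 * maxmsgsize limits.  The name and values below are arbitrary examples.
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr attr = {
 *		.mq_maxmsg = 16,	// must not exceed the maxmsg limit
 *		.mq_msgsize = 128,	// must not exceed the maxmsgsize limit
 *	};
 *	mqd_t mqd = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *	if (mqd == (mqd_t)-1)
 *		err(1, "mq_open");
 */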

/*
 * Syscall to open a message queue.
 */
int
sys_kmq_open(struct thread *td, struct kmq_open_args *uap)
{
	struct mq_attr attr;
	int flags, error;

	if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
		return (EINVAL);
	flags = FFLAGS(uap->flags);
	if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
		error = copyin(uap->attr, &attr, sizeof(attr));
		if (error)
			return (error);
	}
	return (kern_kmq_open(td, uap->path, flags, uap->mode,
	    uap->attr != NULL ? &attr : NULL));
}

/*
 * Syscall to unlink a message queue.
 */
int
sys_kmq_unlink(struct thread *td, struct kmq_unlink_args *uap)
{
	char *path, pathbuf[MQFS_NAMELEN + 1];
	struct mqfs_node *pn;
	int error, len;

	path = pathbuf;
	error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL);
	if (error)
		return (error);

	len = strlen(path);
	if (len < 2 || strchr(path + 1, '/') != NULL)
		return (EINVAL);
	if (path[0] == '/') {
		path++;
		len--;
	}
	if (strcmp(path, ".") == 0 || strcmp(path, "..") == 0)
		return (EINVAL);
	AUDIT_ARG_UPATH1_CANON(pathbuf);

	sx_xlock(&mqfs_data.mi_lock);
	pn = mqfs_search(mqfs_data.mi_root, path, len, td->td_ucred);
	if (pn != NULL)
		error = do_unlink(pn, td->td_ucred);
	else
		error = ENOENT;
	sx_xunlock(&mqfs_data.mi_lock);
	return (error);
}

typedef int (*_fgetf)(struct thread *, int, const cap_rights_t *,
    struct file **);

/*
 * Get a message queue from a file descriptor.
 */
static int
_getmq(struct thread *td, int fd, const cap_rights_t *rightsp, _fgetf func,
    struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq)
{
	struct mqfs_node *pn;
	int error;

	error = func(td, fd, rightsp, fpp);
	if (error)
		return (error);
	if (&mqueueops != (*fpp)->f_ops) {
		fdrop(*fpp, td);
		return (EBADF);
	}
	pn = (*fpp)->f_data;
	if (ppn)
		*ppn = pn;
	if (pmq)
		*pmq = pn->mn_data;
	return (0);
}

static __inline int
getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn,
    struct mqueue **pmq)
{

	return _getmq(td, fd, &cap_event_rights, fget,
	    fpp, ppn, pmq);
}

static __inline int
getmq_read(struct thread *td, int fd, struct file **fpp,
    struct mqfs_node **ppn, struct mqueue **pmq)
{

	return _getmq(td, fd, &cap_read_rights, fget_read,
	    fpp, ppn, pmq);
}

static __inline int
getmq_write(struct thread *td, int fd, struct file **fpp,
    struct mqfs_node **ppn, struct mqueue **pmq)
{

	return _getmq(td, fd, &cap_write_rights, fget_write,
	    fpp, ppn, pmq);
}

int
kern_kmq_setattr(struct thread *td, int mqd, const struct mq_attr *attr,
    struct mq_attr *oattr)
{
	struct mqueue *mq;
	struct file *fp;
	u_int oflag, flag;
	int error;

	AUDIT_ARG_FD(mqd);
	if (attr != NULL && (attr->mq_flags & ~O_NONBLOCK) != 0)
		return (EINVAL);
	error = getmq(td, mqd, &fp, NULL, &mq);
	if (error)
		return (error);
	oattr->mq_maxmsg = mq->mq_maxmsg;
	oattr->mq_msgsize = mq->mq_msgsize;
	oattr->mq_curmsgs = mq->mq_curmsgs;
	if (attr != NULL) {
		do {
			oflag = flag = fp->f_flag;
			flag &= ~O_NONBLOCK;
			flag |= (attr->mq_flags & O_NONBLOCK);
		} while (atomic_cmpset_int(&fp->f_flag, oflag, flag) == 0);
	} else
		oflag = fp->f_flag;
	oattr->mq_flags = (O_NONBLOCK & oflag);
	fdrop(fp, td);
	return (error);
}
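
/*
 * Illustrative userspace sketch (not part of this file): as kern_kmq_setattr()
 * above shows, only O_NONBLOCK may be changed on an open queue through the
 * standard mq_setattr(2) interface; mq_maxmsg and mq_msgsize are fixed at
 * creation time, and the previous attributes are returned through oattr.
 *
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr req = { .mq_flags = O_NONBLOCK }, old;
 *	if (mq_setattr(mqd, &req, &old) == -1)
 *		err(1, "mq_setattr");
 *	// old.mq_flags records whether the queue was blocking before.
 */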

int
sys_kmq_setattr(struct thread *td, struct kmq_setattr_args *uap)
{
	struct mq_attr attr, oattr;
	int error;

	if (uap->attr != NULL) {
		error = copyin(uap->attr, &attr, sizeof(attr));
		if (error != 0)
			return (error);
	}
	error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
	    &oattr);
	if (error == 0 && uap->oattr != NULL) {
		bzero(oattr.__reserved, sizeof(oattr.__reserved));
		error = copyout(&oattr, uap->oattr, sizeof(oattr));
	}
	return (error);
}

int
kern_kmq_timedreceive(struct thread *td, int mqd, char *msg_ptr,
    size_t msg_len, unsigned int *msg_prio, const struct timespec *abs_timeout)
{
	struct mqueue *mq;
	struct file *fp;
	int error, waitok;

	AUDIT_ARG_FD(mqd);
	error = getmq_read(td, mqd, &fp, NULL, &mq);
	if (error != 0)
		return (error);
	waitok = (fp->f_flag & O_NONBLOCK) == 0;
	error = mqueue_receive(mq, msg_ptr, msg_len, msg_prio, waitok,
	    abs_timeout);
	fdrop(fp, td);
	return (error);
}

int
sys_kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap)
{
	struct timespec *abs_timeout, ets;
	int error;

	if (uap->abs_timeout != NULL) {
		error = copyin(uap->abs_timeout, &ets, sizeof(ets));
		if (error != 0)
			return (error);
		abs_timeout = &ets;
	} else
		abs_timeout = NULL;

	return (kern_kmq_timedreceive(td, uap->mqd, uap->msg_ptr, uap->msg_len,
	    uap->msg_prio, abs_timeout));
}

int
kern_kmq_timedsend(struct thread *td, int mqd, const char *msg_ptr,
    size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout)
{
	struct mqueue *mq;
	struct file *fp;
	int error, waitok;

	AUDIT_ARG_FD(mqd);
	error = getmq_write(td, mqd, &fp, NULL, &mq);
	if (error != 0)
		return (error);
	waitok = (fp->f_flag & O_NONBLOCK) == 0;
	error = mqueue_send(mq, msg_ptr, msg_len, msg_prio, waitok,
	    abs_timeout);
	fdrop(fp, td);
	return (error);
}

int
sys_kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap)
{
	struct timespec *abs_timeout, ets;
	int error;

	if (uap->abs_timeout != NULL) {
		error = copyin(uap->abs_timeout, &ets, sizeof(ets));
		if (error != 0)
			return (error);
		abs_timeout = &ets;
	} else
		abs_timeout = NULL;

	return (kern_kmq_timedsend(td, uap->mqd, uap->msg_ptr, uap->msg_len,
	    uap->msg_prio, abs_timeout));
}
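
/*
 * Illustrative userspace sketch (not part of this file): the timeout passed
 * to mq_timedsend(2)/mq_timedreceive(2) is an absolute CLOCK_REALTIME time,
 * and it is consulted only when the descriptor is in blocking mode, matching
 * the waitok handling above.
 *
 *	#include <err.h>
 *	#include <mqueue.h>
 *	#include <time.h>
 *
 *	struct timespec abstime;
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;		// give up roughly five seconds from now
 *	if (mq_timedsend(mqd, buf, len, 0, &abstime) == -1)
 *		warn("mq_timedsend");	// ETIMEDOUT if the queue stayed full
 */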

int
kern_kmq_notify(struct thread *td, int mqd, struct sigevent *sigev)
{
	struct filedesc *fdp;
	struct proc *p;
	struct mqueue *mq;
	struct file *fp, *fp2;
	struct mqueue_notifier *nt, *newnt = NULL;
	int error;

	AUDIT_ARG_FD(mqd);
	if (sigev != NULL) {
		if (sigev->sigev_notify != SIGEV_SIGNAL &&
		    sigev->sigev_notify != SIGEV_THREAD_ID &&
		    sigev->sigev_notify != SIGEV_NONE)
			return (EINVAL);
		if ((sigev->sigev_notify == SIGEV_SIGNAL ||
		    sigev->sigev_notify == SIGEV_THREAD_ID) &&
		    !_SIG_VALID(sigev->sigev_signo))
			return (EINVAL);
	}
	p = td->td_proc;
	fdp = td->td_proc->p_fd;
	error = getmq(td, mqd, &fp, NULL, &mq);
	if (error)
		return (error);
again:
	FILEDESC_SLOCK(fdp);
	fp2 = fget_noref(fdp, mqd);
	if (fp2 == NULL) {
		FILEDESC_SUNLOCK(fdp);
		error = EBADF;
		goto out;
	}
#ifdef CAPABILITIES
	error = cap_check(cap_rights(fdp, mqd), &cap_event_rights);
	if (error) {
		FILEDESC_SUNLOCK(fdp);
		goto out;
	}
#endif
	if (fp2 != fp) {
		FILEDESC_SUNLOCK(fdp);
		error = EBADF;
		goto out;
	}
	mtx_lock(&mq->mq_mutex);
	FILEDESC_SUNLOCK(fdp);
	if (sigev != NULL) {
		if (mq->mq_notifier != NULL) {
			error = EBUSY;
		} else {
			PROC_LOCK(p);
			nt = notifier_search(p, mqd);
			if (nt == NULL) {
				if (newnt == NULL) {
					PROC_UNLOCK(p);
					mtx_unlock(&mq->mq_mutex);
					newnt = notifier_alloc();
					goto again;
				}
			}

			if (nt != NULL) {
				sigqueue_take(&nt->nt_ksi);
				if (newnt != NULL) {
					notifier_free(newnt);
					newnt = NULL;
				}
			} else {
				nt = newnt;
				newnt = NULL;
				ksiginfo_init(&nt->nt_ksi);
				nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT;
				nt->nt_ksi.ksi_code = SI_MESGQ;
				nt->nt_proc = p;
				nt->nt_ksi.ksi_mqd = mqd;
				notifier_insert(p, nt);
			}
			nt->nt_sigev = *sigev;
			mq->mq_notifier = nt;
			PROC_UNLOCK(p);
			/*
			 * If there are no receivers and the message queue is
			 * not empty, send the notification as soon as
			 * possible.
			 */
			if (mq->mq_receivers == 0 &&
			    !TAILQ_EMPTY(&mq->mq_msgq))
				mqueue_send_notification(mq);
		}
	} else {
		notifier_remove(p, mq, mqd);
	}
	mtx_unlock(&mq->mq_mutex);

out:
	fdrop(fp, td);
	if (newnt != NULL)
		notifier_free(newnt);
	return (error);
}
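
/*
 * Illustrative userspace sketch (not part of this file): mq_notify(2) arms a
 * one-shot notification that fires when a message arrives on an empty queue
 * with no waiting receivers.  Only one registration per queue is allowed
 * (EBUSY otherwise), and passing a NULL sigevent removes the registration,
 * as kern_kmq_notify() above implements.
 *
 *	#include <err.h>
 *	#include <mqueue.h>
 *	#include <signal.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo = SIGUSR1,
 *	};
 *	if (mq_notify(mqd, &sev) == -1)
 *		err(1, "mq_notify");
 *	// ...later, mq_notify(mqd, NULL) cancels the registration.
 */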

int
sys_kmq_notify(struct thread *td, struct kmq_notify_args *uap)
{
	struct sigevent ev, *evp;
	int error;

	if (uap->sigev == NULL) {
		evp = NULL;
	} else {
		error = copyin(uap->sigev, &ev, sizeof(ev));
		if (error != 0)
			return (error);
		evp = &ev;
	}
	return (kern_kmq_notify(td, uap->mqd, evp));
}

static void
mqueue_fdclose(struct thread *td, int fd, struct file *fp)
{
	struct mqueue *mq;
#ifdef INVARIANTS
	struct filedesc *fdp;

	fdp = td->td_proc->p_fd;
	FILEDESC_LOCK_ASSERT(fdp);
#endif

	if (fp->f_ops == &mqueueops) {
		mq = FPTOMQ(fp);
		mtx_lock(&mq->mq_mutex);
		notifier_remove(td->td_proc, mq, fd);

		/* Wake up threads selecting on the queue in this process. */
		if (mq->mq_flags & MQ_RSEL) {
			mq->mq_flags &= ~MQ_RSEL;
			selwakeup(&mq->mq_rsel);
		}
		if (mq->mq_flags & MQ_WSEL) {
			mq->mq_flags &= ~MQ_WSEL;
			selwakeup(&mq->mq_wsel);
		}
		mtx_unlock(&mq->mq_mutex);
	}
}

static void
mq_proc_exit(void *arg __unused, struct proc *p)
{
	struct filedesc *fdp;
	struct file *fp;
	struct mqueue *mq;
	int i;

	fdp = p->p_fd;
	FILEDESC_SLOCK(fdp);
	for (i = 0; i < fdp->fd_nfiles; ++i) {
		fp = fget_noref(fdp, i);
		if (fp != NULL && fp->f_ops == &mqueueops) {
			mq = FPTOMQ(fp);
			mtx_lock(&mq->mq_mutex);
			notifier_remove(p, FPTOMQ(fp), i);
			mtx_unlock(&mq->mq_mutex);
		}
	}
	FILEDESC_SUNLOCK(fdp);
	KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left"));
}

static int
mqf_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct mqueue *mq = FPTOMQ(fp);
	int revents = 0;

	mtx_lock(&mq->mq_mutex);
	if (events & (POLLIN | POLLRDNORM)) {
		if (mq->mq_curmsgs) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			mq->mq_flags |= MQ_RSEL;
			selrecord(td, &mq->mq_rsel);
		}
	}
	if (events & POLLOUT) {
		if (mq->mq_curmsgs < mq->mq_maxmsg)
			revents |= POLLOUT;
		else {
			mq->mq_flags |= MQ_WSEL;
			selrecord(td, &mq->mq_wsel);
		}
	}
	mtx_unlock(&mq->mq_mutex);
	return (revents);
}
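
/*
 * Illustrative userspace sketch (not part of this file): on FreeBSD a message
 * queue descriptor is a real file descriptor, so it can be polled for
 * readability or writability as mqf_poll() above implements.  This sketch
 * assumes the FreeBSD mq_getfd_np(3) extension to obtain that descriptor;
 * handle_message() is a hypothetical consumer.
 *
 *	#include <mqueue.h>
 *	#include <poll.h>
 *
 *	struct pollfd pfd = {
 *		.fd = mq_getfd_np(mqd),
 *		.events = POLLIN,
 *	};
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN) != 0)
 *		handle_message(mqd);	// queue is readable without blocking
 */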

static int
mqf_close(struct file *fp, struct thread *td)
{
	struct mqfs_node *pn;

	fp->f_ops = &badfileops;
	pn = fp->f_data;
	fp->f_data = NULL;
	sx_xlock(&mqfs_data.mi_lock);
	mqnode_release(pn);
	sx_xunlock(&mqfs_data.mi_lock);
	return (0);
}

static int
mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred)
{
	struct mqfs_node *pn = fp->f_data;

	bzero(st, sizeof *st);
	sx_xlock(&mqfs_data.mi_lock);
	st->st_atim = pn->mn_atime;
	st->st_mtim = pn->mn_mtime;
	st->st_ctim = pn->mn_ctime;
	st->st_birthtim = pn->mn_birth;
	st->st_uid = pn->mn_uid;
	st->st_gid = pn->mn_gid;
	st->st_mode = S_IFIFO | pn->mn_mode;
	sx_xunlock(&mqfs_data.mi_lock);
	return (0);
}

static int
mqf_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct mqfs_node *pn;
	int error;

	error = 0;
	pn = fp->f_data;
	sx_xlock(&mqfs_data.mi_lock);
	error = vaccess(VREG, pn->mn_mode, pn->mn_uid, pn->mn_gid, VADMIN,
	    active_cred);
	if (error != 0)
		goto out;
	pn->mn_mode = mode & ACCESSPERMS;
out:
	sx_xunlock(&mqfs_data.mi_lock);
	return (error);
}

static int
mqf_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct mqfs_node *pn;
	int error;

	error = 0;
	pn = fp->f_data;
	sx_xlock(&mqfs_data.mi_lock);
	if (uid == (uid_t)-1)
		uid = pn->mn_uid;
	if (gid == (gid_t)-1)
		gid = pn->mn_gid;
	if (((uid != pn->mn_uid && uid != active_cred->cr_uid) ||
	    (gid != pn->mn_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
		goto out;
	pn->mn_uid = uid;
	pn->mn_gid = gid;
out:
	sx_xunlock(&mqfs_data.mi_lock);
	return (error);
}

static int
mqf_kqfilter(struct file *fp, struct knote *kn)
{
	struct mqueue *mq = FPTOMQ(fp);
	int error = 0;

	if (kn->kn_filter == EVFILT_READ) {
		kn->kn_fop = &mq_rfiltops;
		knlist_add(&mq->mq_rsel.si_note, kn, 0);
	} else if (kn->kn_filter == EVFILT_WRITE) {
		kn->kn_fop = &mq_wfiltops;
		knlist_add(&mq->mq_wsel.si_note, kn, 0);
	} else
		error = EINVAL;
	return (error);
}

static void
filt_mqdetach(struct knote *kn)
{
	struct mqueue *mq = FPTOMQ(kn->kn_fp);

	if (kn->kn_filter == EVFILT_READ)
		knlist_remove(&mq->mq_rsel.si_note, kn, 0);
	else if (kn->kn_filter == EVFILT_WRITE)
		knlist_remove(&mq->mq_wsel.si_note, kn, 0);
	else
		panic("filt_mqdetach");
}

static int
filt_mqread(struct knote *kn, long hint)
{
	struct mqueue *mq = FPTOMQ(kn->kn_fp);

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	return (mq->mq_curmsgs != 0);
}

static int
filt_mqwrite(struct knote *kn, long hint)
{
	struct mqueue *mq = FPTOMQ(kn->kn_fp);

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	return (mq->mq_curmsgs < mq->mq_maxmsg);
}
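
/*
 * Illustrative userspace sketch (not part of this file): the EVFILT_READ and
 * EVFILT_WRITE filters attached by mqf_kqfilter() above report a queue with
 * pending messages or with free space, respectively, so a message queue can
 * be watched with kqueue(2).  As in the poll sketch, the descriptor is
 * assumed to come from the FreeBSD mq_getfd_np(3) extension.
 *
 *	#include <sys/event.h>
 *	#include <err.h>
 *	#include <mqueue.h>
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *	EV_SET(&ev, mq_getfd_np(mqd), EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kq == -1 || kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kqueue/kevent");
 *	// A later kevent() wait returns when the queue becomes non-empty.
 */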

static int
mqf_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{

	kif->kf_type = KF_TYPE_MQUEUE;
	return (0);
}

static const struct fileops mqueueops = {
	.fo_read = invfo_rdwr,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = invfo_ioctl,
	.fo_poll = mqf_poll,
	.fo_kqfilter = mqf_kqfilter,
	.fo_stat = mqf_stat,
	.fo_close = mqf_close,
	.fo_chmod = mqf_chmod,
	.fo_chown = mqf_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = mqf_fill_kinfo,
	.fo_cmp = file_kcmp_generic,
	.fo_flags = DFLAG_PASSABLE,
};

static struct vop_vector mqfs_vnodeops = {
	.vop_default = &default_vnodeops,
	.vop_access = mqfs_access,
	.vop_cachedlookup = mqfs_lookup,
	.vop_lookup = vfs_cache_lookup,
	.vop_reclaim = mqfs_reclaim,
	.vop_create = mqfs_create,
	.vop_remove = mqfs_remove,
	.vop_inactive = mqfs_inactive,
	.vop_open = mqfs_open,
	.vop_close = mqfs_close,
	.vop_getattr = mqfs_getattr,
	.vop_setattr = mqfs_setattr,
	.vop_read = mqfs_read,
	.vop_write = VOP_EOPNOTSUPP,
	.vop_readdir = mqfs_readdir,
	.vop_mkdir = VOP_EOPNOTSUPP,
	.vop_rmdir = VOP_EOPNOTSUPP
};
VFS_VOP_VECTOR_REGISTER(mqfs_vnodeops);

static struct vfsops mqfs_vfsops = {
	.vfs_init = mqfs_init,
	.vfs_uninit = mqfs_uninit,
	.vfs_mount = mqfs_mount,
	.vfs_unmount = mqfs_unmount,
	.vfs_root = mqfs_root,
	.vfs_statfs = mqfs_statfs,
};

static struct vfsconf mqueuefs_vfsconf = {
	.vfc_version = VFS_VERSION,
	.vfc_name = "mqueuefs",
	.vfc_vfsops = &mqfs_vfsops,
	.vfc_typenum = -1,
	.vfc_flags = VFCF_SYNTHETIC
};

static struct syscall_helper_data mq_syscalls[] = {
	SYSCALL_INIT_HELPER(kmq_open),
	SYSCALL_INIT_HELPER_F(kmq_setattr, SYF_CAPENABLED),
	SYSCALL_INIT_HELPER_F(kmq_timedsend, SYF_CAPENABLED),
	SYSCALL_INIT_HELPER_F(kmq_timedreceive, SYF_CAPENABLED),
	SYSCALL_INIT_HELPER_F(kmq_notify, SYF_CAPENABLED),
	SYSCALL_INIT_HELPER(kmq_unlink),
	SYSCALL_INIT_LAST
};

#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32.h>
#include <compat/freebsd32/freebsd32_proto.h>
#include <compat/freebsd32/freebsd32_signal.h>
#include <compat/freebsd32/freebsd32_syscall.h>
#include <compat/freebsd32/freebsd32_util.h>

static void
mq_attr_from32(const struct mq_attr32 *from, struct mq_attr *to)
{

	to->mq_flags = from->mq_flags;
	to->mq_maxmsg = from->mq_maxmsg;
	to->mq_msgsize = from->mq_msgsize;
	to->mq_curmsgs = from->mq_curmsgs;
}

static void
mq_attr_to32(const struct mq_attr *from, struct mq_attr32 *to)
{

	to->mq_flags = from->mq_flags;
	to->mq_maxmsg = from->mq_maxmsg;
	to->mq_msgsize = from->mq_msgsize;
	to->mq_curmsgs = from->mq_curmsgs;
}

int
freebsd32_kmq_open(struct thread *td, struct freebsd32_kmq_open_args *uap)
{
	struct mq_attr attr;
	struct mq_attr32 attr32;
	int flags, error;

	if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
		return (EINVAL);
	flags = FFLAGS(uap->flags);
	if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
		error = copyin(uap->attr, &attr32, sizeof(attr32));
		if (error)
			return (error);
		mq_attr_from32(&attr32, &attr);
	}
	return (kern_kmq_open(td, uap->path, flags, uap->mode,
	    uap->attr != NULL ? &attr : NULL));
}

int
freebsd32_kmq_setattr(struct thread *td, struct freebsd32_kmq_setattr_args *uap)
{
	struct mq_attr attr, oattr;
	struct mq_attr32 attr32, oattr32;
	int error;

	if (uap->attr != NULL) {
		error = copyin(uap->attr, &attr32, sizeof(attr32));
		if (error != 0)
			return (error);
		mq_attr_from32(&attr32, &attr);
	}
	error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL,
	    &oattr);
	if (error == 0 && uap->oattr != NULL) {
		mq_attr_to32(&oattr, &oattr32);
		bzero(oattr32.__reserved, sizeof(oattr32.__reserved));
		error = copyout(&oattr32, uap->oattr, sizeof(oattr32));
	}
	return (error);
}

int
freebsd32_kmq_timedsend(struct thread *td,
    struct freebsd32_kmq_timedsend_args *uap)
{
	struct timespec32 ets32;
	struct timespec *abs_timeout, ets;
	int error;

	if (uap->abs_timeout != NULL) {
		error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
		if (error != 0)
			return (error);
		CP(ets32, ets, tv_sec);
		CP(ets32, ets, tv_nsec);
		abs_timeout = &ets;
	} else
		abs_timeout = NULL;

	return (kern_kmq_timedsend(td, uap->mqd, uap->msg_ptr, uap->msg_len,
	    uap->msg_prio, abs_timeout));
}

int
freebsd32_kmq_timedreceive(struct thread *td,
    struct freebsd32_kmq_timedreceive_args *uap)
{
	struct timespec32 ets32;
	struct timespec *abs_timeout, ets;
	int error;

	if (uap->abs_timeout != NULL) {
		error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
		if (error != 0)
			return (error);
		CP(ets32, ets, tv_sec);
		CP(ets32, ets, tv_nsec);
		abs_timeout = &ets;
	} else
		abs_timeout = NULL;

	return (kern_kmq_timedreceive(td, uap->mqd, uap->msg_ptr, uap->msg_len,
	    uap->msg_prio, abs_timeout));
}

int
freebsd32_kmq_notify(struct thread *td, struct freebsd32_kmq_notify_args *uap)
{
	struct sigevent ev, *evp;
	struct sigevent32 ev32;
	int error;

	if (uap->sigev == NULL) {
		evp = NULL;
	} else {
		error = copyin(uap->sigev, &ev32, sizeof(ev32));
		if (error != 0)
			return (error);
		error = convert_sigevent32(&ev32, &ev);
		if (error != 0)
			return (error);
		evp = &ev;
	}
	return (kern_kmq_notify(td, uap->mqd, evp));
}

static struct syscall_helper_data mq32_syscalls[] = {
	SYSCALL32_INIT_HELPER(freebsd32_kmq_open),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_setattr, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedsend, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedreceive, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_notify, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_COMPAT(kmq_unlink),
	SYSCALL_INIT_LAST
};
#endif

static int
mqinit(void)
{
	int error;

	error = syscall_helper_register(mq_syscalls, SY_THR_STATIC_KLD);
	if (error != 0)
		return (error);
#ifdef COMPAT_FREEBSD32
	error = syscall32_helper_register(mq32_syscalls, SY_THR_STATIC_KLD);
	if (error != 0)
		return (error);
#endif
	return (0);
}

static int
mqunload(void)
{

#ifdef COMPAT_FREEBSD32
	syscall32_helper_unregister(mq32_syscalls);
#endif
	syscall_helper_unregister(mq_syscalls);
	return (0);
}

static int
mq_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	error = vfs_modevent(module, cmd, arg);
	if (error != 0)
		return (error);

	switch (cmd) {
	case MOD_LOAD:
		error = mqinit();
		if (error != 0)
			mqunload();
		break;
	case MOD_UNLOAD:
		error = mqunload();
		break;
	default:
		break;
	}
	return (error);
}

static moduledata_t mqueuefs_mod = {
	"mqueuefs",
	mq_modload,
	&mqueuefs_vfsconf
};
DECLARE_MODULE(mqueuefs, mqueuefs_mod, SI_SUB_VFS, SI_ORDER_MIDDLE);
MODULE_VERSION(mqueuefs, 1);