/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * POSIX message queue implementation.
 *
 * 1) An mqueue filesystem can be mounted; each message queue then appears
 *    as a file in the mounted directory, where the user can change a
 *    queue's permissions and ownership or remove a queue.  Manually
 *    creating a file in that directory creates a message queue in the
 *    kernel with the same name and the default message queue attributes.
 *    This method is not recommended, since the mq_open() syscall allows
 *    the user to specify different attributes.  The filesystem can be
 *    mounted multiple times at different mount points, but every mount
 *    shows the same contents.
 *
 * 2) Standard POSIX message queue API.  These syscalls do not go through
 *    the VFS layer but operate directly on the internal data structures,
 *    which allows the IPC facility to be used without mounting the mqueue
 *    filesystem.
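 *
 *    For illustration: with this module loaded, the filesystem in 1) is
 *    typically mounted with a command along the lines of
 *    "mount -t mqueuefs null /mnt/mqueue"; the "null" source argument is
 *    just a placeholder, since mqueuefs is a synthetic filesystem.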
43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include <sys/param.h> 49 #include <sys/kernel.h> 50 #include <sys/systm.h> 51 #include <sys/limits.h> 52 #include <sys/buf.h> 53 #include <sys/dirent.h> 54 #include <sys/event.h> 55 #include <sys/eventhandler.h> 56 #include <sys/fcntl.h> 57 #include <sys/file.h> 58 #include <sys/filedesc.h> 59 #include <sys/limits.h> 60 #include <sys/lock.h> 61 #include <sys/malloc.h> 62 #include <sys/module.h> 63 #include <sys/mount.h> 64 #include <sys/mqueue.h> 65 #include <sys/mutex.h> 66 #include <sys/namei.h> 67 #include <sys/posix4.h> 68 #include <sys/poll.h> 69 #include <sys/priv.h> 70 #include <sys/proc.h> 71 #include <sys/queue.h> 72 #include <sys/sysproto.h> 73 #include <sys/stat.h> 74 #include <sys/syscall.h> 75 #include <sys/syscallsubr.h> 76 #include <sys/sysent.h> 77 #include <sys/sx.h> 78 #include <sys/sysctl.h> 79 #include <sys/taskqueue.h> 80 #include <sys/unistd.h> 81 #include <sys/vnode.h> 82 #include <machine/atomic.h> 83 84 /* 85 * Limits and constants 86 */ 87 #define MQFS_NAMELEN NAME_MAX 88 #define MQFS_DELEN (8 + MQFS_NAMELEN) 89 90 /* node types */ 91 typedef enum { 92 mqfstype_none = 0, 93 mqfstype_root, 94 mqfstype_dir, 95 mqfstype_this, 96 mqfstype_parent, 97 mqfstype_file, 98 mqfstype_symlink, 99 } mqfs_type_t; 100 101 struct mqfs_node; 102 103 /* 104 * mqfs_info: describes a mqfs instance 105 */ 106 struct mqfs_info { 107 struct sx mi_lock; 108 struct mqfs_node *mi_root; 109 struct unrhdr *mi_unrhdr; 110 }; 111 112 struct mqfs_vdata { 113 LIST_ENTRY(mqfs_vdata) mv_link; 114 struct mqfs_node *mv_node; 115 struct vnode *mv_vnode; 116 struct task mv_task; 117 }; 118 119 /* 120 * mqfs_node: describes a node (file or directory) within a mqfs 121 */ 122 struct mqfs_node { 123 char mn_name[MQFS_NAMELEN+1]; 124 struct mqfs_info *mn_info; 125 struct mqfs_node *mn_parent; 126 LIST_HEAD(,mqfs_node) mn_children; 127 LIST_ENTRY(mqfs_node) mn_sibling; 128 LIST_HEAD(,mqfs_vdata) mn_vnodes; 129 int mn_refcount; 130 mqfs_type_t mn_type; 131 int mn_deleted; 132 u_int32_t mn_fileno; 133 void *mn_data; 134 struct timespec mn_birth; 135 struct timespec mn_ctime; 136 struct timespec mn_atime; 137 struct timespec mn_mtime; 138 uid_t mn_uid; 139 gid_t mn_gid; 140 int mn_mode; 141 }; 142 143 #define VTON(vp) (((struct mqfs_vdata *)((vp)->v_data))->mv_node) 144 #define VTOMQ(vp) ((struct mqueue *)(VTON(vp)->mn_data)) 145 #define VFSTOMQFS(m) ((struct mqfs_info *)((m)->mnt_data)) 146 #define FPTOMQ(fp) ((struct mqueue *)(((struct mqfs_node *) \ 147 (fp)->f_data)->mn_data)) 148 149 TAILQ_HEAD(msgq, mqueue_msg); 150 151 struct mqueue; 152 153 struct mqueue_notifier { 154 LIST_ENTRY(mqueue_notifier) nt_link; 155 struct sigevent nt_sigev; 156 ksiginfo_t nt_ksi; 157 struct proc *nt_proc; 158 }; 159 160 struct mqueue { 161 struct mtx mq_mutex; 162 int mq_flags; 163 long mq_maxmsg; 164 long mq_msgsize; 165 long mq_curmsgs; 166 long mq_totalbytes; 167 struct msgq mq_msgq; 168 int mq_receivers; 169 int mq_senders; 170 struct selinfo mq_rsel; 171 struct selinfo mq_wsel; 172 struct mqueue_notifier *mq_notifier; 173 }; 174 175 #define MQ_RSEL 0x01 176 #define MQ_WSEL 0x02 177 178 struct mqueue_msg { 179 TAILQ_ENTRY(mqueue_msg) msg_link; 180 unsigned int msg_prio; 181 unsigned int msg_size; 182 /* following real data... 
 */
};

SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW, 0,
	"POSIX real time message queue");

static int	default_maxmsg = 10;
static int	default_msgsize = 1024;

static int	maxmsg = 100;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW,
	&maxmsg, 0, "Default maximum messages in queue");
static int	maxmsgsize = 16384;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW,
	&maxmsgsize, 0, "Default maximum message size");
static int	maxmq = 100;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW,
	&maxmq, 0, "Maximum number of message queues");
static int	curmq = 0;
SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW,
	&curmq, 0, "Current number of message queues");
static int	unloadable = 0;
static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data");

static eventhandler_tag exit_tag;

/* Only one instance per system */
static struct mqfs_info		mqfs_data;
static uma_zone_t		mqnode_zone;
static uma_zone_t		mqueue_zone;
static uma_zone_t		mvdata_zone;
static uma_zone_t		mqnoti_zone;
static struct vop_vector	mqfs_vnodeops;
static struct fileops		mqueueops;

/*
 * Directory structure construction and manipulation
 */
#ifdef notyet
static struct mqfs_node	*mqfs_create_dir(struct mqfs_node *parent,
	const char *name, int namelen, struct ucred *cred, int mode);
static struct mqfs_node	*mqfs_create_link(struct mqfs_node *parent,
	const char *name, int namelen, struct ucred *cred, int mode);
#endif

static struct mqfs_node	*mqfs_create_file(struct mqfs_node *parent,
	const char *name, int namelen, struct ucred *cred, int mode);
static int	mqfs_destroy(struct mqfs_node *mn);
static void	mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn);
static void	mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn);
static int	mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn);

/*
 * Message queue construction and manipulation
 */
static struct mqueue	*mqueue_alloc(const struct mq_attr *attr);
static void	mqueue_free(struct mqueue *mq);
static int	mqueue_send(struct mqueue *mq, const char *msg_ptr,
	size_t msg_len, unsigned msg_prio, int waitok,
	const struct timespec *abs_timeout);
static int	mqueue_receive(struct mqueue *mq, char *msg_ptr,
	size_t msg_len, unsigned *msg_prio, int waitok,
	const struct timespec *abs_timeout);
static int	_mqueue_send(struct mqueue *mq, struct mqueue_msg *msg,
	int timo);
static int	_mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg,
	int timo);
static void	mqueue_send_notification(struct mqueue *mq);
static void	mqueue_fdclose(struct thread *td, int fd, struct file *fp);
static void	mq_proc_exit(void *arg, struct proc *p);

/*
 * kqueue filters
 */
static void	filt_mqdetach(struct knote *kn);
static int	filt_mqread(struct knote *kn, long hint);
static int	filt_mqwrite(struct knote *kn, long hint);

struct filterops mq_rfiltops =
	{ 1, NULL, filt_mqdetach, filt_mqread };
struct filterops mq_wfiltops =
	{ 1, NULL, filt_mqdetach, filt_mqwrite };

/*
 * Initialize fileno bitmap
 */
static void
mqfs_fileno_init(struct mqfs_info *mi)
{
	struct unrhdr *up;

	up = new_unrhdr(1, INT_MAX, NULL);
	mi->mi_unrhdr = up;
}

/*
 * Tear down fileno bitmap
 */
static void
mqfs_fileno_uninit(struct mqfs_info *mi)
282 { 283 struct unrhdr *up; 284 285 up = mi->mi_unrhdr; 286 mi->mi_unrhdr = NULL; 287 delete_unrhdr(up); 288 } 289 290 /* 291 * Allocate a file number 292 */ 293 static void 294 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn) 295 { 296 /* make sure our parent has a file number */ 297 if (mn->mn_parent && !mn->mn_parent->mn_fileno) 298 mqfs_fileno_alloc(mi, mn->mn_parent); 299 300 switch (mn->mn_type) { 301 case mqfstype_root: 302 case mqfstype_dir: 303 case mqfstype_file: 304 case mqfstype_symlink: 305 mn->mn_fileno = alloc_unr(mi->mi_unrhdr); 306 break; 307 case mqfstype_this: 308 KASSERT(mn->mn_parent != NULL, 309 ("mqfstype_this node has no parent")); 310 mn->mn_fileno = mn->mn_parent->mn_fileno; 311 break; 312 case mqfstype_parent: 313 KASSERT(mn->mn_parent != NULL, 314 ("mqfstype_parent node has no parent")); 315 if (mn->mn_parent == mi->mi_root) { 316 mn->mn_fileno = mn->mn_parent->mn_fileno; 317 break; 318 } 319 KASSERT(mn->mn_parent->mn_parent != NULL, 320 ("mqfstype_parent node has no grandparent")); 321 mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno; 322 break; 323 default: 324 KASSERT(0, 325 ("mqfs_fileno_alloc() called for unknown type node: %d", 326 mn->mn_type)); 327 break; 328 } 329 } 330 331 /* 332 * Release a file number 333 */ 334 static void 335 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn) 336 { 337 switch (mn->mn_type) { 338 case mqfstype_root: 339 case mqfstype_dir: 340 case mqfstype_file: 341 case mqfstype_symlink: 342 free_unr(mi->mi_unrhdr, mn->mn_fileno); 343 break; 344 case mqfstype_this: 345 case mqfstype_parent: 346 /* ignore these, as they don't "own" their file number */ 347 break; 348 default: 349 KASSERT(0, 350 ("mqfs_fileno_free() called for unknown type node: %d", 351 mn->mn_type)); 352 break; 353 } 354 } 355 356 static __inline struct mqfs_node * 357 mqnode_alloc(void) 358 { 359 return uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO); 360 } 361 362 static __inline void 363 mqnode_free(struct mqfs_node *node) 364 { 365 uma_zfree(mqnode_zone, node); 366 } 367 368 static __inline void 369 mqnode_addref(struct mqfs_node *node) 370 { 371 atomic_fetchadd_int(&node->mn_refcount, 1); 372 } 373 374 static __inline void 375 mqnode_release(struct mqfs_node *node) 376 { 377 int old, exp; 378 379 old = atomic_fetchadd_int(&node->mn_refcount, -1); 380 if (node->mn_type == mqfstype_dir || 381 node->mn_type == mqfstype_root) 382 exp = 3; /* include . and .. 
*/ 383 else 384 exp = 1; 385 if (old == exp) 386 mqfs_destroy(node); 387 } 388 389 /* 390 * Add a node to a directory 391 */ 392 static int 393 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node) 394 { 395 KASSERT(parent != NULL, ("%s(): parent is NULL", __func__)); 396 KASSERT(parent->mn_info != NULL, 397 ("%s(): parent has no mn_info", __func__)); 398 KASSERT(parent->mn_type == mqfstype_dir || 399 parent->mn_type == mqfstype_root, 400 ("%s(): parent is not a directory", __func__)); 401 402 node->mn_info = parent->mn_info; 403 node->mn_parent = parent; 404 LIST_INIT(&node->mn_children); 405 LIST_INIT(&node->mn_vnodes); 406 LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling); 407 mqnode_addref(parent); 408 return (0); 409 } 410 411 static struct mqfs_node * 412 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode, 413 int nodetype) 414 { 415 struct mqfs_node *node; 416 417 node = mqnode_alloc(); 418 strncpy(node->mn_name, name, namelen); 419 node->mn_type = nodetype; 420 node->mn_refcount = 1; 421 getnanotime(&node->mn_birth); 422 node->mn_ctime = node->mn_atime = node->mn_mtime 423 = node->mn_birth; 424 node->mn_uid = cred->cr_uid; 425 node->mn_gid = cred->cr_gid; 426 node->mn_mode = mode; 427 return (node); 428 } 429 430 /* 431 * Create a file 432 */ 433 static struct mqfs_node * 434 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen, 435 struct ucred *cred, int mode) 436 { 437 struct mqfs_node *node; 438 439 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file); 440 if (mqfs_add_node(parent, node) != 0) { 441 mqnode_free(node); 442 return (NULL); 443 } 444 return (node); 445 } 446 447 /* 448 * Add . and .. to a directory 449 */ 450 static int 451 mqfs_fixup_dir(struct mqfs_node *parent) 452 { 453 struct mqfs_node *dir; 454 455 dir = mqnode_alloc(); 456 dir->mn_name[0] = '.'; 457 dir->mn_type = mqfstype_this; 458 dir->mn_refcount = 1; 459 if (mqfs_add_node(parent, dir) != 0) { 460 mqnode_free(dir); 461 return (-1); 462 } 463 464 dir = mqnode_alloc(); 465 dir->mn_name[0] = dir->mn_name[1] = '.'; 466 dir->mn_type = mqfstype_parent; 467 dir->mn_refcount = 1; 468 469 if (mqfs_add_node(parent, dir) != 0) { 470 mqnode_free(dir); 471 return (-1); 472 } 473 474 return (0); 475 } 476 477 #ifdef notyet 478 479 /* 480 * Create a directory 481 */ 482 static struct mqfs_node * 483 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen, 484 struct ucred *cred, int mode) 485 { 486 struct mqfs_node *node; 487 488 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir); 489 if (mqfs_add_node(parent, node) != 0) { 490 mqnode_free(node); 491 return (NULL); 492 } 493 494 if (mqfs_fixup_dir(node) != 0) { 495 mqfs_destroy(node); 496 return (NULL); 497 } 498 return (node); 499 } 500 501 /* 502 * Create a symlink 503 */ 504 static struct mqfs_node * 505 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen, 506 struct ucred *cred, int mode) 507 { 508 struct mqfs_node *node; 509 510 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink); 511 if (mqfs_add_node(parent, node) != 0) { 512 mqnode_free(node); 513 return (NULL); 514 } 515 return (node); 516 } 517 518 #endif 519 520 /* 521 * Destroy a node or a tree of nodes 522 */ 523 static int 524 mqfs_destroy(struct mqfs_node *node) 525 { 526 struct mqfs_node *parent; 527 528 KASSERT(node != NULL, 529 ("%s(): node is NULL", __func__)); 530 KASSERT(node->mn_info != NULL, 531 ("%s(): node has no mn_info", __func__)); 532 533 /* destroy 
children */ 534 if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root) 535 while (! LIST_EMPTY(&node->mn_children)) 536 mqfs_destroy(LIST_FIRST(&node->mn_children)); 537 538 /* unlink from parent */ 539 if ((parent = node->mn_parent) != NULL) { 540 KASSERT(parent->mn_info == node->mn_info, 541 ("%s(): parent has different mn_info", __func__)); 542 LIST_REMOVE(node, mn_sibling); 543 } 544 545 if (node->mn_fileno != 0) 546 mqfs_fileno_free(node->mn_info, node); 547 if (node->mn_data != NULL) 548 mqueue_free(node->mn_data); 549 mqnode_free(node); 550 return (0); 551 } 552 553 /* 554 * Mount a mqfs instance 555 */ 556 static int 557 mqfs_mount(struct mount *mp, struct thread *td) 558 { 559 struct statfs *sbp; 560 561 if (mp->mnt_flag & MNT_UPDATE) 562 return (EOPNOTSUPP); 563 564 mp->mnt_data = &mqfs_data; 565 MNT_ILOCK(mp); 566 mp->mnt_flag |= MNT_LOCAL; 567 mp->mnt_kern_flag |= MNTK_MPSAFE; 568 MNT_IUNLOCK(mp); 569 vfs_getnewfsid(mp); 570 571 sbp = &mp->mnt_stat; 572 vfs_mountedfrom(mp, "mqueue"); 573 sbp->f_bsize = PAGE_SIZE; 574 sbp->f_iosize = PAGE_SIZE; 575 sbp->f_blocks = 1; 576 sbp->f_bfree = 0; 577 sbp->f_bavail = 0; 578 sbp->f_files = 1; 579 sbp->f_ffree = 0; 580 return (0); 581 } 582 583 /* 584 * Unmount a mqfs instance 585 */ 586 static int 587 mqfs_unmount(struct mount *mp, int mntflags, struct thread *td) 588 { 589 int error; 590 591 error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td); 592 return (error); 593 } 594 595 /* 596 * Return a root vnode 597 */ 598 static int 599 mqfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td) 600 { 601 struct mqfs_info *mqfs; 602 int ret; 603 604 mqfs = VFSTOMQFS(mp); 605 sx_xlock(&mqfs->mi_lock); 606 ret = mqfs_allocv(mp, vpp, mqfs->mi_root); 607 sx_xunlock(&mqfs->mi_lock); 608 return (ret); 609 } 610 611 /* 612 * Return filesystem stats 613 */ 614 static int 615 mqfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *td) 616 { 617 /* XXX update statistics */ 618 return (0); 619 } 620 621 /* 622 * Initialize a mqfs instance 623 */ 624 static int 625 mqfs_init(struct vfsconf *vfc) 626 { 627 struct mqfs_node *root; 628 struct mqfs_info *mi; 629 630 mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node), 631 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 632 mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue), 633 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 634 mvdata_zone = uma_zcreate("mvdata", 635 sizeof(struct mqfs_vdata), NULL, NULL, NULL, 636 NULL, UMA_ALIGN_PTR, 0); 637 mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier), 638 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 639 mi = &mqfs_data; 640 sx_init(&mi->mi_lock, "mqfs lock"); 641 /* set up the root diretory */ 642 root = mqfs_create_node("/", 1, curthread->td_ucred, 01777, 643 mqfstype_root); 644 root->mn_info = mi; 645 LIST_INIT(&root->mn_children); 646 LIST_INIT(&root->mn_vnodes); 647 mi->mi_root = root; 648 mqfs_fileno_init(mi); 649 mqfs_fileno_alloc(mi, root); 650 mqfs_fixup_dir(root); 651 exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL, 652 EVENTHANDLER_PRI_ANY); 653 mq_fdclose = mqueue_fdclose; 654 p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING); 655 return (0); 656 } 657 658 /* 659 * Destroy a mqfs instance 660 */ 661 static int 662 mqfs_uninit(struct vfsconf *vfc) 663 { 664 struct mqfs_info *mi; 665 666 if (!unloadable) 667 return (EOPNOTSUPP); 668 EVENTHANDLER_DEREGISTER(process_exit, exit_tag); 669 mi = &mqfs_data; 670 mqfs_destroy(mi->mi_root); 671 
mi->mi_root = NULL; 672 mqfs_fileno_uninit(mi); 673 sx_destroy(&mi->mi_lock); 674 uma_zdestroy(mqnode_zone); 675 uma_zdestroy(mqueue_zone); 676 uma_zdestroy(mvdata_zone); 677 uma_zdestroy(mqnoti_zone); 678 return (0); 679 } 680 681 /* 682 * task routine 683 */ 684 static void 685 do_recycle(void *context, int pending __unused) 686 { 687 struct vnode *vp = (struct vnode *)context; 688 689 vrecycle(vp, curthread); 690 vdrop(vp); 691 } 692 693 /* 694 * Allocate a vnode 695 */ 696 static int 697 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn) 698 { 699 struct mqfs_vdata *vd; 700 int error; 701 702 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 703 if (vd->mv_vnode->v_mount == mp) 704 break; 705 } 706 707 if (vd != NULL) { 708 if (vget(vd->mv_vnode, 0, curthread) == 0) { 709 *vpp = vd->mv_vnode; 710 vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, 711 curthread); 712 return (0); 713 } 714 /* XXX if this can happen, we're in trouble */ 715 } 716 717 error = getnewvnode("mqueue", mp, &mqfs_vnodeops, vpp); 718 if (error) 719 return (error); 720 vd = uma_zalloc(mvdata_zone, M_WAITOK); 721 (*vpp)->v_data = vd; 722 vd->mv_vnode = *vpp; 723 vd->mv_node = pn; 724 TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp); 725 LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link); 726 mqnode_addref(pn); 727 switch (pn->mn_type) { 728 case mqfstype_root: 729 (*vpp)->v_vflag = VV_ROOT; 730 /* fall through */ 731 case mqfstype_dir: 732 case mqfstype_this: 733 case mqfstype_parent: 734 (*vpp)->v_type = VDIR; 735 break; 736 case mqfstype_file: 737 (*vpp)->v_type = VREG; 738 break; 739 case mqfstype_symlink: 740 (*vpp)->v_type = VLNK; 741 break; 742 case mqfstype_none: 743 KASSERT(0, ("mqfs_allocf called for null node\n")); 744 default: 745 panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type); 746 } 747 vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread); 748 return (0); 749 } 750 751 /* 752 * Search a directory entry 753 */ 754 static struct mqfs_node * 755 mqfs_search(struct mqfs_node *pd, const char *name, int len) 756 { 757 struct mqfs_node *pn; 758 759 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 760 if (strncmp(pn->mn_name, name, len) == 0) 761 return (pn); 762 } 763 return (NULL); 764 } 765 766 /* 767 * Look up a file or directory 768 */ 769 static int 770 mqfs_lookupx(struct vop_cachedlookup_args *ap) 771 { 772 struct componentname *cnp; 773 struct vnode *dvp, **vpp; 774 struct mqfs_node *pd; 775 struct mqfs_node *pn; 776 int nameiop, flags, error, namelen; 777 char *pname; 778 struct thread *td; 779 780 cnp = ap->a_cnp; 781 vpp = ap->a_vpp; 782 dvp = ap->a_dvp; 783 pname = cnp->cn_nameptr; 784 namelen = cnp->cn_namelen; 785 td = cnp->cn_thread; 786 flags = cnp->cn_flags; 787 nameiop = cnp->cn_nameiop; 788 pd = VTON(dvp); 789 pn = NULL; 790 *vpp = NULLVP; 791 792 if (dvp->v_type != VDIR) 793 return (ENOTDIR); 794 795 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread); 796 if (error) 797 return (error); 798 799 /* shortcut: check if the name is too long */ 800 if (cnp->cn_namelen >= MQFS_NAMELEN) 801 return (ENOENT); 802 803 /* self */ 804 if (namelen == 1 && pname[0] == '.') { 805 if ((flags & ISLASTCN) && nameiop != LOOKUP) 806 return (EINVAL); 807 pn = pd; 808 *vpp = dvp; 809 VREF(dvp); 810 return (0); 811 } 812 813 /* parent */ 814 if (cnp->cn_flags & ISDOTDOT) { 815 if (dvp->v_vflag & VV_ROOT) 816 return (EIO); 817 if ((flags & ISLASTCN) && nameiop != LOOKUP) 818 return (EINVAL); 819 VOP_UNLOCK(dvp, 0, cnp->cn_thread); 820 KASSERT(pd->mn_parent, ("non-root directory has no parent")); 821 
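		/*
		 * Dotdot lookup: the directory vnode was unlocked above so
		 * that the parent vnode can be allocated and locked without
		 * reversing the usual parent-before-child lock order; dvp
		 * is relocked once the parent vnode has been obtained.
		 */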
pn = pd->mn_parent; 822 error = mqfs_allocv(dvp->v_mount, vpp, pn); 823 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td); 824 return (error); 825 } 826 827 /* named node */ 828 pn = mqfs_search(pd, pname, namelen); 829 830 /* found */ 831 if (pn != NULL) { 832 /* DELETE */ 833 if (nameiop == DELETE && (flags & ISLASTCN)) { 834 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 835 if (error) 836 return (error); 837 if (*vpp == dvp) { 838 VREF(dvp); 839 *vpp = dvp; 840 return (0); 841 } 842 } 843 844 /* allocate vnode */ 845 error = mqfs_allocv(dvp->v_mount, vpp, pn); 846 if (error == 0 && cnp->cn_flags & MAKEENTRY) 847 cache_enter(dvp, *vpp, cnp); 848 return (error); 849 } 850 851 /* not found */ 852 853 /* will create a new entry in the directory ? */ 854 if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT) 855 && (flags & ISLASTCN)) { 856 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 857 if (error) 858 return (error); 859 cnp->cn_flags |= SAVENAME; 860 return (EJUSTRETURN); 861 } 862 return (ENOENT); 863 } 864 865 #if 0 866 struct vop_lookup_args { 867 struct vop_generic_args a_gen; 868 struct vnode *a_dvp; 869 struct vnode **a_vpp; 870 struct componentname *a_cnp; 871 }; 872 #endif 873 874 /* 875 * vnode lookup operation 876 */ 877 static int 878 mqfs_lookup(struct vop_cachedlookup_args *ap) 879 { 880 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 881 int rc; 882 883 sx_xlock(&mqfs->mi_lock); 884 rc = mqfs_lookupx(ap); 885 sx_xunlock(&mqfs->mi_lock); 886 return (rc); 887 } 888 889 #if 0 890 struct vop_create_args { 891 struct vnode *a_dvp; 892 struct vnode **a_vpp; 893 struct componentname *a_cnp; 894 struct vattr *a_vap; 895 }; 896 #endif 897 898 /* 899 * vnode creation operation 900 */ 901 static int 902 mqfs_create(struct vop_create_args *ap) 903 { 904 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 905 struct componentname *cnp = ap->a_cnp; 906 struct mqfs_node *pd; 907 struct mqfs_node *pn; 908 struct mqueue *mq; 909 int error; 910 911 pd = VTON(ap->a_dvp); 912 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 913 return (ENOTDIR); 914 mq = mqueue_alloc(NULL); 915 if (mq == NULL) 916 return (EAGAIN); 917 sx_xlock(&mqfs->mi_lock); 918 #if 0 919 /* named node */ 920 pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen); 921 if (pn != NULL) { 922 mqueue_free(mq); 923 sx_xunlock(&mqfs->mi_lock); 924 return (EEXIST); 925 } 926 #else 927 if ((cnp->cn_flags & HASBUF) == 0) 928 panic("%s: no name", __func__); 929 #endif 930 pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen, 931 cnp->cn_cred, ap->a_vap->va_mode); 932 if (pn == NULL) 933 error = ENOSPC; 934 else { 935 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 936 if (error) 937 mqfs_destroy(pn); 938 else 939 pn->mn_data = mq; 940 } 941 sx_xunlock(&mqfs->mi_lock); 942 if (error) 943 mqueue_free(mq); 944 return (error); 945 } 946 947 /* 948 * Remove an entry 949 */ 950 static 951 int do_unlink(struct mqfs_node *pn, struct ucred *ucred) 952 { 953 struct mqfs_node *parent; 954 struct mqfs_vdata *vd; 955 int error = 0; 956 957 sx_assert(&pn->mn_info->mi_lock, SX_LOCKED); 958 959 /* 960 * XXXRW: Other instances of the message queue primitive are 961 * allowed in jail? 
962 */ 963 if (ucred->cr_uid != pn->mn_uid && 964 (error = priv_check_cred(ucred, PRIV_MQ_ADMIN, 0)) != 0) 965 error = EACCES; 966 else if (!pn->mn_deleted) { 967 parent = pn->mn_parent; 968 pn->mn_parent = NULL; 969 pn->mn_deleted = 1; 970 LIST_REMOVE(pn, mn_sibling); 971 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 972 cache_purge(vd->mv_vnode); 973 vhold(vd->mv_vnode); 974 taskqueue_enqueue(taskqueue_thread, &vd->mv_task); 975 } 976 mqnode_release(pn); 977 mqnode_release(parent); 978 } else 979 error = ENOENT; 980 return (error); 981 } 982 983 #if 0 984 struct vop_remove_args { 985 struct vnode *a_dvp; 986 struct vnode *a_vp; 987 struct componentname *a_cnp; 988 }; 989 #endif 990 991 /* 992 * vnode removal operation 993 */ 994 static int 995 mqfs_remove(struct vop_remove_args *ap) 996 { 997 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 998 struct mqfs_node *pn; 999 int error; 1000 1001 if (ap->a_vp->v_type == VDIR) 1002 return (EPERM); 1003 pn = VTON(ap->a_vp); 1004 sx_xlock(&mqfs->mi_lock); 1005 error = do_unlink(pn, ap->a_cnp->cn_cred); 1006 sx_xunlock(&mqfs->mi_lock); 1007 return (error); 1008 } 1009 1010 #if 0 1011 struct vop_inactive_args { 1012 struct vnode *a_vp; 1013 struct thread *a_td; 1014 }; 1015 #endif 1016 1017 static int 1018 mqfs_inactive(struct vop_inactive_args *ap) 1019 { 1020 struct mqfs_node *pn = VTON(ap->a_vp); 1021 1022 if (pn->mn_deleted) 1023 vrecycle(ap->a_vp, ap->a_td); 1024 return (0); 1025 } 1026 1027 #if 0 1028 struct vop_reclaim_args { 1029 struct vop_generic_args a_gen; 1030 struct vnode *a_vp; 1031 struct thread *a_td; 1032 }; 1033 #endif 1034 1035 static int 1036 mqfs_reclaim(struct vop_reclaim_args *ap) 1037 { 1038 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount); 1039 struct vnode *vp = ap->a_vp; 1040 struct mqfs_node *pn; 1041 struct mqfs_vdata *vd; 1042 1043 vd = vp->v_data; 1044 pn = vd->mv_node; 1045 sx_xlock(&mqfs->mi_lock); 1046 vp->v_data = NULL; 1047 LIST_REMOVE(vd, mv_link); 1048 uma_zfree(mvdata_zone, vd); 1049 mqnode_release(pn); 1050 sx_xunlock(&mqfs->mi_lock); 1051 return (0); 1052 } 1053 1054 #if 0 1055 struct vop_open_args { 1056 struct vop_generic_args a_gen; 1057 struct vnode *a_vp; 1058 int a_mode; 1059 struct ucred *a_cred; 1060 struct thread *a_td; 1061 int a_fdidx; 1062 }; 1063 #endif 1064 1065 static int 1066 mqfs_open(struct vop_open_args *ap) 1067 { 1068 return (0); 1069 } 1070 1071 #if 0 1072 struct vop_close_args { 1073 struct vop_generic_args a_gen; 1074 struct vnode *a_vp; 1075 int a_fflag; 1076 struct ucred *a_cred; 1077 struct thread *a_td; 1078 }; 1079 #endif 1080 1081 static int 1082 mqfs_close(struct vop_close_args *ap) 1083 { 1084 return (0); 1085 } 1086 1087 #if 0 1088 struct vop_access_args { 1089 struct vop_generic_args a_gen; 1090 struct vnode *a_vp; 1091 int a_mode; 1092 struct ucred *a_cred; 1093 struct thread *a_td; 1094 }; 1095 #endif 1096 1097 /* 1098 * Verify permissions 1099 */ 1100 static int 1101 mqfs_access(struct vop_access_args *ap) 1102 { 1103 struct vnode *vp = ap->a_vp; 1104 struct vattr vattr; 1105 int error; 1106 1107 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td); 1108 if (error) 1109 return (error); 1110 error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid, 1111 vattr.va_gid, ap->a_mode, ap->a_cred, NULL); 1112 return (error); 1113 } 1114 1115 #if 0 1116 struct vop_getattr_args { 1117 struct vop_generic_args a_gen; 1118 struct vnode *a_vp; 1119 struct vattr *a_vap; 1120 struct ucred *a_cred; 1121 struct thread *a_td; 1122 }; 1123 #endif 1124 1125 /* 1126 * Get file 
attributes 1127 */ 1128 static int 1129 mqfs_getattr(struct vop_getattr_args *ap) 1130 { 1131 struct vnode *vp = ap->a_vp; 1132 struct mqfs_node *pn = VTON(vp); 1133 struct vattr *vap = ap->a_vap; 1134 int error = 0; 1135 1136 VATTR_NULL(vap); 1137 vap->va_type = vp->v_type; 1138 vap->va_mode = pn->mn_mode; 1139 vap->va_nlink = 1; 1140 vap->va_uid = pn->mn_uid; 1141 vap->va_gid = pn->mn_gid; 1142 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; 1143 vap->va_fileid = pn->mn_fileno; 1144 vap->va_size = 0; 1145 vap->va_blocksize = PAGE_SIZE; 1146 vap->va_bytes = vap->va_size = 0; 1147 vap->va_atime = pn->mn_atime; 1148 vap->va_mtime = pn->mn_mtime; 1149 vap->va_ctime = pn->mn_ctime; 1150 vap->va_birthtime = pn->mn_birth; 1151 vap->va_gen = 0; 1152 vap->va_flags = 0; 1153 vap->va_rdev = 0; 1154 vap->va_bytes = 0; 1155 vap->va_filerev = 0; 1156 vap->va_vaflags = 0; 1157 return (error); 1158 } 1159 1160 #if 0 1161 struct vop_setattr_args { 1162 struct vop_generic_args a_gen; 1163 struct vnode *a_vp; 1164 struct vattr *a_vap; 1165 struct ucred *a_cred; 1166 struct thread *a_td; 1167 }; 1168 #endif 1169 /* 1170 * Set attributes 1171 */ 1172 static int 1173 mqfs_setattr(struct vop_setattr_args *ap) 1174 { 1175 struct mqfs_node *pn; 1176 struct vattr *vap; 1177 struct vnode *vp; 1178 int c, error; 1179 uid_t uid; 1180 gid_t gid; 1181 1182 vap = ap->a_vap; 1183 vp = ap->a_vp; 1184 if ((vap->va_type != VNON) || 1185 (vap->va_nlink != VNOVAL) || 1186 (vap->va_fsid != VNOVAL) || 1187 (vap->va_fileid != VNOVAL) || 1188 (vap->va_blocksize != VNOVAL) || 1189 (vap->va_flags != VNOVAL && vap->va_flags != 0) || 1190 (vap->va_rdev != VNOVAL) || 1191 ((int)vap->va_bytes != VNOVAL) || 1192 (vap->va_gen != VNOVAL)) { 1193 return (EINVAL); 1194 } 1195 1196 pn = VTON(vp); 1197 1198 error = c = 0; 1199 if (vap->va_uid == (uid_t)VNOVAL) 1200 uid = pn->mn_uid; 1201 else 1202 uid = vap->va_uid; 1203 if (vap->va_gid == (gid_t)VNOVAL) 1204 gid = pn->mn_gid; 1205 else 1206 gid = vap->va_gid; 1207 1208 if (uid != pn->mn_uid || gid != pn->mn_gid) { 1209 /* 1210 * To modify the ownership of a file, must possess VADMIN 1211 * for that file. 1212 */ 1213 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td))) 1214 return (error); 1215 1216 /* 1217 * XXXRW: Why is there a privilege check here: shouldn't the 1218 * check in VOP_ACCESS() be enough? Also, are the group bits 1219 * below definitely right? 1220 */ 1221 if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid || 1222 (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) && 1223 (error = priv_check_cred(ap->a_td->td_ucred, 1224 PRIV_MQ_ADMIN, SUSER_ALLOWJAIL)) != 0) 1225 return (error); 1226 pn->mn_uid = uid; 1227 pn->mn_gid = gid; 1228 c = 1; 1229 } 1230 1231 if (vap->va_mode != (mode_t)VNOVAL) { 1232 if ((ap->a_cred->cr_uid != pn->mn_uid) && 1233 (error = priv_check_cred(ap->a_td->td_ucred, 1234 PRIV_MQ_ADMIN, SUSER_ALLOWJAIL))) 1235 return (error); 1236 pn->mn_mode = vap->va_mode; 1237 c = 1; 1238 } 1239 1240 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { 1241 /* See the comment in ufs_vnops::ufs_setattr(). 
*/ 1242 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)) && 1243 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || 1244 (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, ap->a_td)))) 1245 return (error); 1246 if (vap->va_atime.tv_sec != VNOVAL) { 1247 pn->mn_atime = vap->va_atime; 1248 } 1249 if (vap->va_mtime.tv_sec != VNOVAL) { 1250 pn->mn_mtime = vap->va_mtime; 1251 } 1252 c = 1; 1253 } 1254 if (c) { 1255 vfs_timestamp(&pn->mn_ctime); 1256 } 1257 return (0); 1258 } 1259 1260 #if 0 1261 struct vop_read_args { 1262 struct vop_generic_args a_gen; 1263 struct vnode *a_vp; 1264 struct uio *a_uio; 1265 int a_ioflag; 1266 struct ucred *a_cred; 1267 }; 1268 #endif 1269 1270 /* 1271 * Read from a file 1272 */ 1273 static int 1274 mqfs_read(struct vop_read_args *ap) 1275 { 1276 char buf[80]; 1277 struct vnode *vp = ap->a_vp; 1278 struct uio *uio = ap->a_uio; 1279 struct mqfs_node *pn; 1280 struct mqueue *mq; 1281 int len, error; 1282 1283 if (vp->v_type != VREG) 1284 return (EINVAL); 1285 1286 pn = VTON(vp); 1287 mq = VTOMQ(vp); 1288 snprintf(buf, sizeof(buf), 1289 "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n", 1290 mq->mq_totalbytes, 1291 mq->mq_maxmsg, 1292 mq->mq_curmsgs, 1293 mq->mq_msgsize); 1294 buf[sizeof(buf)-1] = '\0'; 1295 len = strlen(buf); 1296 error = uiomove_frombuf(buf, len, uio); 1297 return (error); 1298 } 1299 1300 #if 0 1301 struct vop_readdir_args { 1302 struct vop_generic_args a_gen; 1303 struct vnode *a_vp; 1304 struct uio *a_uio; 1305 struct ucred *a_cred; 1306 int *a_eofflag; 1307 int *a_ncookies; 1308 u_long **a_cookies; 1309 }; 1310 #endif 1311 1312 /* 1313 * Return directory entries. 1314 */ 1315 static int 1316 mqfs_readdir(struct vop_readdir_args *ap) 1317 { 1318 struct vnode *vp; 1319 struct mqfs_info *mi; 1320 struct mqfs_node *pd; 1321 struct mqfs_node *pn; 1322 struct dirent entry; 1323 struct uio *uio; 1324 int *tmp_ncookies = NULL; 1325 off_t offset; 1326 int error, i; 1327 1328 vp = ap->a_vp; 1329 mi = VFSTOMQFS(vp->v_mount); 1330 pd = VTON(vp); 1331 uio = ap->a_uio; 1332 1333 if (vp->v_type != VDIR) 1334 return (ENOTDIR); 1335 1336 if (uio->uio_offset < 0) 1337 return (EINVAL); 1338 1339 if (ap->a_ncookies != NULL) { 1340 tmp_ncookies = ap->a_ncookies; 1341 *ap->a_ncookies = 0; 1342 ap->a_ncookies = NULL; 1343 } 1344 1345 error = 0; 1346 offset = 0; 1347 1348 sx_xlock(&mi->mi_lock); 1349 1350 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 1351 entry.d_reclen = sizeof(entry); 1352 if (!pn->mn_fileno) 1353 mqfs_fileno_alloc(mi, pn); 1354 entry.d_fileno = pn->mn_fileno; 1355 for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i) 1356 entry.d_name[i] = pn->mn_name[i]; 1357 entry.d_name[i] = 0; 1358 entry.d_namlen = i; 1359 switch (pn->mn_type) { 1360 case mqfstype_root: 1361 case mqfstype_dir: 1362 case mqfstype_this: 1363 case mqfstype_parent: 1364 entry.d_type = DT_DIR; 1365 break; 1366 case mqfstype_file: 1367 entry.d_type = DT_REG; 1368 break; 1369 case mqfstype_symlink: 1370 entry.d_type = DT_LNK; 1371 break; 1372 default: 1373 panic("%s has unexpected node type: %d", pn->mn_name, 1374 pn->mn_type); 1375 } 1376 if (entry.d_reclen > uio->uio_resid) 1377 break; 1378 if (offset >= uio->uio_offset) { 1379 error = vfs_read_dirent(ap, &entry, offset); 1380 if (error) 1381 break; 1382 } 1383 offset += entry.d_reclen; 1384 } 1385 sx_xunlock(&mi->mi_lock); 1386 1387 uio->uio_offset = offset; 1388 1389 if (tmp_ncookies != NULL) 1390 ap->a_ncookies = tmp_ncookies; 1391 1392 return (error); 1393 } 1394 1395 #ifdef notyet 1396 1397 #if 0 1398 
struct vop_mkdir_args { 1399 struct vnode *a_dvp; 1400 struvt vnode **a_vpp; 1401 struvt componentname *a_cnp; 1402 struct vattr *a_vap; 1403 }; 1404 #endif 1405 1406 /* 1407 * Create a directory. 1408 */ 1409 static int 1410 mqfs_mkdir(struct vop_mkdir_args *ap) 1411 { 1412 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1413 struct componentname *cnp = ap->a_cnp; 1414 struct mqfs_node *pd = VTON(ap->a_dvp); 1415 struct mqfs_node *pn; 1416 int error; 1417 1418 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 1419 return (ENOTDIR); 1420 sx_xlock(&mqfs->mi_lock); 1421 #if 0 1422 /* named node */ 1423 pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen); 1424 if (pn != NULL) { 1425 sx_xunlock(&mqfs->mi_lock); 1426 return (EEXIST); 1427 } 1428 #else 1429 if ((cnp->cn_flags & HASBUF) == 0) 1430 panic("%s: no name", __func__); 1431 #endif 1432 pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen, 1433 ap->a_vap->cn_cred, ap->a_vap->va_mode); 1434 if (pn == NULL) 1435 error = ENOSPC; 1436 else 1437 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 1438 sx_xunlock(&mqfs->mi_lock); 1439 return (error); 1440 } 1441 1442 #if 0 1443 struct vop_rmdir_args { 1444 struct vnode *a_dvp; 1445 struct vnode *a_vp; 1446 struct componentname *a_cnp; 1447 }; 1448 #endif 1449 1450 /* 1451 * Remove a directory. 1452 */ 1453 static int 1454 mqfs_rmdir(struct vop_rmdir_args *ap) 1455 { 1456 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1457 struct mqfs_node *pn = VTON(ap->a_vp); 1458 struct mqfs_node *pt; 1459 1460 if (pn->mn_type != mqfstype_dir) 1461 return (ENOTDIR); 1462 1463 sx_xlock(&mqfs->mi_lock); 1464 if (pn->mn_deleted) { 1465 sx_xunlock(&mqfs->mi_lock); 1466 return (ENOENT); 1467 } 1468 1469 pt = LIST_FIRST(&pn->mn_children); 1470 pt = LIST_NEXT(pt, mn_sibling); 1471 pt = LIST_NEXT(pt, mn_sibling); 1472 if (pt != NULL) { 1473 sx_xunlock(&mqfs->mi_lock); 1474 return (ENOTEMPTY); 1475 } 1476 pt = pn->mn_parent; 1477 pn->mn_parent = NULL; 1478 pn->mn_deleted = 1; 1479 LIST_REMOVE(pn, mn_sibling); 1480 mqnode_release(pn); 1481 mqnode_release(pt); 1482 sx_xunlock(&mqfs->mi_lock); 1483 cache_purge(ap->a_vp); 1484 return (0); 1485 } 1486 1487 #endif /* notyet */ 1488 1489 /* 1490 * Allocate a message queue 1491 */ 1492 static struct mqueue * 1493 mqueue_alloc(const struct mq_attr *attr) 1494 { 1495 struct mqueue *mq; 1496 1497 if (curmq >= maxmq) 1498 return (NULL); 1499 mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO); 1500 TAILQ_INIT(&mq->mq_msgq); 1501 if (attr != NULL) { 1502 mq->mq_maxmsg = attr->mq_maxmsg; 1503 mq->mq_msgsize = attr->mq_msgsize; 1504 } else { 1505 mq->mq_maxmsg = default_maxmsg; 1506 mq->mq_msgsize = default_msgsize; 1507 } 1508 mtx_init(&mq->mq_mutex, "mqueue", NULL, MTX_DEF); 1509 knlist_init(&mq->mq_rsel.si_note, &mq->mq_mutex, NULL, NULL, NULL); 1510 knlist_init(&mq->mq_wsel.si_note, &mq->mq_mutex, NULL, NULL, NULL); 1511 atomic_add_int(&curmq, 1); 1512 return (mq); 1513 } 1514 1515 /* 1516 * Destroy a message queue 1517 */ 1518 static void 1519 mqueue_free(struct mqueue *mq) 1520 { 1521 struct mqueue_msg *msg; 1522 1523 while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) { 1524 TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link); 1525 FREE(msg, M_MQUEUEDATA); 1526 } 1527 1528 mtx_destroy(&mq->mq_mutex); 1529 knlist_destroy(&mq->mq_rsel.si_note); 1530 knlist_destroy(&mq->mq_wsel.si_note); 1531 uma_zfree(mqueue_zone, mq); 1532 atomic_add_int(&curmq, -1); 1533 } 1534 1535 /* 1536 * Load a message from user space 1537 */ 1538 static struct mqueue_msg * 
1539 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio) 1540 { 1541 struct mqueue_msg *msg; 1542 size_t len; 1543 int error; 1544 1545 len = sizeof(struct mqueue_msg) + msg_size; 1546 MALLOC(msg, struct mqueue_msg *, len, M_MQUEUEDATA, M_WAITOK); 1547 error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg), 1548 msg_size); 1549 if (error) { 1550 FREE(msg, M_MQUEUEDATA); 1551 msg = NULL; 1552 } else { 1553 msg->msg_size = msg_size; 1554 msg->msg_prio = msg_prio; 1555 } 1556 return (msg); 1557 } 1558 1559 /* 1560 * Save a message to user space 1561 */ 1562 static int 1563 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio) 1564 { 1565 int error; 1566 1567 error = copyout(((char *)msg) + sizeof(*msg), msg_ptr, 1568 msg->msg_size); 1569 if (error == 0 && msg_prio != NULL) 1570 error = copyout(&msg->msg_prio, msg_prio, sizeof(int)); 1571 return (error); 1572 } 1573 1574 /* 1575 * Free a message's memory 1576 */ 1577 static __inline void 1578 mqueue_freemsg(struct mqueue_msg *msg) 1579 { 1580 FREE(msg, M_MQUEUEDATA); 1581 } 1582 1583 /* 1584 * Send a message. if waitok is false, thread will not be 1585 * blocked if there is no data in queue, otherwise, absolute 1586 * time will be checked. 1587 */ 1588 int 1589 mqueue_send(struct mqueue *mq, const char *msg_ptr, 1590 size_t msg_len, unsigned msg_prio, int waitok, 1591 const struct timespec *abs_timeout) 1592 { 1593 struct mqueue_msg *msg; 1594 struct timespec ets, ts, ts2; 1595 struct timeval tv; 1596 int error; 1597 1598 if (msg_prio >= MQ_PRIO_MAX) 1599 return (EINVAL); 1600 if (msg_len > mq->mq_msgsize) 1601 return (EMSGSIZE); 1602 msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio); 1603 if (msg == NULL) 1604 return (EFAULT); 1605 1606 /* O_NONBLOCK case */ 1607 if (!waitok) { 1608 error = _mqueue_send(mq, msg, -1); 1609 if (error) 1610 goto bad; 1611 return (0); 1612 } 1613 1614 /* we allow a null timeout (wait forever) */ 1615 if (abs_timeout == NULL) { 1616 error = _mqueue_send(mq, msg, 0); 1617 if (error) 1618 goto bad; 1619 return (0); 1620 } 1621 1622 /* send it before checking time */ 1623 error = _mqueue_send(mq, msg, -1); 1624 if (error == 0) 1625 return (0); 1626 1627 if (error != EAGAIN) 1628 goto bad; 1629 1630 error = copyin(abs_timeout, &ets, sizeof(ets)); 1631 if (error != 0) 1632 goto bad; 1633 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) { 1634 error = EINVAL; 1635 goto bad; 1636 } 1637 for (;;) { 1638 ts2 = ets; 1639 getnanotime(&ts); 1640 timespecsub(&ts2, &ts); 1641 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { 1642 error = ETIMEDOUT; 1643 break; 1644 } 1645 TIMESPEC_TO_TIMEVAL(&tv, &ts2); 1646 error = _mqueue_send(mq, msg, tvtohz(&tv)); 1647 if (error != ETIMEDOUT) 1648 break; 1649 } 1650 if (error == 0) 1651 return (0); 1652 bad: 1653 mqueue_freemsg(msg); 1654 return (error); 1655 } 1656 1657 /* 1658 * Common routine to send a message 1659 */ 1660 static int 1661 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo) 1662 { 1663 struct mqueue_msg *msg2; 1664 int error = 0; 1665 1666 mtx_lock(&mq->mq_mutex); 1667 while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) { 1668 if (timo < 0) { 1669 mtx_unlock(&mq->mq_mutex); 1670 return (EAGAIN); 1671 } 1672 mq->mq_senders++; 1673 error = msleep(&mq->mq_senders, &mq->mq_mutex, 1674 PCATCH, "mqsend", timo); 1675 mq->mq_senders--; 1676 if (error == EAGAIN) 1677 error = ETIMEDOUT; 1678 } 1679 if (mq->mq_curmsgs >= mq->mq_maxmsg) { 1680 mtx_unlock(&mq->mq_mutex); 1681 return (error); 1682 } 1683 error = 0; 
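	/*
	 * Insert the message into the queue, which is kept ordered by
	 * descending priority: a message whose priority is less than or
	 * equal to the tail's is appended (so equal-priority messages
	 * stay in FIFO order), otherwise it is placed in front of the
	 * first lower-priority message.
	 */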
	if (TAILQ_EMPTY(&mq->mq_msgq)) {
		TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link);
	} else {
		if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) {
			TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link);
		} else {
			TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) {
				if (msg2->msg_prio < msg->msg_prio)
					break;
			}
			TAILQ_INSERT_BEFORE(msg2, msg, msg_link);
		}
	}
	mq->mq_curmsgs++;
	mq->mq_totalbytes += msg->msg_size;
	if (mq->mq_receivers)
		wakeup_one(&mq->mq_receivers);
	else if (mq->mq_notifier != NULL)
		mqueue_send_notification(mq);
	if (mq->mq_flags & MQ_RSEL) {
		mq->mq_flags &= ~MQ_RSEL;
		selwakeup(&mq->mq_rsel);
	}
	KNOTE_LOCKED(&mq->mq_rsel.si_note, 0);
	mtx_unlock(&mq->mq_mutex);
	return (0);
}

/*
 * Send a realtime signal to the process that successfully registered
 * itself with mq_notify().
 */
static void
mqueue_send_notification(struct mqueue *mq)
{
	struct mqueue_notifier *nt;
	struct proc *p;

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	nt = mq->mq_notifier;
	if (nt->nt_sigev.sigev_notify != SIGEV_NONE) {
		p = nt->nt_proc;
		PROC_LOCK(p);
		if (!KSI_ONQ(&nt->nt_ksi))
			psignal_event(p, &nt->nt_sigev, &nt->nt_ksi);
		PROC_UNLOCK(p);
	}
	mq->mq_notifier = NULL;
}

/*
 * Get a message.  If waitok is false, the thread is not blocked when
 * the queue is empty; otherwise it may block, up to the given absolute
 * timeout (or indefinitely if abs_timeout is NULL).
 */
int
mqueue_receive(struct mqueue *mq, char *msg_ptr,
	size_t msg_len, unsigned *msg_prio, int waitok,
	const struct timespec *abs_timeout)
{
	struct mqueue_msg *msg;
	struct timespec ets, ts, ts2;
	struct timeval tv;
	int error;

	if (msg_len < mq->mq_msgsize)
		return (EMSGSIZE);

	/* O_NONBLOCK case */
	if (!waitok) {
		error = _mqueue_recv(mq, &msg, -1);
		if (error)
			return (error);
		goto received;
	}

	/* we allow a null timeout (wait forever).
*/ 1761 if (abs_timeout == NULL) { 1762 error = _mqueue_recv(mq, &msg, 0); 1763 if (error) 1764 return (error); 1765 goto received; 1766 } 1767 1768 /* try to get a message before checking time */ 1769 error = _mqueue_recv(mq, &msg, -1); 1770 if (error == 0) 1771 goto received; 1772 1773 if (error != EAGAIN) 1774 return (error); 1775 1776 error = copyin(abs_timeout, &ets, sizeof(ets)); 1777 if (error != 0) 1778 return (error); 1779 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) { 1780 error = EINVAL; 1781 return (error); 1782 } 1783 1784 for (;;) { 1785 ts2 = ets; 1786 getnanotime(&ts); 1787 timespecsub(&ts2, &ts); 1788 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { 1789 error = ETIMEDOUT; 1790 return (error); 1791 } 1792 TIMESPEC_TO_TIMEVAL(&tv, &ts2); 1793 error = _mqueue_recv(mq, &msg, tvtohz(&tv)); 1794 if (error == 0) 1795 break; 1796 if (error != ETIMEDOUT) 1797 return (error); 1798 } 1799 1800 received: 1801 error = mqueue_savemsg(msg, msg_ptr, msg_prio); 1802 if (error == 0) { 1803 curthread->td_retval[0] = msg->msg_size; 1804 curthread->td_retval[1] = 0; 1805 } 1806 mqueue_freemsg(msg); 1807 return (error); 1808 } 1809 1810 /* 1811 * Common routine to receive a message 1812 */ 1813 static int 1814 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo) 1815 { 1816 int error = 0; 1817 1818 mtx_lock(&mq->mq_mutex); 1819 while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) { 1820 if (timo < 0) { 1821 mtx_unlock(&mq->mq_mutex); 1822 return (EAGAIN); 1823 } 1824 mq->mq_receivers++; 1825 error = msleep(&mq->mq_receivers, &mq->mq_mutex, 1826 PCATCH, "mqrecv", timo); 1827 mq->mq_receivers--; 1828 if (error == EAGAIN) 1829 error = ETIMEDOUT; 1830 } 1831 if (*msg != NULL) { 1832 error = 0; 1833 TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link); 1834 mq->mq_curmsgs--; 1835 mq->mq_totalbytes -= (*msg)->msg_size; 1836 if (mq->mq_senders) 1837 wakeup_one(&mq->mq_senders); 1838 if (mq->mq_flags & MQ_WSEL) { 1839 mq->mq_flags &= ~MQ_WSEL; 1840 selwakeup(&mq->mq_wsel); 1841 } 1842 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0); 1843 } 1844 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 && 1845 !TAILQ_EMPTY(&mq->mq_msgq)) { 1846 mqueue_send_notification(mq); 1847 } 1848 mtx_unlock(&mq->mq_mutex); 1849 return (error); 1850 } 1851 1852 static __inline struct mqueue_notifier * 1853 notifier_alloc(void) 1854 { 1855 return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO)); 1856 } 1857 1858 static __inline void 1859 notifier_free(struct mqueue_notifier *p) 1860 { 1861 uma_zfree(mqnoti_zone, p); 1862 } 1863 1864 static struct mqueue_notifier * 1865 notifier_search(struct proc *p, int fd) 1866 { 1867 struct mqueue_notifier *nt; 1868 1869 LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) { 1870 if (nt->nt_ksi.ksi_mqd == fd) 1871 break; 1872 } 1873 return (nt); 1874 } 1875 1876 static __inline void 1877 notifier_insert(struct proc *p, struct mqueue_notifier *nt) 1878 { 1879 LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link); 1880 } 1881 1882 static __inline void 1883 notifier_delete(struct proc *p, struct mqueue_notifier *nt) 1884 { 1885 LIST_REMOVE(nt, nt_link); 1886 notifier_free(nt); 1887 } 1888 1889 static void 1890 notifier_remove(struct proc *p, struct mqueue *mq, int fd) 1891 { 1892 struct mqueue_notifier *nt; 1893 1894 mtx_assert(&mq->mq_mutex, MA_OWNED); 1895 PROC_LOCK(p); 1896 nt = notifier_search(p, fd); 1897 if (nt != NULL) { 1898 if (mq->mq_notifier == nt) 1899 mq->mq_notifier = NULL; 1900 sigqueue_take(&nt->nt_ksi); 1901 notifier_delete(p, nt); 1902 } 1903 PROC_UNLOCK(p); 1904 
} 1905 1906 /* 1907 * Syscall to open a message queue 1908 */ 1909 int 1910 kmq_open(struct thread *td, struct kmq_open_args *uap) 1911 { 1912 char path[MQFS_NAMELEN + 1]; 1913 struct mq_attr attr, *pattr; 1914 struct mqfs_node *pn; 1915 struct filedesc *fdp; 1916 struct file *fp; 1917 struct mqueue *mq; 1918 int fd, error, len, flags, cmode; 1919 1920 if ((uap->flags & O_ACCMODE) == O_ACCMODE) 1921 return (EINVAL); 1922 1923 fdp = td->td_proc->p_fd; 1924 flags = FFLAGS(uap->flags); 1925 cmode = (((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT); 1926 mq = NULL; 1927 if ((flags & O_CREAT) && (uap->attr != NULL)) { 1928 error = copyin(uap->attr, &attr, sizeof(attr)); 1929 if (error) 1930 return (error); 1931 if (attr.mq_maxmsg <= 0 || attr.mq_maxmsg > maxmsg) 1932 return (EINVAL); 1933 if (attr.mq_msgsize <= 0 || attr.mq_msgsize > maxmsgsize) 1934 return (EINVAL); 1935 pattr = &attr; 1936 } else 1937 pattr = NULL; 1938 1939 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL); 1940 if (error) 1941 return (error); 1942 1943 /* 1944 * The first character of name must be a slash (/) character 1945 * and the remaining characters of name cannot include any slash 1946 * characters. 1947 */ 1948 len = strlen(path); 1949 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL) 1950 return (EINVAL); 1951 1952 error = falloc(td, &fp, &fd); 1953 if (error) 1954 return (error); 1955 1956 sx_xlock(&mqfs_data.mi_lock); 1957 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1); 1958 if (pn == NULL) { 1959 if (!(flags & O_CREAT)) { 1960 error = ENOENT; 1961 } else { 1962 mq = mqueue_alloc(pattr); 1963 if (mq == NULL) { 1964 error = ENFILE; 1965 } else { 1966 pn = mqfs_create_file(mqfs_data.mi_root, 1967 path + 1, len - 1, td->td_ucred, 1968 cmode); 1969 if (pn == NULL) { 1970 error = ENOSPC; 1971 mqueue_free(mq); 1972 } 1973 } 1974 } 1975 1976 if (error == 0) { 1977 pn->mn_data = mq; 1978 } 1979 } else { 1980 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) { 1981 error = EEXIST; 1982 } else { 1983 int acc_mode = 0; 1984 1985 if (flags & FREAD) 1986 acc_mode |= VREAD; 1987 if (flags & FWRITE) 1988 acc_mode |= VWRITE; 1989 error = vaccess(VREG, pn->mn_mode, pn->mn_uid, 1990 pn->mn_gid, acc_mode, td->td_ucred, NULL); 1991 } 1992 } 1993 1994 if (error) { 1995 sx_xunlock(&mqfs_data.mi_lock); 1996 fdclose(fdp, fp, fd, td); 1997 fdrop(fp, td); 1998 return (error); 1999 } 2000 2001 mqnode_addref(pn); 2002 sx_xunlock(&mqfs_data.mi_lock); 2003 2004 FILE_LOCK(fp); 2005 fp->f_flag = (flags & (FREAD | FWRITE | O_NONBLOCK)); 2006 fp->f_type = DTYPE_MQUEUE; 2007 fp->f_ops = &mqueueops; 2008 fp->f_data = pn; 2009 FILE_UNLOCK(fp); 2010 2011 FILEDESC_LOCK_FAST(fdp); 2012 if (fdp->fd_ofiles[fd] == fp) 2013 fdp->fd_ofileflags[fd] |= UF_EXCLOSE; 2014 FILEDESC_UNLOCK_FAST(fdp); 2015 td->td_retval[0] = fd; 2016 fdrop(fp, td); 2017 return (0); 2018 } 2019 2020 /* 2021 * Syscall to unlink a message queue 2022 */ 2023 int 2024 kmq_unlink(struct thread *td, struct kmq_unlink_args *uap) 2025 { 2026 char path[MQFS_NAMELEN+1]; 2027 struct mqfs_node *pn; 2028 int error, len; 2029 2030 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL); 2031 if (error) 2032 return (error); 2033 2034 len = strlen(path); 2035 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL) 2036 return (EINVAL); 2037 2038 sx_xlock(&mqfs_data.mi_lock); 2039 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1); 2040 if (pn != NULL) 2041 error = do_unlink(pn, td->td_ucred); 2042 else 2043 error = ENOENT; 2044 
sx_xunlock(&mqfs_data.mi_lock); 2045 return (error); 2046 } 2047 2048 typedef int (*_fgetf)(struct thread *, int, struct file **); 2049 2050 /* 2051 * Get message queue by giving file slot 2052 */ 2053 static int 2054 _getmq(struct thread *td, int fd, _fgetf func, 2055 struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq) 2056 { 2057 struct mqfs_node *pn; 2058 int error; 2059 2060 error = func(td, fd, fpp); 2061 if (error) 2062 return (error); 2063 if (&mqueueops != (*fpp)->f_ops) { 2064 fdrop(*fpp, td); 2065 return (EBADF); 2066 } 2067 pn = (*fpp)->f_data; 2068 if (ppn) 2069 *ppn = pn; 2070 if (pmq) 2071 *pmq = pn->mn_data; 2072 return (0); 2073 } 2074 2075 static __inline int 2076 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn, 2077 struct mqueue **pmq) 2078 { 2079 return _getmq(td, fd, fget, fpp, ppn, pmq); 2080 } 2081 2082 static __inline int 2083 getmq_read(struct thread *td, int fd, struct file **fpp, 2084 struct mqfs_node **ppn, struct mqueue **pmq) 2085 { 2086 return _getmq(td, fd, fget_read, fpp, ppn, pmq); 2087 } 2088 2089 static __inline int 2090 getmq_write(struct thread *td, int fd, struct file **fpp, 2091 struct mqfs_node **ppn, struct mqueue **pmq) 2092 { 2093 return _getmq(td, fd, fget_write, fpp, ppn, pmq); 2094 } 2095 2096 /* 2097 * Syscall 2098 */ 2099 int 2100 kmq_setattr(struct thread *td, struct kmq_setattr_args *uap) 2101 { 2102 struct mqueue *mq; 2103 struct file *fp; 2104 struct mq_attr attr, oattr; 2105 int error; 2106 2107 if (uap->attr) { 2108 error = copyin(uap->attr, &attr, sizeof(attr)); 2109 if (error) 2110 return (error); 2111 if (attr.mq_flags & ~O_NONBLOCK) 2112 return (EINVAL); 2113 } 2114 error = getmq(td, uap->mqd, &fp, NULL, &mq); 2115 if (error) 2116 return (error); 2117 oattr.mq_maxmsg = mq->mq_maxmsg; 2118 oattr.mq_msgsize = mq->mq_msgsize; 2119 oattr.mq_curmsgs = mq->mq_curmsgs; 2120 FILE_LOCK(fp); 2121 oattr.mq_flags = (O_NONBLOCK & fp->f_flag); 2122 if (uap->attr) { 2123 fp->f_flag &= ~O_NONBLOCK; 2124 fp->f_flag |= (attr.mq_flags & O_NONBLOCK); 2125 } 2126 FILE_UNLOCK(fp); 2127 fdrop(fp, td); 2128 if (uap->oattr) 2129 error = copyout(&oattr, uap->oattr, sizeof(oattr)); 2130 return (error); 2131 } 2132 2133 /* 2134 * Syscall 2135 */ 2136 int 2137 kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap) 2138 { 2139 struct mqueue *mq; 2140 struct file *fp; 2141 int error; 2142 int waitok; 2143 2144 error = getmq_read(td, uap->mqd, &fp, NULL, &mq); 2145 if (error) 2146 return (error); 2147 waitok = !(fp->f_flag & O_NONBLOCK); 2148 error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len, 2149 uap->msg_prio, waitok, uap->abs_timeout); 2150 fdrop(fp, td); 2151 return (error); 2152 } 2153 2154 /* 2155 * Syscall 2156 */ 2157 int 2158 kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap) 2159 { 2160 struct mqueue *mq; 2161 struct file *fp; 2162 int error, waitok; 2163 2164 error = getmq_write(td, uap->mqd, &fp, NULL, &mq); 2165 if (error) 2166 return (error); 2167 waitok = !(fp->f_flag & O_NONBLOCK); 2168 error = mqueue_send(mq, uap->msg_ptr, uap->msg_len, 2169 uap->msg_prio, waitok, uap->abs_timeout); 2170 fdrop(fp, td); 2171 return (error); 2172 } 2173 2174 /* 2175 * Syscall 2176 */ 2177 int 2178 kmq_notify(struct thread *td, struct kmq_notify_args *uap) 2179 { 2180 struct sigevent ev; 2181 struct filedesc *fdp; 2182 struct proc *p; 2183 struct mqueue *mq; 2184 struct file *fp; 2185 struct mqueue_notifier *nt, *newnt = NULL; 2186 int error; 2187 2188 p = td->td_proc; 2189 fdp = 
td->td_proc->p_fd; 2190 if (uap->sigev) { 2191 error = copyin(uap->sigev, &ev, sizeof(ev)); 2192 if (error) 2193 return (error); 2194 if (ev.sigev_notify != SIGEV_SIGNAL && 2195 ev.sigev_notify != SIGEV_THREAD_ID && 2196 ev.sigev_notify != SIGEV_NONE) 2197 return (EINVAL); 2198 if ((ev.sigev_notify == SIGEV_SIGNAL || 2199 ev.sigev_notify == SIGEV_THREAD_ID) && 2200 !_SIG_VALID(ev.sigev_signo)) 2201 return (EINVAL); 2202 } 2203 error = getmq(td, uap->mqd, &fp, NULL, &mq); 2204 if (error) 2205 return (error); 2206 again: 2207 FILEDESC_LOCK_FAST(fdp); 2208 if (fget_locked(fdp, uap->mqd) != fp) { 2209 FILEDESC_UNLOCK_FAST(fdp); 2210 error = EBADF; 2211 goto out; 2212 } 2213 mtx_lock(&mq->mq_mutex); 2214 FILEDESC_UNLOCK_FAST(fdp); 2215 if (uap->sigev != NULL) { 2216 if (mq->mq_notifier != NULL) { 2217 error = EBUSY; 2218 } else { 2219 PROC_LOCK(p); 2220 nt = notifier_search(p, uap->mqd); 2221 if (nt == NULL) { 2222 if (newnt == NULL) { 2223 PROC_UNLOCK(p); 2224 mtx_unlock(&mq->mq_mutex); 2225 newnt = notifier_alloc(); 2226 goto again; 2227 } 2228 } 2229 2230 if (nt != NULL) { 2231 sigqueue_take(&nt->nt_ksi); 2232 if (newnt != NULL) { 2233 notifier_free(newnt); 2234 newnt = NULL; 2235 } 2236 } else { 2237 nt = newnt; 2238 newnt = NULL; 2239 ksiginfo_init(&nt->nt_ksi); 2240 nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT; 2241 nt->nt_ksi.ksi_code = SI_MESGQ; 2242 nt->nt_proc = p; 2243 nt->nt_ksi.ksi_mqd = uap->mqd; 2244 notifier_insert(p, nt); 2245 } 2246 nt->nt_sigev = ev; 2247 mq->mq_notifier = nt; 2248 PROC_UNLOCK(p); 2249 /* 2250 * if there is no receivers and message queue 2251 * is not empty, we should send notification 2252 * as soon as possible. 2253 */ 2254 if (mq->mq_receivers == 0 && 2255 !TAILQ_EMPTY(&mq->mq_msgq)) 2256 mqueue_send_notification(mq); 2257 } 2258 } else { 2259 notifier_remove(p, mq, uap->mqd); 2260 } 2261 mtx_unlock(&mq->mq_mutex); 2262 2263 out: 2264 fdrop(fp, td); 2265 if (newnt != NULL) 2266 notifier_free(newnt); 2267 return (error); 2268 } 2269 2270 static void 2271 mqueue_fdclose(struct thread *td, int fd, struct file *fp) 2272 { 2273 struct filedesc *fdp; 2274 struct mqueue *mq; 2275 2276 fdp = td->td_proc->p_fd; 2277 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 2278 if (fp->f_ops == &mqueueops) { 2279 mq = FPTOMQ(fp); 2280 mtx_lock(&mq->mq_mutex); 2281 notifier_remove(td->td_proc, mq, fd); 2282 2283 /* have to wakeup thread in same process */ 2284 if (mq->mq_flags & MQ_RSEL) { 2285 mq->mq_flags &= ~MQ_RSEL; 2286 selwakeup(&mq->mq_rsel); 2287 } 2288 if (mq->mq_flags & MQ_WSEL) { 2289 mq->mq_flags &= ~MQ_WSEL; 2290 selwakeup(&mq->mq_wsel); 2291 } 2292 mtx_unlock(&mq->mq_mutex); 2293 } 2294 } 2295 2296 static void 2297 mq_proc_exit(void *arg __unused, struct proc *p) 2298 { 2299 struct filedesc *fdp; 2300 struct file *fp; 2301 struct mqueue *mq; 2302 int i; 2303 2304 fdp = p->p_fd; 2305 FILEDESC_LOCK_FAST(fdp); 2306 for (i = 0; i < fdp->fd_nfiles; ++i) { 2307 fp = fget_locked(fdp, i); 2308 if (fp != NULL && fp->f_ops == &mqueueops) { 2309 mq = FPTOMQ(fp); 2310 mtx_lock(&mq->mq_mutex); 2311 notifier_remove(p, FPTOMQ(fp), i); 2312 mtx_unlock(&mq->mq_mutex); 2313 } 2314 } 2315 FILEDESC_UNLOCK_FAST(fdp); 2316 KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left")); 2317 } 2318 2319 static int 2320 mqf_read(struct file *fp, struct uio *uio, struct ucred *active_cred, 2321 int flags, struct thread *td) 2322 { 2323 return (EOPNOTSUPP); 2324 } 2325 2326 static int 2327 mqf_write(struct file *fp, struct uio *uio, struct ucred *active_cred, 2328 int flags, struct thread *td) 2329 { 
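	/*
	 * As with mqf_read() above, plain write(2) is not supported on a
	 * message queue descriptor; messages are transferred through the
	 * kmq_timedsend() and kmq_timedreceive() syscalls instead.
	 */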
2330 return (EOPNOTSUPP); 2331 } 2332 2333 static int 2334 mqf_ioctl(struct file *fp, u_long cmd, void *data, 2335 struct ucred *active_cred, struct thread *td) 2336 { 2337 return (ENOTTY); 2338 } 2339 2340 static int 2341 mqf_poll(struct file *fp, int events, struct ucred *active_cred, 2342 struct thread *td) 2343 { 2344 struct mqueue *mq = FPTOMQ(fp); 2345 int revents = 0; 2346 2347 mtx_lock(&mq->mq_mutex); 2348 if (events & (POLLIN | POLLRDNORM)) { 2349 if (mq->mq_curmsgs) { 2350 revents |= events & (POLLIN | POLLRDNORM); 2351 } else { 2352 mq->mq_flags |= MQ_RSEL; 2353 selrecord(td, &mq->mq_rsel); 2354 } 2355 } 2356 if (events & POLLOUT) { 2357 if (mq->mq_curmsgs < mq->mq_maxmsg) 2358 revents |= POLLOUT; 2359 else { 2360 mq->mq_flags |= MQ_WSEL; 2361 selrecord(td, &mq->mq_wsel); 2362 } 2363 } 2364 mtx_unlock(&mq->mq_mutex); 2365 return (revents); 2366 } 2367 2368 static int 2369 mqf_close(struct file *fp, struct thread *td) 2370 { 2371 struct mqfs_node *pn; 2372 2373 FILE_LOCK(fp); 2374 fp->f_ops = &badfileops; 2375 FILE_UNLOCK(fp); 2376 pn = fp->f_data; 2377 fp->f_data = NULL; 2378 sx_xlock(&mqfs_data.mi_lock); 2379 mqnode_release(pn); 2380 sx_xunlock(&mqfs_data.mi_lock); 2381 return (0); 2382 } 2383 2384 static int 2385 mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 2386 struct thread *td) 2387 { 2388 struct mqfs_node *pn = fp->f_data; 2389 2390 bzero(st, sizeof *st); 2391 st->st_atimespec = pn->mn_atime; 2392 st->st_mtimespec = pn->mn_mtime; 2393 st->st_ctimespec = pn->mn_ctime; 2394 st->st_birthtimespec = pn->mn_birth; 2395 st->st_uid = pn->mn_uid; 2396 st->st_gid = pn->mn_gid; 2397 st->st_mode = S_IFIFO | pn->mn_mode; 2398 return (0); 2399 } 2400 2401 static int 2402 mqf_kqfilter(struct file *fp, struct knote *kn) 2403 { 2404 struct mqueue *mq = FPTOMQ(fp); 2405 int error = 0; 2406 2407 if (kn->kn_filter == EVFILT_READ) { 2408 kn->kn_fop = &mq_rfiltops; 2409 knlist_add(&mq->mq_rsel.si_note, kn, 0); 2410 } else if (kn->kn_filter == EVFILT_WRITE) { 2411 kn->kn_fop = &mq_wfiltops; 2412 knlist_add(&mq->mq_wsel.si_note, kn, 0); 2413 } else 2414 error = EINVAL; 2415 return (error); 2416 } 2417 2418 static void 2419 filt_mqdetach(struct knote *kn) 2420 { 2421 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2422 2423 if (kn->kn_filter == EVFILT_READ) 2424 knlist_remove(&mq->mq_rsel.si_note, kn, 0); 2425 else if (kn->kn_filter == EVFILT_WRITE) 2426 knlist_remove(&mq->mq_wsel.si_note, kn, 0); 2427 else 2428 panic("filt_mqdetach"); 2429 } 2430 2431 static int 2432 filt_mqread(struct knote *kn, long hint) 2433 { 2434 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2435 2436 mtx_assert(&mq->mq_mutex, MA_OWNED); 2437 return (mq->mq_curmsgs != 0); 2438 } 2439 2440 static int 2441 filt_mqwrite(struct knote *kn, long hint) 2442 { 2443 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2444 2445 mtx_assert(&mq->mq_mutex, MA_OWNED); 2446 return (mq->mq_curmsgs < mq->mq_maxmsg); 2447 } 2448 2449 static struct fileops mqueueops = { 2450 .fo_read = mqf_read, 2451 .fo_write = mqf_write, 2452 .fo_ioctl = mqf_ioctl, 2453 .fo_poll = mqf_poll, 2454 .fo_kqfilter = mqf_kqfilter, 2455 .fo_stat = mqf_stat, 2456 .fo_close = mqf_close 2457 }; 2458 2459 static struct vop_vector mqfs_vnodeops = { 2460 .vop_default = &default_vnodeops, 2461 .vop_access = mqfs_access, 2462 .vop_cachedlookup = mqfs_lookup, 2463 .vop_lookup = vfs_cache_lookup, 2464 .vop_reclaim = mqfs_reclaim, 2465 .vop_create = mqfs_create, 2466 .vop_remove = mqfs_remove, 2467 .vop_inactive = mqfs_inactive, 2468 .vop_open = mqfs_open, 2469 .vop_close = 
mqfs_close,
	.vop_getattr		= mqfs_getattr,
	.vop_setattr		= mqfs_setattr,
	.vop_read		= mqfs_read,
	.vop_write		= VOP_EOPNOTSUPP,
	.vop_readdir		= mqfs_readdir,
	.vop_mkdir		= VOP_EOPNOTSUPP,
	.vop_rmdir		= VOP_EOPNOTSUPP
};

static struct vfsops mqfs_vfsops = {
	.vfs_init		= mqfs_init,
	.vfs_uninit		= mqfs_uninit,
	.vfs_mount		= mqfs_mount,
	.vfs_unmount		= mqfs_unmount,
	.vfs_root		= mqfs_root,
	.vfs_statfs		= mqfs_statfs,
};

SYSCALL_MODULE_HELPER(kmq_open);
SYSCALL_MODULE_HELPER(kmq_setattr);
SYSCALL_MODULE_HELPER(kmq_timedsend);
SYSCALL_MODULE_HELPER(kmq_timedreceive);
SYSCALL_MODULE_HELPER(kmq_notify);
SYSCALL_MODULE_HELPER(kmq_unlink);

VFS_SET(mqfs_vfsops, mqueuefs, VFCF_SYNTHETIC);
MODULE_VERSION(mqueuefs, 1);
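
/*
 * Illustrative userland sketch (not part of the kernel module proper):
 * the kmq_*() syscalls above back the POSIX mq_*() API exported to
 * applications.  Assuming the usual <mqueue.h> interface, a minimal
 * producer could look roughly like this:
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *		char buf[128] = "hello";
 *		mqd_t mqd;
 *
 *		// Reaches kmq_open(); creates "/myqueue" under the mqfs root.
 *		mqd = mq_open("/myqueue", O_CREAT | O_WRONLY, 0600, &attr);
 *		if (mqd == (mqd_t)-1) {
 *			perror("mq_open");
 *			return (1);
 *		}
 *		// Reaches kmq_timedsend() with a NULL abs_timeout; blocks if
 *		// the queue already holds mq_maxmsg messages.
 *		if (mq_send(mqd, buf, strlen(buf) + 1, 1) == -1)
 *			perror("mq_send");
 *		mq_close(mqd);
 *		return (0);
 *	}
 */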