/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * POSIX message queue implementation.
 *
 * 1) A mqueue filesystem can be mounted; each message queue then appears
 *    in the mounted directory, where the user can change a queue's
 *    permissions and ownership or remove a queue.  Manually creating a
 *    file in the directory creates a message queue in the kernel with
 *    default attributes and the same name; this method is discouraged
 *    because the mq_open syscall lets the user specify different
 *    attributes.  The filesystem can be mounted multiple times at
 *    different mount points, but all mounts show the same contents.
 *
 * 2) Standard POSIX message queue API.  The syscalls do not go through
 *    the VFS layer but operate directly on the internal data structures,
 *    so the IPC facility can be used without mounting the mqueue
 *    filesystem.
 */
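/*
 * A minimal userland sketch (illustrative only, not compiled here) of how
 * the facility described above is typically driven: the standard <mqueue.h>
 * wrappers ultimately invoke the kmq_* syscalls implemented in this file.
 * The queue name "/myqueue" and the attribute values are hypothetical.  The
 * filesystem view mentioned in 1) is optional, e.g.
 * "mount -t mqueuefs null /mnt/mqueue"; the limits consulted below can be
 * inspected through the kern.mqueue.* sysctls defined later in this file.
 */
#if 0
#include <mqueue.h>
#include <fcntl.h>
#include <string.h>

static void
example(void)
{
	struct mq_attr attr;
	char buf[1024];
	unsigned int prio;
	mqd_t mqd;

	memset(&attr, 0, sizeof(attr));
	attr.mq_maxmsg = 10;		/* must be <= kern.mqueue.maxmsg */
	attr.mq_msgsize = 1024;		/* must be <= kern.mqueue.maxmsgsize */

	mqd = mq_open("/myqueue", O_RDWR | O_CREAT, 0600, &attr);
	mq_send(mqd, "hello", 5, /* prio */ 1);
	mq_receive(mqd, buf, sizeof(buf), &prio);
	mq_close(mqd);
	mq_unlink("/myqueue");
}
#endif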

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/buf.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mqueue.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/posix4.h>
#include <sys/poll.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sysproto.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <machine/atomic.h>

/*
 * Limits and constants
 */
#define	MQFS_NAMELEN	NAME_MAX
#define	MQFS_DELEN	(8 + MQFS_NAMELEN)

/* node types */
typedef enum {
	mqfstype_none = 0,
	mqfstype_root,
	mqfstype_dir,
	mqfstype_this,
	mqfstype_parent,
	mqfstype_file,
	mqfstype_symlink,
} mqfs_type_t;

struct mqfs_node;

/*
 * mqfs_info: describes a mqfs instance
 */
struct mqfs_info {
	struct sx		mi_lock;
	struct mqfs_node	*mi_root;
	struct unrhdr		*mi_unrhdr;
};

struct mqfs_vdata {
	LIST_ENTRY(mqfs_vdata)	mv_link;
	struct mqfs_node	*mv_node;
	struct vnode		*mv_vnode;
	struct task		mv_task;
};

/*
 * mqfs_node: describes a node (file or directory) within a mqfs
 */
struct mqfs_node {
	char			mn_name[MQFS_NAMELEN+1];
	struct mqfs_info	*mn_info;
	struct mqfs_node	*mn_parent;
	LIST_HEAD(,mqfs_node)	mn_children;
	LIST_ENTRY(mqfs_node)	mn_sibling;
	LIST_HEAD(,mqfs_vdata)	mn_vnodes;
	int			mn_refcount;
	mqfs_type_t		mn_type;
	int			mn_deleted;
	u_int32_t		mn_fileno;
	void			*mn_data;
	struct timespec		mn_birth;
	struct timespec		mn_ctime;
	struct timespec		mn_atime;
	struct timespec		mn_mtime;
	uid_t			mn_uid;
	gid_t			mn_gid;
	int			mn_mode;
};

#define	VTON(vp)	(((struct mqfs_vdata *)((vp)->v_data))->mv_node)
#define	VTOMQ(vp)	((struct mqueue *)(VTON(vp)->mn_data))
#define	VFSTOMQFS(m)	((struct mqfs_info *)((m)->mnt_data))
#define	FPTOMQ(fp)	((struct mqueue *)(((struct mqfs_node *) \
			(fp)->f_data)->mn_data))

TAILQ_HEAD(msgq, mqueue_msg);

struct mqueue;

struct mqueue_notifier {
	LIST_ENTRY(mqueue_notifier)	nt_link;
	struct sigevent			nt_sigev;
	ksiginfo_t			nt_ksi;
	struct proc			*nt_proc;
};

struct mqueue {
	struct mtx	mq_mutex;
	int		mq_flags;
	long		mq_maxmsg;
	long		mq_msgsize;
	long		mq_curmsgs;
	long		mq_totalbytes;
	struct msgq	mq_msgq;
	int		mq_receivers;
	int		mq_senders;
	struct selinfo	mq_rsel;
	struct selinfo	mq_wsel;
	struct mqueue_notifier	*mq_notifier;
};

#define	MQ_RSEL		0x01
#define	MQ_WSEL		0x02

struct mqueue_msg {
	TAILQ_ENTRY(mqueue_msg)	msg_link;
	unsigned int	msg_prio;
	unsigned int	msg_size;
	/* following real data... */
};

SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW, 0,
	"POSIX real time message queue");

static int	default_maxmsg = 10;
static int	default_msgsize = 1024;

static int	maxmsg = 100;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW,
    &maxmsg, 0, "Default maximum messages in queue");
static int	maxmsgsize = 16384;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW,
    &maxmsgsize, 0, "Default maximum message size");
static int	maxmq = 100;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW,
    &maxmq, 0, "maximum message queues");
static int	curmq = 0;
SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW,
    &curmq, 0, "current message queue number");
static int	unloadable = 0;
static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data");

static eventhandler_tag exit_tag;

/* Only one instance per system */
static struct mqfs_info		mqfs_data;
static uma_zone_t		mqnode_zone;
static uma_zone_t		mqueue_zone;
static uma_zone_t		mvdata_zone;
static uma_zone_t		mqnoti_zone;
static struct vop_vector	mqfs_vnodeops;
static struct fileops		mqueueops;

/*
 * Directory structure construction and manipulation
 */
#ifdef notyet
static struct mqfs_node	*mqfs_create_dir(struct mqfs_node *parent,
	const char *name, int namelen, struct ucred *cred, int mode);
static struct mqfs_node	*mqfs_create_link(struct mqfs_node *parent,
	const char *name, int namelen, struct ucred *cred, int mode);
#endif

static struct mqfs_node	*mqfs_create_file(struct mqfs_node *parent,
	const char *name, int namelen, struct ucred *cred, int mode);
static int	mqfs_destroy(struct mqfs_node *mn);
static void	mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn);
static void	mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn);
static int	mqfs_allocv(struct mount *mp, struct vnode **vpp,
	struct mqfs_node *pn);

/*
 * Message queue construction and manipulation
 */
static struct mqueue	*mqueue_alloc(const struct mq_attr *attr);
static void	mqueue_free(struct mqueue *mq);
static int	mqueue_send(struct mqueue *mq, const char *msg_ptr,
	size_t msg_len, unsigned msg_prio, int waitok,
	const struct timespec *abs_timeout);
static int	mqueue_receive(struct mqueue *mq, char *msg_ptr,
	size_t msg_len, unsigned *msg_prio, int waitok,
	const struct timespec *abs_timeout);
static int	_mqueue_send(struct mqueue *mq, struct mqueue_msg *msg,
	int timo);
static int	_mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg,
	int timo);
static void	mqueue_send_notification(struct mqueue *mq);
static void	mqueue_fdclose(struct thread *td, int fd, struct file *fp);
static void	mq_proc_exit(void *arg, struct proc *p);

/*
 * kqueue filters
 */
static void	filt_mqdetach(struct knote *kn);
static int	filt_mqread(struct knote *kn, long hint);
static int	filt_mqwrite(struct knote *kn, long hint);

struct filterops mq_rfiltops =
	{ 1, NULL, filt_mqdetach, filt_mqread };
struct filterops mq_wfiltops =
	{ 1, NULL, filt_mqdetach, filt_mqwrite };

/*
 * Initialize fileno bitmap
 */
static void
mqfs_fileno_init(struct mqfs_info *mi)
{
	struct unrhdr *up;

	up = new_unrhdr(1, INT_MAX, NULL);
	mi->mi_unrhdr = up;
}

/*
 * Tear down fileno bitmap
 */
static void
mqfs_fileno_uninit(struct mqfs_info *mi)
{
	struct unrhdr *up;

	up = mi->mi_unrhdr;
	mi->mi_unrhdr = NULL;
	delete_unrhdr(up);
}

/*
 * Allocate a file number
 */
static void
mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn)
{
	/* make sure our parent has a file number */
	if (mn->mn_parent && !mn->mn_parent->mn_fileno)
		mqfs_fileno_alloc(mi, mn->mn_parent);

	switch (mn->mn_type) {
	case mqfstype_root:
	case mqfstype_dir:
	case mqfstype_file:
	case mqfstype_symlink:
		mn->mn_fileno = alloc_unr(mi->mi_unrhdr);
		break;
	case mqfstype_this:
		KASSERT(mn->mn_parent != NULL,
		    ("mqfstype_this node has no parent"));
		mn->mn_fileno = mn->mn_parent->mn_fileno;
		break;
	case mqfstype_parent:
		KASSERT(mn->mn_parent != NULL,
		    ("mqfstype_parent node has no parent"));
		if (mn->mn_parent == mi->mi_root) {
			mn->mn_fileno = mn->mn_parent->mn_fileno;
			break;
		}
		KASSERT(mn->mn_parent->mn_parent != NULL,
		    ("mqfstype_parent node has no grandparent"));
		mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno;
		break;
	default:
		KASSERT(0,
		    ("mqfs_fileno_alloc() called for unknown type node: %d",
			mn->mn_type));
		break;
	}
}

/*
 * Release a file number
 */
static void
mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn)
{
	switch (mn->mn_type) {
	case mqfstype_root:
	case mqfstype_dir:
	case mqfstype_file:
	case mqfstype_symlink:
		free_unr(mi->mi_unrhdr, mn->mn_fileno);
		break;
	case mqfstype_this:
	case mqfstype_parent:
		/* ignore these, as they don't "own" their file number */
		break;
	default:
		KASSERT(0,
		    ("mqfs_fileno_free() called for unknown type node: %d",
			mn->mn_type));
		break;
	}
}

static __inline struct mqfs_node *
mqnode_alloc(void)
{
	return uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO);
}

static __inline void
mqnode_free(struct mqfs_node *node)
{
	uma_zfree(mqnode_zone, node);
}

static __inline void
mqnode_addref(struct mqfs_node *node)
{
	atomic_fetchadd_int(&node->mn_refcount, 1);
}

static __inline void
mqnode_release(struct mqfs_node *node)
{
	struct mqfs_info *mqfs;
	int old, exp;

	mqfs = node->mn_info;
	old = atomic_fetchadd_int(&node->mn_refcount, -1);
	if (node->mn_type == mqfstype_dir ||
	    node->mn_type == mqfstype_root)
		exp = 3;	/* include . and .. */
	else
		exp = 1;
	if (old == exp) {
		int locked = sx_xlocked(&mqfs->mi_lock);
		if (!locked)
			sx_xlock(&mqfs->mi_lock);
		mqfs_destroy(node);
		if (!locked)
			sx_xunlock(&mqfs->mi_lock);
	}
}

/*
 * Add a node to a directory
 */
static int
mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node)
{
	KASSERT(parent != NULL, ("%s(): parent is NULL", __func__));
	KASSERT(parent->mn_info != NULL,
	    ("%s(): parent has no mn_info", __func__));
	KASSERT(parent->mn_type == mqfstype_dir ||
	    parent->mn_type == mqfstype_root,
	    ("%s(): parent is not a directory", __func__));

	node->mn_info = parent->mn_info;
	node->mn_parent = parent;
	LIST_INIT(&node->mn_children);
	LIST_INIT(&node->mn_vnodes);
	LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling);
	mqnode_addref(parent);
	return (0);
}

static struct mqfs_node *
mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode,
	int nodetype)
{
	struct mqfs_node *node;

	node = mqnode_alloc();
	strncpy(node->mn_name, name, namelen);
	node->mn_type = nodetype;
	node->mn_refcount = 1;
	vfs_timestamp(&node->mn_birth);
	node->mn_ctime = node->mn_atime = node->mn_mtime
		= node->mn_birth;
	node->mn_uid = cred->cr_uid;
	node->mn_gid = cred->cr_gid;
	node->mn_mode = mode;
	return (node);
}

/*
 * Create a file
 */
static struct mqfs_node *
mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen,
	struct ucred *cred, int mode)
{
	struct mqfs_node *node;

	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file);
	if (mqfs_add_node(parent, node) != 0) {
		mqnode_free(node);
		return (NULL);
	}
	return (node);
}

/*
 * Add . and .. to a directory
 */
static int
mqfs_fixup_dir(struct mqfs_node *parent)
{
	struct mqfs_node *dir;

	dir = mqnode_alloc();
	dir->mn_name[0] = '.';
	dir->mn_type = mqfstype_this;
	dir->mn_refcount = 1;
	if (mqfs_add_node(parent, dir) != 0) {
		mqnode_free(dir);
		return (-1);
	}

	dir = mqnode_alloc();
	dir->mn_name[0] = dir->mn_name[1] = '.';
	dir->mn_type = mqfstype_parent;
	dir->mn_refcount = 1;

	if (mqfs_add_node(parent, dir) != 0) {
		mqnode_free(dir);
		return (-1);
	}

	return (0);
}

#ifdef notyet

/*
 * Create a directory
 */
static struct mqfs_node *
mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen,
	struct ucred *cred, int mode)
{
	struct mqfs_node *node;

	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir);
	if (mqfs_add_node(parent, node) != 0) {
		mqnode_free(node);
		return (NULL);
	}

	if (mqfs_fixup_dir(node) != 0) {
		mqfs_destroy(node);
		return (NULL);
	}
	return (node);
}

/*
 * Create a symlink
 */
static struct mqfs_node *
mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen,
	struct ucred *cred, int mode)
{
	struct mqfs_node *node;

	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink);
	if (mqfs_add_node(parent, node) != 0) {
		mqnode_free(node);
		return (NULL);
	}
	return (node);
}

#endif

/*
 * Destroy a node or a tree of nodes
 */
static int
mqfs_destroy(struct mqfs_node *node)
{
	struct mqfs_node *parent;

	KASSERT(node != NULL,
	    ("%s(): node is NULL", __func__));
	KASSERT(node->mn_info != NULL,
	    ("%s(): node has no mn_info", __func__));

	/* destroy children */
	if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root)
		while (! LIST_EMPTY(&node->mn_children))
			mqfs_destroy(LIST_FIRST(&node->mn_children));

	/* unlink from parent */
	if ((parent = node->mn_parent) != NULL) {
		KASSERT(parent->mn_info == node->mn_info,
		    ("%s(): parent has different mn_info", __func__));
		LIST_REMOVE(node, mn_sibling);
	}

	if (node->mn_fileno != 0)
		mqfs_fileno_free(node->mn_info, node);
	if (node->mn_data != NULL)
		mqueue_free(node->mn_data);
	mqnode_free(node);
	return (0);
}

/*
 * Mount a mqfs instance
 */
static int
mqfs_mount(struct mount *mp)
{
	struct statfs *sbp;

	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	mp->mnt_data = &mqfs_data;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
	vfs_getnewfsid(mp);

	sbp = &mp->mnt_stat;
	vfs_mountedfrom(mp, "mqueue");
	sbp->f_bsize = PAGE_SIZE;
	sbp->f_iosize = PAGE_SIZE;
	sbp->f_blocks = 1;
	sbp->f_bfree = 0;
	sbp->f_bavail = 0;
	sbp->f_files = 1;
	sbp->f_ffree = 0;
	return (0);
}

/*
 * Unmount a mqfs instance
 */
static int
mqfs_unmount(struct mount *mp, int mntflags)
{
	int error;

	error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0,
	    curthread);
	return (error);
}

/*
 * Return a root vnode
 */
static int
mqfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct mqfs_info *mqfs;
	int ret;

	mqfs = VFSTOMQFS(mp);
	ret = mqfs_allocv(mp, vpp, mqfs->mi_root);
	return (ret);
}

/*
 * Return filesystem stats
 */
static int
mqfs_statfs(struct mount *mp, struct statfs *sbp)
{
	/* XXX update statistics */
	return (0);
}

/*
 * Initialize a mqfs instance
 */
static int
mqfs_init(struct vfsconf *vfc)
{
	struct mqfs_node *root;
	struct mqfs_info *mi;

	mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node),
		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue),
		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mvdata_zone = uma_zcreate("mvdata",
		sizeof(struct mqfs_vdata), NULL, NULL, NULL,
		NULL, UMA_ALIGN_PTR, 0);
	mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier),
		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mi = &mqfs_data;
	sx_init(&mi->mi_lock, "mqfs lock");
	/* set up the root directory */
	root = mqfs_create_node("/", 1, curthread->td_ucred, 01777,
		mqfstype_root);
	root->mn_info = mi;
	LIST_INIT(&root->mn_children);
	LIST_INIT(&root->mn_vnodes);
	mi->mi_root = root;
	mqfs_fileno_init(mi);
	mqfs_fileno_alloc(mi, root);
	mqfs_fixup_dir(root);
	exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL,
	    EVENTHANDLER_PRI_ANY);
	mq_fdclose = mqueue_fdclose;
	p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING);
	return (0);
}

/*
 * Destroy a mqfs instance
 */
static int
mqfs_uninit(struct vfsconf *vfc)
{
	struct mqfs_info *mi;

	if (!unloadable)
		return (EOPNOTSUPP);
	EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
	mi = &mqfs_data;
	mqfs_destroy(mi->mi_root);
	mi->mi_root = NULL;
	mqfs_fileno_uninit(mi);
	sx_destroy(&mi->mi_lock);
	uma_zdestroy(mqnode_zone);
	uma_zdestroy(mqueue_zone);
	uma_zdestroy(mvdata_zone);
	uma_zdestroy(mqnoti_zone);
	return (0);
}

/*
 * task routine
 */
static void
do_recycle(void *context, int pending __unused)
{
	struct vnode *vp = (struct vnode *)context;

	vrecycle(vp, curthread);
	vdrop(vp);
}

/*
 * Allocate a vnode
 */
static int
mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
{
	struct mqfs_vdata *vd;
	struct mqfs_info *mqfs;
	struct vnode *newvpp;
	int error;

	mqfs = pn->mn_info;
	*vpp = NULL;
	sx_xlock(&mqfs->mi_lock);
	LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
		if (vd->mv_vnode->v_mount == mp) {
			vhold(vd->mv_vnode);
			break;
		}
	}

	if (vd != NULL) {
found:
		*vpp = vd->mv_vnode;
		sx_xunlock(&mqfs->mi_lock);
		error = vget(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
		vdrop(*vpp);
		return (error);
	}
	sx_xunlock(&mqfs->mi_lock);

	error = getnewvnode("mqueue", mp, &mqfs_vnodeops, &newvpp);
	if (error)
		return (error);
	vn_lock(newvpp, LK_EXCLUSIVE | LK_RETRY);
	error = insmntque(newvpp, mp);
	if (error != 0)
		return (error);

	sx_xlock(&mqfs->mi_lock);
	/*
	 * Check if it has already been allocated
	 * while we were blocked.
	 */
	LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
		if (vd->mv_vnode->v_mount == mp) {
			vhold(vd->mv_vnode);
			sx_xunlock(&mqfs->mi_lock);

			vgone(newvpp);
			vput(newvpp);
			goto found;
		}
	}

	*vpp = newvpp;

	vd = uma_zalloc(mvdata_zone, M_WAITOK);
	(*vpp)->v_data = vd;
	vd->mv_vnode = *vpp;
	vd->mv_node = pn;
	TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp);
	LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link);
	mqnode_addref(pn);
	switch (pn->mn_type) {
	case mqfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
		/* fall through */
	case mqfstype_dir:
	case mqfstype_this:
	case mqfstype_parent:
		(*vpp)->v_type = VDIR;
		break;
	case mqfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case mqfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case mqfstype_none:
		KASSERT(0, ("mqfs_allocf called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type);
	}
	sx_xunlock(&mqfs->mi_lock);
	return (0);
}

/*
 * Search a directory entry
 */
static struct mqfs_node *
mqfs_search(struct mqfs_node *pd, const char *name, int len)
{
	struct mqfs_node *pn;

	sx_assert(&pd->mn_info->mi_lock, SX_LOCKED);
	LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
		if (strncmp(pn->mn_name, name, len) == 0 &&
		    pn->mn_name[len] == '\0')
			return (pn);
	}
	return (NULL);
}

/*
 * Look up a file or directory.
 */
static int
mqfs_lookupx(struct vop_cachedlookup_args *ap)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct mqfs_node *pd;
	struct mqfs_node *pn;
	struct mqfs_info *mqfs;
	int nameiop, flags, error, namelen;
	char *pname;
	struct thread *td;

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dvp = ap->a_dvp;
	pname = cnp->cn_nameptr;
	namelen = cnp->cn_namelen;
	td = cnp->cn_thread;
	flags = cnp->cn_flags;
	nameiop = cnp->cn_nameiop;
	pd = VTON(dvp);
	pn = NULL;
	mqfs = pd->mn_info;
	*vpp = NULLVP;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread);
	if (error)
		return (error);

	/* shortcut: check if the name is too long */
	if (cnp->cn_namelen >= MQFS_NAMELEN)
		return (ENOENT);

	/* self */
	if (namelen == 1 && pname[0] == '.') {
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		pn = pd;
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	/* parent */
	if (cnp->cn_flags & ISDOTDOT) {
		if (dvp->v_vflag & VV_ROOT)
			return (EIO);
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		VOP_UNLOCK(dvp, 0);
		KASSERT(pd->mn_parent, ("non-root directory has no parent"));
		pn = pd->mn_parent;
		error = mqfs_allocv(dvp->v_mount, vpp, pn);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		return (error);
	}

	/* named node */
	sx_xlock(&mqfs->mi_lock);
	pn = mqfs_search(pd, pname, namelen);
	if (pn != NULL)
		mqnode_addref(pn);
	sx_xunlock(&mqfs->mi_lock);

	/* found */
	if (pn != NULL) {
		/* DELETE */
		if (nameiop == DELETE && (flags & ISLASTCN)) {
			error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
			if (error) {
				mqnode_release(pn);
				return (error);
			}
			if (*vpp == dvp) {
				VREF(dvp);
				*vpp = dvp;
				mqnode_release(pn);
				return (0);
			}
		}

		/* allocate vnode */
		error = mqfs_allocv(dvp->v_mount, vpp, pn);
		mqnode_release(pn);
		if (error == 0 && cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, *vpp, cnp);
		return (error);
	}

	/* not found */

	/* will we create a new entry in the directory? */
	if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT)
	    && (flags & ISLASTCN)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
		if (error)
			return (error);
		cnp->cn_flags |= SAVENAME;
		return (EJUSTRETURN);
	}
	return (ENOENT);
}

#if 0
struct vop_lookup_args {
	struct vop_generic_args a_gen;
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
};
#endif

/*
 * vnode lookup operation
 */
static int
mqfs_lookup(struct vop_cachedlookup_args *ap)
{
	int rc;

	rc = mqfs_lookupx(ap);
	return (rc);
}

#if 0
struct vop_create_args {
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
	struct vattr *a_vap;
};
#endif

/*
 * vnode creation operation
 */
static int
mqfs_create(struct vop_create_args *ap)
{
	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
	struct componentname *cnp = ap->a_cnp;
	struct mqfs_node *pd;
	struct mqfs_node *pn;
	struct mqueue *mq;
	int error;

	pd = VTON(ap->a_dvp);
	if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
		return (ENOTDIR);
	mq = mqueue_alloc(NULL);
	if (mq == NULL)
		return (EAGAIN);
	sx_xlock(&mqfs->mi_lock);
	if ((cnp->cn_flags & HASBUF) == 0)
		panic("%s: no name", __func__);
	pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen,
		cnp->cn_cred, ap->a_vap->va_mode);
	if (pn == NULL) {
		sx_xunlock(&mqfs->mi_lock);
		error = ENOSPC;
	} else {
		mqnode_addref(pn);
		sx_xunlock(&mqfs->mi_lock);
		error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
		mqnode_release(pn);
		if (error)
			mqfs_destroy(pn);
		else
			pn->mn_data = mq;
	}
	if (error)
		mqueue_free(mq);
	return (error);
}

/*
 * Remove an entry
 */
static int
do_unlink(struct mqfs_node *pn, struct ucred *ucred)
{
	struct mqfs_node *parent;
	struct mqfs_vdata *vd;
	int error = 0;

	sx_assert(&pn->mn_info->mi_lock, SX_LOCKED);

	if (ucred->cr_uid != pn->mn_uid &&
	    (error = priv_check_cred(ucred, PRIV_MQ_ADMIN, 0)) != 0)
		error = EACCES;
	else if (!pn->mn_deleted) {
		parent = pn->mn_parent;
		pn->mn_parent = NULL;
		pn->mn_deleted = 1;
		LIST_REMOVE(pn, mn_sibling);
		LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
			cache_purge(vd->mv_vnode);
			vhold(vd->mv_vnode);
			taskqueue_enqueue(taskqueue_thread, &vd->mv_task);
		}
		mqnode_release(pn);
		mqnode_release(parent);
	} else
		error = ENOENT;
	return (error);
}

#if 0
struct vop_remove_args {
	struct vnode *a_dvp;
	struct vnode *a_vp;
	struct componentname *a_cnp;
};
#endif

/*
 * vnode removal operation
 */
static int
mqfs_remove(struct vop_remove_args *ap)
{
	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
	struct mqfs_node *pn;
	int error;

	if (ap->a_vp->v_type == VDIR)
		return (EPERM);
	pn = VTON(ap->a_vp);
	sx_xlock(&mqfs->mi_lock);
	error = do_unlink(pn, ap->a_cnp->cn_cred);
	sx_xunlock(&mqfs->mi_lock);
	return (error);
}

#if 0
struct vop_inactive_args {
	struct vnode *a_vp;
	struct thread *a_td;
};
#endif

static int
mqfs_inactive(struct vop_inactive_args *ap)
{
	struct mqfs_node *pn = VTON(ap->a_vp);

	if (pn->mn_deleted)
		vrecycle(ap->a_vp, ap->a_td);
	return (0);
}

#if 0
struct vop_reclaim_args {
	struct vop_generic_args a_gen;
	struct vnode *a_vp;
	struct thread *a_td;
};
#endif

static int
mqfs_reclaim(struct vop_reclaim_args *ap)
{
	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount);
	struct vnode *vp = ap->a_vp;
	struct mqfs_node *pn;
	struct mqfs_vdata *vd;

	vd = vp->v_data;
	pn = vd->mv_node;
	sx_xlock(&mqfs->mi_lock);
	vp->v_data = NULL;
	LIST_REMOVE(vd, mv_link);
	uma_zfree(mvdata_zone, vd);
	mqnode_release(pn);
	sx_xunlock(&mqfs->mi_lock);
	return (0);
}

#if 0
struct vop_open_args {
	struct vop_generic_args a_gen;
	struct vnode *a_vp;
	int a_mode;
	struct ucred *a_cred;
	struct thread *a_td;
	struct file *a_fp;
};
#endif

static int
mqfs_open(struct vop_open_args *ap)
{
	return (0);
}

#if 0
struct vop_close_args {
	struct vop_generic_args a_gen;
	struct vnode *a_vp;
	int a_fflag;
	struct ucred *a_cred;
	struct thread *a_td;
};
#endif

static int
mqfs_close(struct vop_close_args *ap)
{
	return (0);
}

#if 0
struct vop_access_args {
	struct vop_generic_args a_gen;
	struct vnode *a_vp;
	accmode_t a_accmode;
	struct ucred *a_cred;
	struct thread *a_td;
};
#endif

/*
 * Verify permissions
 */
static int
mqfs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr vattr;
	int error;

	error = VOP_GETATTR(vp, &vattr, ap->a_cred);
	if (error)
		return (error);
	error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid,
	    vattr.va_gid, ap->a_accmode, ap->a_cred, NULL);
	return (error);
}

#if 0
struct vop_getattr_args {
	struct vop_generic_args a_gen;
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
};
#endif

/*
 * Get file attributes
 */
static int
mqfs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mqfs_node *pn = VTON(vp);
	struct vattr *vap = ap->a_vap;
	int error = 0;

	vap->va_type = vp->v_type;
	vap->va_mode = pn->mn_mode;
	vap->va_nlink = 1;
	vap->va_uid = pn->mn_uid;
	vap->va_gid = pn->mn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = pn->mn_fileno;
	vap->va_size = 0;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_bytes = vap->va_size = 0;
	vap->va_atime = pn->mn_atime;
	vap->va_mtime = pn->mn_mtime;
	vap->va_ctime = pn->mn_ctime;
	vap->va_birthtime = pn->mn_birth;
	vap->va_gen = 0;
	vap->va_flags = 0;
	vap->va_rdev = NODEV;
	vap->va_bytes = 0;
	vap->va_filerev = 0;
	return (error);
}

#if 0
struct vop_setattr_args {
	struct vop_generic_args a_gen;
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
};
#endif

/*
 * Set attributes
 */
static int
mqfs_setattr(struct vop_setattr_args *ap)
{
	struct mqfs_node *pn;
	struct vattr *vap;
	struct vnode *vp;
	struct thread *td;
	int c, error;
	uid_t uid;
	gid_t gid;

	td = curthread;
	vap = ap->a_vap;
	vp = ap->a_vp;
	if ((vap->va_type != VNON) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) ||
	    (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) ||
	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
	    (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) ||
	    (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	pn = VTON(vp);

	error = c = 0;
	if (vap->va_uid == (uid_t)VNOVAL)
		uid = pn->mn_uid;
	else
		uid = vap->va_uid;
	if (vap->va_gid == (gid_t)VNOVAL)
		gid = pn->mn_gid;
	else
		gid = vap->va_gid;

	if (uid != pn->mn_uid || gid != pn->mn_gid) {
		/*
		 * To modify the ownership of a file, must possess VADMIN
		 * for that file.
		 */
		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)))
			return (error);

		/*
		 * XXXRW: Why is there a privilege check here: shouldn't the
		 * check in VOP_ACCESS() be enough?  Also, are the group bits
		 * below definitely right?
		 */
		if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid ||
		    (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) &&
		    (error = priv_check(td, PRIV_MQ_ADMIN)) != 0)
			return (error);
		pn->mn_uid = uid;
		pn->mn_gid = gid;
		c = 1;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != pn->mn_uid) &&
		    (error = priv_check(td, PRIV_MQ_ADMIN)))
			return (error);
		pn->mn_mode = vap->va_mode;
		c = 1;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		/* See the comment in ufs_vnops::ufs_setattr(). */
		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td))))
			return (error);
		if (vap->va_atime.tv_sec != VNOVAL) {
			pn->mn_atime = vap->va_atime;
		}
		if (vap->va_mtime.tv_sec != VNOVAL) {
			pn->mn_mtime = vap->va_mtime;
		}
		c = 1;
	}
	if (c) {
		vfs_timestamp(&pn->mn_ctime);
	}
	return (0);
}

#if 0
struct vop_read_args {
	struct vop_generic_args a_gen;
	struct vnode *a_vp;
	struct uio *a_uio;
	int a_ioflag;
	struct ucred *a_cred;
};
#endif

/*
 * Read from a file
 */
static int
mqfs_read(struct vop_read_args *ap)
{
	char buf[80];
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct mqfs_node *pn;
	struct mqueue *mq;
	int len, error;

	if (vp->v_type != VREG)
		return (EINVAL);

	pn = VTON(vp);
	mq = VTOMQ(vp);
	snprintf(buf, sizeof(buf),
	    "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n",
	    mq->mq_totalbytes,
	    mq->mq_maxmsg,
	    mq->mq_curmsgs,
	    mq->mq_msgsize);
	buf[sizeof(buf)-1] = '\0';
	len = strlen(buf);
	error = uiomove_frombuf(buf, len, uio);
	return (error);
}
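/*
 * Illustrative only: with mqueuefs mounted (for example on /mnt/mqueue),
 * reading a queue node as sketched below returns the single status line
 * formatted above, e.g. "QSIZE:0 MAXMSG:10 CURMSG:0 MSGSIZE:1024" for a
 * queue created with the default attributes.  The mount point and queue
 * name are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void
show_queue_status(void)
{
	char line[80];
	ssize_t n;
	int fd;

	fd = open("/mnt/mqueue/myqueue", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, line, sizeof(line) - 1);
		if (n > 0) {
			line[n] = '\0';
			printf("%s", line);
		}
		close(fd);
	}
}
#endif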
#if 0
struct vop_readdir_args {
	struct vop_generic_args a_gen;
	struct vnode *a_vp;
	struct uio *a_uio;
	struct ucred *a_cred;
	int *a_eofflag;
	int *a_ncookies;
	u_long **a_cookies;
};
#endif

/*
 * Return directory entries.
 */
static int
mqfs_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp;
	struct mqfs_info *mi;
	struct mqfs_node *pd;
	struct mqfs_node *pn;
	struct dirent entry;
	struct uio *uio;
	int *tmp_ncookies = NULL;
	off_t offset;
	int error, i;

	vp = ap->a_vp;
	mi = VFSTOMQFS(vp->v_mount);
	pd = VTON(vp);
	uio = ap->a_uio;

	if (vp->v_type != VDIR)
		return (ENOTDIR);

	if (uio->uio_offset < 0)
		return (EINVAL);

	if (ap->a_ncookies != NULL) {
		tmp_ncookies = ap->a_ncookies;
		*ap->a_ncookies = 0;
		ap->a_ncookies = NULL;
	}

	error = 0;
	offset = 0;

	sx_xlock(&mi->mi_lock);

	LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
		entry.d_reclen = sizeof(entry);
		if (!pn->mn_fileno)
			mqfs_fileno_alloc(mi, pn);
		entry.d_fileno = pn->mn_fileno;
		for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i)
			entry.d_name[i] = pn->mn_name[i];
		entry.d_name[i] = 0;
		entry.d_namlen = i;
		switch (pn->mn_type) {
		case mqfstype_root:
		case mqfstype_dir:
		case mqfstype_this:
		case mqfstype_parent:
			entry.d_type = DT_DIR;
			break;
		case mqfstype_file:
			entry.d_type = DT_REG;
			break;
		case mqfstype_symlink:
			entry.d_type = DT_LNK;
			break;
		default:
			panic("%s has unexpected node type: %d", pn->mn_name,
				pn->mn_type);
		}
		if (entry.d_reclen > uio->uio_resid)
			break;
		if (offset >= uio->uio_offset) {
			error = vfs_read_dirent(ap, &entry, offset);
			if (error)
				break;
		}
		offset += entry.d_reclen;
	}
	sx_xunlock(&mi->mi_lock);

	uio->uio_offset = offset;

	if (tmp_ncookies != NULL)
		ap->a_ncookies = tmp_ncookies;

	return (error);
}

#ifdef notyet

#if 0
struct vop_mkdir_args {
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
	struct vattr *a_vap;
};
#endif

/*
 * Create a directory.
 */
static int
mqfs_mkdir(struct vop_mkdir_args *ap)
{
	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
	struct componentname *cnp = ap->a_cnp;
	struct mqfs_node *pd = VTON(ap->a_dvp);
	struct mqfs_node *pn;
	int error;

	if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
		return (ENOTDIR);
	sx_xlock(&mqfs->mi_lock);
	if ((cnp->cn_flags & HASBUF) == 0)
		panic("%s: no name", __func__);
	pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen,
		cnp->cn_cred, ap->a_vap->va_mode);
	if (pn != NULL)
		mqnode_addref(pn);
	sx_xunlock(&mqfs->mi_lock);
	if (pn == NULL) {
		error = ENOSPC;
	} else {
		error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
		mqnode_release(pn);
	}
	return (error);
}

#if 0
struct vop_rmdir_args {
	struct vnode *a_dvp;
	struct vnode *a_vp;
	struct componentname *a_cnp;
};
#endif

/*
 * Remove a directory.
 */
static int
mqfs_rmdir(struct vop_rmdir_args *ap)
{
	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
	struct mqfs_node *pn = VTON(ap->a_vp);
	struct mqfs_node *pt;

	if (pn->mn_type != mqfstype_dir)
		return (ENOTDIR);

	sx_xlock(&mqfs->mi_lock);
	if (pn->mn_deleted) {
		sx_xunlock(&mqfs->mi_lock);
		return (ENOENT);
	}

	pt = LIST_FIRST(&pn->mn_children);
	pt = LIST_NEXT(pt, mn_sibling);
	pt = LIST_NEXT(pt, mn_sibling);
	if (pt != NULL) {
		sx_xunlock(&mqfs->mi_lock);
		return (ENOTEMPTY);
	}
	pt = pn->mn_parent;
	pn->mn_parent = NULL;
	pn->mn_deleted = 1;
	LIST_REMOVE(pn, mn_sibling);
	mqnode_release(pn);
	mqnode_release(pt);
	sx_xunlock(&mqfs->mi_lock);
	cache_purge(ap->a_vp);
	return (0);
}

#endif /* notyet */

/*
 * Allocate a message queue
 */
static struct mqueue *
mqueue_alloc(const struct mq_attr *attr)
{
	struct mqueue *mq;

	if (curmq >= maxmq)
		return (NULL);
	mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO);
	TAILQ_INIT(&mq->mq_msgq);
	if (attr != NULL) {
		mq->mq_maxmsg = attr->mq_maxmsg;
		mq->mq_msgsize = attr->mq_msgsize;
	} else {
		mq->mq_maxmsg = default_maxmsg;
		mq->mq_msgsize = default_msgsize;
	}
	mtx_init(&mq->mq_mutex, "mqueue lock", NULL, MTX_DEF);
	knlist_init_mtx(&mq->mq_rsel.si_note, &mq->mq_mutex);
	knlist_init_mtx(&mq->mq_wsel.si_note, &mq->mq_mutex);
	atomic_add_int(&curmq, 1);
	return (mq);
}

/*
 * Destroy a message queue
 */
static void
mqueue_free(struct mqueue *mq)
{
	struct mqueue_msg *msg;

	while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) {
		TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link);
		free(msg, M_MQUEUEDATA);
	}

	mtx_destroy(&mq->mq_mutex);
	knlist_destroy(&mq->mq_rsel.si_note);
	knlist_destroy(&mq->mq_wsel.si_note);
	uma_zfree(mqueue_zone, mq);
	atomic_add_int(&curmq, -1);
}

/*
 * Load a message from user space
 */
static struct mqueue_msg *
mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio)
{
	struct mqueue_msg *msg;
	size_t len;
	int error;

	len = sizeof(struct mqueue_msg) + msg_size;
	msg = malloc(len, M_MQUEUEDATA, M_WAITOK);
	error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg),
	    msg_size);
	if (error) {
		free(msg, M_MQUEUEDATA);
		msg = NULL;
	} else {
		msg->msg_size = msg_size;
		msg->msg_prio = msg_prio;
	}
	return (msg);
}

/*
 * Save a message to user space
 */
static int
mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio)
{
	int error;

	error = copyout(((char *)msg) + sizeof(*msg), msg_ptr,
	    msg->msg_size);
	if (error == 0 && msg_prio != NULL)
		error = copyout(&msg->msg_prio, msg_prio, sizeof(int));
	return (error);
}

/*
 * Free a message's memory
 */
static __inline void
mqueue_freemsg(struct mqueue_msg *msg)
{
	free(msg, M_MQUEUEDATA);
}

/*
 * Send a message.  If waitok is false, the thread will not be blocked
 * when the queue is full; otherwise the absolute timeout, if any, is
 * honored.
 */
int
mqueue_send(struct mqueue *mq, const char *msg_ptr,
	size_t msg_len, unsigned msg_prio, int waitok,
	const struct timespec *abs_timeout)
{
	struct mqueue_msg *msg;
	struct timespec ets, ts, ts2;
	struct timeval tv;
	int error;

	if (msg_prio >= MQ_PRIO_MAX)
		return (EINVAL);
	if (msg_len > mq->mq_msgsize)
		return (EMSGSIZE);
	msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio);
	if (msg == NULL)
		return (EFAULT);

	/* O_NONBLOCK case */
	if (!waitok) {
		error = _mqueue_send(mq, msg, -1);
		if (error)
			goto bad;
		return (0);
	}

	/* we allow a null timeout (wait forever) */
	if (abs_timeout == NULL) {
		error = _mqueue_send(mq, msg, 0);
		if (error)
			goto bad;
		return (0);
	}

	/* send it before checking time */
	error = _mqueue_send(mq, msg, -1);
	if (error == 0)
		return (0);

	if (error != EAGAIN)
		goto bad;

	error = copyin(abs_timeout, &ets, sizeof(ets));
	if (error != 0)
		goto bad;
	if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) {
		error = EINVAL;
		goto bad;
	}
	for (;;) {
		ts2 = ets;
		getnanotime(&ts);
		timespecsub(&ts2, &ts);
		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			error = ETIMEDOUT;
			break;
		}
		TIMESPEC_TO_TIMEVAL(&tv, &ts2);
		error = _mqueue_send(mq, msg, tvtohz(&tv));
		if (error != ETIMEDOUT)
			break;
	}
	if (error == 0)
		return (0);
bad:
	mqueue_freemsg(msg);
	return (error);
}

/*
 * Common routine to send a message
 */
static int
_mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
{
	struct mqueue_msg *msg2;
	int error = 0;

	mtx_lock(&mq->mq_mutex);
	while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) {
		if (timo < 0) {
			mtx_unlock(&mq->mq_mutex);
			return (EAGAIN);
		}
		mq->mq_senders++;
		error = msleep(&mq->mq_senders, &mq->mq_mutex,
			    PCATCH, "mqsend", timo);
		mq->mq_senders--;
		if (error == EAGAIN)
			error = ETIMEDOUT;
	}
	if (mq->mq_curmsgs >= mq->mq_maxmsg) {
		mtx_unlock(&mq->mq_mutex);
		return (error);
	}
	error = 0;
	if (TAILQ_EMPTY(&mq->mq_msgq)) {
		TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link);
	} else {
		if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) {
			TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link);
		} else {
			TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) {
				if (msg2->msg_prio < msg->msg_prio)
					break;
			}
			TAILQ_INSERT_BEFORE(msg2, msg, msg_link);
		}
	}
	mq->mq_curmsgs++;
	mq->mq_totalbytes += msg->msg_size;
	if (mq->mq_receivers)
		wakeup_one(&mq->mq_receivers);
	else if (mq->mq_notifier != NULL)
		mqueue_send_notification(mq);
	if (mq->mq_flags & MQ_RSEL) {
		mq->mq_flags &= ~MQ_RSEL;
		selwakeup(&mq->mq_rsel);
	}
	KNOTE_LOCKED(&mq->mq_rsel.si_note, 0);
	mtx_unlock(&mq->mq_mutex);
	return (0);
}
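/*
 * Illustrative only: the insertion order maintained by _mqueue_send() and
 * consumed by the receive path gives the usual POSIX semantics, sketched
 * from userland below: the highest-priority message is received first, and
 * messages of equal priority are received in FIFO order.  The descriptor is
 * assumed to be open for reading and writing; error handling is omitted.
 */
#if 0
#include <mqueue.h>

static void
priority_order(mqd_t mqd, char *buf, size_t bufsize)
{
	unsigned int prio;

	mq_send(mqd, "low", 3, 1);
	mq_send(mqd, "high", 4, 5);
	mq_receive(mqd, buf, bufsize, &prio);	/* "high", prio == 5 */
	mq_receive(mqd, buf, bufsize, &prio);	/* "low",  prio == 1 */
}
#endif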
1740 */ 1741 static void 1742 mqueue_send_notification(struct mqueue *mq) 1743 { 1744 struct mqueue_notifier *nt; 1745 struct proc *p; 1746 1747 mtx_assert(&mq->mq_mutex, MA_OWNED); 1748 nt = mq->mq_notifier; 1749 if (nt->nt_sigev.sigev_notify != SIGEV_NONE) { 1750 p = nt->nt_proc; 1751 PROC_LOCK(p); 1752 if (!KSI_ONQ(&nt->nt_ksi)) 1753 psignal_event(p, &nt->nt_sigev, &nt->nt_ksi); 1754 PROC_UNLOCK(p); 1755 } 1756 mq->mq_notifier = NULL; 1757 } 1758 1759 /* 1760 * Get a message. if waitok is false, thread will not be 1761 * blocked if there is no data in queue, otherwise, absolute 1762 * time will be checked. 1763 */ 1764 int 1765 mqueue_receive(struct mqueue *mq, char *msg_ptr, 1766 size_t msg_len, unsigned *msg_prio, int waitok, 1767 const struct timespec *abs_timeout) 1768 { 1769 struct mqueue_msg *msg; 1770 struct timespec ets, ts, ts2; 1771 struct timeval tv; 1772 int error; 1773 1774 if (msg_len < mq->mq_msgsize) 1775 return (EMSGSIZE); 1776 1777 /* O_NONBLOCK case */ 1778 if (!waitok) { 1779 error = _mqueue_recv(mq, &msg, -1); 1780 if (error) 1781 return (error); 1782 goto received; 1783 } 1784 1785 /* we allow a null timeout (wait forever). */ 1786 if (abs_timeout == NULL) { 1787 error = _mqueue_recv(mq, &msg, 0); 1788 if (error) 1789 return (error); 1790 goto received; 1791 } 1792 1793 /* try to get a message before checking time */ 1794 error = _mqueue_recv(mq, &msg, -1); 1795 if (error == 0) 1796 goto received; 1797 1798 if (error != EAGAIN) 1799 return (error); 1800 1801 error = copyin(abs_timeout, &ets, sizeof(ets)); 1802 if (error != 0) 1803 return (error); 1804 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) { 1805 error = EINVAL; 1806 return (error); 1807 } 1808 1809 for (;;) { 1810 ts2 = ets; 1811 getnanotime(&ts); 1812 timespecsub(&ts2, &ts); 1813 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { 1814 error = ETIMEDOUT; 1815 return (error); 1816 } 1817 TIMESPEC_TO_TIMEVAL(&tv, &ts2); 1818 error = _mqueue_recv(mq, &msg, tvtohz(&tv)); 1819 if (error == 0) 1820 break; 1821 if (error != ETIMEDOUT) 1822 return (error); 1823 } 1824 1825 received: 1826 error = mqueue_savemsg(msg, msg_ptr, msg_prio); 1827 if (error == 0) { 1828 curthread->td_retval[0] = msg->msg_size; 1829 curthread->td_retval[1] = 0; 1830 } 1831 mqueue_freemsg(msg); 1832 return (error); 1833 } 1834 1835 /* 1836 * Common routine to receive a message 1837 */ 1838 static int 1839 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo) 1840 { 1841 int error = 0; 1842 1843 mtx_lock(&mq->mq_mutex); 1844 while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) { 1845 if (timo < 0) { 1846 mtx_unlock(&mq->mq_mutex); 1847 return (EAGAIN); 1848 } 1849 mq->mq_receivers++; 1850 error = msleep(&mq->mq_receivers, &mq->mq_mutex, 1851 PCATCH, "mqrecv", timo); 1852 mq->mq_receivers--; 1853 if (error == EAGAIN) 1854 error = ETIMEDOUT; 1855 } 1856 if (*msg != NULL) { 1857 error = 0; 1858 TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link); 1859 mq->mq_curmsgs--; 1860 mq->mq_totalbytes -= (*msg)->msg_size; 1861 if (mq->mq_senders) 1862 wakeup_one(&mq->mq_senders); 1863 if (mq->mq_flags & MQ_WSEL) { 1864 mq->mq_flags &= ~MQ_WSEL; 1865 selwakeup(&mq->mq_wsel); 1866 } 1867 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0); 1868 } 1869 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 && 1870 !TAILQ_EMPTY(&mq->mq_msgq)) { 1871 mqueue_send_notification(mq); 1872 } 1873 mtx_unlock(&mq->mq_mutex); 1874 return (error); 1875 } 1876 1877 static __inline struct mqueue_notifier * 1878 notifier_alloc(void) 1879 { 1880 return 
static __inline struct mqueue_notifier *
notifier_alloc(void)
{
	return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO));
}

static __inline void
notifier_free(struct mqueue_notifier *p)
{
	uma_zfree(mqnoti_zone, p);
}

static struct mqueue_notifier *
notifier_search(struct proc *p, int fd)
{
	struct mqueue_notifier *nt;

	LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) {
		if (nt->nt_ksi.ksi_mqd == fd)
			break;
	}
	return (nt);
}

static __inline void
notifier_insert(struct proc *p, struct mqueue_notifier *nt)
{
	LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link);
}

static __inline void
notifier_delete(struct proc *p, struct mqueue_notifier *nt)
{
	LIST_REMOVE(nt, nt_link);
	notifier_free(nt);
}

static void
notifier_remove(struct proc *p, struct mqueue *mq, int fd)
{
	struct mqueue_notifier *nt;

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	PROC_LOCK(p);
	nt = notifier_search(p, fd);
	if (nt != NULL) {
		if (mq->mq_notifier == nt)
			mq->mq_notifier = NULL;
		sigqueue_take(&nt->nt_ksi);
		notifier_delete(p, nt);
	}
	PROC_UNLOCK(p);
}

/*
 * Syscall to open a message queue.
 */
int
kmq_open(struct thread *td, struct kmq_open_args *uap)
{
	char path[MQFS_NAMELEN + 1];
	struct mq_attr attr, *pattr;
	struct mqfs_node *pn;
	struct filedesc *fdp;
	struct file *fp;
	struct mqueue *mq;
	int fd, error, len, flags, cmode;

	if ((uap->flags & O_ACCMODE) == O_ACCMODE)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	flags = FFLAGS(uap->flags);
	cmode = (((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT);
	mq = NULL;
	if ((flags & O_CREAT) && (uap->attr != NULL)) {
		error = copyin(uap->attr, &attr, sizeof(attr));
		if (error)
			return (error);
		if (attr.mq_maxmsg <= 0 || attr.mq_maxmsg > maxmsg)
			return (EINVAL);
		if (attr.mq_msgsize <= 0 || attr.mq_msgsize > maxmsgsize)
			return (EINVAL);
		pattr = &attr;
	} else
		pattr = NULL;

	error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL);
	if (error)
		return (error);

	/*
	 * The first character of name must be a slash (/) character
	 * and the remaining characters of name cannot include any slash
	 * characters.
	 */
	len = strlen(path);
	if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL)
		return (EINVAL);

	error = falloc(td, &fp, &fd);
	if (error)
		return (error);

	sx_xlock(&mqfs_data.mi_lock);
	pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1);
	if (pn == NULL) {
		if (!(flags & O_CREAT)) {
			error = ENOENT;
		} else {
			mq = mqueue_alloc(pattr);
			if (mq == NULL) {
				error = ENFILE;
			} else {
				pn = mqfs_create_file(mqfs_data.mi_root,
					path + 1, len - 1, td->td_ucred,
					cmode);
				if (pn == NULL) {
					error = ENOSPC;
					mqueue_free(mq);
				}
			}
		}

		if (error == 0) {
			pn->mn_data = mq;
		}
	} else {
		if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) {
			error = EEXIST;
		} else {
			accmode_t accmode = 0;

			if (flags & FREAD)
				accmode |= VREAD;
			if (flags & FWRITE)
				accmode |= VWRITE;
			error = vaccess(VREG, pn->mn_mode, pn->mn_uid,
			    pn->mn_gid, accmode, td->td_ucred, NULL);
		}
	}

	if (error) {
		sx_xunlock(&mqfs_data.mi_lock);
		fdclose(fdp, fp, fd, td);
		fdrop(fp, td);
		return (error);
	}

	mqnode_addref(pn);
	sx_xunlock(&mqfs_data.mi_lock);

	finit(fp, flags & (FREAD | FWRITE | O_NONBLOCK), DTYPE_MQUEUE, pn,
	    &mqueueops);

	FILEDESC_XLOCK(fdp);
	if (fdp->fd_ofiles[fd] == fp)
		fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
	FILEDESC_XUNLOCK(fdp);
	td->td_retval[0] = fd;
	fdrop(fp, td);
	return (0);
}

/*
 * Syscall to unlink a message queue.
 */
int
kmq_unlink(struct thread *td, struct kmq_unlink_args *uap)
{
	char path[MQFS_NAMELEN+1];
	struct mqfs_node *pn;
	int error, len;

	error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL);
	if (error)
		return (error);

	len = strlen(path);
	if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL)
		return (EINVAL);

	sx_xlock(&mqfs_data.mi_lock);
	pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1);
	if (pn != NULL)
		error = do_unlink(pn, td->td_ucred);
	else
		error = ENOENT;
	sx_xunlock(&mqfs_data.mi_lock);
	return (error);
}

typedef int (*_fgetf)(struct thread *, int, struct file **);

/*
 * Get a message queue from a file descriptor.
 */
static int
_getmq(struct thread *td, int fd, _fgetf func,
	struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq)
{
	struct mqfs_node *pn;
	int error;

	error = func(td, fd, fpp);
	if (error)
		return (error);
	if (&mqueueops != (*fpp)->f_ops) {
		fdrop(*fpp, td);
		return (EBADF);
	}
	pn = (*fpp)->f_data;
	if (ppn)
		*ppn = pn;
	if (pmq)
		*pmq = pn->mn_data;
	return (0);
}

static __inline int
getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn,
	struct mqueue **pmq)
{
	return _getmq(td, fd, fget, fpp, ppn, pmq);
}

static __inline int
getmq_read(struct thread *td, int fd, struct file **fpp,
	struct mqfs_node **ppn, struct mqueue **pmq)
{
	return _getmq(td, fd, fget_read, fpp, ppn, pmq);
}

static __inline int
getmq_write(struct thread *td, int fd, struct file **fpp,
	struct mqfs_node **ppn, struct mqueue **pmq)
{
	return _getmq(td, fd, fget_write, fpp, ppn, pmq);
}
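/*
 * Illustrative only: kmq_setattr() below backs both mq_getattr(3) and
 * mq_setattr(3).  Only the O_NONBLOCK bit in mq_flags is changeable; the
 * other attributes are fixed at queue creation time and are simply reported
 * back, as sketched here.  The descriptor is hypothetical.
 */
#if 0
#include <mqueue.h>
#include <fcntl.h>
#include <string.h>

static void
toggle_nonblock(mqd_t mqd)
{
	struct mq_attr attr, oattr;

	mq_getattr(mqd, &attr);		/* reports mq_maxmsg, mq_msgsize, ... */
	memset(&attr, 0, sizeof(attr));
	attr.mq_flags = O_NONBLOCK;	/* only mq_flags is honored */
	mq_setattr(mqd, &attr, &oattr);	/* oattr holds the previous values */
}
#endif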
int
kmq_setattr(struct thread *td, struct kmq_setattr_args *uap)
{
	struct mqueue *mq;
	struct file *fp;
	struct mq_attr attr, oattr;
	u_int oflag, flag;
	int error;

	if (uap->attr) {
		error = copyin(uap->attr, &attr, sizeof(attr));
		if (error)
			return (error);
		if (attr.mq_flags & ~O_NONBLOCK)
			return (EINVAL);
	}
	error = getmq(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
	oattr.mq_maxmsg = mq->mq_maxmsg;
	oattr.mq_msgsize = mq->mq_msgsize;
	oattr.mq_curmsgs = mq->mq_curmsgs;
	if (uap->attr) {
		do {
			oflag = flag = fp->f_flag;
			flag &= ~O_NONBLOCK;
			flag |= (attr.mq_flags & O_NONBLOCK);
		} while (atomic_cmpset_int(&fp->f_flag, oflag, flag) == 0);
	} else
		oflag = fp->f_flag;
	oattr.mq_flags = (O_NONBLOCK & oflag);
	fdrop(fp, td);
	if (uap->oattr)
		error = copyout(&oattr, uap->oattr, sizeof(oattr));
	return (error);
}

int
kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap)
{
	struct mqueue *mq;
	struct file *fp;
	int error;
	int waitok;

	error = getmq_read(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
	waitok = !(fp->f_flag & O_NONBLOCK);
	error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len,
		uap->msg_prio, waitok, uap->abs_timeout);
	fdrop(fp, td);
	return (error);
}

int
kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap)
{
	struct mqueue *mq;
	struct file *fp;
	int error, waitok;

	error = getmq_write(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
	waitok = !(fp->f_flag & O_NONBLOCK);
	error = mqueue_send(mq, uap->msg_ptr, uap->msg_len,
		uap->msg_prio, waitok, uap->abs_timeout);
	fdrop(fp, td);
	return (error);
}

int
kmq_notify(struct thread *td, struct kmq_notify_args *uap)
{
	struct sigevent ev;
	struct filedesc *fdp;
	struct proc *p;
	struct mqueue *mq;
	struct file *fp;
	struct mqueue_notifier *nt, *newnt = NULL;
	int error;

	p = td->td_proc;
	fdp = td->td_proc->p_fd;
	if (uap->sigev) {
		error = copyin(uap->sigev, &ev, sizeof(ev));
		if (error)
			return (error);
		if (ev.sigev_notify != SIGEV_SIGNAL &&
		    ev.sigev_notify != SIGEV_THREAD_ID &&
		    ev.sigev_notify != SIGEV_NONE)
			return (EINVAL);
		if ((ev.sigev_notify == SIGEV_SIGNAL ||
		    ev.sigev_notify == SIGEV_THREAD_ID) &&
		    !_SIG_VALID(ev.sigev_signo))
			return (EINVAL);
	}
	error = getmq(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
again:
	FILEDESC_SLOCK(fdp);
	if (fget_locked(fdp, uap->mqd) != fp) {
		FILEDESC_SUNLOCK(fdp);
		error = EBADF;
		goto out;
	}
	mtx_lock(&mq->mq_mutex);
	FILEDESC_SUNLOCK(fdp);
	if (uap->sigev != NULL) {
		if (mq->mq_notifier != NULL) {
			error = EBUSY;
		} else {
			PROC_LOCK(p);
			nt = notifier_search(p, uap->mqd);
			if (nt == NULL) {
				if (newnt == NULL) {
					PROC_UNLOCK(p);
					mtx_unlock(&mq->mq_mutex);
					newnt = notifier_alloc();
					goto again;
				}
			}

			if (nt != NULL) {
				sigqueue_take(&nt->nt_ksi);
				if (newnt != NULL) {
					notifier_free(newnt);
					newnt = NULL;
				}
			} else {
				nt = newnt;
				newnt = NULL;
				ksiginfo_init(&nt->nt_ksi);
				nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT;
				nt->nt_ksi.ksi_code = SI_MESGQ;
				nt->nt_proc = p;
				nt->nt_ksi.ksi_mqd = uap->mqd;
				notifier_insert(p, nt);
			}
			nt->nt_sigev = ev;
			mq->mq_notifier = nt;
			PROC_UNLOCK(p);
			/*
			 * If there are no receivers and the message queue
			 * is not empty, send the notification as soon as
			 * possible.
			 */
			if (mq->mq_receivers == 0 &&
			    !TAILQ_EMPTY(&mq->mq_msgq))
				mqueue_send_notification(mq);
		}
	} else {
		notifier_remove(p, mq, uap->mqd);
	}
	mtx_unlock(&mq->mq_mutex);

out:
	fdrop(fp, td);
	if (newnt != NULL)
		notifier_free(newnt);
	return (error);
}
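/*
 * Illustrative only: a userland mq_notify(3) registration corresponding to
 * the SIGEV_SIGNAL path handled above.  Only one process at a time may be
 * registered (EBUSY otherwise), and the registration is consumed when the
 * notification fires, so it must be re-armed after each delivery.  The
 * signal number chosen here is hypothetical.
 */
#if 0
#include <mqueue.h>
#include <signal.h>
#include <string.h>

static void
arm_notification(mqd_t mqd)
{
	struct sigevent ev;

	memset(&ev, 0, sizeof(ev));
	ev.sigev_notify = SIGEV_SIGNAL;
	ev.sigev_signo = SIGUSR1;	/* delivered when the empty queue
					 * receives a message and nobody is
					 * blocked in mq_receive() */
	mq_notify(mqd, &ev);
}
#endif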
static void
mqueue_fdclose(struct thread *td, int fd, struct file *fp)
{
	struct filedesc *fdp;
	struct mqueue *mq;

	fdp = td->td_proc->p_fd;
	FILEDESC_LOCK_ASSERT(fdp);

	if (fp->f_ops == &mqueueops) {
		mq = FPTOMQ(fp);
		mtx_lock(&mq->mq_mutex);
		notifier_remove(td->td_proc, mq, fd);

		/* have to wake up threads in the same process */
		if (mq->mq_flags & MQ_RSEL) {
			mq->mq_flags &= ~MQ_RSEL;
			selwakeup(&mq->mq_rsel);
		}
		if (mq->mq_flags & MQ_WSEL) {
			mq->mq_flags &= ~MQ_WSEL;
			selwakeup(&mq->mq_wsel);
		}
		mtx_unlock(&mq->mq_mutex);
	}
}

static void
mq_proc_exit(void *arg __unused, struct proc *p)
{
	struct filedesc *fdp;
	struct file *fp;
	struct mqueue *mq;
	int i;

	fdp = p->p_fd;
	FILEDESC_SLOCK(fdp);
	for (i = 0; i < fdp->fd_nfiles; ++i) {
		fp = fget_locked(fdp, i);
		if (fp != NULL && fp->f_ops == &mqueueops) {
			mq = FPTOMQ(fp);
			mtx_lock(&mq->mq_mutex);
			notifier_remove(p, FPTOMQ(fp), i);
			mtx_unlock(&mq->mq_mutex);
		}
	}
	FILEDESC_SUNLOCK(fdp);
	KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left"));
}

static int
mqf_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (EOPNOTSUPP);
}

static int
mqf_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
	int flags, struct thread *td)
{
	return (EOPNOTSUPP);
}

static int
mqf_truncate(struct file *fp, off_t length, struct ucred *active_cred,
	struct thread *td)
{

	return (EINVAL);
}

static int
mqf_ioctl(struct file *fp, u_long cmd, void *data,
	struct ucred *active_cred, struct thread *td)
{
	return (ENOTTY);
}

static int
mqf_poll(struct file *fp, int events, struct ucred *active_cred,
	struct thread *td)
{
	struct mqueue *mq = FPTOMQ(fp);
	int revents = 0;

	mtx_lock(&mq->mq_mutex);
	if (events & (POLLIN | POLLRDNORM)) {
		if (mq->mq_curmsgs) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			mq->mq_flags |= MQ_RSEL;
			selrecord(td, &mq->mq_rsel);
		}
	}
	if (events & POLLOUT) {
		if (mq->mq_curmsgs < mq->mq_maxmsg)
			revents |= POLLOUT;
		else {
			mq->mq_flags |= MQ_WSEL;
			selrecord(td, &mq->mq_wsel);
		}
	}
	mtx_unlock(&mq->mq_mutex);
	return (revents);
}
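/*
 * Illustrative only: because a message queue descriptor is an ordinary file
 * descriptor backed by mqueueops, it can be multiplexed with poll(2) (via
 * mqf_poll() above) or kevent(2) (via mqf_kqfilter() below): readable while
 * mq_curmsgs > 0, writable while the queue is not full.  Whether the mqd_t
 * exposed by the userland library is the raw descriptor is a library
 * detail; the sketch below assumes a plain integer descriptor.
 */
#if 0
#include <poll.h>

static int
wait_for_message(int mqfd)
{
	struct pollfd pfd;

	pfd.fd = mqfd;
	pfd.events = POLLIN;
	return (poll(&pfd, 1, -1));	/* blocks until a message arrives */
}
#endif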
static int
mqf_close(struct file *fp, struct thread *td)
{
	struct mqfs_node *pn;

	fp->f_ops = &badfileops;
	pn = fp->f_data;
	fp->f_data = NULL;
	sx_xlock(&mqfs_data.mi_lock);
	mqnode_release(pn);
	sx_xunlock(&mqfs_data.mi_lock);
	return (0);
}

static int
mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
	struct thread *td)
{
	struct mqfs_node *pn = fp->f_data;

	bzero(st, sizeof *st);
	st->st_atimespec = pn->mn_atime;
	st->st_mtimespec = pn->mn_mtime;
	st->st_ctimespec = pn->mn_ctime;
	st->st_birthtimespec = pn->mn_birth;
	st->st_uid = pn->mn_uid;
	st->st_gid = pn->mn_gid;
	st->st_mode = S_IFIFO | pn->mn_mode;
	return (0);
}

static int
mqf_kqfilter(struct file *fp, struct knote *kn)
{
	struct mqueue *mq = FPTOMQ(fp);
	int error = 0;

	if (kn->kn_filter == EVFILT_READ) {
		kn->kn_fop = &mq_rfiltops;
		knlist_add(&mq->mq_rsel.si_note, kn, 0);
	} else if (kn->kn_filter == EVFILT_WRITE) {
		kn->kn_fop = &mq_wfiltops;
		knlist_add(&mq->mq_wsel.si_note, kn, 0);
	} else
		error = EINVAL;
	return (error);
}

static void
filt_mqdetach(struct knote *kn)
{
	struct mqueue *mq = FPTOMQ(kn->kn_fp);

	if (kn->kn_filter == EVFILT_READ)
		knlist_remove(&mq->mq_rsel.si_note, kn, 0);
	else if (kn->kn_filter == EVFILT_WRITE)
		knlist_remove(&mq->mq_wsel.si_note, kn, 0);
	else
		panic("filt_mqdetach");
}

static int
filt_mqread(struct knote *kn, long hint)
{
	struct mqueue *mq = FPTOMQ(kn->kn_fp);

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	return (mq->mq_curmsgs != 0);
}

static int
filt_mqwrite(struct knote *kn, long hint)
{
	struct mqueue *mq = FPTOMQ(kn->kn_fp);

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	return (mq->mq_curmsgs < mq->mq_maxmsg);
}

static struct fileops mqueueops = {
	.fo_read	= mqf_read,
	.fo_write	= mqf_write,
	.fo_truncate	= mqf_truncate,
	.fo_ioctl	= mqf_ioctl,
	.fo_poll	= mqf_poll,
	.fo_kqfilter	= mqf_kqfilter,
	.fo_stat	= mqf_stat,
	.fo_close	= mqf_close
};

static struct vop_vector mqfs_vnodeops = {
	.vop_default		= &default_vnodeops,
	.vop_access		= mqfs_access,
	.vop_cachedlookup	= mqfs_lookup,
	.vop_lookup		= vfs_cache_lookup,
	.vop_reclaim		= mqfs_reclaim,
	.vop_create		= mqfs_create,
	.vop_remove		= mqfs_remove,
	.vop_inactive		= mqfs_inactive,
	.vop_open		= mqfs_open,
	.vop_close		= mqfs_close,
	.vop_getattr		= mqfs_getattr,
	.vop_setattr		= mqfs_setattr,
	.vop_read		= mqfs_read,
	.vop_write		= VOP_EOPNOTSUPP,
	.vop_readdir		= mqfs_readdir,
	.vop_mkdir		= VOP_EOPNOTSUPP,
	.vop_rmdir		= VOP_EOPNOTSUPP
};

static struct vfsops mqfs_vfsops = {
	.vfs_init		= mqfs_init,
	.vfs_uninit		= mqfs_uninit,
	.vfs_mount		= mqfs_mount,
	.vfs_unmount		= mqfs_unmount,
	.vfs_root		= mqfs_root,
	.vfs_statfs		= mqfs_statfs,
};

SYSCALL_MODULE_HELPER(kmq_open);
SYSCALL_MODULE_HELPER(kmq_setattr);
SYSCALL_MODULE_HELPER(kmq_timedsend);
SYSCALL_MODULE_HELPER(kmq_timedreceive);
SYSCALL_MODULE_HELPER(kmq_notify);
SYSCALL_MODULE_HELPER(kmq_unlink);

VFS_SET(mqfs_vfsops, mqueuefs, VFCF_SYNTHETIC);
MODULE_VERSION(mqueuefs, 1);