/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * POSIX message queue implementation.
 *
 * 1) A mqueue filesystem can be mounted.  Each message queue then appears
 *    as a file in the mounted directory, so the user can change a queue's
 *    permissions and ownership, or remove a queue.  Manually creating a
 *    file in the directory creates a message queue in the kernel with the
 *    same name and the default message queue attributes; this method is
 *    not recommended, since the mq_open() syscall allows the user to
 *    specify different attributes.  The filesystem can be mounted multiple
 *    times at different mount points, but all mounts show the same
 *    contents.
 *
 * 2) Standard POSIX message queue API.  The syscalls do not go through the
 *    VFS layer but operate directly on the internal data structures, which
 *    allows the IPC facility to be used without mounting the mqueue
 *    filesystem.
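 *
 * A minimal sketch of typical userland usage (illustrative only: these are
 * the standard <mqueue.h> wrappers, which userland maps onto the kmq_*
 * syscalls implemented below; the queue name, sizes, and lack of error
 * handling are made up for the example):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t mqd = mq_open("/myqueue", O_RDWR | O_CREAT, 0600, &attr);
 *	char buf[128];                     (at least mq_msgsize bytes)
 *	mq_send(mqd, "hello", 6, 1);       (send 6 bytes at priority 1)
 *	mq_receive(mqd, buf, sizeof(buf), NULL);
 *	mq_close(mqd);
 *	mq_unlink("/myqueue");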
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/buf.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/mqueue.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/posix4.h>
#include <sys/poll.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sysproto.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <machine/atomic.h>

/*
 * Limits and constants
 */
#define	MQFS_NAMELEN	NAME_MAX
#define	MQFS_DELEN	(8 + MQFS_NAMELEN)

/* node types */
typedef enum {
	mqfstype_none = 0,
	mqfstype_root,
	mqfstype_dir,
	mqfstype_this,
	mqfstype_parent,
	mqfstype_file,
	mqfstype_symlink,
} mqfs_type_t;

struct mqfs_node;

/*
 * mqfs_info: describes a mqfs instance
 */
struct mqfs_info {
	struct sx		mi_lock;
	struct mqfs_node	*mi_root;
	struct unrhdr		*mi_unrhdr;
};

struct mqfs_vdata {
	LIST_ENTRY(mqfs_vdata)	mv_link;
	struct mqfs_node	*mv_node;
	struct vnode		*mv_vnode;
	struct task		mv_task;
};

/*
 * mqfs_node: describes a node (file or directory) within a mqfs
 */
struct mqfs_node {
	char			mn_name[MQFS_NAMELEN+1];
	struct mqfs_info	*mn_info;
	struct mqfs_node	*mn_parent;
	LIST_HEAD(,mqfs_node)	mn_children;
	LIST_ENTRY(mqfs_node)	mn_sibling;
	LIST_HEAD(,mqfs_vdata)	mn_vnodes;
	int			mn_refcount;
	mqfs_type_t		mn_type;
	int			mn_deleted;
	u_int32_t		mn_fileno;
	void			*mn_data;
	struct timespec		mn_birth;
	struct timespec		mn_ctime;
	struct timespec		mn_atime;
	struct timespec		mn_mtime;
	uid_t			mn_uid;
	gid_t			mn_gid;
	int			mn_mode;
};

#define	VTON(vp)	(((struct mqfs_vdata *)((vp)->v_data))->mv_node)
#define	VTOMQ(vp)	((struct mqueue *)(VTON(vp)->mn_data))
#define	VFSTOMQFS(m)	((struct mqfs_info *)((m)->mnt_data))
#define	FPTOMQ(fp)	((struct mqueue *)(((struct mqfs_node *) \
				(fp)->f_data)->mn_data))

TAILQ_HEAD(msgq, mqueue_msg);

struct mqueue;

struct mqueue_notifier {
	LIST_ENTRY(mqueue_notifier)	nt_link;
	struct sigevent			nt_sigev;
	ksiginfo_t			nt_ksi;
	struct proc			*nt_proc;
};

struct mqueue {
	struct mtx	mq_mutex;
	int		mq_flags;
	long		mq_maxmsg;
	long		mq_msgsize;
	long		mq_curmsgs;
	long		mq_totalbytes;
	struct msgq	mq_msgq;
	int		mq_receivers;
	int		mq_senders;
	struct selinfo	mq_rsel;
	struct selinfo	mq_wsel;
	struct mqueue_notifier	*mq_notifier;
};

#define	MQ_RSEL		0x01
#define	MQ_WSEL		0x02

struct mqueue_msg {
	TAILQ_ENTRY(mqueue_msg)	msg_link;
	unsigned int	msg_prio;
	unsigned int	msg_size;
	/* following real data...
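	 * The message body is stored immediately after this header in the
	 * same allocation; mqueue_loadmsg() copies it in from user space and
	 * mqueue_savemsg() copies it back out.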
	 */
};

SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW, 0,
	"POSIX real time message queue");

static int	default_maxmsg = 10;
static int	default_msgsize = 1024;

static int	maxmsg = 100;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW,
    &maxmsg, 0, "Default maximum messages in queue");
static int	maxmsgsize = 16384;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW,
    &maxmsgsize, 0, "Default maximum message size");
static int	maxmq = 100;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW,
    &maxmq, 0, "maximum message queues");
static int	curmq = 0;
SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW,
    &curmq, 0, "current message queue number");
static int	unloadable = 0;
static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data");

static eventhandler_tag exit_tag;

/* Only one instance per system */
static struct mqfs_info		mqfs_data;
static uma_zone_t		mqnode_zone;
static uma_zone_t		mqueue_zone;
static uma_zone_t		mvdata_zone;
static uma_zone_t		mqnoti_zone;
static struct vop_vector	mqfs_vnodeops;
static struct fileops		mqueueops;

/*
 * Directory structure construction and manipulation
 */
#ifdef notyet
static struct mqfs_node	*mqfs_create_dir(struct mqfs_node *parent,
	const char *name, int namelen, struct ucred *cred, int mode);
static struct mqfs_node	*mqfs_create_link(struct mqfs_node *parent,
	const char *name, int namelen, struct ucred *cred, int mode);
#endif

static struct mqfs_node	*mqfs_create_file(struct mqfs_node *parent,
	const char *name, int namelen, struct ucred *cred, int mode);
static int	mqfs_destroy(struct mqfs_node *mn);
static void	mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn);
static void	mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn);
static int	mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn);

/*
 * Message queue construction and manipulation
 */
static struct mqueue	*mqueue_alloc(const struct mq_attr *attr);
static void	mqueue_free(struct mqueue *mq);
static int	mqueue_send(struct mqueue *mq, const char *msg_ptr,
	size_t msg_len, unsigned msg_prio, int waitok,
	const struct timespec *abs_timeout);
static int	mqueue_receive(struct mqueue *mq, char *msg_ptr,
	size_t msg_len, unsigned *msg_prio, int waitok,
	const struct timespec *abs_timeout);
static int	_mqueue_send(struct mqueue *mq, struct mqueue_msg *msg,
	int timo);
static int	_mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg,
	int timo);
static void	mqueue_send_notification(struct mqueue *mq);
static void	mqueue_fdclose(struct thread *td, int fd, struct file *fp);
static void	mq_proc_exit(void *arg, struct proc *p);

/*
 * kqueue filters
 */
static void	filt_mqdetach(struct knote *kn);
static int	filt_mqread(struct knote *kn, long hint);
static int	filt_mqwrite(struct knote *kn, long hint);

struct filterops mq_rfiltops =
	{ 1, NULL, filt_mqdetach, filt_mqread };
struct filterops mq_wfiltops =
	{ 1, NULL, filt_mqdetach, filt_mqwrite };

/*
 * Initialize fileno bitmap
 */
static void
mqfs_fileno_init(struct mqfs_info *mi)
{
	struct unrhdr *up;

	up = new_unrhdr(1, INT_MAX, NULL);
	mi->mi_unrhdr = up;
}

/*
 * Tear down fileno bitmap
 */
static void
mqfs_fileno_uninit(struct mqfs_info *mi)
281 { 282 struct unrhdr *up; 283 284 up = mi->mi_unrhdr; 285 mi->mi_unrhdr = NULL; 286 delete_unrhdr(up); 287 } 288 289 /* 290 * Allocate a file number 291 */ 292 static void 293 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn) 294 { 295 /* make sure our parent has a file number */ 296 if (mn->mn_parent && !mn->mn_parent->mn_fileno) 297 mqfs_fileno_alloc(mi, mn->mn_parent); 298 299 switch (mn->mn_type) { 300 case mqfstype_root: 301 case mqfstype_dir: 302 case mqfstype_file: 303 case mqfstype_symlink: 304 mn->mn_fileno = alloc_unr(mi->mi_unrhdr); 305 break; 306 case mqfstype_this: 307 KASSERT(mn->mn_parent != NULL, 308 ("mqfstype_this node has no parent")); 309 mn->mn_fileno = mn->mn_parent->mn_fileno; 310 break; 311 case mqfstype_parent: 312 KASSERT(mn->mn_parent != NULL, 313 ("mqfstype_parent node has no parent")); 314 if (mn->mn_parent == mi->mi_root) { 315 mn->mn_fileno = mn->mn_parent->mn_fileno; 316 break; 317 } 318 KASSERT(mn->mn_parent->mn_parent != NULL, 319 ("mqfstype_parent node has no grandparent")); 320 mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno; 321 break; 322 default: 323 KASSERT(0, 324 ("mqfs_fileno_alloc() called for unknown type node: %d", 325 mn->mn_type)); 326 break; 327 } 328 } 329 330 /* 331 * Release a file number 332 */ 333 static void 334 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn) 335 { 336 switch (mn->mn_type) { 337 case mqfstype_root: 338 case mqfstype_dir: 339 case mqfstype_file: 340 case mqfstype_symlink: 341 free_unr(mi->mi_unrhdr, mn->mn_fileno); 342 break; 343 case mqfstype_this: 344 case mqfstype_parent: 345 /* ignore these, as they don't "own" their file number */ 346 break; 347 default: 348 KASSERT(0, 349 ("mqfs_fileno_free() called for unknown type node: %d", 350 mn->mn_type)); 351 break; 352 } 353 } 354 355 static __inline struct mqfs_node * 356 mqnode_alloc(void) 357 { 358 return uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO); 359 } 360 361 static __inline void 362 mqnode_free(struct mqfs_node *node) 363 { 364 uma_zfree(mqnode_zone, node); 365 } 366 367 static __inline void 368 mqnode_addref(struct mqfs_node *node) 369 { 370 atomic_fetchadd_int(&node->mn_refcount, 1); 371 } 372 373 static __inline void 374 mqnode_release(struct mqfs_node *node) 375 { 376 int old, exp; 377 378 old = atomic_fetchadd_int(&node->mn_refcount, -1); 379 if (node->mn_type == mqfstype_dir || 380 node->mn_type == mqfstype_root) 381 exp = 3; /* include . and .. 
*/ 382 else 383 exp = 1; 384 if (old == exp) 385 mqfs_destroy(node); 386 } 387 388 /* 389 * Add a node to a directory 390 */ 391 static int 392 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node) 393 { 394 KASSERT(parent != NULL, ("%s(): parent is NULL", __func__)); 395 KASSERT(parent->mn_info != NULL, 396 ("%s(): parent has no mn_info", __func__)); 397 KASSERT(parent->mn_type == mqfstype_dir || 398 parent->mn_type == mqfstype_root, 399 ("%s(): parent is not a directory", __func__)); 400 401 node->mn_info = parent->mn_info; 402 node->mn_parent = parent; 403 LIST_INIT(&node->mn_children); 404 LIST_INIT(&node->mn_vnodes); 405 LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling); 406 mqnode_addref(parent); 407 return (0); 408 } 409 410 static struct mqfs_node * 411 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode, 412 int nodetype) 413 { 414 struct mqfs_node *node; 415 416 node = mqnode_alloc(); 417 strncpy(node->mn_name, name, namelen); 418 node->mn_type = nodetype; 419 node->mn_refcount = 1; 420 getnanotime(&node->mn_birth); 421 node->mn_ctime = node->mn_atime = node->mn_mtime 422 = node->mn_birth; 423 node->mn_uid = cred->cr_uid; 424 node->mn_gid = cred->cr_gid; 425 node->mn_mode = mode; 426 return (node); 427 } 428 429 /* 430 * Create a file 431 */ 432 static struct mqfs_node * 433 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen, 434 struct ucred *cred, int mode) 435 { 436 struct mqfs_node *node; 437 438 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file); 439 if (mqfs_add_node(parent, node) != 0) { 440 mqnode_free(node); 441 return (NULL); 442 } 443 return (node); 444 } 445 446 /* 447 * Add . and .. to a directory 448 */ 449 static int 450 mqfs_fixup_dir(struct mqfs_node *parent) 451 { 452 struct mqfs_node *dir; 453 454 dir = mqnode_alloc(); 455 dir->mn_name[0] = '.'; 456 dir->mn_type = mqfstype_this; 457 dir->mn_refcount = 1; 458 if (mqfs_add_node(parent, dir) != 0) { 459 mqnode_free(dir); 460 return (-1); 461 } 462 463 dir = mqnode_alloc(); 464 dir->mn_name[0] = dir->mn_name[1] = '.'; 465 dir->mn_type = mqfstype_parent; 466 dir->mn_refcount = 1; 467 468 if (mqfs_add_node(parent, dir) != 0) { 469 mqnode_free(dir); 470 return (-1); 471 } 472 473 return (0); 474 } 475 476 #ifdef notyet 477 478 /* 479 * Create a directory 480 */ 481 static struct mqfs_node * 482 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen, 483 struct ucred *cred, int mode) 484 { 485 struct mqfs_node *node; 486 487 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir); 488 if (mqfs_add_node(parent, node) != 0) { 489 mqnode_free(node); 490 return (NULL); 491 } 492 493 if (mqfs_fixup_dir(node) != 0) { 494 mqfs_destroy(node); 495 return (NULL); 496 } 497 return (node); 498 } 499 500 /* 501 * Create a symlink 502 */ 503 static struct mqfs_node * 504 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen, 505 struct ucred *cred, int mode) 506 { 507 struct mqfs_node *node; 508 509 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink); 510 if (mqfs_add_node(parent, node) != 0) { 511 mqnode_free(node); 512 return (NULL); 513 } 514 return (node); 515 } 516 517 #endif 518 519 /* 520 * Destroy a node or a tree of nodes 521 */ 522 static int 523 mqfs_destroy(struct mqfs_node *node) 524 { 525 struct mqfs_node *parent; 526 527 KASSERT(node != NULL, 528 ("%s(): node is NULL", __func__)); 529 KASSERT(node->mn_info != NULL, 530 ("%s(): node has no mn_info", __func__)); 531 532 /* destroy 
children */ 533 if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root) 534 while (! LIST_EMPTY(&node->mn_children)) 535 mqfs_destroy(LIST_FIRST(&node->mn_children)); 536 537 /* unlink from parent */ 538 if ((parent = node->mn_parent) != NULL) { 539 KASSERT(parent->mn_info == node->mn_info, 540 ("%s(): parent has different mn_info", __func__)); 541 LIST_REMOVE(node, mn_sibling); 542 } 543 544 if (node->mn_fileno != 0) 545 mqfs_fileno_free(node->mn_info, node); 546 if (node->mn_data != NULL) 547 mqueue_free(node->mn_data); 548 mqnode_free(node); 549 return (0); 550 } 551 552 /* 553 * Mount a mqfs instance 554 */ 555 static int 556 mqfs_mount(struct mount *mp, struct thread *td) 557 { 558 struct statfs *sbp; 559 560 if (mp->mnt_flag & MNT_UPDATE) 561 return (EOPNOTSUPP); 562 563 mp->mnt_data = &mqfs_data; 564 MNT_ILOCK(mp); 565 mp->mnt_flag |= MNT_LOCAL; 566 mp->mnt_kern_flag |= MNTK_MPSAFE; 567 MNT_IUNLOCK(mp); 568 vfs_getnewfsid(mp); 569 570 sbp = &mp->mnt_stat; 571 vfs_mountedfrom(mp, "mqueue"); 572 sbp->f_bsize = PAGE_SIZE; 573 sbp->f_iosize = PAGE_SIZE; 574 sbp->f_blocks = 1; 575 sbp->f_bfree = 0; 576 sbp->f_bavail = 0; 577 sbp->f_files = 1; 578 sbp->f_ffree = 0; 579 return (0); 580 } 581 582 /* 583 * Unmount a mqfs instance 584 */ 585 static int 586 mqfs_unmount(struct mount *mp, int mntflags, struct thread *td) 587 { 588 int error; 589 590 error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td); 591 return (error); 592 } 593 594 /* 595 * Return a root vnode 596 */ 597 static int 598 mqfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td) 599 { 600 struct mqfs_info *mqfs; 601 int ret; 602 603 mqfs = VFSTOMQFS(mp); 604 sx_xlock(&mqfs->mi_lock); 605 ret = mqfs_allocv(mp, vpp, mqfs->mi_root); 606 sx_xunlock(&mqfs->mi_lock); 607 return (ret); 608 } 609 610 /* 611 * Return filesystem stats 612 */ 613 static int 614 mqfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *td) 615 { 616 /* XXX update statistics */ 617 return (0); 618 } 619 620 /* 621 * Initialize a mqfs instance 622 */ 623 static int 624 mqfs_init(struct vfsconf *vfc) 625 { 626 struct mqfs_node *root; 627 struct mqfs_info *mi; 628 629 mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node), 630 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 631 mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue), 632 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 633 mvdata_zone = uma_zcreate("mvdata", 634 sizeof(struct mqfs_vdata), NULL, NULL, NULL, 635 NULL, UMA_ALIGN_PTR, 0); 636 mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier), 637 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 638 mi = &mqfs_data; 639 sx_init(&mi->mi_lock, "mqfs lock"); 640 /* set up the root diretory */ 641 root = mqfs_create_node("/", 1, curthread->td_ucred, 01777, 642 mqfstype_root); 643 root->mn_info = mi; 644 LIST_INIT(&root->mn_children); 645 LIST_INIT(&root->mn_vnodes); 646 mi->mi_root = root; 647 mqfs_fileno_init(mi); 648 mqfs_fileno_alloc(mi, root); 649 mqfs_fixup_dir(root); 650 exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL, 651 EVENTHANDLER_PRI_ANY); 652 mq_fdclose = mqueue_fdclose; 653 p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING); 654 return (0); 655 } 656 657 /* 658 * Destroy a mqfs instance 659 */ 660 static int 661 mqfs_uninit(struct vfsconf *vfc) 662 { 663 struct mqfs_info *mi; 664 665 if (!unloadable) 666 return (EOPNOTSUPP); 667 EVENTHANDLER_DEREGISTER(process_exit, exit_tag); 668 mi = &mqfs_data; 669 mqfs_destroy(mi->mi_root); 670 
mi->mi_root = NULL; 671 mqfs_fileno_uninit(mi); 672 sx_destroy(&mi->mi_lock); 673 uma_zdestroy(mqnode_zone); 674 uma_zdestroy(mqueue_zone); 675 uma_zdestroy(mvdata_zone); 676 uma_zdestroy(mqnoti_zone); 677 return (0); 678 } 679 680 /* 681 * task routine 682 */ 683 static void 684 do_recycle(void *context, int pending __unused) 685 { 686 struct vnode *vp = (struct vnode *)context; 687 688 vrecycle(vp, curthread); 689 vdrop(vp); 690 } 691 692 /* 693 * Allocate a vnode 694 */ 695 static int 696 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn) 697 { 698 struct mqfs_vdata *vd; 699 int error; 700 701 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 702 if (vd->mv_vnode->v_mount == mp) 703 break; 704 } 705 706 if (vd != NULL) { 707 if (vget(vd->mv_vnode, 0, curthread) == 0) { 708 *vpp = vd->mv_vnode; 709 vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, 710 curthread); 711 return (0); 712 } 713 /* XXX if this can happen, we're in trouble */ 714 } 715 716 error = getnewvnode("mqueue", mp, &mqfs_vnodeops, vpp); 717 if (error) 718 return (error); 719 vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread); 720 error = insmntque(*vpp, mp); 721 if (error != 0) { 722 *vpp = NULLVP; 723 return (error); 724 } 725 vd = uma_zalloc(mvdata_zone, M_WAITOK); 726 (*vpp)->v_data = vd; 727 vd->mv_vnode = *vpp; 728 vd->mv_node = pn; 729 TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp); 730 LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link); 731 mqnode_addref(pn); 732 switch (pn->mn_type) { 733 case mqfstype_root: 734 (*vpp)->v_vflag = VV_ROOT; 735 /* fall through */ 736 case mqfstype_dir: 737 case mqfstype_this: 738 case mqfstype_parent: 739 (*vpp)->v_type = VDIR; 740 break; 741 case mqfstype_file: 742 (*vpp)->v_type = VREG; 743 break; 744 case mqfstype_symlink: 745 (*vpp)->v_type = VLNK; 746 break; 747 case mqfstype_none: 748 KASSERT(0, ("mqfs_allocf called for null node\n")); 749 default: 750 panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type); 751 } 752 return (0); 753 } 754 755 /* 756 * Search a directory entry 757 */ 758 static struct mqfs_node * 759 mqfs_search(struct mqfs_node *pd, const char *name, int len) 760 { 761 struct mqfs_node *pn; 762 763 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 764 if (strncmp(pn->mn_name, name, len) == 0) 765 return (pn); 766 } 767 return (NULL); 768 } 769 770 /* 771 * Look up a file or directory. 
772 */ 773 static int 774 mqfs_lookupx(struct vop_cachedlookup_args *ap) 775 { 776 struct componentname *cnp; 777 struct vnode *dvp, **vpp; 778 struct mqfs_node *pd; 779 struct mqfs_node *pn; 780 int nameiop, flags, error, namelen; 781 char *pname; 782 struct thread *td; 783 784 cnp = ap->a_cnp; 785 vpp = ap->a_vpp; 786 dvp = ap->a_dvp; 787 pname = cnp->cn_nameptr; 788 namelen = cnp->cn_namelen; 789 td = cnp->cn_thread; 790 flags = cnp->cn_flags; 791 nameiop = cnp->cn_nameiop; 792 pd = VTON(dvp); 793 pn = NULL; 794 *vpp = NULLVP; 795 796 if (dvp->v_type != VDIR) 797 return (ENOTDIR); 798 799 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread); 800 if (error) 801 return (error); 802 803 /* shortcut: check if the name is too long */ 804 if (cnp->cn_namelen >= MQFS_NAMELEN) 805 return (ENOENT); 806 807 /* self */ 808 if (namelen == 1 && pname[0] == '.') { 809 if ((flags & ISLASTCN) && nameiop != LOOKUP) 810 return (EINVAL); 811 pn = pd; 812 *vpp = dvp; 813 VREF(dvp); 814 return (0); 815 } 816 817 /* parent */ 818 if (cnp->cn_flags & ISDOTDOT) { 819 if (dvp->v_vflag & VV_ROOT) 820 return (EIO); 821 if ((flags & ISLASTCN) && nameiop != LOOKUP) 822 return (EINVAL); 823 VOP_UNLOCK(dvp, 0, cnp->cn_thread); 824 KASSERT(pd->mn_parent, ("non-root directory has no parent")); 825 pn = pd->mn_parent; 826 error = mqfs_allocv(dvp->v_mount, vpp, pn); 827 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td); 828 return (error); 829 } 830 831 /* named node */ 832 pn = mqfs_search(pd, pname, namelen); 833 834 /* found */ 835 if (pn != NULL) { 836 /* DELETE */ 837 if (nameiop == DELETE && (flags & ISLASTCN)) { 838 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 839 if (error) 840 return (error); 841 if (*vpp == dvp) { 842 VREF(dvp); 843 *vpp = dvp; 844 return (0); 845 } 846 } 847 848 /* allocate vnode */ 849 error = mqfs_allocv(dvp->v_mount, vpp, pn); 850 if (error == 0 && cnp->cn_flags & MAKEENTRY) 851 cache_enter(dvp, *vpp, cnp); 852 return (error); 853 } 854 855 /* not found */ 856 857 /* will create a new entry in the directory ? 
*/ 858 if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT) 859 && (flags & ISLASTCN)) { 860 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 861 if (error) 862 return (error); 863 cnp->cn_flags |= SAVENAME; 864 return (EJUSTRETURN); 865 } 866 return (ENOENT); 867 } 868 869 #if 0 870 struct vop_lookup_args { 871 struct vop_generic_args a_gen; 872 struct vnode *a_dvp; 873 struct vnode **a_vpp; 874 struct componentname *a_cnp; 875 }; 876 #endif 877 878 /* 879 * vnode lookup operation 880 */ 881 static int 882 mqfs_lookup(struct vop_cachedlookup_args *ap) 883 { 884 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 885 int rc; 886 887 sx_xlock(&mqfs->mi_lock); 888 rc = mqfs_lookupx(ap); 889 sx_xunlock(&mqfs->mi_lock); 890 return (rc); 891 } 892 893 #if 0 894 struct vop_create_args { 895 struct vnode *a_dvp; 896 struct vnode **a_vpp; 897 struct componentname *a_cnp; 898 struct vattr *a_vap; 899 }; 900 #endif 901 902 /* 903 * vnode creation operation 904 */ 905 static int 906 mqfs_create(struct vop_create_args *ap) 907 { 908 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 909 struct componentname *cnp = ap->a_cnp; 910 struct mqfs_node *pd; 911 struct mqfs_node *pn; 912 struct mqueue *mq; 913 int error; 914 915 pd = VTON(ap->a_dvp); 916 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 917 return (ENOTDIR); 918 mq = mqueue_alloc(NULL); 919 if (mq == NULL) 920 return (EAGAIN); 921 sx_xlock(&mqfs->mi_lock); 922 #if 0 923 /* named node */ 924 pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen); 925 if (pn != NULL) { 926 mqueue_free(mq); 927 sx_xunlock(&mqfs->mi_lock); 928 return (EEXIST); 929 } 930 #else 931 if ((cnp->cn_flags & HASBUF) == 0) 932 panic("%s: no name", __func__); 933 #endif 934 pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen, 935 cnp->cn_cred, ap->a_vap->va_mode); 936 if (pn == NULL) 937 error = ENOSPC; 938 else { 939 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 940 if (error) 941 mqfs_destroy(pn); 942 else 943 pn->mn_data = mq; 944 } 945 sx_xunlock(&mqfs->mi_lock); 946 if (error) 947 mqueue_free(mq); 948 return (error); 949 } 950 951 /* 952 * Remove an entry 953 */ 954 static 955 int do_unlink(struct mqfs_node *pn, struct ucred *ucred) 956 { 957 struct mqfs_node *parent; 958 struct mqfs_vdata *vd; 959 int error = 0; 960 961 sx_assert(&pn->mn_info->mi_lock, SX_LOCKED); 962 963 if (ucred->cr_uid != pn->mn_uid && 964 (error = priv_check_cred(ucred, PRIV_MQ_ADMIN, 0)) != 0) 965 error = EACCES; 966 else if (!pn->mn_deleted) { 967 parent = pn->mn_parent; 968 pn->mn_parent = NULL; 969 pn->mn_deleted = 1; 970 LIST_REMOVE(pn, mn_sibling); 971 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 972 cache_purge(vd->mv_vnode); 973 vhold(vd->mv_vnode); 974 taskqueue_enqueue(taskqueue_thread, &vd->mv_task); 975 } 976 mqnode_release(pn); 977 mqnode_release(parent); 978 } else 979 error = ENOENT; 980 return (error); 981 } 982 983 #if 0 984 struct vop_remove_args { 985 struct vnode *a_dvp; 986 struct vnode *a_vp; 987 struct componentname *a_cnp; 988 }; 989 #endif 990 991 /* 992 * vnode removal operation 993 */ 994 static int 995 mqfs_remove(struct vop_remove_args *ap) 996 { 997 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 998 struct mqfs_node *pn; 999 int error; 1000 1001 if (ap->a_vp->v_type == VDIR) 1002 return (EPERM); 1003 pn = VTON(ap->a_vp); 1004 sx_xlock(&mqfs->mi_lock); 1005 error = do_unlink(pn, ap->a_cnp->cn_cred); 1006 sx_xunlock(&mqfs->mi_lock); 1007 return (error); 1008 } 1009 1010 #if 0 1011 struct 
vop_inactive_args { 1012 struct vnode *a_vp; 1013 struct thread *a_td; 1014 }; 1015 #endif 1016 1017 static int 1018 mqfs_inactive(struct vop_inactive_args *ap) 1019 { 1020 struct mqfs_node *pn = VTON(ap->a_vp); 1021 1022 if (pn->mn_deleted) 1023 vrecycle(ap->a_vp, ap->a_td); 1024 return (0); 1025 } 1026 1027 #if 0 1028 struct vop_reclaim_args { 1029 struct vop_generic_args a_gen; 1030 struct vnode *a_vp; 1031 struct thread *a_td; 1032 }; 1033 #endif 1034 1035 static int 1036 mqfs_reclaim(struct vop_reclaim_args *ap) 1037 { 1038 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount); 1039 struct vnode *vp = ap->a_vp; 1040 struct mqfs_node *pn; 1041 struct mqfs_vdata *vd; 1042 1043 vd = vp->v_data; 1044 pn = vd->mv_node; 1045 sx_xlock(&mqfs->mi_lock); 1046 vp->v_data = NULL; 1047 LIST_REMOVE(vd, mv_link); 1048 uma_zfree(mvdata_zone, vd); 1049 mqnode_release(pn); 1050 sx_xunlock(&mqfs->mi_lock); 1051 return (0); 1052 } 1053 1054 #if 0 1055 struct vop_open_args { 1056 struct vop_generic_args a_gen; 1057 struct vnode *a_vp; 1058 int a_mode; 1059 struct ucred *a_cred; 1060 struct thread *a_td; 1061 int a_fdidx; 1062 }; 1063 #endif 1064 1065 static int 1066 mqfs_open(struct vop_open_args *ap) 1067 { 1068 return (0); 1069 } 1070 1071 #if 0 1072 struct vop_close_args { 1073 struct vop_generic_args a_gen; 1074 struct vnode *a_vp; 1075 int a_fflag; 1076 struct ucred *a_cred; 1077 struct thread *a_td; 1078 }; 1079 #endif 1080 1081 static int 1082 mqfs_close(struct vop_close_args *ap) 1083 { 1084 return (0); 1085 } 1086 1087 #if 0 1088 struct vop_access_args { 1089 struct vop_generic_args a_gen; 1090 struct vnode *a_vp; 1091 int a_mode; 1092 struct ucred *a_cred; 1093 struct thread *a_td; 1094 }; 1095 #endif 1096 1097 /* 1098 * Verify permissions 1099 */ 1100 static int 1101 mqfs_access(struct vop_access_args *ap) 1102 { 1103 struct vnode *vp = ap->a_vp; 1104 struct vattr vattr; 1105 int error; 1106 1107 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td); 1108 if (error) 1109 return (error); 1110 error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid, 1111 vattr.va_gid, ap->a_mode, ap->a_cred, NULL); 1112 return (error); 1113 } 1114 1115 #if 0 1116 struct vop_getattr_args { 1117 struct vop_generic_args a_gen; 1118 struct vnode *a_vp; 1119 struct vattr *a_vap; 1120 struct ucred *a_cred; 1121 struct thread *a_td; 1122 }; 1123 #endif 1124 1125 /* 1126 * Get file attributes 1127 */ 1128 static int 1129 mqfs_getattr(struct vop_getattr_args *ap) 1130 { 1131 struct vnode *vp = ap->a_vp; 1132 struct mqfs_node *pn = VTON(vp); 1133 struct vattr *vap = ap->a_vap; 1134 int error = 0; 1135 1136 VATTR_NULL(vap); 1137 vap->va_type = vp->v_type; 1138 vap->va_mode = pn->mn_mode; 1139 vap->va_nlink = 1; 1140 vap->va_uid = pn->mn_uid; 1141 vap->va_gid = pn->mn_gid; 1142 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; 1143 vap->va_fileid = pn->mn_fileno; 1144 vap->va_size = 0; 1145 vap->va_blocksize = PAGE_SIZE; 1146 vap->va_bytes = vap->va_size = 0; 1147 vap->va_atime = pn->mn_atime; 1148 vap->va_mtime = pn->mn_mtime; 1149 vap->va_ctime = pn->mn_ctime; 1150 vap->va_birthtime = pn->mn_birth; 1151 vap->va_gen = 0; 1152 vap->va_flags = 0; 1153 vap->va_rdev = 0; 1154 vap->va_bytes = 0; 1155 vap->va_filerev = 0; 1156 vap->va_vaflags = 0; 1157 return (error); 1158 } 1159 1160 #if 0 1161 struct vop_setattr_args { 1162 struct vop_generic_args a_gen; 1163 struct vnode *a_vp; 1164 struct vattr *a_vap; 1165 struct ucred *a_cred; 1166 struct thread *a_td; 1167 }; 1168 #endif 1169 /* 1170 * Set attributes 1171 */ 1172 static 
int 1173 mqfs_setattr(struct vop_setattr_args *ap) 1174 { 1175 struct mqfs_node *pn; 1176 struct vattr *vap; 1177 struct vnode *vp; 1178 int c, error; 1179 uid_t uid; 1180 gid_t gid; 1181 1182 vap = ap->a_vap; 1183 vp = ap->a_vp; 1184 if ((vap->va_type != VNON) || 1185 (vap->va_nlink != VNOVAL) || 1186 (vap->va_fsid != VNOVAL) || 1187 (vap->va_fileid != VNOVAL) || 1188 (vap->va_blocksize != VNOVAL) || 1189 (vap->va_flags != VNOVAL && vap->va_flags != 0) || 1190 (vap->va_rdev != VNOVAL) || 1191 ((int)vap->va_bytes != VNOVAL) || 1192 (vap->va_gen != VNOVAL)) { 1193 return (EINVAL); 1194 } 1195 1196 pn = VTON(vp); 1197 1198 error = c = 0; 1199 if (vap->va_uid == (uid_t)VNOVAL) 1200 uid = pn->mn_uid; 1201 else 1202 uid = vap->va_uid; 1203 if (vap->va_gid == (gid_t)VNOVAL) 1204 gid = pn->mn_gid; 1205 else 1206 gid = vap->va_gid; 1207 1208 if (uid != pn->mn_uid || gid != pn->mn_gid) { 1209 /* 1210 * To modify the ownership of a file, must possess VADMIN 1211 * for that file. 1212 */ 1213 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td))) 1214 return (error); 1215 1216 /* 1217 * XXXRW: Why is there a privilege check here: shouldn't the 1218 * check in VOP_ACCESS() be enough? Also, are the group bits 1219 * below definitely right? 1220 */ 1221 if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid || 1222 (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) && 1223 (error = priv_check(ap->a_td, PRIV_MQ_ADMIN)) != 0) 1224 return (error); 1225 pn->mn_uid = uid; 1226 pn->mn_gid = gid; 1227 c = 1; 1228 } 1229 1230 if (vap->va_mode != (mode_t)VNOVAL) { 1231 if ((ap->a_cred->cr_uid != pn->mn_uid) && 1232 (error = priv_check(ap->a_td, PRIV_MQ_ADMIN))) 1233 return (error); 1234 pn->mn_mode = vap->va_mode; 1235 c = 1; 1236 } 1237 1238 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { 1239 /* See the comment in ufs_vnops::ufs_setattr(). */ 1240 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)) && 1241 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || 1242 (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, ap->a_td)))) 1243 return (error); 1244 if (vap->va_atime.tv_sec != VNOVAL) { 1245 pn->mn_atime = vap->va_atime; 1246 } 1247 if (vap->va_mtime.tv_sec != VNOVAL) { 1248 pn->mn_mtime = vap->va_mtime; 1249 } 1250 c = 1; 1251 } 1252 if (c) { 1253 vfs_timestamp(&pn->mn_ctime); 1254 } 1255 return (0); 1256 } 1257 1258 #if 0 1259 struct vop_read_args { 1260 struct vop_generic_args a_gen; 1261 struct vnode *a_vp; 1262 struct uio *a_uio; 1263 int a_ioflag; 1264 struct ucred *a_cred; 1265 }; 1266 #endif 1267 1268 /* 1269 * Read from a file 1270 */ 1271 static int 1272 mqfs_read(struct vop_read_args *ap) 1273 { 1274 char buf[80]; 1275 struct vnode *vp = ap->a_vp; 1276 struct uio *uio = ap->a_uio; 1277 struct mqfs_node *pn; 1278 struct mqueue *mq; 1279 int len, error; 1280 1281 if (vp->v_type != VREG) 1282 return (EINVAL); 1283 1284 pn = VTON(vp); 1285 mq = VTOMQ(vp); 1286 snprintf(buf, sizeof(buf), 1287 "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n", 1288 mq->mq_totalbytes, 1289 mq->mq_maxmsg, 1290 mq->mq_curmsgs, 1291 mq->mq_msgsize); 1292 buf[sizeof(buf)-1] = '\0'; 1293 len = strlen(buf); 1294 error = uiomove_frombuf(buf, len, uio); 1295 return (error); 1296 } 1297 1298 #if 0 1299 struct vop_readdir_args { 1300 struct vop_generic_args a_gen; 1301 struct vnode *a_vp; 1302 struct uio *a_uio; 1303 struct ucred *a_cred; 1304 int *a_eofflag; 1305 int *a_ncookies; 1306 u_long **a_cookies; 1307 }; 1308 #endif 1309 1310 /* 1311 * Return directory entries. 
1312 */ 1313 static int 1314 mqfs_readdir(struct vop_readdir_args *ap) 1315 { 1316 struct vnode *vp; 1317 struct mqfs_info *mi; 1318 struct mqfs_node *pd; 1319 struct mqfs_node *pn; 1320 struct dirent entry; 1321 struct uio *uio; 1322 int *tmp_ncookies = NULL; 1323 off_t offset; 1324 int error, i; 1325 1326 vp = ap->a_vp; 1327 mi = VFSTOMQFS(vp->v_mount); 1328 pd = VTON(vp); 1329 uio = ap->a_uio; 1330 1331 if (vp->v_type != VDIR) 1332 return (ENOTDIR); 1333 1334 if (uio->uio_offset < 0) 1335 return (EINVAL); 1336 1337 if (ap->a_ncookies != NULL) { 1338 tmp_ncookies = ap->a_ncookies; 1339 *ap->a_ncookies = 0; 1340 ap->a_ncookies = NULL; 1341 } 1342 1343 error = 0; 1344 offset = 0; 1345 1346 sx_xlock(&mi->mi_lock); 1347 1348 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 1349 entry.d_reclen = sizeof(entry); 1350 if (!pn->mn_fileno) 1351 mqfs_fileno_alloc(mi, pn); 1352 entry.d_fileno = pn->mn_fileno; 1353 for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i) 1354 entry.d_name[i] = pn->mn_name[i]; 1355 entry.d_name[i] = 0; 1356 entry.d_namlen = i; 1357 switch (pn->mn_type) { 1358 case mqfstype_root: 1359 case mqfstype_dir: 1360 case mqfstype_this: 1361 case mqfstype_parent: 1362 entry.d_type = DT_DIR; 1363 break; 1364 case mqfstype_file: 1365 entry.d_type = DT_REG; 1366 break; 1367 case mqfstype_symlink: 1368 entry.d_type = DT_LNK; 1369 break; 1370 default: 1371 panic("%s has unexpected node type: %d", pn->mn_name, 1372 pn->mn_type); 1373 } 1374 if (entry.d_reclen > uio->uio_resid) 1375 break; 1376 if (offset >= uio->uio_offset) { 1377 error = vfs_read_dirent(ap, &entry, offset); 1378 if (error) 1379 break; 1380 } 1381 offset += entry.d_reclen; 1382 } 1383 sx_xunlock(&mi->mi_lock); 1384 1385 uio->uio_offset = offset; 1386 1387 if (tmp_ncookies != NULL) 1388 ap->a_ncookies = tmp_ncookies; 1389 1390 return (error); 1391 } 1392 1393 #ifdef notyet 1394 1395 #if 0 1396 struct vop_mkdir_args { 1397 struct vnode *a_dvp; 1398 struvt vnode **a_vpp; 1399 struvt componentname *a_cnp; 1400 struct vattr *a_vap; 1401 }; 1402 #endif 1403 1404 /* 1405 * Create a directory. 1406 */ 1407 static int 1408 mqfs_mkdir(struct vop_mkdir_args *ap) 1409 { 1410 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1411 struct componentname *cnp = ap->a_cnp; 1412 struct mqfs_node *pd = VTON(ap->a_dvp); 1413 struct mqfs_node *pn; 1414 int error; 1415 1416 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 1417 return (ENOTDIR); 1418 sx_xlock(&mqfs->mi_lock); 1419 #if 0 1420 /* named node */ 1421 pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen); 1422 if (pn != NULL) { 1423 sx_xunlock(&mqfs->mi_lock); 1424 return (EEXIST); 1425 } 1426 #else 1427 if ((cnp->cn_flags & HASBUF) == 0) 1428 panic("%s: no name", __func__); 1429 #endif 1430 pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen, 1431 ap->a_vap->cn_cred, ap->a_vap->va_mode); 1432 if (pn == NULL) 1433 error = ENOSPC; 1434 else 1435 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 1436 sx_xunlock(&mqfs->mi_lock); 1437 return (error); 1438 } 1439 1440 #if 0 1441 struct vop_rmdir_args { 1442 struct vnode *a_dvp; 1443 struct vnode *a_vp; 1444 struct componentname *a_cnp; 1445 }; 1446 #endif 1447 1448 /* 1449 * Remove a directory. 
1450 */ 1451 static int 1452 mqfs_rmdir(struct vop_rmdir_args *ap) 1453 { 1454 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1455 struct mqfs_node *pn = VTON(ap->a_vp); 1456 struct mqfs_node *pt; 1457 1458 if (pn->mn_type != mqfstype_dir) 1459 return (ENOTDIR); 1460 1461 sx_xlock(&mqfs->mi_lock); 1462 if (pn->mn_deleted) { 1463 sx_xunlock(&mqfs->mi_lock); 1464 return (ENOENT); 1465 } 1466 1467 pt = LIST_FIRST(&pn->mn_children); 1468 pt = LIST_NEXT(pt, mn_sibling); 1469 pt = LIST_NEXT(pt, mn_sibling); 1470 if (pt != NULL) { 1471 sx_xunlock(&mqfs->mi_lock); 1472 return (ENOTEMPTY); 1473 } 1474 pt = pn->mn_parent; 1475 pn->mn_parent = NULL; 1476 pn->mn_deleted = 1; 1477 LIST_REMOVE(pn, mn_sibling); 1478 mqnode_release(pn); 1479 mqnode_release(pt); 1480 sx_xunlock(&mqfs->mi_lock); 1481 cache_purge(ap->a_vp); 1482 return (0); 1483 } 1484 1485 #endif /* notyet */ 1486 1487 /* 1488 * Allocate a message queue 1489 */ 1490 static struct mqueue * 1491 mqueue_alloc(const struct mq_attr *attr) 1492 { 1493 struct mqueue *mq; 1494 1495 if (curmq >= maxmq) 1496 return (NULL); 1497 mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO); 1498 TAILQ_INIT(&mq->mq_msgq); 1499 if (attr != NULL) { 1500 mq->mq_maxmsg = attr->mq_maxmsg; 1501 mq->mq_msgsize = attr->mq_msgsize; 1502 } else { 1503 mq->mq_maxmsg = default_maxmsg; 1504 mq->mq_msgsize = default_msgsize; 1505 } 1506 mtx_init(&mq->mq_mutex, "mqueue", NULL, MTX_DEF); 1507 knlist_init(&mq->mq_rsel.si_note, &mq->mq_mutex, NULL, NULL, NULL); 1508 knlist_init(&mq->mq_wsel.si_note, &mq->mq_mutex, NULL, NULL, NULL); 1509 atomic_add_int(&curmq, 1); 1510 return (mq); 1511 } 1512 1513 /* 1514 * Destroy a message queue 1515 */ 1516 static void 1517 mqueue_free(struct mqueue *mq) 1518 { 1519 struct mqueue_msg *msg; 1520 1521 while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) { 1522 TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link); 1523 FREE(msg, M_MQUEUEDATA); 1524 } 1525 1526 mtx_destroy(&mq->mq_mutex); 1527 knlist_destroy(&mq->mq_rsel.si_note); 1528 knlist_destroy(&mq->mq_wsel.si_note); 1529 uma_zfree(mqueue_zone, mq); 1530 atomic_add_int(&curmq, -1); 1531 } 1532 1533 /* 1534 * Load a message from user space 1535 */ 1536 static struct mqueue_msg * 1537 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio) 1538 { 1539 struct mqueue_msg *msg; 1540 size_t len; 1541 int error; 1542 1543 len = sizeof(struct mqueue_msg) + msg_size; 1544 MALLOC(msg, struct mqueue_msg *, len, M_MQUEUEDATA, M_WAITOK); 1545 error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg), 1546 msg_size); 1547 if (error) { 1548 FREE(msg, M_MQUEUEDATA); 1549 msg = NULL; 1550 } else { 1551 msg->msg_size = msg_size; 1552 msg->msg_prio = msg_prio; 1553 } 1554 return (msg); 1555 } 1556 1557 /* 1558 * Save a message to user space 1559 */ 1560 static int 1561 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio) 1562 { 1563 int error; 1564 1565 error = copyout(((char *)msg) + sizeof(*msg), msg_ptr, 1566 msg->msg_size); 1567 if (error == 0 && msg_prio != NULL) 1568 error = copyout(&msg->msg_prio, msg_prio, sizeof(int)); 1569 return (error); 1570 } 1571 1572 /* 1573 * Free a message's memory 1574 */ 1575 static __inline void 1576 mqueue_freemsg(struct mqueue_msg *msg) 1577 { 1578 FREE(msg, M_MQUEUEDATA); 1579 } 1580 1581 /* 1582 * Send a message. if waitok is false, thread will not be 1583 * blocked if there is no data in queue, otherwise, absolute 1584 * time will be checked. 
1585 */ 1586 int 1587 mqueue_send(struct mqueue *mq, const char *msg_ptr, 1588 size_t msg_len, unsigned msg_prio, int waitok, 1589 const struct timespec *abs_timeout) 1590 { 1591 struct mqueue_msg *msg; 1592 struct timespec ets, ts, ts2; 1593 struct timeval tv; 1594 int error; 1595 1596 if (msg_prio >= MQ_PRIO_MAX) 1597 return (EINVAL); 1598 if (msg_len > mq->mq_msgsize) 1599 return (EMSGSIZE); 1600 msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio); 1601 if (msg == NULL) 1602 return (EFAULT); 1603 1604 /* O_NONBLOCK case */ 1605 if (!waitok) { 1606 error = _mqueue_send(mq, msg, -1); 1607 if (error) 1608 goto bad; 1609 return (0); 1610 } 1611 1612 /* we allow a null timeout (wait forever) */ 1613 if (abs_timeout == NULL) { 1614 error = _mqueue_send(mq, msg, 0); 1615 if (error) 1616 goto bad; 1617 return (0); 1618 } 1619 1620 /* send it before checking time */ 1621 error = _mqueue_send(mq, msg, -1); 1622 if (error == 0) 1623 return (0); 1624 1625 if (error != EAGAIN) 1626 goto bad; 1627 1628 error = copyin(abs_timeout, &ets, sizeof(ets)); 1629 if (error != 0) 1630 goto bad; 1631 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) { 1632 error = EINVAL; 1633 goto bad; 1634 } 1635 for (;;) { 1636 ts2 = ets; 1637 getnanotime(&ts); 1638 timespecsub(&ts2, &ts); 1639 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { 1640 error = ETIMEDOUT; 1641 break; 1642 } 1643 TIMESPEC_TO_TIMEVAL(&tv, &ts2); 1644 error = _mqueue_send(mq, msg, tvtohz(&tv)); 1645 if (error != ETIMEDOUT) 1646 break; 1647 } 1648 if (error == 0) 1649 return (0); 1650 bad: 1651 mqueue_freemsg(msg); 1652 return (error); 1653 } 1654 1655 /* 1656 * Common routine to send a message 1657 */ 1658 static int 1659 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo) 1660 { 1661 struct mqueue_msg *msg2; 1662 int error = 0; 1663 1664 mtx_lock(&mq->mq_mutex); 1665 while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) { 1666 if (timo < 0) { 1667 mtx_unlock(&mq->mq_mutex); 1668 return (EAGAIN); 1669 } 1670 mq->mq_senders++; 1671 error = msleep(&mq->mq_senders, &mq->mq_mutex, 1672 PCATCH, "mqsend", timo); 1673 mq->mq_senders--; 1674 if (error == EAGAIN) 1675 error = ETIMEDOUT; 1676 } 1677 if (mq->mq_curmsgs >= mq->mq_maxmsg) { 1678 mtx_unlock(&mq->mq_mutex); 1679 return (error); 1680 } 1681 error = 0; 1682 if (TAILQ_EMPTY(&mq->mq_msgq)) { 1683 TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link); 1684 } else { 1685 if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) { 1686 TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link); 1687 } else { 1688 TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) { 1689 if (msg2->msg_prio < msg->msg_prio) 1690 break; 1691 } 1692 TAILQ_INSERT_BEFORE(msg2, msg, msg_link); 1693 } 1694 } 1695 mq->mq_curmsgs++; 1696 mq->mq_totalbytes += msg->msg_size; 1697 if (mq->mq_receivers) 1698 wakeup_one(&mq->mq_receivers); 1699 else if (mq->mq_notifier != NULL) 1700 mqueue_send_notification(mq); 1701 if (mq->mq_flags & MQ_RSEL) { 1702 mq->mq_flags &= ~MQ_RSEL; 1703 selwakeup(&mq->mq_rsel); 1704 } 1705 KNOTE_LOCKED(&mq->mq_rsel.si_note, 0); 1706 mtx_unlock(&mq->mq_mutex); 1707 return (0); 1708 } 1709 1710 /* 1711 * Send realtime a signal to process which registered itself 1712 * successfully by mq_notify. 
1713 */ 1714 static void 1715 mqueue_send_notification(struct mqueue *mq) 1716 { 1717 struct mqueue_notifier *nt; 1718 struct proc *p; 1719 1720 mtx_assert(&mq->mq_mutex, MA_OWNED); 1721 nt = mq->mq_notifier; 1722 if (nt->nt_sigev.sigev_notify != SIGEV_NONE) { 1723 p = nt->nt_proc; 1724 PROC_LOCK(p); 1725 if (!KSI_ONQ(&nt->nt_ksi)) 1726 psignal_event(p, &nt->nt_sigev, &nt->nt_ksi); 1727 PROC_UNLOCK(p); 1728 } 1729 mq->mq_notifier = NULL; 1730 } 1731 1732 /* 1733 * Get a message. if waitok is false, thread will not be 1734 * blocked if there is no data in queue, otherwise, absolute 1735 * time will be checked. 1736 */ 1737 int 1738 mqueue_receive(struct mqueue *mq, char *msg_ptr, 1739 size_t msg_len, unsigned *msg_prio, int waitok, 1740 const struct timespec *abs_timeout) 1741 { 1742 struct mqueue_msg *msg; 1743 struct timespec ets, ts, ts2; 1744 struct timeval tv; 1745 int error; 1746 1747 if (msg_len < mq->mq_msgsize) 1748 return (EMSGSIZE); 1749 1750 /* O_NONBLOCK case */ 1751 if (!waitok) { 1752 error = _mqueue_recv(mq, &msg, -1); 1753 if (error) 1754 return (error); 1755 goto received; 1756 } 1757 1758 /* we allow a null timeout (wait forever). */ 1759 if (abs_timeout == NULL) { 1760 error = _mqueue_recv(mq, &msg, 0); 1761 if (error) 1762 return (error); 1763 goto received; 1764 } 1765 1766 /* try to get a message before checking time */ 1767 error = _mqueue_recv(mq, &msg, -1); 1768 if (error == 0) 1769 goto received; 1770 1771 if (error != EAGAIN) 1772 return (error); 1773 1774 error = copyin(abs_timeout, &ets, sizeof(ets)); 1775 if (error != 0) 1776 return (error); 1777 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) { 1778 error = EINVAL; 1779 return (error); 1780 } 1781 1782 for (;;) { 1783 ts2 = ets; 1784 getnanotime(&ts); 1785 timespecsub(&ts2, &ts); 1786 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { 1787 error = ETIMEDOUT; 1788 return (error); 1789 } 1790 TIMESPEC_TO_TIMEVAL(&tv, &ts2); 1791 error = _mqueue_recv(mq, &msg, tvtohz(&tv)); 1792 if (error == 0) 1793 break; 1794 if (error != ETIMEDOUT) 1795 return (error); 1796 } 1797 1798 received: 1799 error = mqueue_savemsg(msg, msg_ptr, msg_prio); 1800 if (error == 0) { 1801 curthread->td_retval[0] = msg->msg_size; 1802 curthread->td_retval[1] = 0; 1803 } 1804 mqueue_freemsg(msg); 1805 return (error); 1806 } 1807 1808 /* 1809 * Common routine to receive a message 1810 */ 1811 static int 1812 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo) 1813 { 1814 int error = 0; 1815 1816 mtx_lock(&mq->mq_mutex); 1817 while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) { 1818 if (timo < 0) { 1819 mtx_unlock(&mq->mq_mutex); 1820 return (EAGAIN); 1821 } 1822 mq->mq_receivers++; 1823 error = msleep(&mq->mq_receivers, &mq->mq_mutex, 1824 PCATCH, "mqrecv", timo); 1825 mq->mq_receivers--; 1826 if (error == EAGAIN) 1827 error = ETIMEDOUT; 1828 } 1829 if (*msg != NULL) { 1830 error = 0; 1831 TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link); 1832 mq->mq_curmsgs--; 1833 mq->mq_totalbytes -= (*msg)->msg_size; 1834 if (mq->mq_senders) 1835 wakeup_one(&mq->mq_senders); 1836 if (mq->mq_flags & MQ_WSEL) { 1837 mq->mq_flags &= ~MQ_WSEL; 1838 selwakeup(&mq->mq_wsel); 1839 } 1840 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0); 1841 } 1842 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 && 1843 !TAILQ_EMPTY(&mq->mq_msgq)) { 1844 mqueue_send_notification(mq); 1845 } 1846 mtx_unlock(&mq->mq_mutex); 1847 return (error); 1848 } 1849 1850 static __inline struct mqueue_notifier * 1851 notifier_alloc(void) 1852 { 1853 return 
(uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO)); 1854 } 1855 1856 static __inline void 1857 notifier_free(struct mqueue_notifier *p) 1858 { 1859 uma_zfree(mqnoti_zone, p); 1860 } 1861 1862 static struct mqueue_notifier * 1863 notifier_search(struct proc *p, int fd) 1864 { 1865 struct mqueue_notifier *nt; 1866 1867 LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) { 1868 if (nt->nt_ksi.ksi_mqd == fd) 1869 break; 1870 } 1871 return (nt); 1872 } 1873 1874 static __inline void 1875 notifier_insert(struct proc *p, struct mqueue_notifier *nt) 1876 { 1877 LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link); 1878 } 1879 1880 static __inline void 1881 notifier_delete(struct proc *p, struct mqueue_notifier *nt) 1882 { 1883 LIST_REMOVE(nt, nt_link); 1884 notifier_free(nt); 1885 } 1886 1887 static void 1888 notifier_remove(struct proc *p, struct mqueue *mq, int fd) 1889 { 1890 struct mqueue_notifier *nt; 1891 1892 mtx_assert(&mq->mq_mutex, MA_OWNED); 1893 PROC_LOCK(p); 1894 nt = notifier_search(p, fd); 1895 if (nt != NULL) { 1896 if (mq->mq_notifier == nt) 1897 mq->mq_notifier = NULL; 1898 sigqueue_take(&nt->nt_ksi); 1899 notifier_delete(p, nt); 1900 } 1901 PROC_UNLOCK(p); 1902 } 1903 1904 /* 1905 * Syscall to open a message queue. 1906 */ 1907 int 1908 kmq_open(struct thread *td, struct kmq_open_args *uap) 1909 { 1910 char path[MQFS_NAMELEN + 1]; 1911 struct mq_attr attr, *pattr; 1912 struct mqfs_node *pn; 1913 struct filedesc *fdp; 1914 struct file *fp; 1915 struct mqueue *mq; 1916 int fd, error, len, flags, cmode; 1917 1918 if ((uap->flags & O_ACCMODE) == O_ACCMODE) 1919 return (EINVAL); 1920 1921 fdp = td->td_proc->p_fd; 1922 flags = FFLAGS(uap->flags); 1923 cmode = (((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT); 1924 mq = NULL; 1925 if ((flags & O_CREAT) && (uap->attr != NULL)) { 1926 error = copyin(uap->attr, &attr, sizeof(attr)); 1927 if (error) 1928 return (error); 1929 if (attr.mq_maxmsg <= 0 || attr.mq_maxmsg > maxmsg) 1930 return (EINVAL); 1931 if (attr.mq_msgsize <= 0 || attr.mq_msgsize > maxmsgsize) 1932 return (EINVAL); 1933 pattr = &attr; 1934 } else 1935 pattr = NULL; 1936 1937 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL); 1938 if (error) 1939 return (error); 1940 1941 /* 1942 * The first character of name must be a slash (/) character 1943 * and the remaining characters of name cannot include any slash 1944 * characters. 
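	 * For example, "/myqueue" is accepted, while "myqueue" (no leading
	 * slash) or "/dir/queue" (extra slash) are rejected with EINVAL.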
1945 */ 1946 len = strlen(path); 1947 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL) 1948 return (EINVAL); 1949 1950 error = falloc(td, &fp, &fd); 1951 if (error) 1952 return (error); 1953 1954 sx_xlock(&mqfs_data.mi_lock); 1955 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1); 1956 if (pn == NULL) { 1957 if (!(flags & O_CREAT)) { 1958 error = ENOENT; 1959 } else { 1960 mq = mqueue_alloc(pattr); 1961 if (mq == NULL) { 1962 error = ENFILE; 1963 } else { 1964 pn = mqfs_create_file(mqfs_data.mi_root, 1965 path + 1, len - 1, td->td_ucred, 1966 cmode); 1967 if (pn == NULL) { 1968 error = ENOSPC; 1969 mqueue_free(mq); 1970 } 1971 } 1972 } 1973 1974 if (error == 0) { 1975 pn->mn_data = mq; 1976 } 1977 } else { 1978 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) { 1979 error = EEXIST; 1980 } else { 1981 int acc_mode = 0; 1982 1983 if (flags & FREAD) 1984 acc_mode |= VREAD; 1985 if (flags & FWRITE) 1986 acc_mode |= VWRITE; 1987 error = vaccess(VREG, pn->mn_mode, pn->mn_uid, 1988 pn->mn_gid, acc_mode, td->td_ucred, NULL); 1989 } 1990 } 1991 1992 if (error) { 1993 sx_xunlock(&mqfs_data.mi_lock); 1994 fdclose(fdp, fp, fd, td); 1995 fdrop(fp, td); 1996 return (error); 1997 } 1998 1999 mqnode_addref(pn); 2000 sx_xunlock(&mqfs_data.mi_lock); 2001 2002 FILE_LOCK(fp); 2003 fp->f_flag = (flags & (FREAD | FWRITE | O_NONBLOCK)); 2004 fp->f_type = DTYPE_MQUEUE; 2005 fp->f_data = pn; 2006 fp->f_ops = &mqueueops; 2007 FILE_UNLOCK(fp); 2008 2009 FILEDESC_XLOCK(fdp); 2010 if (fdp->fd_ofiles[fd] == fp) 2011 fdp->fd_ofileflags[fd] |= UF_EXCLOSE; 2012 FILEDESC_XUNLOCK(fdp); 2013 td->td_retval[0] = fd; 2014 fdrop(fp, td); 2015 return (0); 2016 } 2017 2018 /* 2019 * Syscall to unlink a message queue. 2020 */ 2021 int 2022 kmq_unlink(struct thread *td, struct kmq_unlink_args *uap) 2023 { 2024 char path[MQFS_NAMELEN+1]; 2025 struct mqfs_node *pn; 2026 int error, len; 2027 2028 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL); 2029 if (error) 2030 return (error); 2031 2032 len = strlen(path); 2033 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL) 2034 return (EINVAL); 2035 2036 sx_xlock(&mqfs_data.mi_lock); 2037 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1); 2038 if (pn != NULL) 2039 error = do_unlink(pn, td->td_ucred); 2040 else 2041 error = ENOENT; 2042 sx_xunlock(&mqfs_data.mi_lock); 2043 return (error); 2044 } 2045 2046 typedef int (*_fgetf)(struct thread *, int, struct file **); 2047 2048 /* 2049 * Get message queue by giving file slot 2050 */ 2051 static int 2052 _getmq(struct thread *td, int fd, _fgetf func, 2053 struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq) 2054 { 2055 struct mqfs_node *pn; 2056 int error; 2057 2058 error = func(td, fd, fpp); 2059 if (error) 2060 return (error); 2061 if (&mqueueops != (*fpp)->f_ops) { 2062 fdrop(*fpp, td); 2063 return (EBADF); 2064 } 2065 pn = (*fpp)->f_data; 2066 if (ppn) 2067 *ppn = pn; 2068 if (pmq) 2069 *pmq = pn->mn_data; 2070 return (0); 2071 } 2072 2073 static __inline int 2074 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn, 2075 struct mqueue **pmq) 2076 { 2077 return _getmq(td, fd, fget, fpp, ppn, pmq); 2078 } 2079 2080 static __inline int 2081 getmq_read(struct thread *td, int fd, struct file **fpp, 2082 struct mqfs_node **ppn, struct mqueue **pmq) 2083 { 2084 return _getmq(td, fd, fget_read, fpp, ppn, pmq); 2085 } 2086 2087 static __inline int 2088 getmq_write(struct thread *td, int fd, struct file **fpp, 2089 struct mqfs_node **ppn, struct mqueue **pmq) 2090 { 
2091 return _getmq(td, fd, fget_write, fpp, ppn, pmq); 2092 } 2093 2094 int 2095 kmq_setattr(struct thread *td, struct kmq_setattr_args *uap) 2096 { 2097 struct mqueue *mq; 2098 struct file *fp; 2099 struct mq_attr attr, oattr; 2100 int error; 2101 2102 if (uap->attr) { 2103 error = copyin(uap->attr, &attr, sizeof(attr)); 2104 if (error) 2105 return (error); 2106 if (attr.mq_flags & ~O_NONBLOCK) 2107 return (EINVAL); 2108 } 2109 error = getmq(td, uap->mqd, &fp, NULL, &mq); 2110 if (error) 2111 return (error); 2112 oattr.mq_maxmsg = mq->mq_maxmsg; 2113 oattr.mq_msgsize = mq->mq_msgsize; 2114 oattr.mq_curmsgs = mq->mq_curmsgs; 2115 FILE_LOCK(fp); 2116 oattr.mq_flags = (O_NONBLOCK & fp->f_flag); 2117 if (uap->attr) { 2118 fp->f_flag &= ~O_NONBLOCK; 2119 fp->f_flag |= (attr.mq_flags & O_NONBLOCK); 2120 } 2121 FILE_UNLOCK(fp); 2122 fdrop(fp, td); 2123 if (uap->oattr) 2124 error = copyout(&oattr, uap->oattr, sizeof(oattr)); 2125 return (error); 2126 } 2127 2128 int 2129 kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap) 2130 { 2131 struct mqueue *mq; 2132 struct file *fp; 2133 int error; 2134 int waitok; 2135 2136 error = getmq_read(td, uap->mqd, &fp, NULL, &mq); 2137 if (error) 2138 return (error); 2139 waitok = !(fp->f_flag & O_NONBLOCK); 2140 error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len, 2141 uap->msg_prio, waitok, uap->abs_timeout); 2142 fdrop(fp, td); 2143 return (error); 2144 } 2145 2146 int 2147 kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap) 2148 { 2149 struct mqueue *mq; 2150 struct file *fp; 2151 int error, waitok; 2152 2153 error = getmq_write(td, uap->mqd, &fp, NULL, &mq); 2154 if (error) 2155 return (error); 2156 waitok = !(fp->f_flag & O_NONBLOCK); 2157 error = mqueue_send(mq, uap->msg_ptr, uap->msg_len, 2158 uap->msg_prio, waitok, uap->abs_timeout); 2159 fdrop(fp, td); 2160 return (error); 2161 } 2162 2163 int 2164 kmq_notify(struct thread *td, struct kmq_notify_args *uap) 2165 { 2166 struct sigevent ev; 2167 struct filedesc *fdp; 2168 struct proc *p; 2169 struct mqueue *mq; 2170 struct file *fp; 2171 struct mqueue_notifier *nt, *newnt = NULL; 2172 int error; 2173 2174 p = td->td_proc; 2175 fdp = td->td_proc->p_fd; 2176 if (uap->sigev) { 2177 error = copyin(uap->sigev, &ev, sizeof(ev)); 2178 if (error) 2179 return (error); 2180 if (ev.sigev_notify != SIGEV_SIGNAL && 2181 ev.sigev_notify != SIGEV_THREAD_ID && 2182 ev.sigev_notify != SIGEV_NONE) 2183 return (EINVAL); 2184 if ((ev.sigev_notify == SIGEV_SIGNAL || 2185 ev.sigev_notify == SIGEV_THREAD_ID) && 2186 !_SIG_VALID(ev.sigev_signo)) 2187 return (EINVAL); 2188 } 2189 error = getmq(td, uap->mqd, &fp, NULL, &mq); 2190 if (error) 2191 return (error); 2192 again: 2193 FILEDESC_SLOCK(fdp); 2194 if (fget_locked(fdp, uap->mqd) != fp) { 2195 FILEDESC_SUNLOCK(fdp); 2196 error = EBADF; 2197 goto out; 2198 } 2199 mtx_lock(&mq->mq_mutex); 2200 FILEDESC_SUNLOCK(fdp); 2201 if (uap->sigev != NULL) { 2202 if (mq->mq_notifier != NULL) { 2203 error = EBUSY; 2204 } else { 2205 PROC_LOCK(p); 2206 nt = notifier_search(p, uap->mqd); 2207 if (nt == NULL) { 2208 if (newnt == NULL) { 2209 PROC_UNLOCK(p); 2210 mtx_unlock(&mq->mq_mutex); 2211 newnt = notifier_alloc(); 2212 goto again; 2213 } 2214 } 2215 2216 if (nt != NULL) { 2217 sigqueue_take(&nt->nt_ksi); 2218 if (newnt != NULL) { 2219 notifier_free(newnt); 2220 newnt = NULL; 2221 } 2222 } else { 2223 nt = newnt; 2224 newnt = NULL; 2225 ksiginfo_init(&nt->nt_ksi); 2226 nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT; 2227 nt->nt_ksi.ksi_code = SI_MESGQ; 2228 
nt->nt_proc = p; 2229 nt->nt_ksi.ksi_mqd = uap->mqd; 2230 notifier_insert(p, nt); 2231 } 2232 nt->nt_sigev = ev; 2233 mq->mq_notifier = nt; 2234 PROC_UNLOCK(p); 2235 /* 2236 * if there is no receivers and message queue 2237 * is not empty, we should send notification 2238 * as soon as possible. 2239 */ 2240 if (mq->mq_receivers == 0 && 2241 !TAILQ_EMPTY(&mq->mq_msgq)) 2242 mqueue_send_notification(mq); 2243 } 2244 } else { 2245 notifier_remove(p, mq, uap->mqd); 2246 } 2247 mtx_unlock(&mq->mq_mutex); 2248 2249 out: 2250 fdrop(fp, td); 2251 if (newnt != NULL) 2252 notifier_free(newnt); 2253 return (error); 2254 } 2255 2256 static void 2257 mqueue_fdclose(struct thread *td, int fd, struct file *fp) 2258 { 2259 struct filedesc *fdp; 2260 struct mqueue *mq; 2261 2262 fdp = td->td_proc->p_fd; 2263 FILEDESC_LOCK_ASSERT(fdp); 2264 2265 if (fp->f_ops == &mqueueops) { 2266 mq = FPTOMQ(fp); 2267 mtx_lock(&mq->mq_mutex); 2268 notifier_remove(td->td_proc, mq, fd); 2269 2270 /* have to wakeup thread in same process */ 2271 if (mq->mq_flags & MQ_RSEL) { 2272 mq->mq_flags &= ~MQ_RSEL; 2273 selwakeup(&mq->mq_rsel); 2274 } 2275 if (mq->mq_flags & MQ_WSEL) { 2276 mq->mq_flags &= ~MQ_WSEL; 2277 selwakeup(&mq->mq_wsel); 2278 } 2279 mtx_unlock(&mq->mq_mutex); 2280 } 2281 } 2282 2283 static void 2284 mq_proc_exit(void *arg __unused, struct proc *p) 2285 { 2286 struct filedesc *fdp; 2287 struct file *fp; 2288 struct mqueue *mq; 2289 int i; 2290 2291 fdp = p->p_fd; 2292 FILEDESC_SLOCK(fdp); 2293 for (i = 0; i < fdp->fd_nfiles; ++i) { 2294 fp = fget_locked(fdp, i); 2295 if (fp != NULL && fp->f_ops == &mqueueops) { 2296 mq = FPTOMQ(fp); 2297 mtx_lock(&mq->mq_mutex); 2298 notifier_remove(p, FPTOMQ(fp), i); 2299 mtx_unlock(&mq->mq_mutex); 2300 } 2301 } 2302 FILEDESC_SUNLOCK(fdp); 2303 KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left")); 2304 } 2305 2306 static int 2307 mqf_read(struct file *fp, struct uio *uio, struct ucred *active_cred, 2308 int flags, struct thread *td) 2309 { 2310 return (EOPNOTSUPP); 2311 } 2312 2313 static int 2314 mqf_write(struct file *fp, struct uio *uio, struct ucred *active_cred, 2315 int flags, struct thread *td) 2316 { 2317 return (EOPNOTSUPP); 2318 } 2319 2320 static int 2321 mqf_ioctl(struct file *fp, u_long cmd, void *data, 2322 struct ucred *active_cred, struct thread *td) 2323 { 2324 return (ENOTTY); 2325 } 2326 2327 static int 2328 mqf_poll(struct file *fp, int events, struct ucred *active_cred, 2329 struct thread *td) 2330 { 2331 struct mqueue *mq = FPTOMQ(fp); 2332 int revents = 0; 2333 2334 mtx_lock(&mq->mq_mutex); 2335 if (events & (POLLIN | POLLRDNORM)) { 2336 if (mq->mq_curmsgs) { 2337 revents |= events & (POLLIN | POLLRDNORM); 2338 } else { 2339 mq->mq_flags |= MQ_RSEL; 2340 selrecord(td, &mq->mq_rsel); 2341 } 2342 } 2343 if (events & POLLOUT) { 2344 if (mq->mq_curmsgs < mq->mq_maxmsg) 2345 revents |= POLLOUT; 2346 else { 2347 mq->mq_flags |= MQ_WSEL; 2348 selrecord(td, &mq->mq_wsel); 2349 } 2350 } 2351 mtx_unlock(&mq->mq_mutex); 2352 return (revents); 2353 } 2354 2355 static int 2356 mqf_close(struct file *fp, struct thread *td) 2357 { 2358 struct mqfs_node *pn; 2359 2360 fp->f_ops = &badfileops; 2361 pn = fp->f_data; 2362 fp->f_data = NULL; 2363 sx_xlock(&mqfs_data.mi_lock); 2364 mqnode_release(pn); 2365 sx_xunlock(&mqfs_data.mi_lock); 2366 return (0); 2367 } 2368 2369 static int 2370 mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 2371 struct thread *td) 2372 { 2373 struct mqfs_node *pn = fp->f_data; 2374 2375 bzero(st, sizeof *st); 2376 
st->st_atimespec = pn->mn_atime; 2377 st->st_mtimespec = pn->mn_mtime; 2378 st->st_ctimespec = pn->mn_ctime; 2379 st->st_birthtimespec = pn->mn_birth; 2380 st->st_uid = pn->mn_uid; 2381 st->st_gid = pn->mn_gid; 2382 st->st_mode = S_IFIFO | pn->mn_mode; 2383 return (0); 2384 } 2385 2386 static int 2387 mqf_kqfilter(struct file *fp, struct knote *kn) 2388 { 2389 struct mqueue *mq = FPTOMQ(fp); 2390 int error = 0; 2391 2392 if (kn->kn_filter == EVFILT_READ) { 2393 kn->kn_fop = &mq_rfiltops; 2394 knlist_add(&mq->mq_rsel.si_note, kn, 0); 2395 } else if (kn->kn_filter == EVFILT_WRITE) { 2396 kn->kn_fop = &mq_wfiltops; 2397 knlist_add(&mq->mq_wsel.si_note, kn, 0); 2398 } else 2399 error = EINVAL; 2400 return (error); 2401 } 2402 2403 static void 2404 filt_mqdetach(struct knote *kn) 2405 { 2406 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2407 2408 if (kn->kn_filter == EVFILT_READ) 2409 knlist_remove(&mq->mq_rsel.si_note, kn, 0); 2410 else if (kn->kn_filter == EVFILT_WRITE) 2411 knlist_remove(&mq->mq_wsel.si_note, kn, 0); 2412 else 2413 panic("filt_mqdetach"); 2414 } 2415 2416 static int 2417 filt_mqread(struct knote *kn, long hint) 2418 { 2419 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2420 2421 mtx_assert(&mq->mq_mutex, MA_OWNED); 2422 return (mq->mq_curmsgs != 0); 2423 } 2424 2425 static int 2426 filt_mqwrite(struct knote *kn, long hint) 2427 { 2428 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2429 2430 mtx_assert(&mq->mq_mutex, MA_OWNED); 2431 return (mq->mq_curmsgs < mq->mq_maxmsg); 2432 } 2433 2434 static struct fileops mqueueops = { 2435 .fo_read = mqf_read, 2436 .fo_write = mqf_write, 2437 .fo_ioctl = mqf_ioctl, 2438 .fo_poll = mqf_poll, 2439 .fo_kqfilter = mqf_kqfilter, 2440 .fo_stat = mqf_stat, 2441 .fo_close = mqf_close 2442 }; 2443 2444 static struct vop_vector mqfs_vnodeops = { 2445 .vop_default = &default_vnodeops, 2446 .vop_access = mqfs_access, 2447 .vop_cachedlookup = mqfs_lookup, 2448 .vop_lookup = vfs_cache_lookup, 2449 .vop_reclaim = mqfs_reclaim, 2450 .vop_create = mqfs_create, 2451 .vop_remove = mqfs_remove, 2452 .vop_inactive = mqfs_inactive, 2453 .vop_open = mqfs_open, 2454 .vop_close = mqfs_close, 2455 .vop_getattr = mqfs_getattr, 2456 .vop_setattr = mqfs_setattr, 2457 .vop_read = mqfs_read, 2458 .vop_write = VOP_EOPNOTSUPP, 2459 .vop_readdir = mqfs_readdir, 2460 .vop_mkdir = VOP_EOPNOTSUPP, 2461 .vop_rmdir = VOP_EOPNOTSUPP 2462 }; 2463 2464 static struct vfsops mqfs_vfsops = { 2465 .vfs_init = mqfs_init, 2466 .vfs_uninit = mqfs_uninit, 2467 .vfs_mount = mqfs_mount, 2468 .vfs_unmount = mqfs_unmount, 2469 .vfs_root = mqfs_root, 2470 .vfs_statfs = mqfs_statfs, 2471 }; 2472 2473 SYSCALL_MODULE_HELPER(kmq_open); 2474 SYSCALL_MODULE_HELPER(kmq_setattr); 2475 SYSCALL_MODULE_HELPER(kmq_timedsend); 2476 SYSCALL_MODULE_HELPER(kmq_timedreceive); 2477 SYSCALL_MODULE_HELPER(kmq_notify); 2478 SYSCALL_MODULE_HELPER(kmq_unlink); 2479 2480 VFS_SET(mqfs_vfsops, mqueuefs, VFCF_SYNTHETIC); 2481 MODULE_VERSION(mqueuefs, 1); 2482