/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * POSIX message queue implementation.
 *
 * 1) An mqueue filesystem can be mounted; each message queue appears in
 *    the mounted directory, and the user can change a queue's permissions
 *    and ownership or remove a queue.  Manually creating a file in the
 *    directory creates a kernel message queue with default attributes and
 *    the same name; this method is discouraged, since the mq_open syscall
 *    allows the user to specify different attributes.  The filesystem may
 *    also be mounted multiple times at different mount points, but all
 *    mounts show the same contents.
 *
 * 2) Standard POSIX message queue API.  The syscalls do not go through the
 *    VFS layer but operate directly on the internal data structures; this
 *    allows the IPC facility to be used without mounting the mqueue
 *    filesystem.
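 *
 * Illustrative userland sketch only (not part of this file): the POSIX
 * mq_* functions declared in <mqueue.h> are the front end for the kmq_*
 * syscalls implemented below.  The queue name is made up for the example
 * and error handling is omitted:
 *
 *	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 128 };
 *	mqd_t mqd = mq_open("/myqueue", O_RDWR | O_CREAT, 0600, &attr);
 *	char buf[128];
 *	unsigned int prio;
 *
 *	mq_send(mqd, "hello", 5, 0);
 *	mq_receive(mqd, buf, sizeof(buf), &prio);
 *	mq_close(mqd);
 *	mq_unlink("/myqueue");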
43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include <sys/param.h> 49 #include <sys/kernel.h> 50 #include <sys/systm.h> 51 #include <sys/limits.h> 52 #include <sys/buf.h> 53 #include <sys/dirent.h> 54 #include <sys/event.h> 55 #include <sys/eventhandler.h> 56 #include <sys/fcntl.h> 57 #include <sys/file.h> 58 #include <sys/filedesc.h> 59 #include <sys/lock.h> 60 #include <sys/malloc.h> 61 #include <sys/module.h> 62 #include <sys/mount.h> 63 #include <sys/mqueue.h> 64 #include <sys/mutex.h> 65 #include <sys/namei.h> 66 #include <sys/posix4.h> 67 #include <sys/poll.h> 68 #include <sys/priv.h> 69 #include <sys/proc.h> 70 #include <sys/queue.h> 71 #include <sys/sysproto.h> 72 #include <sys/stat.h> 73 #include <sys/syscall.h> 74 #include <sys/syscallsubr.h> 75 #include <sys/sysent.h> 76 #include <sys/sx.h> 77 #include <sys/sysctl.h> 78 #include <sys/taskqueue.h> 79 #include <sys/unistd.h> 80 #include <sys/vnode.h> 81 #include <machine/atomic.h> 82 83 /* 84 * Limits and constants 85 */ 86 #define MQFS_NAMELEN NAME_MAX 87 #define MQFS_DELEN (8 + MQFS_NAMELEN) 88 89 /* node types */ 90 typedef enum { 91 mqfstype_none = 0, 92 mqfstype_root, 93 mqfstype_dir, 94 mqfstype_this, 95 mqfstype_parent, 96 mqfstype_file, 97 mqfstype_symlink, 98 } mqfs_type_t; 99 100 struct mqfs_node; 101 102 /* 103 * mqfs_info: describes a mqfs instance 104 */ 105 struct mqfs_info { 106 struct sx mi_lock; 107 struct mqfs_node *mi_root; 108 struct unrhdr *mi_unrhdr; 109 }; 110 111 struct mqfs_vdata { 112 LIST_ENTRY(mqfs_vdata) mv_link; 113 struct mqfs_node *mv_node; 114 struct vnode *mv_vnode; 115 struct task mv_task; 116 }; 117 118 /* 119 * mqfs_node: describes a node (file or directory) within a mqfs 120 */ 121 struct mqfs_node { 122 char mn_name[MQFS_NAMELEN+1]; 123 struct mqfs_info *mn_info; 124 struct mqfs_node *mn_parent; 125 LIST_HEAD(,mqfs_node) mn_children; 126 LIST_ENTRY(mqfs_node) mn_sibling; 127 LIST_HEAD(,mqfs_vdata) mn_vnodes; 128 int mn_refcount; 129 mqfs_type_t mn_type; 130 int mn_deleted; 131 u_int32_t mn_fileno; 132 void *mn_data; 133 struct timespec mn_birth; 134 struct timespec mn_ctime; 135 struct timespec mn_atime; 136 struct timespec mn_mtime; 137 uid_t mn_uid; 138 gid_t mn_gid; 139 int mn_mode; 140 }; 141 142 #define VTON(vp) (((struct mqfs_vdata *)((vp)->v_data))->mv_node) 143 #define VTOMQ(vp) ((struct mqueue *)(VTON(vp)->mn_data)) 144 #define VFSTOMQFS(m) ((struct mqfs_info *)((m)->mnt_data)) 145 #define FPTOMQ(fp) ((struct mqueue *)(((struct mqfs_node *) \ 146 (fp)->f_data)->mn_data)) 147 148 TAILQ_HEAD(msgq, mqueue_msg); 149 150 struct mqueue; 151 152 struct mqueue_notifier { 153 LIST_ENTRY(mqueue_notifier) nt_link; 154 struct sigevent nt_sigev; 155 ksiginfo_t nt_ksi; 156 struct proc *nt_proc; 157 }; 158 159 struct mqueue { 160 struct mtx mq_mutex; 161 int mq_flags; 162 long mq_maxmsg; 163 long mq_msgsize; 164 long mq_curmsgs; 165 long mq_totalbytes; 166 struct msgq mq_msgq; 167 int mq_receivers; 168 int mq_senders; 169 struct selinfo mq_rsel; 170 struct selinfo mq_wsel; 171 struct mqueue_notifier *mq_notifier; 172 }; 173 174 #define MQ_RSEL 0x01 175 #define MQ_WSEL 0x02 176 177 struct mqueue_msg { 178 TAILQ_ENTRY(mqueue_msg) msg_link; 179 unsigned int msg_prio; 180 unsigned int msg_size; 181 /* following real data... 
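	 * the payload (msg_size bytes) is stored directly after this
	 * header: mqueue_loadmsg() makes a single allocation of
	 * sizeof(struct mqueue_msg) + msg_size and copies the user data
	 * in behind the header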
*/ 182 }; 183 184 SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW, 0, 185 "POSIX real time message queue"); 186 187 static int default_maxmsg = 10; 188 static int default_msgsize = 1024; 189 190 static int maxmsg = 100; 191 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW, 192 &maxmsg, 0, "Default maximum messages in queue"); 193 static int maxmsgsize = 16384; 194 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW, 195 &maxmsgsize, 0, "Default maximum message size"); 196 static int maxmq = 100; 197 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW, 198 &maxmq, 0, "maximum message queues"); 199 static int curmq = 0; 200 SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW, 201 &curmq, 0, "current message queue number"); 202 static int unloadable = 0; 203 static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data"); 204 205 static eventhandler_tag exit_tag; 206 207 /* Only one instance per-system */ 208 static struct mqfs_info mqfs_data; 209 static uma_zone_t mqnode_zone; 210 static uma_zone_t mqueue_zone; 211 static uma_zone_t mvdata_zone; 212 static uma_zone_t mqnoti_zone; 213 static struct vop_vector mqfs_vnodeops; 214 static struct fileops mqueueops; 215 216 /* 217 * Directory structure construction and manipulation 218 */ 219 #ifdef notyet 220 static struct mqfs_node *mqfs_create_dir(struct mqfs_node *parent, 221 const char *name, int namelen, struct ucred *cred, int mode); 222 static struct mqfs_node *mqfs_create_link(struct mqfs_node *parent, 223 const char *name, int namelen, struct ucred *cred, int mode); 224 #endif 225 226 static struct mqfs_node *mqfs_create_file(struct mqfs_node *parent, 227 const char *name, int namelen, struct ucred *cred, int mode); 228 static int mqfs_destroy(struct mqfs_node *mn); 229 static void mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn); 230 static void mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn); 231 static int mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn); 232 233 /* 234 * Message queue construction and maniplation 235 */ 236 static struct mqueue *mqueue_alloc(const struct mq_attr *attr); 237 static void mqueue_free(struct mqueue *mq); 238 static int mqueue_send(struct mqueue *mq, const char *msg_ptr, 239 size_t msg_len, unsigned msg_prio, int waitok, 240 const struct timespec *abs_timeout); 241 static int mqueue_receive(struct mqueue *mq, char *msg_ptr, 242 size_t msg_len, unsigned *msg_prio, int waitok, 243 const struct timespec *abs_timeout); 244 static int _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, 245 int timo); 246 static int _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, 247 int timo); 248 static void mqueue_send_notification(struct mqueue *mq); 249 static void mqueue_fdclose(struct thread *td, int fd, struct file *fp); 250 static void mq_proc_exit(void *arg, struct proc *p); 251 252 /* 253 * kqueue filters 254 */ 255 static void filt_mqdetach(struct knote *kn); 256 static int filt_mqread(struct knote *kn, long hint); 257 static int filt_mqwrite(struct knote *kn, long hint); 258 259 struct filterops mq_rfiltops = 260 { 1, NULL, filt_mqdetach, filt_mqread }; 261 struct filterops mq_wfiltops = 262 { 1, NULL, filt_mqdetach, filt_mqwrite }; 263 264 /* 265 * Initialize fileno bitmap 266 */ 267 static void 268 mqfs_fileno_init(struct mqfs_info *mi) 269 { 270 struct unrhdr *up; 271 272 up = new_unrhdr(1, INT_MAX, NULL); 273 mi->mi_unrhdr = up; 274 } 275 276 /* 277 * Tear down fileno bitmap 278 */ 279 static void 280 mqfs_fileno_uninit(struct mqfs_info *mi) 
281 { 282 struct unrhdr *up; 283 284 up = mi->mi_unrhdr; 285 mi->mi_unrhdr = NULL; 286 delete_unrhdr(up); 287 } 288 289 /* 290 * Allocate a file number 291 */ 292 static void 293 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn) 294 { 295 /* make sure our parent has a file number */ 296 if (mn->mn_parent && !mn->mn_parent->mn_fileno) 297 mqfs_fileno_alloc(mi, mn->mn_parent); 298 299 switch (mn->mn_type) { 300 case mqfstype_root: 301 case mqfstype_dir: 302 case mqfstype_file: 303 case mqfstype_symlink: 304 mn->mn_fileno = alloc_unr(mi->mi_unrhdr); 305 break; 306 case mqfstype_this: 307 KASSERT(mn->mn_parent != NULL, 308 ("mqfstype_this node has no parent")); 309 mn->mn_fileno = mn->mn_parent->mn_fileno; 310 break; 311 case mqfstype_parent: 312 KASSERT(mn->mn_parent != NULL, 313 ("mqfstype_parent node has no parent")); 314 if (mn->mn_parent == mi->mi_root) { 315 mn->mn_fileno = mn->mn_parent->mn_fileno; 316 break; 317 } 318 KASSERT(mn->mn_parent->mn_parent != NULL, 319 ("mqfstype_parent node has no grandparent")); 320 mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno; 321 break; 322 default: 323 KASSERT(0, 324 ("mqfs_fileno_alloc() called for unknown type node: %d", 325 mn->mn_type)); 326 break; 327 } 328 } 329 330 /* 331 * Release a file number 332 */ 333 static void 334 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn) 335 { 336 switch (mn->mn_type) { 337 case mqfstype_root: 338 case mqfstype_dir: 339 case mqfstype_file: 340 case mqfstype_symlink: 341 free_unr(mi->mi_unrhdr, mn->mn_fileno); 342 break; 343 case mqfstype_this: 344 case mqfstype_parent: 345 /* ignore these, as they don't "own" their file number */ 346 break; 347 default: 348 KASSERT(0, 349 ("mqfs_fileno_free() called for unknown type node: %d", 350 mn->mn_type)); 351 break; 352 } 353 } 354 355 static __inline struct mqfs_node * 356 mqnode_alloc(void) 357 { 358 return uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO); 359 } 360 361 static __inline void 362 mqnode_free(struct mqfs_node *node) 363 { 364 uma_zfree(mqnode_zone, node); 365 } 366 367 static __inline void 368 mqnode_addref(struct mqfs_node *node) 369 { 370 atomic_fetchadd_int(&node->mn_refcount, 1); 371 } 372 373 static __inline void 374 mqnode_release(struct mqfs_node *node) 375 { 376 int old, exp; 377 378 old = atomic_fetchadd_int(&node->mn_refcount, -1); 379 if (node->mn_type == mqfstype_dir || 380 node->mn_type == mqfstype_root) 381 exp = 3; /* include . and .. 
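		 * ('.' and '..' each hold a reference on their parent
		 * directory via mqfs_add_node(), so an otherwise
		 * unreferenced directory still has a count of 3)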
*/ 382 else 383 exp = 1; 384 if (old == exp) 385 mqfs_destroy(node); 386 } 387 388 /* 389 * Add a node to a directory 390 */ 391 static int 392 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node) 393 { 394 KASSERT(parent != NULL, ("%s(): parent is NULL", __func__)); 395 KASSERT(parent->mn_info != NULL, 396 ("%s(): parent has no mn_info", __func__)); 397 KASSERT(parent->mn_type == mqfstype_dir || 398 parent->mn_type == mqfstype_root, 399 ("%s(): parent is not a directory", __func__)); 400 401 node->mn_info = parent->mn_info; 402 node->mn_parent = parent; 403 LIST_INIT(&node->mn_children); 404 LIST_INIT(&node->mn_vnodes); 405 LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling); 406 mqnode_addref(parent); 407 return (0); 408 } 409 410 static struct mqfs_node * 411 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode, 412 int nodetype) 413 { 414 struct mqfs_node *node; 415 416 node = mqnode_alloc(); 417 strncpy(node->mn_name, name, namelen); 418 node->mn_type = nodetype; 419 node->mn_refcount = 1; 420 getnanotime(&node->mn_birth); 421 node->mn_ctime = node->mn_atime = node->mn_mtime 422 = node->mn_birth; 423 node->mn_uid = cred->cr_uid; 424 node->mn_gid = cred->cr_gid; 425 node->mn_mode = mode; 426 return (node); 427 } 428 429 /* 430 * Create a file 431 */ 432 static struct mqfs_node * 433 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen, 434 struct ucred *cred, int mode) 435 { 436 struct mqfs_node *node; 437 438 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file); 439 if (mqfs_add_node(parent, node) != 0) { 440 mqnode_free(node); 441 return (NULL); 442 } 443 return (node); 444 } 445 446 /* 447 * Add . and .. to a directory 448 */ 449 static int 450 mqfs_fixup_dir(struct mqfs_node *parent) 451 { 452 struct mqfs_node *dir; 453 454 dir = mqnode_alloc(); 455 dir->mn_name[0] = '.'; 456 dir->mn_type = mqfstype_this; 457 dir->mn_refcount = 1; 458 if (mqfs_add_node(parent, dir) != 0) { 459 mqnode_free(dir); 460 return (-1); 461 } 462 463 dir = mqnode_alloc(); 464 dir->mn_name[0] = dir->mn_name[1] = '.'; 465 dir->mn_type = mqfstype_parent; 466 dir->mn_refcount = 1; 467 468 if (mqfs_add_node(parent, dir) != 0) { 469 mqnode_free(dir); 470 return (-1); 471 } 472 473 return (0); 474 } 475 476 #ifdef notyet 477 478 /* 479 * Create a directory 480 */ 481 static struct mqfs_node * 482 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen, 483 struct ucred *cred, int mode) 484 { 485 struct mqfs_node *node; 486 487 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir); 488 if (mqfs_add_node(parent, node) != 0) { 489 mqnode_free(node); 490 return (NULL); 491 } 492 493 if (mqfs_fixup_dir(node) != 0) { 494 mqfs_destroy(node); 495 return (NULL); 496 } 497 return (node); 498 } 499 500 /* 501 * Create a symlink 502 */ 503 static struct mqfs_node * 504 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen, 505 struct ucred *cred, int mode) 506 { 507 struct mqfs_node *node; 508 509 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink); 510 if (mqfs_add_node(parent, node) != 0) { 511 mqnode_free(node); 512 return (NULL); 513 } 514 return (node); 515 } 516 517 #endif 518 519 /* 520 * Destroy a node or a tree of nodes 521 */ 522 static int 523 mqfs_destroy(struct mqfs_node *node) 524 { 525 struct mqfs_node *parent; 526 527 KASSERT(node != NULL, 528 ("%s(): node is NULL", __func__)); 529 KASSERT(node->mn_info != NULL, 530 ("%s(): node has no mn_info", __func__)); 531 532 /* destroy 
children */ 533 if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root) 534 while (! LIST_EMPTY(&node->mn_children)) 535 mqfs_destroy(LIST_FIRST(&node->mn_children)); 536 537 /* unlink from parent */ 538 if ((parent = node->mn_parent) != NULL) { 539 KASSERT(parent->mn_info == node->mn_info, 540 ("%s(): parent has different mn_info", __func__)); 541 LIST_REMOVE(node, mn_sibling); 542 } 543 544 if (node->mn_fileno != 0) 545 mqfs_fileno_free(node->mn_info, node); 546 if (node->mn_data != NULL) 547 mqueue_free(node->mn_data); 548 mqnode_free(node); 549 return (0); 550 } 551 552 /* 553 * Mount a mqfs instance 554 */ 555 static int 556 mqfs_mount(struct mount *mp, struct thread *td) 557 { 558 struct statfs *sbp; 559 560 if (mp->mnt_flag & MNT_UPDATE) 561 return (EOPNOTSUPP); 562 563 mp->mnt_data = &mqfs_data; 564 MNT_ILOCK(mp); 565 mp->mnt_flag |= MNT_LOCAL; 566 mp->mnt_kern_flag |= MNTK_MPSAFE; 567 MNT_IUNLOCK(mp); 568 vfs_getnewfsid(mp); 569 570 sbp = &mp->mnt_stat; 571 vfs_mountedfrom(mp, "mqueue"); 572 sbp->f_bsize = PAGE_SIZE; 573 sbp->f_iosize = PAGE_SIZE; 574 sbp->f_blocks = 1; 575 sbp->f_bfree = 0; 576 sbp->f_bavail = 0; 577 sbp->f_files = 1; 578 sbp->f_ffree = 0; 579 return (0); 580 } 581 582 /* 583 * Unmount a mqfs instance 584 */ 585 static int 586 mqfs_unmount(struct mount *mp, int mntflags, struct thread *td) 587 { 588 int error; 589 590 error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td); 591 return (error); 592 } 593 594 /* 595 * Return a root vnode 596 */ 597 static int 598 mqfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td) 599 { 600 struct mqfs_info *mqfs; 601 int ret; 602 603 mqfs = VFSTOMQFS(mp); 604 sx_xlock(&mqfs->mi_lock); 605 ret = mqfs_allocv(mp, vpp, mqfs->mi_root); 606 sx_xunlock(&mqfs->mi_lock); 607 return (ret); 608 } 609 610 /* 611 * Return filesystem stats 612 */ 613 static int 614 mqfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *td) 615 { 616 /* XXX update statistics */ 617 return (0); 618 } 619 620 /* 621 * Initialize a mqfs instance 622 */ 623 static int 624 mqfs_init(struct vfsconf *vfc) 625 { 626 struct mqfs_node *root; 627 struct mqfs_info *mi; 628 629 mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node), 630 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 631 mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue), 632 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 633 mvdata_zone = uma_zcreate("mvdata", 634 sizeof(struct mqfs_vdata), NULL, NULL, NULL, 635 NULL, UMA_ALIGN_PTR, 0); 636 mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier), 637 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 638 mi = &mqfs_data; 639 sx_init(&mi->mi_lock, "mqfs lock"); 640 /* set up the root diretory */ 641 root = mqfs_create_node("/", 1, curthread->td_ucred, 01777, 642 mqfstype_root); 643 root->mn_info = mi; 644 LIST_INIT(&root->mn_children); 645 LIST_INIT(&root->mn_vnodes); 646 mi->mi_root = root; 647 mqfs_fileno_init(mi); 648 mqfs_fileno_alloc(mi, root); 649 mqfs_fixup_dir(root); 650 exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL, 651 EVENTHANDLER_PRI_ANY); 652 mq_fdclose = mqueue_fdclose; 653 p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING); 654 return (0); 655 } 656 657 /* 658 * Destroy a mqfs instance 659 */ 660 static int 661 mqfs_uninit(struct vfsconf *vfc) 662 { 663 struct mqfs_info *mi; 664 665 if (!unloadable) 666 return (EOPNOTSUPP); 667 EVENTHANDLER_DEREGISTER(process_exit, exit_tag); 668 mi = &mqfs_data; 669 mqfs_destroy(mi->mi_root); 670 
mi->mi_root = NULL; 671 mqfs_fileno_uninit(mi); 672 sx_destroy(&mi->mi_lock); 673 uma_zdestroy(mqnode_zone); 674 uma_zdestroy(mqueue_zone); 675 uma_zdestroy(mvdata_zone); 676 uma_zdestroy(mqnoti_zone); 677 return (0); 678 } 679 680 /* 681 * task routine 682 */ 683 static void 684 do_recycle(void *context, int pending __unused) 685 { 686 struct vnode *vp = (struct vnode *)context; 687 688 vrecycle(vp, curthread); 689 vdrop(vp); 690 } 691 692 /* 693 * Allocate a vnode 694 */ 695 static int 696 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn) 697 { 698 struct mqfs_vdata *vd; 699 int error; 700 701 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 702 if (vd->mv_vnode->v_mount == mp) 703 break; 704 } 705 706 if (vd != NULL) { 707 *vpp = vd->mv_vnode; 708 vget(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread); 709 return (0); 710 } 711 712 error = getnewvnode("mqueue", mp, &mqfs_vnodeops, vpp); 713 if (error) 714 return (error); 715 vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY); 716 error = insmntque(*vpp, mp); 717 if (error != 0) { 718 *vpp = NULLVP; 719 return (error); 720 } 721 vd = uma_zalloc(mvdata_zone, M_WAITOK); 722 (*vpp)->v_data = vd; 723 vd->mv_vnode = *vpp; 724 vd->mv_node = pn; 725 TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp); 726 LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link); 727 mqnode_addref(pn); 728 switch (pn->mn_type) { 729 case mqfstype_root: 730 (*vpp)->v_vflag = VV_ROOT; 731 /* fall through */ 732 case mqfstype_dir: 733 case mqfstype_this: 734 case mqfstype_parent: 735 (*vpp)->v_type = VDIR; 736 break; 737 case mqfstype_file: 738 (*vpp)->v_type = VREG; 739 break; 740 case mqfstype_symlink: 741 (*vpp)->v_type = VLNK; 742 break; 743 case mqfstype_none: 744 KASSERT(0, ("mqfs_allocf called for null node\n")); 745 default: 746 panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type); 747 } 748 return (0); 749 } 750 751 /* 752 * Search a directory entry 753 */ 754 static struct mqfs_node * 755 mqfs_search(struct mqfs_node *pd, const char *name, int len) 756 { 757 struct mqfs_node *pn; 758 759 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 760 if (strncmp(pn->mn_name, name, len) == 0) 761 return (pn); 762 } 763 return (NULL); 764 } 765 766 /* 767 * Look up a file or directory. 
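 *
 * The cases handled below are: "." (return the directory itself),
 * ".." (return the parent, temporarily unlocking dvp around the vnode
 * allocation), an existing named entry (allocate or reuse its vnode),
 * and a missing last component for CREATE/RENAME (return EJUSTRETURN
 * so the caller can create it).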
768 */ 769 static int 770 mqfs_lookupx(struct vop_cachedlookup_args *ap) 771 { 772 struct componentname *cnp; 773 struct vnode *dvp, **vpp; 774 struct mqfs_node *pd; 775 struct mqfs_node *pn; 776 int nameiop, flags, error, namelen; 777 char *pname; 778 struct thread *td; 779 780 cnp = ap->a_cnp; 781 vpp = ap->a_vpp; 782 dvp = ap->a_dvp; 783 pname = cnp->cn_nameptr; 784 namelen = cnp->cn_namelen; 785 td = cnp->cn_thread; 786 flags = cnp->cn_flags; 787 nameiop = cnp->cn_nameiop; 788 pd = VTON(dvp); 789 pn = NULL; 790 *vpp = NULLVP; 791 792 if (dvp->v_type != VDIR) 793 return (ENOTDIR); 794 795 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread); 796 if (error) 797 return (error); 798 799 /* shortcut: check if the name is too long */ 800 if (cnp->cn_namelen >= MQFS_NAMELEN) 801 return (ENOENT); 802 803 /* self */ 804 if (namelen == 1 && pname[0] == '.') { 805 if ((flags & ISLASTCN) && nameiop != LOOKUP) 806 return (EINVAL); 807 pn = pd; 808 *vpp = dvp; 809 VREF(dvp); 810 return (0); 811 } 812 813 /* parent */ 814 if (cnp->cn_flags & ISDOTDOT) { 815 if (dvp->v_vflag & VV_ROOT) 816 return (EIO); 817 if ((flags & ISLASTCN) && nameiop != LOOKUP) 818 return (EINVAL); 819 VOP_UNLOCK(dvp, 0); 820 KASSERT(pd->mn_parent, ("non-root directory has no parent")); 821 pn = pd->mn_parent; 822 error = mqfs_allocv(dvp->v_mount, vpp, pn); 823 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); 824 return (error); 825 } 826 827 /* named node */ 828 pn = mqfs_search(pd, pname, namelen); 829 830 /* found */ 831 if (pn != NULL) { 832 /* DELETE */ 833 if (nameiop == DELETE && (flags & ISLASTCN)) { 834 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 835 if (error) 836 return (error); 837 if (*vpp == dvp) { 838 VREF(dvp); 839 *vpp = dvp; 840 return (0); 841 } 842 } 843 844 /* allocate vnode */ 845 error = mqfs_allocv(dvp->v_mount, vpp, pn); 846 if (error == 0 && cnp->cn_flags & MAKEENTRY) 847 cache_enter(dvp, *vpp, cnp); 848 return (error); 849 } 850 851 /* not found */ 852 853 /* will create a new entry in the directory ? 
*/ 854 if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT) 855 && (flags & ISLASTCN)) { 856 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 857 if (error) 858 return (error); 859 cnp->cn_flags |= SAVENAME; 860 return (EJUSTRETURN); 861 } 862 return (ENOENT); 863 } 864 865 #if 0 866 struct vop_lookup_args { 867 struct vop_generic_args a_gen; 868 struct vnode *a_dvp; 869 struct vnode **a_vpp; 870 struct componentname *a_cnp; 871 }; 872 #endif 873 874 /* 875 * vnode lookup operation 876 */ 877 static int 878 mqfs_lookup(struct vop_cachedlookup_args *ap) 879 { 880 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 881 int rc; 882 883 sx_xlock(&mqfs->mi_lock); 884 rc = mqfs_lookupx(ap); 885 sx_xunlock(&mqfs->mi_lock); 886 return (rc); 887 } 888 889 #if 0 890 struct vop_create_args { 891 struct vnode *a_dvp; 892 struct vnode **a_vpp; 893 struct componentname *a_cnp; 894 struct vattr *a_vap; 895 }; 896 #endif 897 898 /* 899 * vnode creation operation 900 */ 901 static int 902 mqfs_create(struct vop_create_args *ap) 903 { 904 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 905 struct componentname *cnp = ap->a_cnp; 906 struct mqfs_node *pd; 907 struct mqfs_node *pn; 908 struct mqueue *mq; 909 int error; 910 911 pd = VTON(ap->a_dvp); 912 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 913 return (ENOTDIR); 914 mq = mqueue_alloc(NULL); 915 if (mq == NULL) 916 return (EAGAIN); 917 sx_xlock(&mqfs->mi_lock); 918 #if 0 919 /* named node */ 920 pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen); 921 if (pn != NULL) { 922 mqueue_free(mq); 923 sx_xunlock(&mqfs->mi_lock); 924 return (EEXIST); 925 } 926 #else 927 if ((cnp->cn_flags & HASBUF) == 0) 928 panic("%s: no name", __func__); 929 #endif 930 pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen, 931 cnp->cn_cred, ap->a_vap->va_mode); 932 if (pn == NULL) 933 error = ENOSPC; 934 else { 935 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 936 if (error) 937 mqfs_destroy(pn); 938 else 939 pn->mn_data = mq; 940 } 941 sx_xunlock(&mqfs->mi_lock); 942 if (error) 943 mqueue_free(mq); 944 return (error); 945 } 946 947 /* 948 * Remove an entry 949 */ 950 static 951 int do_unlink(struct mqfs_node *pn, struct ucred *ucred) 952 { 953 struct mqfs_node *parent; 954 struct mqfs_vdata *vd; 955 int error = 0; 956 957 sx_assert(&pn->mn_info->mi_lock, SX_LOCKED); 958 959 if (ucred->cr_uid != pn->mn_uid && 960 (error = priv_check_cred(ucred, PRIV_MQ_ADMIN, 0)) != 0) 961 error = EACCES; 962 else if (!pn->mn_deleted) { 963 parent = pn->mn_parent; 964 pn->mn_parent = NULL; 965 pn->mn_deleted = 1; 966 LIST_REMOVE(pn, mn_sibling); 967 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 968 cache_purge(vd->mv_vnode); 969 vhold(vd->mv_vnode); 970 taskqueue_enqueue(taskqueue_thread, &vd->mv_task); 971 } 972 mqnode_release(pn); 973 mqnode_release(parent); 974 } else 975 error = ENOENT; 976 return (error); 977 } 978 979 #if 0 980 struct vop_remove_args { 981 struct vnode *a_dvp; 982 struct vnode *a_vp; 983 struct componentname *a_cnp; 984 }; 985 #endif 986 987 /* 988 * vnode removal operation 989 */ 990 static int 991 mqfs_remove(struct vop_remove_args *ap) 992 { 993 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 994 struct mqfs_node *pn; 995 int error; 996 997 if (ap->a_vp->v_type == VDIR) 998 return (EPERM); 999 pn = VTON(ap->a_vp); 1000 sx_xlock(&mqfs->mi_lock); 1001 error = do_unlink(pn, ap->a_cnp->cn_cred); 1002 sx_xunlock(&mqfs->mi_lock); 1003 return (error); 1004 } 1005 1006 #if 0 1007 struct vop_inactive_args 
{ 1008 struct vnode *a_vp; 1009 struct thread *a_td; 1010 }; 1011 #endif 1012 1013 static int 1014 mqfs_inactive(struct vop_inactive_args *ap) 1015 { 1016 struct mqfs_node *pn = VTON(ap->a_vp); 1017 1018 if (pn->mn_deleted) 1019 vrecycle(ap->a_vp, ap->a_td); 1020 return (0); 1021 } 1022 1023 #if 0 1024 struct vop_reclaim_args { 1025 struct vop_generic_args a_gen; 1026 struct vnode *a_vp; 1027 struct thread *a_td; 1028 }; 1029 #endif 1030 1031 static int 1032 mqfs_reclaim(struct vop_reclaim_args *ap) 1033 { 1034 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount); 1035 struct vnode *vp = ap->a_vp; 1036 struct mqfs_node *pn; 1037 struct mqfs_vdata *vd; 1038 1039 vd = vp->v_data; 1040 pn = vd->mv_node; 1041 sx_xlock(&mqfs->mi_lock); 1042 vp->v_data = NULL; 1043 LIST_REMOVE(vd, mv_link); 1044 uma_zfree(mvdata_zone, vd); 1045 mqnode_release(pn); 1046 sx_xunlock(&mqfs->mi_lock); 1047 return (0); 1048 } 1049 1050 #if 0 1051 struct vop_open_args { 1052 struct vop_generic_args a_gen; 1053 struct vnode *a_vp; 1054 int a_mode; 1055 struct ucred *a_cred; 1056 struct thread *a_td; 1057 int a_fdidx; 1058 }; 1059 #endif 1060 1061 static int 1062 mqfs_open(struct vop_open_args *ap) 1063 { 1064 return (0); 1065 } 1066 1067 #if 0 1068 struct vop_close_args { 1069 struct vop_generic_args a_gen; 1070 struct vnode *a_vp; 1071 int a_fflag; 1072 struct ucred *a_cred; 1073 struct thread *a_td; 1074 }; 1075 #endif 1076 1077 static int 1078 mqfs_close(struct vop_close_args *ap) 1079 { 1080 return (0); 1081 } 1082 1083 #if 0 1084 struct vop_access_args { 1085 struct vop_generic_args a_gen; 1086 struct vnode *a_vp; 1087 int a_mode; 1088 struct ucred *a_cred; 1089 struct thread *a_td; 1090 }; 1091 #endif 1092 1093 /* 1094 * Verify permissions 1095 */ 1096 static int 1097 mqfs_access(struct vop_access_args *ap) 1098 { 1099 struct vnode *vp = ap->a_vp; 1100 struct vattr vattr; 1101 int error; 1102 1103 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td); 1104 if (error) 1105 return (error); 1106 error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid, 1107 vattr.va_gid, ap->a_mode, ap->a_cred, NULL); 1108 return (error); 1109 } 1110 1111 #if 0 1112 struct vop_getattr_args { 1113 struct vop_generic_args a_gen; 1114 struct vnode *a_vp; 1115 struct vattr *a_vap; 1116 struct ucred *a_cred; 1117 struct thread *a_td; 1118 }; 1119 #endif 1120 1121 /* 1122 * Get file attributes 1123 */ 1124 static int 1125 mqfs_getattr(struct vop_getattr_args *ap) 1126 { 1127 struct vnode *vp = ap->a_vp; 1128 struct mqfs_node *pn = VTON(vp); 1129 struct vattr *vap = ap->a_vap; 1130 int error = 0; 1131 1132 VATTR_NULL(vap); 1133 vap->va_type = vp->v_type; 1134 vap->va_mode = pn->mn_mode; 1135 vap->va_nlink = 1; 1136 vap->va_uid = pn->mn_uid; 1137 vap->va_gid = pn->mn_gid; 1138 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; 1139 vap->va_fileid = pn->mn_fileno; 1140 vap->va_size = 0; 1141 vap->va_blocksize = PAGE_SIZE; 1142 vap->va_bytes = vap->va_size = 0; 1143 vap->va_atime = pn->mn_atime; 1144 vap->va_mtime = pn->mn_mtime; 1145 vap->va_ctime = pn->mn_ctime; 1146 vap->va_birthtime = pn->mn_birth; 1147 vap->va_gen = 0; 1148 vap->va_flags = 0; 1149 vap->va_rdev = 0; 1150 vap->va_bytes = 0; 1151 vap->va_filerev = 0; 1152 vap->va_vaflags = 0; 1153 return (error); 1154 } 1155 1156 #if 0 1157 struct vop_setattr_args { 1158 struct vop_generic_args a_gen; 1159 struct vnode *a_vp; 1160 struct vattr *a_vap; 1161 struct ucred *a_cred; 1162 struct thread *a_td; 1163 }; 1164 #endif 1165 /* 1166 * Set attributes 1167 */ 1168 static int 1169 
mqfs_setattr(struct vop_setattr_args *ap) 1170 { 1171 struct mqfs_node *pn; 1172 struct vattr *vap; 1173 struct vnode *vp; 1174 int c, error; 1175 uid_t uid; 1176 gid_t gid; 1177 1178 vap = ap->a_vap; 1179 vp = ap->a_vp; 1180 if ((vap->va_type != VNON) || 1181 (vap->va_nlink != VNOVAL) || 1182 (vap->va_fsid != VNOVAL) || 1183 (vap->va_fileid != VNOVAL) || 1184 (vap->va_blocksize != VNOVAL) || 1185 (vap->va_flags != VNOVAL && vap->va_flags != 0) || 1186 (vap->va_rdev != VNOVAL) || 1187 ((int)vap->va_bytes != VNOVAL) || 1188 (vap->va_gen != VNOVAL)) { 1189 return (EINVAL); 1190 } 1191 1192 pn = VTON(vp); 1193 1194 error = c = 0; 1195 if (vap->va_uid == (uid_t)VNOVAL) 1196 uid = pn->mn_uid; 1197 else 1198 uid = vap->va_uid; 1199 if (vap->va_gid == (gid_t)VNOVAL) 1200 gid = pn->mn_gid; 1201 else 1202 gid = vap->va_gid; 1203 1204 if (uid != pn->mn_uid || gid != pn->mn_gid) { 1205 /* 1206 * To modify the ownership of a file, must possess VADMIN 1207 * for that file. 1208 */ 1209 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td))) 1210 return (error); 1211 1212 /* 1213 * XXXRW: Why is there a privilege check here: shouldn't the 1214 * check in VOP_ACCESS() be enough? Also, are the group bits 1215 * below definitely right? 1216 */ 1217 if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid || 1218 (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) && 1219 (error = priv_check(ap->a_td, PRIV_MQ_ADMIN)) != 0) 1220 return (error); 1221 pn->mn_uid = uid; 1222 pn->mn_gid = gid; 1223 c = 1; 1224 } 1225 1226 if (vap->va_mode != (mode_t)VNOVAL) { 1227 if ((ap->a_cred->cr_uid != pn->mn_uid) && 1228 (error = priv_check(ap->a_td, PRIV_MQ_ADMIN))) 1229 return (error); 1230 pn->mn_mode = vap->va_mode; 1231 c = 1; 1232 } 1233 1234 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { 1235 /* See the comment in ufs_vnops::ufs_setattr(). */ 1236 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)) && 1237 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || 1238 (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, ap->a_td)))) 1239 return (error); 1240 if (vap->va_atime.tv_sec != VNOVAL) { 1241 pn->mn_atime = vap->va_atime; 1242 } 1243 if (vap->va_mtime.tv_sec != VNOVAL) { 1244 pn->mn_mtime = vap->va_mtime; 1245 } 1246 c = 1; 1247 } 1248 if (c) { 1249 vfs_timestamp(&pn->mn_ctime); 1250 } 1251 return (0); 1252 } 1253 1254 #if 0 1255 struct vop_read_args { 1256 struct vop_generic_args a_gen; 1257 struct vnode *a_vp; 1258 struct uio *a_uio; 1259 int a_ioflag; 1260 struct ucred *a_cred; 1261 }; 1262 #endif 1263 1264 /* 1265 * Read from a file 1266 */ 1267 static int 1268 mqfs_read(struct vop_read_args *ap) 1269 { 1270 char buf[80]; 1271 struct vnode *vp = ap->a_vp; 1272 struct uio *uio = ap->a_uio; 1273 struct mqfs_node *pn; 1274 struct mqueue *mq; 1275 int len, error; 1276 1277 if (vp->v_type != VREG) 1278 return (EINVAL); 1279 1280 pn = VTON(vp); 1281 mq = VTOMQ(vp); 1282 snprintf(buf, sizeof(buf), 1283 "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n", 1284 mq->mq_totalbytes, 1285 mq->mq_maxmsg, 1286 mq->mq_curmsgs, 1287 mq->mq_msgsize); 1288 buf[sizeof(buf)-1] = '\0'; 1289 len = strlen(buf); 1290 error = uiomove_frombuf(buf, len, uio); 1291 return (error); 1292 } 1293 1294 #if 0 1295 struct vop_readdir_args { 1296 struct vop_generic_args a_gen; 1297 struct vnode *a_vp; 1298 struct uio *a_uio; 1299 struct ucred *a_cred; 1300 int *a_eofflag; 1301 int *a_ncookies; 1302 u_long **a_cookies; 1303 }; 1304 #endif 1305 1306 /* 1307 * Return directory entries. 
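 *
 * Entries are emitted through vfs_read_dirent(); the directory offset
 * is a synthetic byte offset accumulated from d_reclen.  Cookies are
 * not generated: the caller's cookie count is forced to zero while the
 * entries are copied out.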
1308 */ 1309 static int 1310 mqfs_readdir(struct vop_readdir_args *ap) 1311 { 1312 struct vnode *vp; 1313 struct mqfs_info *mi; 1314 struct mqfs_node *pd; 1315 struct mqfs_node *pn; 1316 struct dirent entry; 1317 struct uio *uio; 1318 int *tmp_ncookies = NULL; 1319 off_t offset; 1320 int error, i; 1321 1322 vp = ap->a_vp; 1323 mi = VFSTOMQFS(vp->v_mount); 1324 pd = VTON(vp); 1325 uio = ap->a_uio; 1326 1327 if (vp->v_type != VDIR) 1328 return (ENOTDIR); 1329 1330 if (uio->uio_offset < 0) 1331 return (EINVAL); 1332 1333 if (ap->a_ncookies != NULL) { 1334 tmp_ncookies = ap->a_ncookies; 1335 *ap->a_ncookies = 0; 1336 ap->a_ncookies = NULL; 1337 } 1338 1339 error = 0; 1340 offset = 0; 1341 1342 sx_xlock(&mi->mi_lock); 1343 1344 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 1345 entry.d_reclen = sizeof(entry); 1346 if (!pn->mn_fileno) 1347 mqfs_fileno_alloc(mi, pn); 1348 entry.d_fileno = pn->mn_fileno; 1349 for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i) 1350 entry.d_name[i] = pn->mn_name[i]; 1351 entry.d_name[i] = 0; 1352 entry.d_namlen = i; 1353 switch (pn->mn_type) { 1354 case mqfstype_root: 1355 case mqfstype_dir: 1356 case mqfstype_this: 1357 case mqfstype_parent: 1358 entry.d_type = DT_DIR; 1359 break; 1360 case mqfstype_file: 1361 entry.d_type = DT_REG; 1362 break; 1363 case mqfstype_symlink: 1364 entry.d_type = DT_LNK; 1365 break; 1366 default: 1367 panic("%s has unexpected node type: %d", pn->mn_name, 1368 pn->mn_type); 1369 } 1370 if (entry.d_reclen > uio->uio_resid) 1371 break; 1372 if (offset >= uio->uio_offset) { 1373 error = vfs_read_dirent(ap, &entry, offset); 1374 if (error) 1375 break; 1376 } 1377 offset += entry.d_reclen; 1378 } 1379 sx_xunlock(&mi->mi_lock); 1380 1381 uio->uio_offset = offset; 1382 1383 if (tmp_ncookies != NULL) 1384 ap->a_ncookies = tmp_ncookies; 1385 1386 return (error); 1387 } 1388 1389 #ifdef notyet 1390 1391 #if 0 1392 struct vop_mkdir_args { 1393 struct vnode *a_dvp; 1394 struvt vnode **a_vpp; 1395 struvt componentname *a_cnp; 1396 struct vattr *a_vap; 1397 }; 1398 #endif 1399 1400 /* 1401 * Create a directory. 1402 */ 1403 static int 1404 mqfs_mkdir(struct vop_mkdir_args *ap) 1405 { 1406 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1407 struct componentname *cnp = ap->a_cnp; 1408 struct mqfs_node *pd = VTON(ap->a_dvp); 1409 struct mqfs_node *pn; 1410 int error; 1411 1412 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 1413 return (ENOTDIR); 1414 sx_xlock(&mqfs->mi_lock); 1415 #if 0 1416 /* named node */ 1417 pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen); 1418 if (pn != NULL) { 1419 sx_xunlock(&mqfs->mi_lock); 1420 return (EEXIST); 1421 } 1422 #else 1423 if ((cnp->cn_flags & HASBUF) == 0) 1424 panic("%s: no name", __func__); 1425 #endif 1426 pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen, 1427 ap->a_vap->cn_cred, ap->a_vap->va_mode); 1428 if (pn == NULL) 1429 error = ENOSPC; 1430 else 1431 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 1432 sx_xunlock(&mqfs->mi_lock); 1433 return (error); 1434 } 1435 1436 #if 0 1437 struct vop_rmdir_args { 1438 struct vnode *a_dvp; 1439 struct vnode *a_vp; 1440 struct componentname *a_cnp; 1441 }; 1442 #endif 1443 1444 /* 1445 * Remove a directory. 
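 * (Like mqfs_mkdir() above, this is compiled out under 'notyet'; the
 * vnode operation vector at the end of the file maps vop_mkdir and
 * vop_rmdir to VOP_EOPNOTSUPP.)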
1446 */ 1447 static int 1448 mqfs_rmdir(struct vop_rmdir_args *ap) 1449 { 1450 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1451 struct mqfs_node *pn = VTON(ap->a_vp); 1452 struct mqfs_node *pt; 1453 1454 if (pn->mn_type != mqfstype_dir) 1455 return (ENOTDIR); 1456 1457 sx_xlock(&mqfs->mi_lock); 1458 if (pn->mn_deleted) { 1459 sx_xunlock(&mqfs->mi_lock); 1460 return (ENOENT); 1461 } 1462 1463 pt = LIST_FIRST(&pn->mn_children); 1464 pt = LIST_NEXT(pt, mn_sibling); 1465 pt = LIST_NEXT(pt, mn_sibling); 1466 if (pt != NULL) { 1467 sx_xunlock(&mqfs->mi_lock); 1468 return (ENOTEMPTY); 1469 } 1470 pt = pn->mn_parent; 1471 pn->mn_parent = NULL; 1472 pn->mn_deleted = 1; 1473 LIST_REMOVE(pn, mn_sibling); 1474 mqnode_release(pn); 1475 mqnode_release(pt); 1476 sx_xunlock(&mqfs->mi_lock); 1477 cache_purge(ap->a_vp); 1478 return (0); 1479 } 1480 1481 #endif /* notyet */ 1482 1483 /* 1484 * Allocate a message queue 1485 */ 1486 static struct mqueue * 1487 mqueue_alloc(const struct mq_attr *attr) 1488 { 1489 struct mqueue *mq; 1490 1491 if (curmq >= maxmq) 1492 return (NULL); 1493 mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO); 1494 TAILQ_INIT(&mq->mq_msgq); 1495 if (attr != NULL) { 1496 mq->mq_maxmsg = attr->mq_maxmsg; 1497 mq->mq_msgsize = attr->mq_msgsize; 1498 } else { 1499 mq->mq_maxmsg = default_maxmsg; 1500 mq->mq_msgsize = default_msgsize; 1501 } 1502 mtx_init(&mq->mq_mutex, "mqueue", NULL, MTX_DEF); 1503 knlist_init(&mq->mq_rsel.si_note, &mq->mq_mutex, NULL, NULL, NULL); 1504 knlist_init(&mq->mq_wsel.si_note, &mq->mq_mutex, NULL, NULL, NULL); 1505 atomic_add_int(&curmq, 1); 1506 return (mq); 1507 } 1508 1509 /* 1510 * Destroy a message queue 1511 */ 1512 static void 1513 mqueue_free(struct mqueue *mq) 1514 { 1515 struct mqueue_msg *msg; 1516 1517 while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) { 1518 TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link); 1519 FREE(msg, M_MQUEUEDATA); 1520 } 1521 1522 mtx_destroy(&mq->mq_mutex); 1523 knlist_destroy(&mq->mq_rsel.si_note); 1524 knlist_destroy(&mq->mq_wsel.si_note); 1525 uma_zfree(mqueue_zone, mq); 1526 atomic_add_int(&curmq, -1); 1527 } 1528 1529 /* 1530 * Load a message from user space 1531 */ 1532 static struct mqueue_msg * 1533 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio) 1534 { 1535 struct mqueue_msg *msg; 1536 size_t len; 1537 int error; 1538 1539 len = sizeof(struct mqueue_msg) + msg_size; 1540 MALLOC(msg, struct mqueue_msg *, len, M_MQUEUEDATA, M_WAITOK); 1541 error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg), 1542 msg_size); 1543 if (error) { 1544 FREE(msg, M_MQUEUEDATA); 1545 msg = NULL; 1546 } else { 1547 msg->msg_size = msg_size; 1548 msg->msg_prio = msg_prio; 1549 } 1550 return (msg); 1551 } 1552 1553 /* 1554 * Save a message to user space 1555 */ 1556 static int 1557 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio) 1558 { 1559 int error; 1560 1561 error = copyout(((char *)msg) + sizeof(*msg), msg_ptr, 1562 msg->msg_size); 1563 if (error == 0 && msg_prio != NULL) 1564 error = copyout(&msg->msg_prio, msg_prio, sizeof(int)); 1565 return (error); 1566 } 1567 1568 /* 1569 * Free a message's memory 1570 */ 1571 static __inline void 1572 mqueue_freemsg(struct mqueue_msg *msg) 1573 { 1574 FREE(msg, M_MQUEUEDATA); 1575 } 1576 1577 /* 1578 * Send a message. if waitok is false, thread will not be 1579 * blocked if there is no data in queue, otherwise, absolute 1580 * time will be checked. 
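 *
 * A sender blocks only while the queue is full (mq_curmsgs >= mq_maxmsg);
 * with O_NONBLOCK it gets EAGAIN instead.  Illustrative userland sketch
 * of the absolute timeout (variable names are made up, error handling
 * omitted):
 *
 *	struct timespec abstime;
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;			(give up 5 seconds from now)
 *	mq_timedsend(mqd, buf, len, prio, &abstime);
 *
 * The timeout is an absolute wall-clock time, not a relative interval;
 * the loop below recomputes the remaining time with getnanotime() after
 * every wakeup and converts it to ticks with tvtohz().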
1581 */ 1582 int 1583 mqueue_send(struct mqueue *mq, const char *msg_ptr, 1584 size_t msg_len, unsigned msg_prio, int waitok, 1585 const struct timespec *abs_timeout) 1586 { 1587 struct mqueue_msg *msg; 1588 struct timespec ets, ts, ts2; 1589 struct timeval tv; 1590 int error; 1591 1592 if (msg_prio >= MQ_PRIO_MAX) 1593 return (EINVAL); 1594 if (msg_len > mq->mq_msgsize) 1595 return (EMSGSIZE); 1596 msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio); 1597 if (msg == NULL) 1598 return (EFAULT); 1599 1600 /* O_NONBLOCK case */ 1601 if (!waitok) { 1602 error = _mqueue_send(mq, msg, -1); 1603 if (error) 1604 goto bad; 1605 return (0); 1606 } 1607 1608 /* we allow a null timeout (wait forever) */ 1609 if (abs_timeout == NULL) { 1610 error = _mqueue_send(mq, msg, 0); 1611 if (error) 1612 goto bad; 1613 return (0); 1614 } 1615 1616 /* send it before checking time */ 1617 error = _mqueue_send(mq, msg, -1); 1618 if (error == 0) 1619 return (0); 1620 1621 if (error != EAGAIN) 1622 goto bad; 1623 1624 error = copyin(abs_timeout, &ets, sizeof(ets)); 1625 if (error != 0) 1626 goto bad; 1627 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) { 1628 error = EINVAL; 1629 goto bad; 1630 } 1631 for (;;) { 1632 ts2 = ets; 1633 getnanotime(&ts); 1634 timespecsub(&ts2, &ts); 1635 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { 1636 error = ETIMEDOUT; 1637 break; 1638 } 1639 TIMESPEC_TO_TIMEVAL(&tv, &ts2); 1640 error = _mqueue_send(mq, msg, tvtohz(&tv)); 1641 if (error != ETIMEDOUT) 1642 break; 1643 } 1644 if (error == 0) 1645 return (0); 1646 bad: 1647 mqueue_freemsg(msg); 1648 return (error); 1649 } 1650 1651 /* 1652 * Common routine to send a message 1653 */ 1654 static int 1655 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo) 1656 { 1657 struct mqueue_msg *msg2; 1658 int error = 0; 1659 1660 mtx_lock(&mq->mq_mutex); 1661 while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) { 1662 if (timo < 0) { 1663 mtx_unlock(&mq->mq_mutex); 1664 return (EAGAIN); 1665 } 1666 mq->mq_senders++; 1667 error = msleep(&mq->mq_senders, &mq->mq_mutex, 1668 PCATCH, "mqsend", timo); 1669 mq->mq_senders--; 1670 if (error == EAGAIN) 1671 error = ETIMEDOUT; 1672 } 1673 if (mq->mq_curmsgs >= mq->mq_maxmsg) { 1674 mtx_unlock(&mq->mq_mutex); 1675 return (error); 1676 } 1677 error = 0; 1678 if (TAILQ_EMPTY(&mq->mq_msgq)) { 1679 TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link); 1680 } else { 1681 if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) { 1682 TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link); 1683 } else { 1684 TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) { 1685 if (msg2->msg_prio < msg->msg_prio) 1686 break; 1687 } 1688 TAILQ_INSERT_BEFORE(msg2, msg, msg_link); 1689 } 1690 } 1691 mq->mq_curmsgs++; 1692 mq->mq_totalbytes += msg->msg_size; 1693 if (mq->mq_receivers) 1694 wakeup_one(&mq->mq_receivers); 1695 else if (mq->mq_notifier != NULL) 1696 mqueue_send_notification(mq); 1697 if (mq->mq_flags & MQ_RSEL) { 1698 mq->mq_flags &= ~MQ_RSEL; 1699 selwakeup(&mq->mq_rsel); 1700 } 1701 KNOTE_LOCKED(&mq->mq_rsel.si_note, 0); 1702 mtx_unlock(&mq->mq_mutex); 1703 return (0); 1704 } 1705 1706 /* 1707 * Send realtime a signal to process which registered itself 1708 * successfully by mq_notify. 
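 * The registration is one-shot: mq_notifier is cleared below once the
 * signal has been queued, so a process must call mq_notify() again to
 * re-arm notification, as POSIX requires.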
1709 */ 1710 static void 1711 mqueue_send_notification(struct mqueue *mq) 1712 { 1713 struct mqueue_notifier *nt; 1714 struct proc *p; 1715 1716 mtx_assert(&mq->mq_mutex, MA_OWNED); 1717 nt = mq->mq_notifier; 1718 if (nt->nt_sigev.sigev_notify != SIGEV_NONE) { 1719 p = nt->nt_proc; 1720 PROC_LOCK(p); 1721 if (!KSI_ONQ(&nt->nt_ksi)) 1722 psignal_event(p, &nt->nt_sigev, &nt->nt_ksi); 1723 PROC_UNLOCK(p); 1724 } 1725 mq->mq_notifier = NULL; 1726 } 1727 1728 /* 1729 * Get a message. if waitok is false, thread will not be 1730 * blocked if there is no data in queue, otherwise, absolute 1731 * time will be checked. 1732 */ 1733 int 1734 mqueue_receive(struct mqueue *mq, char *msg_ptr, 1735 size_t msg_len, unsigned *msg_prio, int waitok, 1736 const struct timespec *abs_timeout) 1737 { 1738 struct mqueue_msg *msg; 1739 struct timespec ets, ts, ts2; 1740 struct timeval tv; 1741 int error; 1742 1743 if (msg_len < mq->mq_msgsize) 1744 return (EMSGSIZE); 1745 1746 /* O_NONBLOCK case */ 1747 if (!waitok) { 1748 error = _mqueue_recv(mq, &msg, -1); 1749 if (error) 1750 return (error); 1751 goto received; 1752 } 1753 1754 /* we allow a null timeout (wait forever). */ 1755 if (abs_timeout == NULL) { 1756 error = _mqueue_recv(mq, &msg, 0); 1757 if (error) 1758 return (error); 1759 goto received; 1760 } 1761 1762 /* try to get a message before checking time */ 1763 error = _mqueue_recv(mq, &msg, -1); 1764 if (error == 0) 1765 goto received; 1766 1767 if (error != EAGAIN) 1768 return (error); 1769 1770 error = copyin(abs_timeout, &ets, sizeof(ets)); 1771 if (error != 0) 1772 return (error); 1773 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) { 1774 error = EINVAL; 1775 return (error); 1776 } 1777 1778 for (;;) { 1779 ts2 = ets; 1780 getnanotime(&ts); 1781 timespecsub(&ts2, &ts); 1782 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { 1783 error = ETIMEDOUT; 1784 return (error); 1785 } 1786 TIMESPEC_TO_TIMEVAL(&tv, &ts2); 1787 error = _mqueue_recv(mq, &msg, tvtohz(&tv)); 1788 if (error == 0) 1789 break; 1790 if (error != ETIMEDOUT) 1791 return (error); 1792 } 1793 1794 received: 1795 error = mqueue_savemsg(msg, msg_ptr, msg_prio); 1796 if (error == 0) { 1797 curthread->td_retval[0] = msg->msg_size; 1798 curthread->td_retval[1] = 0; 1799 } 1800 mqueue_freemsg(msg); 1801 return (error); 1802 } 1803 1804 /* 1805 * Common routine to receive a message 1806 */ 1807 static int 1808 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo) 1809 { 1810 int error = 0; 1811 1812 mtx_lock(&mq->mq_mutex); 1813 while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) { 1814 if (timo < 0) { 1815 mtx_unlock(&mq->mq_mutex); 1816 return (EAGAIN); 1817 } 1818 mq->mq_receivers++; 1819 error = msleep(&mq->mq_receivers, &mq->mq_mutex, 1820 PCATCH, "mqrecv", timo); 1821 mq->mq_receivers--; 1822 if (error == EAGAIN) 1823 error = ETIMEDOUT; 1824 } 1825 if (*msg != NULL) { 1826 error = 0; 1827 TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link); 1828 mq->mq_curmsgs--; 1829 mq->mq_totalbytes -= (*msg)->msg_size; 1830 if (mq->mq_senders) 1831 wakeup_one(&mq->mq_senders); 1832 if (mq->mq_flags & MQ_WSEL) { 1833 mq->mq_flags &= ~MQ_WSEL; 1834 selwakeup(&mq->mq_wsel); 1835 } 1836 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0); 1837 } 1838 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 && 1839 !TAILQ_EMPTY(&mq->mq_msgq)) { 1840 mqueue_send_notification(mq); 1841 } 1842 mtx_unlock(&mq->mq_mutex); 1843 return (error); 1844 } 1845 1846 static __inline struct mqueue_notifier * 1847 notifier_alloc(void) 1848 { 1849 return 
(uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO)); 1850 } 1851 1852 static __inline void 1853 notifier_free(struct mqueue_notifier *p) 1854 { 1855 uma_zfree(mqnoti_zone, p); 1856 } 1857 1858 static struct mqueue_notifier * 1859 notifier_search(struct proc *p, int fd) 1860 { 1861 struct mqueue_notifier *nt; 1862 1863 LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) { 1864 if (nt->nt_ksi.ksi_mqd == fd) 1865 break; 1866 } 1867 return (nt); 1868 } 1869 1870 static __inline void 1871 notifier_insert(struct proc *p, struct mqueue_notifier *nt) 1872 { 1873 LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link); 1874 } 1875 1876 static __inline void 1877 notifier_delete(struct proc *p, struct mqueue_notifier *nt) 1878 { 1879 LIST_REMOVE(nt, nt_link); 1880 notifier_free(nt); 1881 } 1882 1883 static void 1884 notifier_remove(struct proc *p, struct mqueue *mq, int fd) 1885 { 1886 struct mqueue_notifier *nt; 1887 1888 mtx_assert(&mq->mq_mutex, MA_OWNED); 1889 PROC_LOCK(p); 1890 nt = notifier_search(p, fd); 1891 if (nt != NULL) { 1892 if (mq->mq_notifier == nt) 1893 mq->mq_notifier = NULL; 1894 sigqueue_take(&nt->nt_ksi); 1895 notifier_delete(p, nt); 1896 } 1897 PROC_UNLOCK(p); 1898 } 1899 1900 /* 1901 * Syscall to open a message queue. 1902 */ 1903 int 1904 kmq_open(struct thread *td, struct kmq_open_args *uap) 1905 { 1906 char path[MQFS_NAMELEN + 1]; 1907 struct mq_attr attr, *pattr; 1908 struct mqfs_node *pn; 1909 struct filedesc *fdp; 1910 struct file *fp; 1911 struct mqueue *mq; 1912 int fd, error, len, flags, cmode; 1913 1914 if ((uap->flags & O_ACCMODE) == O_ACCMODE) 1915 return (EINVAL); 1916 1917 fdp = td->td_proc->p_fd; 1918 flags = FFLAGS(uap->flags); 1919 cmode = (((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT); 1920 mq = NULL; 1921 if ((flags & O_CREAT) && (uap->attr != NULL)) { 1922 error = copyin(uap->attr, &attr, sizeof(attr)); 1923 if (error) 1924 return (error); 1925 if (attr.mq_maxmsg <= 0 || attr.mq_maxmsg > maxmsg) 1926 return (EINVAL); 1927 if (attr.mq_msgsize <= 0 || attr.mq_msgsize > maxmsgsize) 1928 return (EINVAL); 1929 pattr = &attr; 1930 } else 1931 pattr = NULL; 1932 1933 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL); 1934 if (error) 1935 return (error); 1936 1937 /* 1938 * The first character of name must be a slash (/) character 1939 * and the remaining characters of name cannot include any slash 1940 * characters. 
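 *
 * For example, "/myqueue" is accepted, while "myqueue" (no leading
 * slash), "/" (nothing after the slash) and "/a/b" (embedded slash)
 * are all rejected with EINVAL by the checks below.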
1941 */ 1942 len = strlen(path); 1943 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL) 1944 return (EINVAL); 1945 1946 error = falloc(td, &fp, &fd); 1947 if (error) 1948 return (error); 1949 1950 sx_xlock(&mqfs_data.mi_lock); 1951 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1); 1952 if (pn == NULL) { 1953 if (!(flags & O_CREAT)) { 1954 error = ENOENT; 1955 } else { 1956 mq = mqueue_alloc(pattr); 1957 if (mq == NULL) { 1958 error = ENFILE; 1959 } else { 1960 pn = mqfs_create_file(mqfs_data.mi_root, 1961 path + 1, len - 1, td->td_ucred, 1962 cmode); 1963 if (pn == NULL) { 1964 error = ENOSPC; 1965 mqueue_free(mq); 1966 } 1967 } 1968 } 1969 1970 if (error == 0) { 1971 pn->mn_data = mq; 1972 } 1973 } else { 1974 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) { 1975 error = EEXIST; 1976 } else { 1977 int acc_mode = 0; 1978 1979 if (flags & FREAD) 1980 acc_mode |= VREAD; 1981 if (flags & FWRITE) 1982 acc_mode |= VWRITE; 1983 error = vaccess(VREG, pn->mn_mode, pn->mn_uid, 1984 pn->mn_gid, acc_mode, td->td_ucred, NULL); 1985 } 1986 } 1987 1988 if (error) { 1989 sx_xunlock(&mqfs_data.mi_lock); 1990 fdclose(fdp, fp, fd, td); 1991 fdrop(fp, td); 1992 return (error); 1993 } 1994 1995 mqnode_addref(pn); 1996 sx_xunlock(&mqfs_data.mi_lock); 1997 1998 finit(fp, flags & (FREAD | FWRITE | O_NONBLOCK), DTYPE_MQUEUE, pn, 1999 &mqueueops); 2000 2001 FILEDESC_XLOCK(fdp); 2002 if (fdp->fd_ofiles[fd] == fp) 2003 fdp->fd_ofileflags[fd] |= UF_EXCLOSE; 2004 FILEDESC_XUNLOCK(fdp); 2005 td->td_retval[0] = fd; 2006 fdrop(fp, td); 2007 return (0); 2008 } 2009 2010 /* 2011 * Syscall to unlink a message queue. 2012 */ 2013 int 2014 kmq_unlink(struct thread *td, struct kmq_unlink_args *uap) 2015 { 2016 char path[MQFS_NAMELEN+1]; 2017 struct mqfs_node *pn; 2018 int error, len; 2019 2020 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL); 2021 if (error) 2022 return (error); 2023 2024 len = strlen(path); 2025 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL) 2026 return (EINVAL); 2027 2028 sx_xlock(&mqfs_data.mi_lock); 2029 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1); 2030 if (pn != NULL) 2031 error = do_unlink(pn, td->td_ucred); 2032 else 2033 error = ENOENT; 2034 sx_xunlock(&mqfs_data.mi_lock); 2035 return (error); 2036 } 2037 2038 typedef int (*_fgetf)(struct thread *, int, struct file **); 2039 2040 /* 2041 * Get message queue by giving file slot 2042 */ 2043 static int 2044 _getmq(struct thread *td, int fd, _fgetf func, 2045 struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq) 2046 { 2047 struct mqfs_node *pn; 2048 int error; 2049 2050 error = func(td, fd, fpp); 2051 if (error) 2052 return (error); 2053 if (&mqueueops != (*fpp)->f_ops) { 2054 fdrop(*fpp, td); 2055 return (EBADF); 2056 } 2057 pn = (*fpp)->f_data; 2058 if (ppn) 2059 *ppn = pn; 2060 if (pmq) 2061 *pmq = pn->mn_data; 2062 return (0); 2063 } 2064 2065 static __inline int 2066 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn, 2067 struct mqueue **pmq) 2068 { 2069 return _getmq(td, fd, fget, fpp, ppn, pmq); 2070 } 2071 2072 static __inline int 2073 getmq_read(struct thread *td, int fd, struct file **fpp, 2074 struct mqfs_node **ppn, struct mqueue **pmq) 2075 { 2076 return _getmq(td, fd, fget_read, fpp, ppn, pmq); 2077 } 2078 2079 static __inline int 2080 getmq_write(struct thread *td, int fd, struct file **fpp, 2081 struct mqfs_node **ppn, struct mqueue **pmq) 2082 { 2083 return _getmq(td, fd, fget_write, fpp, ppn, pmq); 2084 } 2085 2086 int 2087 
kmq_setattr(struct thread *td, struct kmq_setattr_args *uap) 2088 { 2089 struct mqueue *mq; 2090 struct file *fp; 2091 struct mq_attr attr, oattr; 2092 u_int oflag, flag; 2093 int error; 2094 2095 if (uap->attr) { 2096 error = copyin(uap->attr, &attr, sizeof(attr)); 2097 if (error) 2098 return (error); 2099 if (attr.mq_flags & ~O_NONBLOCK) 2100 return (EINVAL); 2101 } 2102 error = getmq(td, uap->mqd, &fp, NULL, &mq); 2103 if (error) 2104 return (error); 2105 oattr.mq_maxmsg = mq->mq_maxmsg; 2106 oattr.mq_msgsize = mq->mq_msgsize; 2107 oattr.mq_curmsgs = mq->mq_curmsgs; 2108 if (uap->attr) { 2109 do { 2110 oflag = flag = fp->f_flag; 2111 flag &= ~O_NONBLOCK; 2112 flag |= (attr.mq_flags & O_NONBLOCK); 2113 } while (atomic_cmpset_int(&fp->f_flag, oflag, flag) == 0); 2114 } else 2115 oflag = fp->f_flag; 2116 oattr.mq_flags = (O_NONBLOCK & oflag); 2117 fdrop(fp, td); 2118 if (uap->oattr) 2119 error = copyout(&oattr, uap->oattr, sizeof(oattr)); 2120 return (error); 2121 } 2122 2123 int 2124 kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap) 2125 { 2126 struct mqueue *mq; 2127 struct file *fp; 2128 int error; 2129 int waitok; 2130 2131 error = getmq_read(td, uap->mqd, &fp, NULL, &mq); 2132 if (error) 2133 return (error); 2134 waitok = !(fp->f_flag & O_NONBLOCK); 2135 error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len, 2136 uap->msg_prio, waitok, uap->abs_timeout); 2137 fdrop(fp, td); 2138 return (error); 2139 } 2140 2141 int 2142 kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap) 2143 { 2144 struct mqueue *mq; 2145 struct file *fp; 2146 int error, waitok; 2147 2148 error = getmq_write(td, uap->mqd, &fp, NULL, &mq); 2149 if (error) 2150 return (error); 2151 waitok = !(fp->f_flag & O_NONBLOCK); 2152 error = mqueue_send(mq, uap->msg_ptr, uap->msg_len, 2153 uap->msg_prio, waitok, uap->abs_timeout); 2154 fdrop(fp, td); 2155 return (error); 2156 } 2157 2158 int 2159 kmq_notify(struct thread *td, struct kmq_notify_args *uap) 2160 { 2161 struct sigevent ev; 2162 struct filedesc *fdp; 2163 struct proc *p; 2164 struct mqueue *mq; 2165 struct file *fp; 2166 struct mqueue_notifier *nt, *newnt = NULL; 2167 int error; 2168 2169 p = td->td_proc; 2170 fdp = td->td_proc->p_fd; 2171 if (uap->sigev) { 2172 error = copyin(uap->sigev, &ev, sizeof(ev)); 2173 if (error) 2174 return (error); 2175 if (ev.sigev_notify != SIGEV_SIGNAL && 2176 ev.sigev_notify != SIGEV_THREAD_ID && 2177 ev.sigev_notify != SIGEV_NONE) 2178 return (EINVAL); 2179 if ((ev.sigev_notify == SIGEV_SIGNAL || 2180 ev.sigev_notify == SIGEV_THREAD_ID) && 2181 !_SIG_VALID(ev.sigev_signo)) 2182 return (EINVAL); 2183 } 2184 error = getmq(td, uap->mqd, &fp, NULL, &mq); 2185 if (error) 2186 return (error); 2187 again: 2188 FILEDESC_SLOCK(fdp); 2189 if (fget_locked(fdp, uap->mqd) != fp) { 2190 FILEDESC_SUNLOCK(fdp); 2191 error = EBADF; 2192 goto out; 2193 } 2194 mtx_lock(&mq->mq_mutex); 2195 FILEDESC_SUNLOCK(fdp); 2196 if (uap->sigev != NULL) { 2197 if (mq->mq_notifier != NULL) { 2198 error = EBUSY; 2199 } else { 2200 PROC_LOCK(p); 2201 nt = notifier_search(p, uap->mqd); 2202 if (nt == NULL) { 2203 if (newnt == NULL) { 2204 PROC_UNLOCK(p); 2205 mtx_unlock(&mq->mq_mutex); 2206 newnt = notifier_alloc(); 2207 goto again; 2208 } 2209 } 2210 2211 if (nt != NULL) { 2212 sigqueue_take(&nt->nt_ksi); 2213 if (newnt != NULL) { 2214 notifier_free(newnt); 2215 newnt = NULL; 2216 } 2217 } else { 2218 nt = newnt; 2219 newnt = NULL; 2220 ksiginfo_init(&nt->nt_ksi); 2221 nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT; 2222 nt->nt_ksi.ksi_code 
= SI_MESGQ; 2223 nt->nt_proc = p; 2224 nt->nt_ksi.ksi_mqd = uap->mqd; 2225 notifier_insert(p, nt); 2226 } 2227 nt->nt_sigev = ev; 2228 mq->mq_notifier = nt; 2229 PROC_UNLOCK(p); 2230 /* 2231 * if there is no receivers and message queue 2232 * is not empty, we should send notification 2233 * as soon as possible. 2234 */ 2235 if (mq->mq_receivers == 0 && 2236 !TAILQ_EMPTY(&mq->mq_msgq)) 2237 mqueue_send_notification(mq); 2238 } 2239 } else { 2240 notifier_remove(p, mq, uap->mqd); 2241 } 2242 mtx_unlock(&mq->mq_mutex); 2243 2244 out: 2245 fdrop(fp, td); 2246 if (newnt != NULL) 2247 notifier_free(newnt); 2248 return (error); 2249 } 2250 2251 static void 2252 mqueue_fdclose(struct thread *td, int fd, struct file *fp) 2253 { 2254 struct filedesc *fdp; 2255 struct mqueue *mq; 2256 2257 fdp = td->td_proc->p_fd; 2258 FILEDESC_LOCK_ASSERT(fdp); 2259 2260 if (fp->f_ops == &mqueueops) { 2261 mq = FPTOMQ(fp); 2262 mtx_lock(&mq->mq_mutex); 2263 notifier_remove(td->td_proc, mq, fd); 2264 2265 /* have to wakeup thread in same process */ 2266 if (mq->mq_flags & MQ_RSEL) { 2267 mq->mq_flags &= ~MQ_RSEL; 2268 selwakeup(&mq->mq_rsel); 2269 } 2270 if (mq->mq_flags & MQ_WSEL) { 2271 mq->mq_flags &= ~MQ_WSEL; 2272 selwakeup(&mq->mq_wsel); 2273 } 2274 mtx_unlock(&mq->mq_mutex); 2275 } 2276 } 2277 2278 static void 2279 mq_proc_exit(void *arg __unused, struct proc *p) 2280 { 2281 struct filedesc *fdp; 2282 struct file *fp; 2283 struct mqueue *mq; 2284 int i; 2285 2286 fdp = p->p_fd; 2287 FILEDESC_SLOCK(fdp); 2288 for (i = 0; i < fdp->fd_nfiles; ++i) { 2289 fp = fget_locked(fdp, i); 2290 if (fp != NULL && fp->f_ops == &mqueueops) { 2291 mq = FPTOMQ(fp); 2292 mtx_lock(&mq->mq_mutex); 2293 notifier_remove(p, FPTOMQ(fp), i); 2294 mtx_unlock(&mq->mq_mutex); 2295 } 2296 } 2297 FILEDESC_SUNLOCK(fdp); 2298 KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left")); 2299 } 2300 2301 static int 2302 mqf_read(struct file *fp, struct uio *uio, struct ucred *active_cred, 2303 int flags, struct thread *td) 2304 { 2305 return (EOPNOTSUPP); 2306 } 2307 2308 static int 2309 mqf_write(struct file *fp, struct uio *uio, struct ucred *active_cred, 2310 int flags, struct thread *td) 2311 { 2312 return (EOPNOTSUPP); 2313 } 2314 2315 static int 2316 mqf_truncate(struct file *fp, off_t length, struct ucred *active_cred, 2317 struct thread *td) 2318 { 2319 2320 return (EINVAL); 2321 } 2322 2323 static int 2324 mqf_ioctl(struct file *fp, u_long cmd, void *data, 2325 struct ucred *active_cred, struct thread *td) 2326 { 2327 return (ENOTTY); 2328 } 2329 2330 static int 2331 mqf_poll(struct file *fp, int events, struct ucred *active_cred, 2332 struct thread *td) 2333 { 2334 struct mqueue *mq = FPTOMQ(fp); 2335 int revents = 0; 2336 2337 mtx_lock(&mq->mq_mutex); 2338 if (events & (POLLIN | POLLRDNORM)) { 2339 if (mq->mq_curmsgs) { 2340 revents |= events & (POLLIN | POLLRDNORM); 2341 } else { 2342 mq->mq_flags |= MQ_RSEL; 2343 selrecord(td, &mq->mq_rsel); 2344 } 2345 } 2346 if (events & POLLOUT) { 2347 if (mq->mq_curmsgs < mq->mq_maxmsg) 2348 revents |= POLLOUT; 2349 else { 2350 mq->mq_flags |= MQ_WSEL; 2351 selrecord(td, &mq->mq_wsel); 2352 } 2353 } 2354 mtx_unlock(&mq->mq_mutex); 2355 return (revents); 2356 } 2357 2358 static int 2359 mqf_close(struct file *fp, struct thread *td) 2360 { 2361 struct mqfs_node *pn; 2362 2363 fp->f_ops = &badfileops; 2364 pn = fp->f_data; 2365 fp->f_data = NULL; 2366 sx_xlock(&mqfs_data.mi_lock); 2367 mqnode_release(pn); 2368 sx_xunlock(&mqfs_data.mi_lock); 2369 return (0); 2370 } 2371 2372 static int 2373 
mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 2374 struct thread *td) 2375 { 2376 struct mqfs_node *pn = fp->f_data; 2377 2378 bzero(st, sizeof *st); 2379 st->st_atimespec = pn->mn_atime; 2380 st->st_mtimespec = pn->mn_mtime; 2381 st->st_ctimespec = pn->mn_ctime; 2382 st->st_birthtimespec = pn->mn_birth; 2383 st->st_uid = pn->mn_uid; 2384 st->st_gid = pn->mn_gid; 2385 st->st_mode = S_IFIFO | pn->mn_mode; 2386 return (0); 2387 } 2388 2389 static int 2390 mqf_kqfilter(struct file *fp, struct knote *kn) 2391 { 2392 struct mqueue *mq = FPTOMQ(fp); 2393 int error = 0; 2394 2395 if (kn->kn_filter == EVFILT_READ) { 2396 kn->kn_fop = &mq_rfiltops; 2397 knlist_add(&mq->mq_rsel.si_note, kn, 0); 2398 } else if (kn->kn_filter == EVFILT_WRITE) { 2399 kn->kn_fop = &mq_wfiltops; 2400 knlist_add(&mq->mq_wsel.si_note, kn, 0); 2401 } else 2402 error = EINVAL; 2403 return (error); 2404 } 2405 2406 static void 2407 filt_mqdetach(struct knote *kn) 2408 { 2409 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2410 2411 if (kn->kn_filter == EVFILT_READ) 2412 knlist_remove(&mq->mq_rsel.si_note, kn, 0); 2413 else if (kn->kn_filter == EVFILT_WRITE) 2414 knlist_remove(&mq->mq_wsel.si_note, kn, 0); 2415 else 2416 panic("filt_mqdetach"); 2417 } 2418 2419 static int 2420 filt_mqread(struct knote *kn, long hint) 2421 { 2422 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2423 2424 mtx_assert(&mq->mq_mutex, MA_OWNED); 2425 return (mq->mq_curmsgs != 0); 2426 } 2427 2428 static int 2429 filt_mqwrite(struct knote *kn, long hint) 2430 { 2431 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2432 2433 mtx_assert(&mq->mq_mutex, MA_OWNED); 2434 return (mq->mq_curmsgs < mq->mq_maxmsg); 2435 } 2436 2437 static struct fileops mqueueops = { 2438 .fo_read = mqf_read, 2439 .fo_write = mqf_write, 2440 .fo_truncate = mqf_truncate, 2441 .fo_ioctl = mqf_ioctl, 2442 .fo_poll = mqf_poll, 2443 .fo_kqfilter = mqf_kqfilter, 2444 .fo_stat = mqf_stat, 2445 .fo_close = mqf_close 2446 }; 2447 2448 static struct vop_vector mqfs_vnodeops = { 2449 .vop_default = &default_vnodeops, 2450 .vop_access = mqfs_access, 2451 .vop_cachedlookup = mqfs_lookup, 2452 .vop_lookup = vfs_cache_lookup, 2453 .vop_reclaim = mqfs_reclaim, 2454 .vop_create = mqfs_create, 2455 .vop_remove = mqfs_remove, 2456 .vop_inactive = mqfs_inactive, 2457 .vop_open = mqfs_open, 2458 .vop_close = mqfs_close, 2459 .vop_getattr = mqfs_getattr, 2460 .vop_setattr = mqfs_setattr, 2461 .vop_read = mqfs_read, 2462 .vop_write = VOP_EOPNOTSUPP, 2463 .vop_readdir = mqfs_readdir, 2464 .vop_mkdir = VOP_EOPNOTSUPP, 2465 .vop_rmdir = VOP_EOPNOTSUPP 2466 }; 2467 2468 static struct vfsops mqfs_vfsops = { 2469 .vfs_init = mqfs_init, 2470 .vfs_uninit = mqfs_uninit, 2471 .vfs_mount = mqfs_mount, 2472 .vfs_unmount = mqfs_unmount, 2473 .vfs_root = mqfs_root, 2474 .vfs_statfs = mqfs_statfs, 2475 }; 2476 2477 SYSCALL_MODULE_HELPER(kmq_open); 2478 SYSCALL_MODULE_HELPER(kmq_setattr); 2479 SYSCALL_MODULE_HELPER(kmq_timedsend); 2480 SYSCALL_MODULE_HELPER(kmq_timedreceive); 2481 SYSCALL_MODULE_HELPER(kmq_notify); 2482 SYSCALL_MODULE_HELPER(kmq_unlink); 2483 2484 VFS_SET(mqfs_vfsops, mqueuefs, VFCF_SYNTHETIC); 2485 MODULE_VERSION(mqueuefs, 1); 2486