1 /*- 2 * Copyright (c) 2005 David Xu <davidxu@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 */ 27 28 /* 29 * POSIX message queue implementation. 30 * 31 * 1) An mqueue filesystem can be mounted; each message queue then appears 32 * as a file in the mounted directory, and the user can change a queue's 33 * permissions and ownership or remove a queue. Manually creating a file in 34 * the directory causes a message queue with the same name to be created in 35 * the kernel with the default message queue attributes; this method is not 36 * recommended, since the mq_open syscall lets the user specify different 37 * attributes. The filesystem can also be mounted multiple times at 38 * different mount points, but all mounts show the same contents. 39 * 40 * 2) Standard POSIX message queue API. The syscalls do not go through the 41 * vfs layer but operate directly on the internal data structures, so the 42 * IPC facility can be used without mounting the mqueue filesystem.
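 *
 * As a rough illustration only (not part of the original text), userland
 * normally reaches this code through the standard POSIX wrappers; the
 * queue name "/myqueue" below is an arbitrary example and error checking
 * is omitted:
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	mqd_t mqd = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, NULL);
 *	char buf[1024];		(must be at least the queue's mq_msgsize)
 *	unsigned int prio;
 *
 *	mq_send(mqd, "hello", 5, 0);
 *	mq_receive(mqd, buf, sizeof(buf), &prio);
 *	mq_close(mqd);
 *	mq_unlink("/myqueue");
 *
 * The filesystem view described in 1) is obtained by mounting mqueuefs,
 * e.g. "mount -t mqueuefs null /mnt/mqueue" (the mount point here is just
 * an example).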
43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include <sys/param.h> 49 #include <sys/kernel.h> 50 #include <sys/systm.h> 51 #include <sys/limits.h> 52 #include <sys/buf.h> 53 #include <sys/dirent.h> 54 #include <sys/event.h> 55 #include <sys/eventhandler.h> 56 #include <sys/fcntl.h> 57 #include <sys/file.h> 58 #include <sys/filedesc.h> 59 #include <sys/limits.h> 60 #include <sys/lock.h> 61 #include <sys/malloc.h> 62 #include <sys/module.h> 63 #include <sys/mount.h> 64 #include <sys/mqueue.h> 65 #include <sys/mutex.h> 66 #include <sys/namei.h> 67 #include <sys/poll.h> 68 #include <sys/proc.h> 69 #include <sys/queue.h> 70 #include <sys/sysproto.h> 71 #include <sys/stat.h> 72 #include <sys/syscall.h> 73 #include <sys/syscallsubr.h> 74 #include <sys/sysent.h> 75 #include <sys/sx.h> 76 #include <sys/sysctl.h> 77 #include <sys/taskqueue.h> 78 #include <sys/unistd.h> 79 #include <sys/vnode.h> 80 #include <machine/atomic.h> 81 #include <posix4/posix4.h> 82 83 /* 84 * Limits and constants 85 */ 86 #define MQFS_NAMELEN NAME_MAX 87 #define MQFS_DELEN (8 + MQFS_NAMELEN) 88 89 /* node types */ 90 typedef enum { 91 mqfstype_none = 0, 92 mqfstype_root, 93 mqfstype_dir, 94 mqfstype_this, 95 mqfstype_parent, 96 mqfstype_file, 97 mqfstype_symlink, 98 } mqfs_type_t; 99 100 struct mqfs_node; 101 102 /* 103 * mqfs_info: describes a mqfs instance 104 */ 105 struct mqfs_info { 106 struct sx mi_lock; 107 struct mqfs_node *mi_root; 108 struct unrhdr *mi_unrhdr; 109 }; 110 111 struct mqfs_vdata { 112 LIST_ENTRY(mqfs_vdata) mv_link; 113 struct mqfs_node *mv_node; 114 struct vnode *mv_vnode; 115 struct task mv_task; 116 }; 117 118 /* 119 * mqfs_node: describes a node (file or directory) within a mqfs 120 */ 121 struct mqfs_node { 122 char mn_name[MQFS_NAMELEN+1]; 123 struct mqfs_info *mn_info; 124 struct mqfs_node *mn_parent; 125 LIST_HEAD(,mqfs_node) mn_children; 126 LIST_ENTRY(mqfs_node) mn_sibling; 127 LIST_HEAD(,mqfs_vdata) mn_vnodes; 128 int mn_refcount; 129 mqfs_type_t mn_type; 130 int mn_deleted; 131 u_int32_t mn_fileno; 132 void *mn_data; 133 struct timespec mn_birth; 134 struct timespec mn_ctime; 135 struct timespec mn_atime; 136 struct timespec mn_mtime; 137 uid_t mn_uid; 138 gid_t mn_gid; 139 int mn_mode; 140 }; 141 142 #define VTON(vp) (((struct mqfs_vdata *)((vp)->v_data))->mv_node) 143 #define VTOMQ(vp) ((struct mqueue *)(VTON(vp)->mn_data)) 144 #define VFSTOMQFS(m) ((struct mqfs_info *)((m)->mnt_data)) 145 #define FPTOMQ(fp) ((struct mqueue *)(((struct mqfs_node *) \ 146 (fp)->f_data)->mn_data)) 147 148 TAILQ_HEAD(msgq, mqueue_msg); 149 150 struct mqueue; 151 152 struct mqueue_notifier { 153 LIST_ENTRY(mqueue_notifier) nt_link; 154 struct sigevent nt_sigev; 155 ksiginfo_t nt_ksi; 156 struct proc *nt_proc; 157 }; 158 159 struct mqueue { 160 struct mtx mq_mutex; 161 int mq_flags; 162 long mq_maxmsg; 163 long mq_msgsize; 164 long mq_curmsgs; 165 long mq_totalbytes; 166 struct msgq mq_msgq; 167 int mq_receivers; 168 int mq_senders; 169 struct selinfo mq_rsel; 170 struct selinfo mq_wsel; 171 struct mqueue_notifier *mq_notifier; 172 }; 173 174 #define MQ_RSEL 0x01 175 #define MQ_WSEL 0x02 176 177 struct mqueue_msg { 178 TAILQ_ENTRY(mqueue_msg) msg_link; 179 unsigned int msg_prio; 180 unsigned int msg_size; 181 /* following real data... 
*/ 182 }; 183 184 SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW, 0, 185 "POSIX real time message queue"); 186 187 static int default_maxmsg = 10; 188 static int default_msgsize = 1024; 189 190 static int maxmsg = 100; 191 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW, 192 &maxmsg, 0, "Default maximum messages in queue"); 193 static int maxmsgsize = 16384; 194 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW, 195 &maxmsgsize, 0, "Default maximum message size"); 196 static int maxmq = 100; 197 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW, 198 &maxmq, 0, "maximum message queues"); 199 static int curmq = 0; 200 SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW, 201 &curmq, 0, "current message queue number"); 202 static int unloadable = 0; 203 static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data"); 204 205 static eventhandler_tag exit_tag; 206 207 /* Only one instance per-system */ 208 static struct mqfs_info mqfs_data; 209 static uma_zone_t mqnode_zone; 210 static uma_zone_t mqueue_zone; 211 static uma_zone_t mvdata_zone; 212 static uma_zone_t mqnoti_zone; 213 static struct vop_vector mqfs_vnodeops; 214 static struct fileops mqueueops; 215 216 /* 217 * Directory structure construction and manipulation 218 */ 219 #ifdef notyet 220 static struct mqfs_node *mqfs_create_dir(struct mqfs_node *parent, 221 const char *name, int namelen, struct ucred *cred, int mode); 222 static struct mqfs_node *mqfs_create_link(struct mqfs_node *parent, 223 const char *name, int namelen, struct ucred *cred, int mode); 224 #endif 225 226 static struct mqfs_node *mqfs_create_file(struct mqfs_node *parent, 227 const char *name, int namelen, struct ucred *cred, int mode); 228 static int mqfs_destroy(struct mqfs_node *mn); 229 static void mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn); 230 static void mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn); 231 static int mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn); 232 233 /* 234 * Message queue construction and maniplation 235 */ 236 static struct mqueue *mqueue_alloc(const struct mq_attr *attr); 237 static void mqueue_free(struct mqueue *mq); 238 static int mqueue_send(struct mqueue *mq, const char *msg_ptr, 239 size_t msg_len, unsigned msg_prio, int waitok, 240 const struct timespec *abs_timeout); 241 static int mqueue_receive(struct mqueue *mq, char *msg_ptr, 242 size_t msg_len, unsigned *msg_prio, int waitok, 243 const struct timespec *abs_timeout); 244 static int _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, 245 int timo); 246 static int _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, 247 int timo); 248 static void mqueue_send_notification(struct mqueue *mq); 249 static void mqueue_fdclose(struct thread *td, int fd, struct file *fp); 250 static void mq_proc_exit(void *arg, struct proc *p); 251 252 /* 253 * kqueue filters 254 */ 255 static void filt_mqdetach(struct knote *kn); 256 static int filt_mqread(struct knote *kn, long hint); 257 static int filt_mqwrite(struct knote *kn, long hint); 258 259 struct filterops mq_rfiltops = 260 { 1, NULL, filt_mqdetach, filt_mqread }; 261 struct filterops mq_wfiltops = 262 { 1, NULL, filt_mqdetach, filt_mqwrite }; 263 264 /* 265 * Initialize fileno bitmap 266 */ 267 static void 268 mqfs_fileno_init(struct mqfs_info *mi) 269 { 270 struct unrhdr *up; 271 272 up = new_unrhdr(1, INT_MAX, NULL); 273 mi->mi_unrhdr = up; 274 } 275 276 /* 277 * Tear down fileno bitmap 278 */ 279 static void 280 mqfs_fileno_uninit(struct mqfs_info *mi) 
281 { 282 struct unrhdr *up; 283 284 up = mi->mi_unrhdr; 285 mi->mi_unrhdr = NULL; 286 delete_unrhdr(up); 287 } 288 289 /* 290 * Allocate a file number 291 */ 292 static void 293 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn) 294 { 295 /* make sure our parent has a file number */ 296 if (mn->mn_parent && !mn->mn_parent->mn_fileno) 297 mqfs_fileno_alloc(mi, mn->mn_parent); 298 299 switch (mn->mn_type) { 300 case mqfstype_root: 301 case mqfstype_dir: 302 case mqfstype_file: 303 case mqfstype_symlink: 304 mn->mn_fileno = alloc_unr(mi->mi_unrhdr); 305 break; 306 case mqfstype_this: 307 KASSERT(mn->mn_parent != NULL, 308 ("mqfstype_this node has no parent")); 309 mn->mn_fileno = mn->mn_parent->mn_fileno; 310 break; 311 case mqfstype_parent: 312 KASSERT(mn->mn_parent != NULL, 313 ("mqfstype_parent node has no parent")); 314 if (mn->mn_parent == mi->mi_root) { 315 mn->mn_fileno = mn->mn_parent->mn_fileno; 316 break; 317 } 318 KASSERT(mn->mn_parent->mn_parent != NULL, 319 ("mqfstype_parent node has no grandparent")); 320 mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno; 321 break; 322 default: 323 KASSERT(0, 324 ("mqfs_fileno_alloc() called for unknown type node: %d", 325 mn->mn_type)); 326 break; 327 } 328 } 329 330 /* 331 * Release a file number 332 */ 333 static void 334 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn) 335 { 336 switch (mn->mn_type) { 337 case mqfstype_root: 338 case mqfstype_dir: 339 case mqfstype_file: 340 case mqfstype_symlink: 341 free_unr(mi->mi_unrhdr, mn->mn_fileno); 342 break; 343 case mqfstype_this: 344 case mqfstype_parent: 345 /* ignore these, as they don't "own" their file number */ 346 break; 347 default: 348 KASSERT(0, 349 ("mqfs_fileno_free() called for unknown type node: %d", 350 mn->mn_type)); 351 break; 352 } 353 } 354 355 static __inline struct mqfs_node * 356 mqnode_alloc(void) 357 { 358 return uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO); 359 } 360 361 static __inline void 362 mqnode_free(struct mqfs_node *node) 363 { 364 uma_zfree(mqnode_zone, node); 365 } 366 367 static __inline void 368 mqnode_addref(struct mqfs_node *node) 369 { 370 atomic_fetchadd_int(&node->mn_refcount, 1); 371 } 372 373 static __inline void 374 mqnode_release(struct mqfs_node *node) 375 { 376 int old, exp; 377 378 old = atomic_fetchadd_int(&node->mn_refcount, -1); 379 if (node->mn_type == mqfstype_dir || 380 node->mn_type == mqfstype_root) 381 exp = 3; /* include . and .. 
*/ 382 else 383 exp = 1; 384 if (old == exp) 385 mqfs_destroy(node); 386 } 387 388 /* 389 * Add a node to a directory 390 */ 391 static int 392 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node) 393 { 394 KASSERT(parent != NULL, ("%s(): parent is NULL", __func__)); 395 KASSERT(parent->mn_info != NULL, 396 ("%s(): parent has no mn_info", __func__)); 397 KASSERT(parent->mn_type == mqfstype_dir || 398 parent->mn_type == mqfstype_root, 399 ("%s(): parent is not a directory", __func__)); 400 401 node->mn_info = parent->mn_info; 402 node->mn_parent = parent; 403 LIST_INIT(&node->mn_children); 404 LIST_INIT(&node->mn_vnodes); 405 LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling); 406 mqnode_addref(parent); 407 return (0); 408 } 409 410 static struct mqfs_node * 411 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode, 412 int nodetype) 413 { 414 struct mqfs_node *node; 415 416 node = mqnode_alloc(); 417 strncpy(node->mn_name, name, namelen); 418 node->mn_type = nodetype; 419 node->mn_refcount = 1; 420 getnanotime(&node->mn_birth); 421 node->mn_ctime = node->mn_atime = node->mn_mtime 422 = node->mn_birth; 423 node->mn_uid = cred->cr_uid; 424 node->mn_gid = cred->cr_gid; 425 node->mn_mode = mode; 426 return (node); 427 } 428 429 /* 430 * Create a file 431 */ 432 static struct mqfs_node * 433 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen, 434 struct ucred *cred, int mode) 435 { 436 struct mqfs_node *node; 437 438 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file); 439 if (mqfs_add_node(parent, node) != 0) { 440 mqnode_free(node); 441 return (NULL); 442 } 443 return (node); 444 } 445 446 /* 447 * Add . and .. to a directory 448 */ 449 static int 450 mqfs_fixup_dir(struct mqfs_node *parent) 451 { 452 struct mqfs_node *dir; 453 454 dir = mqnode_alloc(); 455 dir->mn_name[0] = '.'; 456 dir->mn_type = mqfstype_this; 457 dir->mn_refcount = 1; 458 if (mqfs_add_node(parent, dir) != 0) { 459 mqnode_free(dir); 460 return (-1); 461 } 462 463 dir = mqnode_alloc(); 464 dir->mn_name[0] = dir->mn_name[1] = '.'; 465 dir->mn_type = mqfstype_parent; 466 dir->mn_refcount = 1; 467 468 if (mqfs_add_node(parent, dir) != 0) { 469 mqnode_free(dir); 470 return (-1); 471 } 472 473 return (0); 474 } 475 476 #ifdef notyet 477 478 /* 479 * Create a directory 480 */ 481 static struct mqfs_node * 482 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen, 483 struct ucred *cred, int mode) 484 { 485 struct mqfs_node *node; 486 487 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir); 488 if (mqfs_add_node(parent, node) != 0) { 489 mqnode_free(node); 490 return (NULL); 491 } 492 493 if (mqfs_fixup_dir(node) != 0) { 494 mqfs_destroy(node); 495 return (NULL); 496 } 497 return (node); 498 } 499 500 /* 501 * Create a symlink 502 */ 503 static struct mqfs_node * 504 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen, 505 struct ucred *cred, int mode) 506 { 507 struct mqfs_node *node; 508 509 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink); 510 if (mqfs_add_node(parent, node) != 0) { 511 mqnode_free(node); 512 return (NULL); 513 } 514 return (node); 515 } 516 517 #endif 518 519 /* 520 * Destroy a node or a tree of nodes 521 */ 522 static int 523 mqfs_destroy(struct mqfs_node *node) 524 { 525 struct mqfs_node *parent; 526 527 KASSERT(node != NULL, 528 ("%s(): node is NULL", __func__)); 529 KASSERT(node->mn_info != NULL, 530 ("%s(): node has no mn_info", __func__)); 531 532 /* destroy 
children */ 533 if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root) 534 while (! LIST_EMPTY(&node->mn_children)) 535 mqfs_destroy(LIST_FIRST(&node->mn_children)); 536 537 /* unlink from parent */ 538 if ((parent = node->mn_parent) != NULL) { 539 KASSERT(parent->mn_info == node->mn_info, 540 ("%s(): parent has different mn_info", __func__)); 541 LIST_REMOVE(node, mn_sibling); 542 } 543 544 if (node->mn_fileno != 0) 545 mqfs_fileno_free(node->mn_info, node); 546 if (node->mn_data != NULL) 547 mqueue_free(node->mn_data); 548 mqnode_free(node); 549 return (0); 550 } 551 552 /* 553 * Mount a mqfs instance 554 */ 555 static int 556 mqfs_mount(struct mount *mp, struct thread *td) 557 { 558 struct statfs *sbp; 559 560 if (mp->mnt_flag & MNT_UPDATE) 561 return (EOPNOTSUPP); 562 563 mp->mnt_data = &mqfs_data; 564 mp->mnt_flag |= MNT_LOCAL; 565 mp->mnt_kern_flag |= MNTK_MPSAFE; 566 vfs_getnewfsid(mp); 567 568 sbp = &mp->mnt_stat; 569 vfs_mountedfrom(mp, "mqueue"); 570 sbp->f_bsize = PAGE_SIZE; 571 sbp->f_iosize = PAGE_SIZE; 572 sbp->f_blocks = 1; 573 sbp->f_bfree = 0; 574 sbp->f_bavail = 0; 575 sbp->f_files = 1; 576 sbp->f_ffree = 0; 577 return (0); 578 } 579 580 /* 581 * Unmount a mqfs instance 582 */ 583 static int 584 mqfs_unmount(struct mount *mp, int mntflags, struct thread *td) 585 { 586 int error; 587 588 error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td); 589 return (error); 590 } 591 592 /* 593 * Return a root vnode 594 */ 595 static int 596 mqfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td) 597 { 598 struct mqfs_info *mqfs; 599 int ret; 600 601 mqfs = VFSTOMQFS(mp); 602 sx_xlock(&mqfs->mi_lock); 603 ret = mqfs_allocv(mp, vpp, mqfs->mi_root); 604 sx_xunlock(&mqfs->mi_lock); 605 return (ret); 606 } 607 608 /* 609 * Return filesystem stats 610 */ 611 static int 612 mqfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *td) 613 { 614 /* XXX update statistics */ 615 return (0); 616 } 617 618 /* 619 * Initialize a mqfs instance 620 */ 621 static int 622 mqfs_init(struct vfsconf *vfc) 623 { 624 struct mqfs_node *root; 625 struct mqfs_info *mi; 626 627 mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node), 628 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 629 mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue), 630 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 631 mvdata_zone = uma_zcreate("mvdata", 632 sizeof(struct mqfs_vdata), NULL, NULL, NULL, 633 NULL, UMA_ALIGN_PTR, 0); 634 mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier), 635 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 636 mi = &mqfs_data; 637 sx_init(&mi->mi_lock, "mqfs lock"); 638 /* set up the root diretory */ 639 root = mqfs_create_node("/", 1, curthread->td_ucred, 01777, 640 mqfstype_root); 641 root->mn_info = mi; 642 LIST_INIT(&root->mn_children); 643 LIST_INIT(&root->mn_vnodes); 644 mi->mi_root = root; 645 mqfs_fileno_init(mi); 646 mqfs_fileno_alloc(mi, root); 647 mqfs_fixup_dir(root); 648 exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL, 649 EVENTHANDLER_PRI_ANY); 650 mq_fdclose = mqueue_fdclose; 651 p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING); 652 return (0); 653 } 654 655 /* 656 * Destroy a mqfs instance 657 */ 658 static int 659 mqfs_uninit(struct vfsconf *vfc) 660 { 661 struct mqfs_info *mi; 662 663 if (!unloadable) 664 return (EOPNOTSUPP); 665 EVENTHANDLER_DEREGISTER(process_exit, exit_tag); 666 mi = &mqfs_data; 667 mqfs_destroy(mi->mi_root); 668 mi->mi_root = NULL; 669 mqfs_fileno_uninit(mi); 
670 sx_destroy(&mi->mi_lock); 671 uma_zdestroy(mqnode_zone); 672 uma_zdestroy(mqueue_zone); 673 uma_zdestroy(mvdata_zone); 674 uma_zdestroy(mqnoti_zone); 675 return (0); 676 } 677 678 /* 679 * task routine 680 */ 681 static void 682 do_recycle(void *context, int pending __unused) 683 { 684 struct vnode *vp = (struct vnode *)context; 685 686 vrecycle(vp, curthread); 687 vdrop(vp); 688 } 689 690 /* 691 * Allocate a vnode 692 */ 693 static int 694 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn) 695 { 696 struct mqfs_vdata *vd; 697 int error; 698 699 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 700 if (vd->mv_vnode->v_mount == mp) 701 break; 702 } 703 704 if (vd != NULL) { 705 if (vget(vd->mv_vnode, 0, curthread) == 0) { 706 *vpp = vd->mv_vnode; 707 vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, 708 curthread); 709 return (0); 710 } 711 /* XXX if this can happen, we're in trouble */ 712 } 713 714 error = getnewvnode("mqueue", mp, &mqfs_vnodeops, vpp); 715 if (error) 716 return (error); 717 vd = uma_zalloc(mvdata_zone, M_WAITOK); 718 (*vpp)->v_data = vd; 719 vd->mv_vnode = *vpp; 720 vd->mv_node = pn; 721 TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp); 722 LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link); 723 mqnode_addref(pn); 724 switch (pn->mn_type) { 725 case mqfstype_root: 726 (*vpp)->v_vflag = VV_ROOT; 727 /* fall through */ 728 case mqfstype_dir: 729 case mqfstype_this: 730 case mqfstype_parent: 731 (*vpp)->v_type = VDIR; 732 break; 733 case mqfstype_file: 734 (*vpp)->v_type = VREG; 735 break; 736 case mqfstype_symlink: 737 (*vpp)->v_type = VLNK; 738 break; 739 case mqfstype_none: 740 KASSERT(0, ("mqfs_allocf called for null node\n")); 741 default: 742 panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type); 743 } 744 vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread); 745 return (0); 746 } 747 748 /* 749 * Search a directory entry 750 */ 751 static struct mqfs_node * 752 mqfs_search(struct mqfs_node *pd, const char *name, int len) 753 { 754 struct mqfs_node *pn; 755 756 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 757 if (strncmp(pn->mn_name, name, len) == 0) 758 return (pn); 759 } 760 return (NULL); 761 } 762 763 /* 764 * Look up a file or directory 765 */ 766 static int 767 mqfs_lookupx(struct vop_cachedlookup_args *ap) 768 { 769 struct componentname *cnp; 770 struct vnode *dvp, **vpp; 771 struct mqfs_node *pd; 772 struct mqfs_node *pn; 773 int nameiop, flags, error, namelen; 774 char *pname; 775 struct thread *td; 776 777 cnp = ap->a_cnp; 778 vpp = ap->a_vpp; 779 dvp = ap->a_dvp; 780 pname = cnp->cn_nameptr; 781 namelen = cnp->cn_namelen; 782 td = cnp->cn_thread; 783 flags = cnp->cn_flags; 784 nameiop = cnp->cn_nameiop; 785 pd = VTON(dvp); 786 pn = NULL; 787 *vpp = NULLVP; 788 789 if (dvp->v_type != VDIR) 790 return (ENOTDIR); 791 792 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread); 793 if (error) 794 return (error); 795 796 /* shortcut: check if the name is too long */ 797 if (cnp->cn_namelen >= MQFS_NAMELEN) 798 return (ENOENT); 799 800 /* self */ 801 if (namelen == 1 && pname[0] == '.') { 802 if ((flags & ISLASTCN) && nameiop != LOOKUP) 803 return (EINVAL); 804 pn = pd; 805 *vpp = dvp; 806 VREF(dvp); 807 return (0); 808 } 809 810 /* parent */ 811 if (cnp->cn_flags & ISDOTDOT) { 812 if (dvp->v_vflag & VV_ROOT) 813 return (EIO); 814 if ((flags & ISLASTCN) && nameiop != LOOKUP) 815 return (EINVAL); 816 VOP_UNLOCK(dvp, 0, cnp->cn_thread); 817 KASSERT(pd->mn_parent, ("non-root directory has no parent")); 818 pn = pd->mn_parent; 819 error = 
mqfs_allocv(dvp->v_mount, vpp, pn); 820 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td); 821 return (error); 822 } 823 824 /* named node */ 825 pn = mqfs_search(pd, pname, namelen); 826 827 /* found */ 828 if (pn != NULL) { 829 /* DELETE */ 830 if (nameiop == DELETE && (flags & ISLASTCN)) { 831 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 832 if (error) 833 return (error); 834 if (*vpp == dvp) { 835 VREF(dvp); 836 *vpp = dvp; 837 return (0); 838 } 839 } 840 841 /* allocate vnode */ 842 error = mqfs_allocv(dvp->v_mount, vpp, pn); 843 if (error == 0 && cnp->cn_flags & MAKEENTRY) 844 cache_enter(dvp, *vpp, cnp); 845 return (error); 846 } 847 848 /* not found */ 849 850 /* will create a new entry in the directory ? */ 851 if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT) 852 && (flags & ISLASTCN)) { 853 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 854 if (error) 855 return (error); 856 cnp->cn_flags |= SAVENAME; 857 return (EJUSTRETURN); 858 } 859 return (ENOENT); 860 } 861 862 #if 0 863 struct vop_lookup_args { 864 struct vop_generic_args a_gen; 865 struct vnode *a_dvp; 866 struct vnode **a_vpp; 867 struct componentname *a_cnp; 868 }; 869 #endif 870 871 /* 872 * vnode lookup operation 873 */ 874 static int 875 mqfs_lookup(struct vop_cachedlookup_args *ap) 876 { 877 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 878 int rc; 879 880 sx_xlock(&mqfs->mi_lock); 881 rc = mqfs_lookupx(ap); 882 sx_xunlock(&mqfs->mi_lock); 883 return (rc); 884 } 885 886 #if 0 887 struct vop_create_args { 888 struct vnode *a_dvp; 889 struct vnode **a_vpp; 890 struct componentname *a_cnp; 891 struct vattr *a_vap; 892 }; 893 #endif 894 895 /* 896 * vnode creation operation 897 */ 898 static int 899 mqfs_create(struct vop_create_args *ap) 900 { 901 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 902 struct componentname *cnp = ap->a_cnp; 903 struct mqfs_node *pd; 904 struct mqfs_node *pn; 905 struct mqueue *mq; 906 int error; 907 908 pd = VTON(ap->a_dvp); 909 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 910 return (ENOTDIR); 911 mq = mqueue_alloc(NULL); 912 if (mq == NULL) 913 return (EAGAIN); 914 sx_xlock(&mqfs->mi_lock); 915 #if 0 916 /* named node */ 917 pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen); 918 if (pn != NULL) { 919 mqueue_free(mq); 920 sx_xunlock(&mqfs->mi_lock); 921 return (EEXIST); 922 } 923 #else 924 if ((cnp->cn_flags & HASBUF) == 0) 925 panic("%s: no name", __func__); 926 #endif 927 pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen, 928 cnp->cn_cred, ap->a_vap->va_mode); 929 if (pn == NULL) 930 error = ENOSPC; 931 else { 932 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 933 if (error) 934 mqfs_destroy(pn); 935 else 936 pn->mn_data = mq; 937 } 938 sx_xunlock(&mqfs->mi_lock); 939 if (error) 940 mqueue_free(mq); 941 return (error); 942 } 943 944 /* 945 * Remove an entry 946 */ 947 static 948 int do_unlink(struct mqfs_node *pn, struct ucred *ucred) 949 { 950 struct mqfs_node *parent; 951 struct mqfs_vdata *vd; 952 int error = 0; 953 954 sx_assert(&pn->mn_info->mi_lock, SX_LOCKED); 955 956 if (ucred->cr_uid != pn->mn_uid && 957 (error = suser_cred(ucred, 0)) != 0) 958 error = EACCES; 959 else if (!pn->mn_deleted) { 960 parent = pn->mn_parent; 961 pn->mn_parent = NULL; 962 pn->mn_deleted = 1; 963 LIST_REMOVE(pn, mn_sibling); 964 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 965 cache_purge(vd->mv_vnode); 966 vhold(vd->mv_vnode); 967 taskqueue_enqueue(taskqueue_thread, &vd->mv_task); 968 } 969 mqnode_release(pn); 970 
mqnode_release(parent); 971 } else 972 error = ENOENT; 973 return (error); 974 } 975 976 #if 0 977 struct vop_remove_args { 978 struct vnode *a_dvp; 979 struct vnode *a_vp; 980 struct componentname *a_cnp; 981 }; 982 #endif 983 984 /* 985 * vnode removal operation 986 */ 987 static int 988 mqfs_remove(struct vop_remove_args *ap) 989 { 990 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 991 struct mqfs_node *pn; 992 int error; 993 994 if (ap->a_vp->v_type == VDIR) 995 return (EPERM); 996 pn = VTON(ap->a_vp); 997 sx_xlock(&mqfs->mi_lock); 998 error = do_unlink(pn, ap->a_cnp->cn_cred); 999 sx_xunlock(&mqfs->mi_lock); 1000 return (error); 1001 } 1002 1003 #if 0 1004 struct vop_inactive_args { 1005 struct vnode *a_vp; 1006 struct thread *a_td; 1007 }; 1008 #endif 1009 1010 static int 1011 mqfs_inactive(struct vop_inactive_args *ap) 1012 { 1013 struct mqfs_node *pn = VTON(ap->a_vp); 1014 1015 if (pn->mn_deleted) 1016 vrecycle(ap->a_vp, ap->a_td); 1017 return (0); 1018 } 1019 1020 #if 0 1021 struct vop_reclaim_args { 1022 struct vop_generic_args a_gen; 1023 struct vnode *a_vp; 1024 struct thread *a_td; 1025 }; 1026 #endif 1027 1028 static int 1029 mqfs_reclaim(struct vop_reclaim_args *ap) 1030 { 1031 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount); 1032 struct vnode *vp = ap->a_vp; 1033 struct mqfs_node *pn; 1034 struct mqfs_vdata *vd; 1035 1036 vd = vp->v_data; 1037 pn = vd->mv_node; 1038 sx_xlock(&mqfs->mi_lock); 1039 vp->v_data = NULL; 1040 LIST_REMOVE(vd, mv_link); 1041 uma_zfree(mvdata_zone, vd); 1042 mqnode_release(pn); 1043 sx_xunlock(&mqfs->mi_lock); 1044 return (0); 1045 } 1046 1047 #if 0 1048 struct vop_open_args { 1049 struct vop_generic_args a_gen; 1050 struct vnode *a_vp; 1051 int a_mode; 1052 struct ucred *a_cred; 1053 struct thread *a_td; 1054 int a_fdidx; 1055 }; 1056 #endif 1057 1058 static int 1059 mqfs_open(struct vop_open_args *ap) 1060 { 1061 return (0); 1062 } 1063 1064 #if 0 1065 struct vop_close_args { 1066 struct vop_generic_args a_gen; 1067 struct vnode *a_vp; 1068 int a_fflag; 1069 struct ucred *a_cred; 1070 struct thread *a_td; 1071 }; 1072 #endif 1073 1074 static int 1075 mqfs_close(struct vop_close_args *ap) 1076 { 1077 return (0); 1078 } 1079 1080 #if 0 1081 struct vop_access_args { 1082 struct vop_generic_args a_gen; 1083 struct vnode *a_vp; 1084 int a_mode; 1085 struct ucred *a_cred; 1086 struct thread *a_td; 1087 }; 1088 #endif 1089 1090 /* 1091 * Verify permissions 1092 */ 1093 static int 1094 mqfs_access(struct vop_access_args *ap) 1095 { 1096 struct vnode *vp = ap->a_vp; 1097 struct vattr vattr; 1098 int error; 1099 1100 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td); 1101 if (error) 1102 return (error); 1103 error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid, 1104 vattr.va_gid, ap->a_mode, ap->a_cred, NULL); 1105 return (error); 1106 } 1107 1108 #if 0 1109 struct vop_getattr_args { 1110 struct vop_generic_args a_gen; 1111 struct vnode *a_vp; 1112 struct vattr *a_vap; 1113 struct ucred *a_cred; 1114 struct thread *a_td; 1115 }; 1116 #endif 1117 1118 /* 1119 * Get file attributes 1120 */ 1121 static int 1122 mqfs_getattr(struct vop_getattr_args *ap) 1123 { 1124 struct vnode *vp = ap->a_vp; 1125 struct mqfs_node *pn = VTON(vp); 1126 struct vattr *vap = ap->a_vap; 1127 int error = 0; 1128 1129 VATTR_NULL(vap); 1130 vap->va_type = vp->v_type; 1131 vap->va_mode = pn->mn_mode; 1132 vap->va_nlink = 1; 1133 vap->va_uid = pn->mn_uid; 1134 vap->va_gid = pn->mn_gid; 1135 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; 1136 vap->va_fileid = 
pn->mn_fileno; 1137 vap->va_size = 0; 1138 vap->va_blocksize = PAGE_SIZE; 1139 vap->va_bytes = vap->va_size = 0; 1140 vap->va_atime = pn->mn_atime; 1141 vap->va_mtime = pn->mn_mtime; 1142 vap->va_ctime = pn->mn_ctime; 1143 vap->va_birthtime = pn->mn_birth; 1144 vap->va_gen = 0; 1145 vap->va_flags = 0; 1146 vap->va_rdev = 0; 1147 vap->va_bytes = 0; 1148 vap->va_filerev = 0; 1149 vap->va_vaflags = 0; 1150 return (error); 1151 } 1152 1153 #if 0 1154 struct vop_setattr_args { 1155 struct vop_generic_args a_gen; 1156 struct vnode *a_vp; 1157 struct vattr *a_vap; 1158 struct ucred *a_cred; 1159 struct thread *a_td; 1160 }; 1161 #endif 1162 /* 1163 * Set attributes 1164 */ 1165 static int 1166 mqfs_setattr(struct vop_setattr_args *ap) 1167 { 1168 struct mqfs_node *pn; 1169 struct vattr *vap; 1170 struct vnode *vp; 1171 int c, error; 1172 uid_t uid; 1173 gid_t gid; 1174 1175 vap = ap->a_vap; 1176 vp = ap->a_vp; 1177 if ((vap->va_type != VNON) || 1178 (vap->va_nlink != VNOVAL) || 1179 (vap->va_fsid != VNOVAL) || 1180 (vap->va_fileid != VNOVAL) || 1181 (vap->va_blocksize != VNOVAL) || 1182 (vap->va_flags != VNOVAL && vap->va_flags != 0) || 1183 (vap->va_rdev != VNOVAL) || 1184 ((int)vap->va_bytes != VNOVAL) || 1185 (vap->va_gen != VNOVAL)) { 1186 return (EINVAL); 1187 } 1188 1189 pn = VTON(vp); 1190 1191 error = c = 0; 1192 if (vap->va_uid == (uid_t)VNOVAL) 1193 uid = pn->mn_uid; 1194 else 1195 uid = vap->va_uid; 1196 if (vap->va_gid == (gid_t)VNOVAL) 1197 gid = pn->mn_gid; 1198 else 1199 gid = vap->va_gid; 1200 1201 if (uid != pn->mn_uid || gid != pn->mn_gid) { 1202 /* 1203 * To modify the ownership of a file, must possess VADMIN 1204 * for that file. 1205 */ 1206 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td))) 1207 return (error); 1208 if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid || 1209 (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) && 1210 (error = suser_cred(ap->a_td->td_ucred, SUSER_ALLOWJAIL)) 1211 != 0) 1212 return (error); 1213 pn->mn_uid = uid; 1214 pn->mn_gid = gid; 1215 c = 1; 1216 } 1217 1218 if (vap->va_mode != (mode_t)VNOVAL) { 1219 if ((ap->a_cred->cr_uid != pn->mn_uid) && 1220 (error = suser_cred(ap->a_td->td_ucred, SUSER_ALLOWJAIL))) 1221 return (error); 1222 pn->mn_mode = vap->va_mode; 1223 c = 1; 1224 } 1225 1226 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { 1227 /* See the comment in ufs_vnops::ufs_setattr(). 
*/ 1228 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)) && 1229 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || 1230 (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, ap->a_td)))) 1231 return (error); 1232 if (vap->va_atime.tv_sec != VNOVAL) { 1233 pn->mn_atime = vap->va_atime; 1234 } 1235 if (vap->va_mtime.tv_sec != VNOVAL) { 1236 pn->mn_mtime = vap->va_mtime; 1237 } 1238 c = 1; 1239 } 1240 if (c) { 1241 vfs_timestamp(&pn->mn_ctime); 1242 } 1243 return (0); 1244 } 1245 1246 #if 0 1247 struct vop_read_args { 1248 struct vop_generic_args a_gen; 1249 struct vnode *a_vp; 1250 struct uio *a_uio; 1251 int a_ioflag; 1252 struct ucred *a_cred; 1253 }; 1254 #endif 1255 1256 /* 1257 * Read from a file 1258 */ 1259 static int 1260 mqfs_read(struct vop_read_args *ap) 1261 { 1262 char buf[80]; 1263 struct vnode *vp = ap->a_vp; 1264 struct uio *uio = ap->a_uio; 1265 struct mqfs_node *pn; 1266 struct mqueue *mq; 1267 int len, error; 1268 1269 if (vp->v_type != VREG) 1270 return (EINVAL); 1271 1272 pn = VTON(vp); 1273 mq = VTOMQ(vp); 1274 snprintf(buf, sizeof(buf), 1275 "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n", 1276 mq->mq_totalbytes, 1277 mq->mq_maxmsg, 1278 mq->mq_curmsgs, 1279 mq->mq_msgsize); 1280 buf[sizeof(buf)-1] = '\0'; 1281 len = strlen(buf); 1282 error = uiomove_frombuf(buf, len, uio); 1283 return (error); 1284 } 1285 1286 #if 0 1287 struct vop_readdir_args { 1288 struct vop_generic_args a_gen; 1289 struct vnode *a_vp; 1290 struct uio *a_uio; 1291 struct ucred *a_cred; 1292 int *a_eofflag; 1293 int *a_ncookies; 1294 u_long **a_cookies; 1295 }; 1296 #endif 1297 1298 /* 1299 * Return directory entries. 1300 */ 1301 static int 1302 mqfs_readdir(struct vop_readdir_args *ap) 1303 { 1304 struct vnode *vp; 1305 struct mqfs_info *mi; 1306 struct mqfs_node *pd; 1307 struct mqfs_node *pn; 1308 struct dirent entry; 1309 struct uio *uio; 1310 int *tmp_ncookies = NULL; 1311 off_t offset; 1312 int error, i; 1313 1314 vp = ap->a_vp; 1315 mi = VFSTOMQFS(vp->v_mount); 1316 pd = VTON(vp); 1317 uio = ap->a_uio; 1318 1319 if (vp->v_type != VDIR) 1320 return (ENOTDIR); 1321 1322 if (uio->uio_offset < 0) 1323 return (EINVAL); 1324 1325 if (ap->a_ncookies != NULL) { 1326 tmp_ncookies = ap->a_ncookies; 1327 *ap->a_ncookies = 0; 1328 ap->a_ncookies = NULL; 1329 } 1330 1331 error = 0; 1332 offset = 0; 1333 1334 sx_xlock(&mi->mi_lock); 1335 1336 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 1337 entry.d_reclen = sizeof(entry); 1338 if (!pn->mn_fileno) 1339 mqfs_fileno_alloc(mi, pn); 1340 entry.d_fileno = pn->mn_fileno; 1341 for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i) 1342 entry.d_name[i] = pn->mn_name[i]; 1343 entry.d_name[i] = 0; 1344 entry.d_namlen = i; 1345 switch (pn->mn_type) { 1346 case mqfstype_root: 1347 case mqfstype_dir: 1348 case mqfstype_this: 1349 case mqfstype_parent: 1350 entry.d_type = DT_DIR; 1351 break; 1352 case mqfstype_file: 1353 entry.d_type = DT_REG; 1354 break; 1355 case mqfstype_symlink: 1356 entry.d_type = DT_LNK; 1357 break; 1358 default: 1359 panic("%s has unexpected node type: %d", pn->mn_name, 1360 pn->mn_type); 1361 } 1362 if (entry.d_reclen > uio->uio_resid) 1363 break; 1364 if (offset >= uio->uio_offset) { 1365 error = vfs_read_dirent(ap, &entry, offset); 1366 if (error) 1367 break; 1368 } 1369 offset += entry.d_reclen; 1370 } 1371 sx_xunlock(&mi->mi_lock); 1372 1373 uio->uio_offset = offset; 1374 1375 if (tmp_ncookies != NULL) 1376 ap->a_ncookies = tmp_ncookies; 1377 1378 return (error); 1379 } 1380 1381 #ifdef notyet 1382 1383 #if 0 1384 
struct vop_mkdir_args { 1385 struct vnode *a_dvp; 1386 struvt vnode **a_vpp; 1387 struvt componentname *a_cnp; 1388 struct vattr *a_vap; 1389 }; 1390 #endif 1391 1392 /* 1393 * Create a directory. 1394 */ 1395 static int 1396 mqfs_mkdir(struct vop_mkdir_args *ap) 1397 { 1398 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1399 struct componentname *cnp = ap->a_cnp; 1400 struct mqfs_node *pd = VTON(ap->a_dvp); 1401 struct mqfs_node *pn; 1402 int error; 1403 1404 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 1405 return (ENOTDIR); 1406 sx_xlock(&mqfs->mi_lock); 1407 #if 0 1408 /* named node */ 1409 pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen); 1410 if (pn != NULL) { 1411 sx_xunlock(&mqfs->mi_lock); 1412 return (EEXIST); 1413 } 1414 #else 1415 if ((cnp->cn_flags & HASBUF) == 0) 1416 panic("%s: no name", __func__); 1417 #endif 1418 pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen, 1419 ap->a_vap->cn_cred, ap->a_vap->va_mode); 1420 if (pn == NULL) 1421 error = ENOSPC; 1422 else 1423 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 1424 sx_xunlock(&mqfs->mi_lock); 1425 return (error); 1426 } 1427 1428 #if 0 1429 struct vop_rmdir_args { 1430 struct vnode *a_dvp; 1431 struct vnode *a_vp; 1432 struct componentname *a_cnp; 1433 }; 1434 #endif 1435 1436 /* 1437 * Remove a directory. 1438 */ 1439 static int 1440 mqfs_rmdir(struct vop_rmdir_args *ap) 1441 { 1442 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1443 struct mqfs_node *pn = VTON(ap->a_vp); 1444 struct mqfs_node *pt; 1445 1446 if (pn->mn_type != mqfstype_dir) 1447 return (ENOTDIR); 1448 1449 sx_xlock(&mqfs->mi_lock); 1450 if (pn->mn_deleted) { 1451 sx_xunlock(&mqfs->mi_lock); 1452 return (ENOENT); 1453 } 1454 1455 pt = LIST_FIRST(&pn->mn_children); 1456 pt = LIST_NEXT(pt, mn_sibling); 1457 pt = LIST_NEXT(pt, mn_sibling); 1458 if (pt != NULL) { 1459 sx_xunlock(&mqfs->mi_lock); 1460 return (ENOTEMPTY); 1461 } 1462 pt = pn->mn_parent; 1463 pn->mn_parent = NULL; 1464 pn->mn_deleted = 1; 1465 LIST_REMOVE(pn, mn_sibling); 1466 mqnode_release(pn); 1467 mqnode_release(pt); 1468 sx_xunlock(&mqfs->mi_lock); 1469 cache_purge(ap->a_vp); 1470 return (0); 1471 } 1472 1473 #endif /* notyet */ 1474 1475 /* 1476 * Allocate a message queue 1477 */ 1478 static struct mqueue * 1479 mqueue_alloc(const struct mq_attr *attr) 1480 { 1481 struct mqueue *mq; 1482 1483 if (curmq >= maxmq) 1484 return (NULL); 1485 mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO); 1486 TAILQ_INIT(&mq->mq_msgq); 1487 if (attr != NULL) { 1488 mq->mq_maxmsg = attr->mq_maxmsg; 1489 mq->mq_msgsize = attr->mq_msgsize; 1490 } else { 1491 mq->mq_maxmsg = default_maxmsg; 1492 mq->mq_msgsize = default_msgsize; 1493 } 1494 mtx_init(&mq->mq_mutex, "mqueue", NULL, MTX_DEF); 1495 knlist_init(&mq->mq_rsel.si_note, &mq->mq_mutex, NULL, NULL, NULL); 1496 knlist_init(&mq->mq_wsel.si_note, &mq->mq_mutex, NULL, NULL, NULL); 1497 atomic_add_int(&curmq, 1); 1498 return (mq); 1499 } 1500 1501 /* 1502 * Destroy a message queue 1503 */ 1504 static void 1505 mqueue_free(struct mqueue *mq) 1506 { 1507 struct mqueue_msg *msg; 1508 1509 while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) { 1510 TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link); 1511 FREE(msg, M_MQUEUEDATA); 1512 } 1513 1514 mtx_destroy(&mq->mq_mutex); 1515 knlist_destroy(&mq->mq_rsel.si_note); 1516 knlist_destroy(&mq->mq_wsel.si_note); 1517 uma_zfree(mqueue_zone, mq); 1518 atomic_add_int(&curmq, -1); 1519 } 1520 1521 /* 1522 * Load a message from user space 1523 */ 1524 static struct mqueue_msg * 
1525 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio) 1526 { 1527 struct mqueue_msg *msg; 1528 size_t len; 1529 int error; 1530 1531 len = sizeof(struct mqueue_msg) + msg_size; 1532 MALLOC(msg, struct mqueue_msg *, len, M_MQUEUEDATA, M_WAITOK); 1533 error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg), 1534 msg_size); 1535 if (error) { 1536 FREE(msg, M_MQUEUEDATA); 1537 msg = NULL; 1538 } else { 1539 msg->msg_size = msg_size; 1540 msg->msg_prio = msg_prio; 1541 } 1542 return (msg); 1543 } 1544 1545 /* 1546 * Save a message to user space 1547 */ 1548 static int 1549 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio) 1550 { 1551 int error; 1552 1553 error = copyout(((char *)msg) + sizeof(*msg), msg_ptr, 1554 msg->msg_size); 1555 if (error == 0 && msg_prio != NULL) 1556 error = copyout(&msg->msg_prio, msg_prio, sizeof(int)); 1557 return (error); 1558 } 1559 1560 /* 1561 * Free a message's memory 1562 */ 1563 static __inline void 1564 mqueue_freemsg(struct mqueue_msg *msg) 1565 { 1566 FREE(msg, M_MQUEUEDATA); 1567 } 1568 1569 /* 1570 * Send a message. If waitok is false, the thread will not be 1571 * blocked if the queue is full; otherwise, the absolute 1572 * timeout will be checked. 1573 */ 1574 int 1575 mqueue_send(struct mqueue *mq, const char *msg_ptr, 1576 size_t msg_len, unsigned msg_prio, int waitok, 1577 const struct timespec *abs_timeout) 1578 { 1579 struct mqueue_msg *msg; 1580 struct timespec ets, ts, ts2; 1581 struct timeval tv; 1582 int error; 1583 1584 if (msg_prio >= MQ_PRIO_MAX) 1585 return (EINVAL); 1586 if (msg_len > mq->mq_msgsize) 1587 return (EMSGSIZE); 1588 msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio); 1589 if (msg == NULL) 1590 return (EFAULT); 1591 1592 /* O_NONBLOCK case */ 1593 if (!waitok) { 1594 error = _mqueue_send(mq, msg, -1); 1595 if (error) 1596 goto bad; 1597 return (0); 1598 } 1599 1600 /* we allow a null timeout (wait forever) */ 1601 if (abs_timeout == NULL) { 1602 error = _mqueue_send(mq, msg, 0); 1603 if (error) 1604 goto bad; 1605 return (0); 1606 } 1607 1608 /* send it before checking time */ 1609 error = _mqueue_send(mq, msg, -1); 1610 if (error == 0) 1611 return (0); 1612 1613 if (error != EAGAIN) 1614 goto bad; 1615 1616 error = copyin(abs_timeout, &ets, sizeof(ets)); 1617 if (error != 0) 1618 goto bad; 1619 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) { 1620 error = EINVAL; 1621 goto bad; 1622 } 1623 for (;;) { 1624 ts2 = ets; 1625 getnanotime(&ts); 1626 timespecsub(&ts2, &ts); 1627 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { 1628 error = ETIMEDOUT; 1629 break; 1630 } 1631 TIMESPEC_TO_TIMEVAL(&tv, &ts2); 1632 error = _mqueue_send(mq, msg, tvtohz(&tv)); 1633 if (error != ETIMEDOUT) 1634 break; 1635 } 1636 if (error == 0) 1637 return (0); 1638 bad: 1639 mqueue_freemsg(msg); 1640 return (error); 1641 } 1642 1643 /* 1644 * Common routine to send a message 1645 */ 1646 static int 1647 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo) 1648 { 1649 struct mqueue_msg *msg2; 1650 int error = 0; 1651 1652 mtx_lock(&mq->mq_mutex); 1653 while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) { 1654 if (timo < 0) { 1655 mtx_unlock(&mq->mq_mutex); 1656 return (EAGAIN); 1657 } 1658 mq->mq_senders++; 1659 error = msleep(&mq->mq_senders, &mq->mq_mutex, 1660 curthread->td_priority | PCATCH, "mqsend", timo); 1661 mq->mq_senders--; 1662 if (error == EAGAIN) 1663 error = ETIMEDOUT; 1664 } 1665 if (mq->mq_curmsgs >= mq->mq_maxmsg) { 1666 mtx_unlock(&mq->mq_mutex); 1667 return (error);
1668 } 1669 error = 0; 1670 if (TAILQ_EMPTY(&mq->mq_msgq)) { 1671 TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link); 1672 } else { 1673 if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) { 1674 TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link); 1675 } else { 1676 TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) { 1677 if (msg2->msg_prio < msg->msg_prio) 1678 break; 1679 } 1680 TAILQ_INSERT_BEFORE(msg2, msg, msg_link); 1681 } 1682 } 1683 mq->mq_curmsgs++; 1684 mq->mq_totalbytes += msg->msg_size; 1685 if (mq->mq_receivers) 1686 wakeup_one(&mq->mq_receivers); 1687 else if (mq->mq_notifier != NULL) 1688 mqueue_send_notification(mq); 1689 if (mq->mq_flags & MQ_RSEL) { 1690 mq->mq_flags &= ~MQ_RSEL; 1691 selwakeup(&mq->mq_rsel); 1692 } 1693 KNOTE_LOCKED(&mq->mq_rsel.si_note, 0); 1694 mtx_unlock(&mq->mq_mutex); 1695 return (0); 1696 } 1697 1698 /* 1699 * Send a realtime signal to the process which registered itself 1700 * successfully via mq_notify. 1701 */ 1702 static void 1703 mqueue_send_notification(struct mqueue *mq) 1704 { 1705 struct mqueue_notifier *nt; 1706 struct proc *p; 1707 1708 mtx_assert(&mq->mq_mutex, MA_OWNED); 1709 nt = mq->mq_notifier; 1710 if (nt->nt_sigev.sigev_notify != SIGEV_NONE) { 1711 p = nt->nt_proc; 1712 PROC_LOCK(p); 1713 if (!KSI_ONQ(&nt->nt_ksi)) 1714 psignal_event(p, &nt->nt_sigev, &nt->nt_ksi); 1715 PROC_UNLOCK(p); 1716 } 1717 mq->mq_notifier = NULL; 1718 } 1719 1720 /* 1721 * Get a message. If waitok is false, the thread will not be 1722 * blocked if there is no data in the queue; otherwise, the absolute 1723 * timeout will be checked. 1724 */ 1725 int 1726 mqueue_receive(struct mqueue *mq, char *msg_ptr, 1727 size_t msg_len, unsigned *msg_prio, int waitok, 1728 const struct timespec *abs_timeout) 1729 { 1730 struct mqueue_msg *msg; 1731 struct timespec ets, ts, ts2; 1732 struct timeval tv; 1733 int error; 1734 1735 if (msg_len < mq->mq_msgsize) 1736 return (EMSGSIZE); 1737 1738 /* O_NONBLOCK case */ 1739 if (!waitok) { 1740 error = _mqueue_recv(mq, &msg, -1); 1741 if (error) 1742 return (error); 1743 goto received; 1744 } 1745 1746 /* we allow a null timeout (wait forever).
*/ 1747 if (abs_timeout == NULL) { 1748 error = _mqueue_recv(mq, &msg, 0); 1749 if (error) 1750 return (error); 1751 goto received; 1752 } 1753 1754 /* try to get a message before checking time */ 1755 error = _mqueue_recv(mq, &msg, -1); 1756 if (error == 0) 1757 goto received; 1758 1759 if (error != EAGAIN) 1760 return (error); 1761 1762 error = copyin(abs_timeout, &ets, sizeof(ets)); 1763 if (error != 0) 1764 return (error); 1765 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) { 1766 error = EINVAL; 1767 return (error); 1768 } 1769 1770 for (;;) { 1771 ts2 = ets; 1772 getnanotime(&ts); 1773 timespecsub(&ts2, &ts); 1774 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { 1775 error = ETIMEDOUT; 1776 return (error); 1777 } 1778 TIMESPEC_TO_TIMEVAL(&tv, &ts2); 1779 error = _mqueue_recv(mq, &msg, tvtohz(&tv)); 1780 if (error == 0) 1781 break; 1782 if (error != ETIMEDOUT) 1783 return (error); 1784 } 1785 1786 received: 1787 error = mqueue_savemsg(msg, msg_ptr, msg_prio); 1788 if (error == 0) { 1789 curthread->td_retval[0] = msg->msg_size; 1790 curthread->td_retval[1] = 0; 1791 } 1792 mqueue_freemsg(msg); 1793 return (error); 1794 } 1795 1796 /* 1797 * Common routine to receive a message 1798 */ 1799 static int 1800 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo) 1801 { 1802 int error = 0; 1803 1804 mtx_lock(&mq->mq_mutex); 1805 while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) { 1806 if (timo < 0) { 1807 mtx_unlock(&mq->mq_mutex); 1808 return (EAGAIN); 1809 } 1810 mq->mq_receivers++; 1811 error = msleep(&mq->mq_receivers, &mq->mq_mutex, 1812 curthread->td_priority | PCATCH, "mqrecv", timo); 1813 mq->mq_receivers--; 1814 if (error == EAGAIN) 1815 error = ETIMEDOUT; 1816 } 1817 if (*msg != NULL) { 1818 error = 0; 1819 TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link); 1820 mq->mq_curmsgs--; 1821 mq->mq_totalbytes -= (*msg)->msg_size; 1822 if (mq->mq_senders) 1823 wakeup_one(&mq->mq_senders); 1824 if (mq->mq_flags & MQ_WSEL) { 1825 mq->mq_flags &= ~MQ_WSEL; 1826 selwakeup(&mq->mq_wsel); 1827 } 1828 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0); 1829 } 1830 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 && 1831 !TAILQ_EMPTY(&mq->mq_msgq)) { 1832 mqueue_send_notification(mq); 1833 } 1834 mtx_unlock(&mq->mq_mutex); 1835 return (error); 1836 } 1837 1838 static __inline struct mqueue_notifier * 1839 notifier_alloc(void) 1840 { 1841 return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO)); 1842 } 1843 1844 static __inline void 1845 notifier_free(struct mqueue_notifier *p) 1846 { 1847 uma_zfree(mqnoti_zone, p); 1848 } 1849 1850 static struct mqueue_notifier * 1851 notifier_search(struct proc *p, int fd) 1852 { 1853 struct mqueue_notifier *nt; 1854 1855 LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) { 1856 if (nt->nt_ksi.ksi_mqd == fd) 1857 break; 1858 } 1859 return (nt); 1860 } 1861 1862 static __inline void 1863 notifier_insert(struct proc *p, struct mqueue_notifier *nt) 1864 { 1865 LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link); 1866 } 1867 1868 static __inline void 1869 notifier_delete(struct proc *p, struct mqueue_notifier *nt) 1870 { 1871 LIST_REMOVE(nt, nt_link); 1872 notifier_free(nt); 1873 } 1874 1875 static void 1876 notifier_remove(struct proc *p, struct mqueue *mq, int fd) 1877 { 1878 struct mqueue_notifier *nt; 1879 1880 mtx_assert(&mq->mq_mutex, MA_OWNED); 1881 PROC_LOCK(p); 1882 nt = notifier_search(p, fd); 1883 if (nt != NULL) { 1884 if (mq->mq_notifier == nt) 1885 mq->mq_notifier = NULL; 1886 sigqueue_take(&nt->nt_ksi); 1887 notifier_delete(p, nt); 1888 } 
1889 PROC_UNLOCK(p); 1890 } 1891 1892 /* 1893 * Syscall to open a message queue 1894 */ 1895 int 1896 kmq_open(struct thread *td, struct kmq_open_args *uap) 1897 { 1898 char path[MQFS_NAMELEN + 1]; 1899 struct mq_attr attr, *pattr; 1900 struct mqfs_node *pn; 1901 struct filedesc *fdp; 1902 struct file *fp; 1903 struct mqueue *mq; 1904 int fd, error, len, flags, cmode; 1905 1906 if ((uap->flags & O_ACCMODE) == O_ACCMODE) 1907 return (EINVAL); 1908 1909 fdp = td->td_proc->p_fd; 1910 flags = FFLAGS(uap->flags); 1911 cmode = (((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT); 1912 mq = NULL; 1913 if ((flags & O_CREAT) && (uap->attr != NULL)) { 1914 error = copyin(uap->attr, &attr, sizeof(attr)); 1915 if (error) 1916 return (error); 1917 if (attr.mq_maxmsg <= 0 || attr.mq_maxmsg > maxmsg) 1918 return (EINVAL); 1919 if (attr.mq_msgsize <= 0 || attr.mq_msgsize > maxmsgsize) 1920 return (EINVAL); 1921 pattr = &attr; 1922 } else 1923 pattr = NULL; 1924 1925 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL); 1926 if (error) 1927 return (error); 1928 1929 /* 1930 * The first character of name must be a slash (/) character 1931 * and the remaining characters of name cannot include any slash 1932 * characters. 1933 */ 1934 len = strlen(path); 1935 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL) 1936 return (EINVAL); 1937 1938 error = falloc(td, &fp, &fd); 1939 if (error) 1940 return (error); 1941 1942 sx_xlock(&mqfs_data.mi_lock); 1943 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1); 1944 if (pn == NULL) { 1945 if (!(flags & O_CREAT)) { 1946 error = ENOENT; 1947 } else { 1948 mq = mqueue_alloc(pattr); 1949 if (mq == NULL) { 1950 error = ENFILE; 1951 } else { 1952 pn = mqfs_create_file(mqfs_data.mi_root, 1953 path + 1, len - 1, td->td_ucred, 1954 cmode); 1955 if (pn == NULL) { 1956 error = ENOSPC; 1957 mqueue_free(mq); 1958 } 1959 } 1960 } 1961 1962 if (error == 0) { 1963 pn->mn_data = mq; 1964 } 1965 } else { 1966 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) { 1967 error = EEXIST; 1968 } else { 1969 int acc_mode = 0; 1970 1971 if (flags & FREAD) 1972 acc_mode |= VREAD; 1973 if (flags & FWRITE) 1974 acc_mode |= VWRITE; 1975 error = vaccess(VREG, pn->mn_mode, pn->mn_uid, 1976 pn->mn_gid, acc_mode, td->td_ucred, NULL); 1977 } 1978 } 1979 1980 if (error) { 1981 sx_xunlock(&mqfs_data.mi_lock); 1982 fdclose(fdp, fp, fd, td); 1983 fdrop(fp, td); 1984 return (error); 1985 } 1986 1987 mqnode_addref(pn); 1988 sx_xunlock(&mqfs_data.mi_lock); 1989 1990 FILE_LOCK(fp); 1991 fp->f_flag = (flags & (FREAD | FWRITE | O_NONBLOCK)); 1992 fp->f_type = DTYPE_MQUEUE; 1993 fp->f_ops = &mqueueops; 1994 fp->f_data = pn; 1995 FILE_UNLOCK(fp); 1996 1997 FILEDESC_LOCK_FAST(fdp); 1998 if (fdp->fd_ofiles[fd] == fp) 1999 fdp->fd_ofileflags[fd] |= UF_EXCLOSE; 2000 FILEDESC_UNLOCK_FAST(fdp); 2001 td->td_retval[0] = fd; 2002 fdrop(fp, td); 2003 return (0); 2004 } 2005 2006 /* 2007 * Syscall to unlink a message queue 2008 */ 2009 int 2010 kmq_unlink(struct thread *td, struct kmq_unlink_args *uap) 2011 { 2012 char path[MQFS_NAMELEN+1]; 2013 struct mqfs_node *pn; 2014 int error, len; 2015 2016 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL); 2017 if (error) 2018 return (error); 2019 2020 len = strlen(path); 2021 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL) 2022 return (EINVAL); 2023 2024 sx_xlock(&mqfs_data.mi_lock); 2025 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1); 2026 if (pn != NULL) 2027 error = do_unlink(pn, td->td_ucred); 2028 else 2029 error = 
ENOENT; 2030 sx_xunlock(&mqfs_data.mi_lock); 2031 return (error); 2032 } 2033 2034 typedef int (*_fgetf)(struct thread *, int, struct file **); 2035 2036 /* 2037 * Get message queue by giving file slot 2038 */ 2039 static int 2040 _getmq(struct thread *td, int fd, _fgetf func, 2041 struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq) 2042 { 2043 struct mqfs_node *pn; 2044 int error; 2045 2046 error = func(td, fd, fpp); 2047 if (error) 2048 return (error); 2049 if (&mqueueops != (*fpp)->f_ops) { 2050 fdrop(*fpp, td); 2051 return (EBADF); 2052 } 2053 pn = (*fpp)->f_data; 2054 if (ppn) 2055 *ppn = pn; 2056 if (pmq) 2057 *pmq = pn->mn_data; 2058 return (0); 2059 } 2060 2061 static __inline int 2062 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn, 2063 struct mqueue **pmq) 2064 { 2065 return _getmq(td, fd, fget, fpp, ppn, pmq); 2066 } 2067 2068 static __inline int 2069 getmq_read(struct thread *td, int fd, struct file **fpp, 2070 struct mqfs_node **ppn, struct mqueue **pmq) 2071 { 2072 return _getmq(td, fd, fget_read, fpp, ppn, pmq); 2073 } 2074 2075 static __inline int 2076 getmq_write(struct thread *td, int fd, struct file **fpp, 2077 struct mqfs_node **ppn, struct mqueue **pmq) 2078 { 2079 return _getmq(td, fd, fget_write, fpp, ppn, pmq); 2080 } 2081 2082 /* 2083 * Syscall 2084 */ 2085 int 2086 kmq_setattr(struct thread *td, struct kmq_setattr_args *uap) 2087 { 2088 struct mqueue *mq; 2089 struct file *fp; 2090 struct mq_attr attr, oattr; 2091 int error; 2092 2093 if (uap->attr) { 2094 error = copyin(uap->attr, &attr, sizeof(attr)); 2095 if (error) 2096 return (error); 2097 if (attr.mq_flags & ~O_NONBLOCK) 2098 return (EINVAL); 2099 } 2100 error = getmq(td, uap->mqd, &fp, NULL, &mq); 2101 if (error) 2102 return (error); 2103 oattr.mq_maxmsg = mq->mq_maxmsg; 2104 oattr.mq_msgsize = mq->mq_msgsize; 2105 oattr.mq_curmsgs = mq->mq_curmsgs; 2106 FILE_LOCK(fp); 2107 oattr.mq_flags = (O_NONBLOCK & fp->f_flag); 2108 if (uap->attr) { 2109 fp->f_flag &= ~O_NONBLOCK; 2110 fp->f_flag |= (attr.mq_flags & O_NONBLOCK); 2111 } 2112 FILE_UNLOCK(fp); 2113 fdrop(fp, td); 2114 if (uap->oattr) 2115 error = copyout(&oattr, uap->oattr, sizeof(oattr)); 2116 return (error); 2117 } 2118 2119 /* 2120 * Syscall 2121 */ 2122 int 2123 kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap) 2124 { 2125 struct mqueue *mq; 2126 struct file *fp; 2127 int error; 2128 int waitok; 2129 2130 error = getmq_read(td, uap->mqd, &fp, NULL, &mq); 2131 if (error) 2132 return (error); 2133 waitok = !(fp->f_flag & O_NONBLOCK); 2134 error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len, 2135 uap->msg_prio, waitok, uap->abs_timeout); 2136 fdrop(fp, td); 2137 return (error); 2138 } 2139 2140 /* 2141 * Syscall 2142 */ 2143 int 2144 kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap) 2145 { 2146 struct mqueue *mq; 2147 struct file *fp; 2148 int error, waitok; 2149 2150 error = getmq_write(td, uap->mqd, &fp, NULL, &mq); 2151 if (error) 2152 return (error); 2153 waitok = !(fp->f_flag & O_NONBLOCK); 2154 error = mqueue_send(mq, uap->msg_ptr, uap->msg_len, 2155 uap->msg_prio, waitok, uap->abs_timeout); 2156 fdrop(fp, td); 2157 return (error); 2158 } 2159 2160 /* 2161 * Syscall 2162 */ 2163 int 2164 kmq_notify(struct thread *td, struct kmq_notify_args *uap) 2165 { 2166 struct sigevent ev; 2167 struct filedesc *fdp; 2168 struct proc *p; 2169 struct mqueue *mq; 2170 struct file *fp; 2171 struct mqueue_notifier *nt, *newnt = NULL; 2172 int error; 2173 2174 p = td->td_proc; 2175 fdp = 
td->td_proc->p_fd; 2176 if (uap->sigev) { 2177 error = copyin(uap->sigev, &ev, sizeof(ev)); 2178 if (error) 2179 return (error); 2180 if (ev.sigev_notify != SIGEV_SIGNAL && 2181 ev.sigev_notify != SIGEV_THREAD_ID && 2182 ev.sigev_notify != SIGEV_NONE) 2183 return (EINVAL); 2184 if ((ev.sigev_notify == SIGEV_SIGNAL || 2185 ev.sigev_notify == SIGEV_THREAD_ID) && 2186 !_SIG_VALID(ev.sigev_signo)) 2187 return (EINVAL); 2188 } 2189 error = getmq(td, uap->mqd, &fp, NULL, &mq); 2190 if (error) 2191 return (error); 2192 again: 2193 FILEDESC_LOCK_FAST(fdp); 2194 if (fget_locked(fdp, uap->mqd) != fp) { 2195 FILEDESC_UNLOCK_FAST(fdp); 2196 error = EBADF; 2197 goto out; 2198 } 2199 mtx_lock(&mq->mq_mutex); 2200 FILEDESC_UNLOCK_FAST(fdp); 2201 if (uap->sigev != NULL) { 2202 if (mq->mq_notifier != NULL) { 2203 error = EBUSY; 2204 } else { 2205 PROC_LOCK(p); 2206 nt = notifier_search(p, uap->mqd); 2207 if (nt == NULL) { 2208 if (newnt == NULL) { 2209 PROC_UNLOCK(p); 2210 mtx_unlock(&mq->mq_mutex); 2211 newnt = notifier_alloc(); 2212 goto again; 2213 } 2214 } 2215 2216 if (nt != NULL) { 2217 sigqueue_take(&nt->nt_ksi); 2218 if (newnt != NULL) { 2219 notifier_free(newnt); 2220 newnt = NULL; 2221 } 2222 } else { 2223 nt = newnt; 2224 newnt = NULL; 2225 ksiginfo_init(&nt->nt_ksi); 2226 nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT; 2227 nt->nt_ksi.ksi_code = SI_MESGQ; 2228 nt->nt_proc = p; 2229 nt->nt_ksi.ksi_mqd = uap->mqd; 2230 notifier_insert(p, nt); 2231 } 2232 nt->nt_sigev = ev; 2233 mq->mq_notifier = nt; 2234 PROC_UNLOCK(p); 2235 /* 2236 * if there is no receivers and message queue 2237 * is not empty, we should send notification 2238 * as soon as possible. 2239 */ 2240 if (mq->mq_receivers == 0 && 2241 !TAILQ_EMPTY(&mq->mq_msgq)) 2242 mqueue_send_notification(mq); 2243 } 2244 } else { 2245 notifier_remove(p, mq, uap->mqd); 2246 } 2247 mtx_unlock(&mq->mq_mutex); 2248 2249 out: 2250 fdrop(fp, td); 2251 if (newnt != NULL) 2252 notifier_free(newnt); 2253 return (error); 2254 } 2255 2256 static void 2257 mqueue_fdclose(struct thread *td, int fd, struct file *fp) 2258 { 2259 struct filedesc *fdp; 2260 struct mqueue *mq; 2261 2262 fdp = td->td_proc->p_fd; 2263 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 2264 if (fp->f_ops == &mqueueops) { 2265 mq = FPTOMQ(fp); 2266 mtx_lock(&mq->mq_mutex); 2267 notifier_remove(td->td_proc, mq, fd); 2268 2269 /* have to wakeup thread in same process */ 2270 if (mq->mq_flags & MQ_RSEL) { 2271 mq->mq_flags &= ~MQ_RSEL; 2272 selwakeup(&mq->mq_rsel); 2273 } 2274 if (mq->mq_flags & MQ_WSEL) { 2275 mq->mq_flags &= ~MQ_WSEL; 2276 selwakeup(&mq->mq_wsel); 2277 } 2278 mtx_unlock(&mq->mq_mutex); 2279 } 2280 } 2281 2282 static void 2283 mq_proc_exit(void *arg __unused, struct proc *p) 2284 { 2285 struct filedesc *fdp; 2286 struct file *fp; 2287 struct mqueue *mq; 2288 int i; 2289 2290 fdp = p->p_fd; 2291 FILEDESC_LOCK_FAST(fdp); 2292 for (i = 0; i < fdp->fd_nfiles; ++i) { 2293 fp = fget_locked(fdp, i); 2294 if (fp != NULL && fp->f_ops == &mqueueops) { 2295 mq = FPTOMQ(fp); 2296 mtx_lock(&mq->mq_mutex); 2297 notifier_remove(p, FPTOMQ(fp), i); 2298 mtx_unlock(&mq->mq_mutex); 2299 } 2300 } 2301 FILEDESC_UNLOCK_FAST(fdp); 2302 KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left")); 2303 } 2304 2305 static int 2306 mqf_read(struct file *fp, struct uio *uio, struct ucred *active_cred, 2307 int flags, struct thread *td) 2308 { 2309 return (EOPNOTSUPP); 2310 } 2311 2312 static int 2313 mqf_write(struct file *fp, struct uio *uio, struct ucred *active_cred, 2314 int flags, struct thread *td) 2315 { 
2316 return (EOPNOTSUPP); 2317 } 2318 2319 static int 2320 mqf_ioctl(struct file *fp, u_long cmd, void *data, 2321 struct ucred *active_cred, struct thread *td) 2322 { 2323 return (ENOTTY); 2324 } 2325 2326 static int 2327 mqf_poll(struct file *fp, int events, struct ucred *active_cred, 2328 struct thread *td) 2329 { 2330 struct mqueue *mq = FPTOMQ(fp); 2331 int revents = 0; 2332 2333 mtx_lock(&mq->mq_mutex); 2334 if (events & (POLLIN | POLLRDNORM)) { 2335 if (mq->mq_curmsgs) { 2336 revents |= events & (POLLIN | POLLRDNORM); 2337 } else { 2338 mq->mq_flags |= MQ_RSEL; 2339 selrecord(td, &mq->mq_rsel); 2340 } 2341 } 2342 if (events & POLLOUT) { 2343 if (mq->mq_curmsgs < mq->mq_maxmsg) 2344 revents |= POLLOUT; 2345 else { 2346 mq->mq_flags |= MQ_WSEL; 2347 selrecord(td, &mq->mq_wsel); 2348 } 2349 } 2350 mtx_unlock(&mq->mq_mutex); 2351 return (revents); 2352 } 2353 2354 static int 2355 mqf_close(struct file *fp, struct thread *td) 2356 { 2357 struct mqfs_node *pn; 2358 2359 FILE_LOCK(fp); 2360 fp->f_ops = &badfileops; 2361 FILE_UNLOCK(fp); 2362 pn = fp->f_data; 2363 fp->f_data = NULL; 2364 sx_xlock(&mqfs_data.mi_lock); 2365 mqnode_release(pn); 2366 sx_xunlock(&mqfs_data.mi_lock); 2367 return (0); 2368 } 2369 2370 static int 2371 mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 2372 struct thread *td) 2373 { 2374 struct mqfs_node *pn = fp->f_data; 2375 2376 bzero(st, sizeof *st); 2377 st->st_atimespec = pn->mn_atime; 2378 st->st_mtimespec = pn->mn_mtime; 2379 st->st_ctimespec = pn->mn_ctime; 2380 st->st_birthtimespec = pn->mn_birth; 2381 st->st_uid = pn->mn_uid; 2382 st->st_gid = pn->mn_gid; 2383 st->st_mode = S_IFIFO | pn->mn_mode; 2384 return (0); 2385 } 2386 2387 static int 2388 mqf_kqfilter(struct file *fp, struct knote *kn) 2389 { 2390 struct mqueue *mq = FPTOMQ(fp); 2391 int error = 0; 2392 2393 if (kn->kn_filter == EVFILT_READ) { 2394 kn->kn_fop = &mq_rfiltops; 2395 knlist_add(&mq->mq_rsel.si_note, kn, 0); 2396 } else if (kn->kn_filter == EVFILT_WRITE) { 2397 kn->kn_fop = &mq_wfiltops; 2398 knlist_add(&mq->mq_wsel.si_note, kn, 0); 2399 } else 2400 error = EINVAL; 2401 return (error); 2402 } 2403 2404 static void 2405 filt_mqdetach(struct knote *kn) 2406 { 2407 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2408 2409 if (kn->kn_filter == EVFILT_READ) 2410 knlist_remove(&mq->mq_rsel.si_note, kn, 0); 2411 else if (kn->kn_filter == EVFILT_WRITE) 2412 knlist_remove(&mq->mq_wsel.si_note, kn, 0); 2413 else 2414 panic("filt_mqdetach"); 2415 } 2416 2417 static int 2418 filt_mqread(struct knote *kn, long hint) 2419 { 2420 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2421 2422 mtx_assert(&mq->mq_mutex, MA_OWNED); 2423 return (mq->mq_curmsgs != 0); 2424 } 2425 2426 static int 2427 filt_mqwrite(struct knote *kn, long hint) 2428 { 2429 struct mqueue *mq = FPTOMQ(kn->kn_fp); 2430 2431 mtx_assert(&mq->mq_mutex, MA_OWNED); 2432 return (mq->mq_curmsgs < mq->mq_maxmsg); 2433 } 2434 2435 static struct fileops mqueueops = { 2436 .fo_read = mqf_read, 2437 .fo_write = mqf_write, 2438 .fo_ioctl = mqf_ioctl, 2439 .fo_poll = mqf_poll, 2440 .fo_kqfilter = mqf_kqfilter, 2441 .fo_stat = mqf_stat, 2442 .fo_close = mqf_close 2443 }; 2444 2445 static struct vop_vector mqfs_vnodeops = { 2446 .vop_default = &default_vnodeops, 2447 .vop_access = mqfs_access, 2448 .vop_cachedlookup = mqfs_lookup, 2449 .vop_lookup = vfs_cache_lookup, 2450 .vop_reclaim = mqfs_reclaim, 2451 .vop_create = mqfs_create, 2452 .vop_remove = mqfs_remove, 2453 .vop_inactive = mqfs_inactive, 2454 .vop_open = mqfs_open, 2455 .vop_close = 
mqfs_close, 2456 .vop_getattr = mqfs_getattr, 2457 .vop_setattr = mqfs_setattr, 2458 .vop_read = mqfs_read, 2459 .vop_write = VOP_EOPNOTSUPP, 2460 .vop_readdir = mqfs_readdir, 2461 .vop_mkdir = VOP_EOPNOTSUPP, 2462 .vop_rmdir = VOP_EOPNOTSUPP 2463 }; 2464 2465 static struct vfsops mqfs_vfsops = { 2466 .vfs_init = mqfs_init, 2467 .vfs_uninit = mqfs_uninit, 2468 .vfs_mount = mqfs_mount, 2469 .vfs_unmount = mqfs_unmount, 2470 .vfs_root = mqfs_root, 2471 .vfs_statfs = mqfs_statfs, 2472 }; 2473 2474 SYSCALL_MODULE_HELPER(kmq_open); 2475 SYSCALL_MODULE_HELPER(kmq_setattr); 2476 SYSCALL_MODULE_HELPER(kmq_timedsend); 2477 SYSCALL_MODULE_HELPER(kmq_timedreceive); 2478 SYSCALL_MODULE_HELPER(kmq_notify); 2479 SYSCALL_MODULE_HELPER(kmq_unlink); 2480 2481 VFS_SET(mqfs_vfsops, mqueuefs, VFCF_SYNTHETIC); 2482 MODULE_VERSION(mqueuefs, 1); 2483
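/*
 * Illustrative addition (not part of the original module): a sketch of how
 * a userland process would use the notification machinery implemented by
 * kmq_notify() and mqueue_send_notification() above, via the standard POSIX
 * wrapper.  SIGUSR1 and the previously opened descriptor mqd are arbitrary
 * example choices; error checking is omitted.
 *
 *	struct sigevent se;
 *
 *	memset(&se, 0, sizeof(se));
 *	se.sigev_notify = SIGEV_SIGNAL;
 *	se.sigev_signo = SIGUSR1;
 *	mq_notify(mqd, &se);	register: SIGUSR1 is delivered when a message
 *				arrives on the empty queue and no receiver is
 *				blocked in mq_receive()
 *	...
 *	mq_notify(mqd, NULL);	remove the registration
 *
 * The registration is one-shot: mqueue_send_notification() clears
 * mq_notifier after queueing the signal, so the process must call
 * mq_notify() again to re-arm it.
 */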