/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2016-2017 Robert N. M. Watson
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * POSIX message queue implementation.
 *
 * 1) A mqueue filesystem can be mounted; each message queue then appears
 *    in the mounted directory, where the user can change a queue's
 *    permissions and ownership or remove a queue.  Manually creating a
 *    file in the directory creates a kernel message queue with default
 *    attributes and the same name; this method is not recommended, since
 *    the mq_open() syscall lets the user specify different attributes.
 *    The file system may be mounted multiple times at different mount
 *    points, but all mounts show the same contents.
 *
 * 2) Standard POSIX message queue API.  The syscalls do not go through the
 *    VFS layer but operate directly on the internal data structures, so the
 *    IPC facility can be used without mounting the mqueue file system.
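 *
 * As an illustrative userland sketch (not part of this file; the queue
 * name "/myq" and attributes are arbitrary and error handling is
 * abbreviated), the facility is normally consumed through the POSIX
 * wrappers, which on FreeBSD live in librt (link with -lrt):
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *		char buf[128];		// receive buffer must be >= mq_msgsize
 *		unsigned int prio;
 *		mqd_t mqd;
 *
 *		mqd = mq_open("/myq", O_RDWR | O_CREAT, 0600, &attr);
 *		if (mqd == (mqd_t)-1)
 *			err(1, "mq_open");
 *		if (mq_send(mqd, "hello", strlen("hello") + 1, 1) == -1)
 *			err(1, "mq_send");
 *		if (mq_receive(mqd, buf, sizeof(buf), &prio) == -1)
 *			err(1, "mq_receive");
 *		printf("%s (prio %u)\n", buf, prio);
 *		mq_close(mqd);
 *		mq_unlink("/myq");
 *		return (0);
 *	}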
51 */ 52 53 #include <sys/cdefs.h> 54 __FBSDID("$FreeBSD$"); 55 56 #include "opt_capsicum.h" 57 58 #include <sys/param.h> 59 #include <sys/kernel.h> 60 #include <sys/systm.h> 61 #include <sys/limits.h> 62 #include <sys/malloc.h> 63 #include <sys/buf.h> 64 #include <sys/capsicum.h> 65 #include <sys/dirent.h> 66 #include <sys/event.h> 67 #include <sys/eventhandler.h> 68 #include <sys/fcntl.h> 69 #include <sys/file.h> 70 #include <sys/filedesc.h> 71 #include <sys/jail.h> 72 #include <sys/lock.h> 73 #include <sys/module.h> 74 #include <sys/mount.h> 75 #include <sys/mqueue.h> 76 #include <sys/mutex.h> 77 #include <sys/namei.h> 78 #include <sys/posix4.h> 79 #include <sys/poll.h> 80 #include <sys/priv.h> 81 #include <sys/proc.h> 82 #include <sys/queue.h> 83 #include <sys/sysproto.h> 84 #include <sys/stat.h> 85 #include <sys/syscall.h> 86 #include <sys/syscallsubr.h> 87 #include <sys/sysent.h> 88 #include <sys/sx.h> 89 #include <sys/sysctl.h> 90 #include <sys/taskqueue.h> 91 #include <sys/unistd.h> 92 #include <sys/user.h> 93 #include <sys/vnode.h> 94 #include <machine/atomic.h> 95 96 #include <security/audit/audit.h> 97 98 FEATURE(p1003_1b_mqueue, "POSIX P1003.1B message queues support"); 99 100 /* 101 * Limits and constants 102 */ 103 #define MQFS_NAMELEN NAME_MAX 104 #define MQFS_DELEN (8 + MQFS_NAMELEN) 105 106 /* node types */ 107 typedef enum { 108 mqfstype_none = 0, 109 mqfstype_root, 110 mqfstype_dir, 111 mqfstype_this, 112 mqfstype_parent, 113 mqfstype_file, 114 mqfstype_symlink, 115 } mqfs_type_t; 116 117 struct mqfs_node; 118 119 /* 120 * mqfs_info: describes a mqfs instance 121 */ 122 struct mqfs_info { 123 struct sx mi_lock; 124 struct mqfs_node *mi_root; 125 struct unrhdr *mi_unrhdr; 126 }; 127 128 struct mqfs_vdata { 129 LIST_ENTRY(mqfs_vdata) mv_link; 130 struct mqfs_node *mv_node; 131 struct vnode *mv_vnode; 132 struct task mv_task; 133 }; 134 135 /* 136 * mqfs_node: describes a node (file or directory) within a mqfs 137 */ 138 struct mqfs_node { 139 char mn_name[MQFS_NAMELEN+1]; 140 struct mqfs_info *mn_info; 141 struct mqfs_node *mn_parent; 142 LIST_HEAD(,mqfs_node) mn_children; 143 LIST_ENTRY(mqfs_node) mn_sibling; 144 LIST_HEAD(,mqfs_vdata) mn_vnodes; 145 const void *mn_pr_root; 146 int mn_refcount; 147 mqfs_type_t mn_type; 148 int mn_deleted; 149 uint32_t mn_fileno; 150 void *mn_data; 151 struct timespec mn_birth; 152 struct timespec mn_ctime; 153 struct timespec mn_atime; 154 struct timespec mn_mtime; 155 uid_t mn_uid; 156 gid_t mn_gid; 157 int mn_mode; 158 }; 159 160 #define VTON(vp) (((struct mqfs_vdata *)((vp)->v_data))->mv_node) 161 #define VTOMQ(vp) ((struct mqueue *)(VTON(vp)->mn_data)) 162 #define VFSTOMQFS(m) ((struct mqfs_info *)((m)->mnt_data)) 163 #define FPTOMQ(fp) ((struct mqueue *)(((struct mqfs_node *) \ 164 (fp)->f_data)->mn_data)) 165 166 TAILQ_HEAD(msgq, mqueue_msg); 167 168 struct mqueue; 169 170 struct mqueue_notifier { 171 LIST_ENTRY(mqueue_notifier) nt_link; 172 struct sigevent nt_sigev; 173 ksiginfo_t nt_ksi; 174 struct proc *nt_proc; 175 }; 176 177 struct mqueue { 178 struct mtx mq_mutex; 179 int mq_flags; 180 long mq_maxmsg; 181 long mq_msgsize; 182 long mq_curmsgs; 183 long mq_totalbytes; 184 struct msgq mq_msgq; 185 int mq_receivers; 186 int mq_senders; 187 struct selinfo mq_rsel; 188 struct selinfo mq_wsel; 189 struct mqueue_notifier *mq_notifier; 190 }; 191 192 #define MQ_RSEL 0x01 193 #define MQ_WSEL 0x02 194 195 struct mqueue_msg { 196 TAILQ_ENTRY(mqueue_msg) msg_link; 197 unsigned int msg_prio; 198 unsigned int msg_size; 199 /* following real 
data... */ 200 }; 201 202 static SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 203 "POSIX real time message queue"); 204 205 static int default_maxmsg = 10; 206 static int default_msgsize = 1024; 207 208 static int maxmsg = 100; 209 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW, 210 &maxmsg, 0, "Default maximum messages in queue"); 211 static int maxmsgsize = 16384; 212 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW, 213 &maxmsgsize, 0, "Default maximum message size"); 214 static int maxmq = 100; 215 SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW, 216 &maxmq, 0, "maximum message queues"); 217 static int curmq = 0; 218 SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW, 219 &curmq, 0, "current message queue number"); 220 static int unloadable = 0; 221 static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data"); 222 223 static eventhandler_tag exit_tag; 224 225 /* Only one instance per-system */ 226 static struct mqfs_info mqfs_data; 227 static uma_zone_t mqnode_zone; 228 static uma_zone_t mqueue_zone; 229 static uma_zone_t mvdata_zone; 230 static uma_zone_t mqnoti_zone; 231 static struct vop_vector mqfs_vnodeops; 232 static struct fileops mqueueops; 233 static unsigned mqfs_osd_jail_slot; 234 235 /* 236 * Directory structure construction and manipulation 237 */ 238 #ifdef notyet 239 static struct mqfs_node *mqfs_create_dir(struct mqfs_node *parent, 240 const char *name, int namelen, struct ucred *cred, int mode); 241 static struct mqfs_node *mqfs_create_link(struct mqfs_node *parent, 242 const char *name, int namelen, struct ucred *cred, int mode); 243 #endif 244 245 static struct mqfs_node *mqfs_create_file(struct mqfs_node *parent, 246 const char *name, int namelen, struct ucred *cred, int mode); 247 static int mqfs_destroy(struct mqfs_node *mn); 248 static void mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn); 249 static void mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn); 250 static int mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn); 251 static int mqfs_prison_remove(void *obj, void *data); 252 253 /* 254 * Message queue construction and maniplation 255 */ 256 static struct mqueue *mqueue_alloc(const struct mq_attr *attr); 257 static void mqueue_free(struct mqueue *mq); 258 static int mqueue_send(struct mqueue *mq, const char *msg_ptr, 259 size_t msg_len, unsigned msg_prio, int waitok, 260 const struct timespec *abs_timeout); 261 static int mqueue_receive(struct mqueue *mq, char *msg_ptr, 262 size_t msg_len, unsigned *msg_prio, int waitok, 263 const struct timespec *abs_timeout); 264 static int _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, 265 int timo); 266 static int _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, 267 int timo); 268 static void mqueue_send_notification(struct mqueue *mq); 269 static void mqueue_fdclose(struct thread *td, int fd, struct file *fp); 270 static void mq_proc_exit(void *arg, struct proc *p); 271 272 /* 273 * kqueue filters 274 */ 275 static void filt_mqdetach(struct knote *kn); 276 static int filt_mqread(struct knote *kn, long hint); 277 static int filt_mqwrite(struct knote *kn, long hint); 278 279 struct filterops mq_rfiltops = { 280 .f_isfd = 1, 281 .f_detach = filt_mqdetach, 282 .f_event = filt_mqread, 283 }; 284 struct filterops mq_wfiltops = { 285 .f_isfd = 1, 286 .f_detach = filt_mqdetach, 287 .f_event = filt_mqwrite, 288 }; 289 290 /* 291 * Initialize fileno bitmap 292 */ 293 static void 294 mqfs_fileno_init(struct mqfs_info *mi) 
295 { 296 struct unrhdr *up; 297 298 up = new_unrhdr(1, INT_MAX, NULL); 299 mi->mi_unrhdr = up; 300 } 301 302 /* 303 * Tear down fileno bitmap 304 */ 305 static void 306 mqfs_fileno_uninit(struct mqfs_info *mi) 307 { 308 struct unrhdr *up; 309 310 up = mi->mi_unrhdr; 311 mi->mi_unrhdr = NULL; 312 delete_unrhdr(up); 313 } 314 315 /* 316 * Allocate a file number 317 */ 318 static void 319 mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn) 320 { 321 /* make sure our parent has a file number */ 322 if (mn->mn_parent && !mn->mn_parent->mn_fileno) 323 mqfs_fileno_alloc(mi, mn->mn_parent); 324 325 switch (mn->mn_type) { 326 case mqfstype_root: 327 case mqfstype_dir: 328 case mqfstype_file: 329 case mqfstype_symlink: 330 mn->mn_fileno = alloc_unr(mi->mi_unrhdr); 331 break; 332 case mqfstype_this: 333 KASSERT(mn->mn_parent != NULL, 334 ("mqfstype_this node has no parent")); 335 mn->mn_fileno = mn->mn_parent->mn_fileno; 336 break; 337 case mqfstype_parent: 338 KASSERT(mn->mn_parent != NULL, 339 ("mqfstype_parent node has no parent")); 340 if (mn->mn_parent == mi->mi_root) { 341 mn->mn_fileno = mn->mn_parent->mn_fileno; 342 break; 343 } 344 KASSERT(mn->mn_parent->mn_parent != NULL, 345 ("mqfstype_parent node has no grandparent")); 346 mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno; 347 break; 348 default: 349 KASSERT(0, 350 ("mqfs_fileno_alloc() called for unknown type node: %d", 351 mn->mn_type)); 352 break; 353 } 354 } 355 356 /* 357 * Release a file number 358 */ 359 static void 360 mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn) 361 { 362 switch (mn->mn_type) { 363 case mqfstype_root: 364 case mqfstype_dir: 365 case mqfstype_file: 366 case mqfstype_symlink: 367 free_unr(mi->mi_unrhdr, mn->mn_fileno); 368 break; 369 case mqfstype_this: 370 case mqfstype_parent: 371 /* ignore these, as they don't "own" their file number */ 372 break; 373 default: 374 KASSERT(0, 375 ("mqfs_fileno_free() called for unknown type node: %d", 376 mn->mn_type)); 377 break; 378 } 379 } 380 381 static __inline struct mqfs_node * 382 mqnode_alloc(void) 383 { 384 return uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO); 385 } 386 387 static __inline void 388 mqnode_free(struct mqfs_node *node) 389 { 390 uma_zfree(mqnode_zone, node); 391 } 392 393 static __inline void 394 mqnode_addref(struct mqfs_node *node) 395 { 396 atomic_add_int(&node->mn_refcount, 1); 397 } 398 399 static __inline void 400 mqnode_release(struct mqfs_node *node) 401 { 402 struct mqfs_info *mqfs; 403 int old, exp; 404 405 mqfs = node->mn_info; 406 old = atomic_fetchadd_int(&node->mn_refcount, -1); 407 if (node->mn_type == mqfstype_dir || 408 node->mn_type == mqfstype_root) 409 exp = 3; /* include . and .. 
*/ 410 else 411 exp = 1; 412 if (old == exp) { 413 int locked = sx_xlocked(&mqfs->mi_lock); 414 if (!locked) 415 sx_xlock(&mqfs->mi_lock); 416 mqfs_destroy(node); 417 if (!locked) 418 sx_xunlock(&mqfs->mi_lock); 419 } 420 } 421 422 /* 423 * Add a node to a directory 424 */ 425 static int 426 mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node) 427 { 428 KASSERT(parent != NULL, ("%s(): parent is NULL", __func__)); 429 KASSERT(parent->mn_info != NULL, 430 ("%s(): parent has no mn_info", __func__)); 431 KASSERT(parent->mn_type == mqfstype_dir || 432 parent->mn_type == mqfstype_root, 433 ("%s(): parent is not a directory", __func__)); 434 435 node->mn_info = parent->mn_info; 436 node->mn_parent = parent; 437 LIST_INIT(&node->mn_children); 438 LIST_INIT(&node->mn_vnodes); 439 LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling); 440 mqnode_addref(parent); 441 return (0); 442 } 443 444 static struct mqfs_node * 445 mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode, 446 int nodetype) 447 { 448 struct mqfs_node *node; 449 450 node = mqnode_alloc(); 451 strncpy(node->mn_name, name, namelen); 452 node->mn_pr_root = cred->cr_prison->pr_root; 453 node->mn_type = nodetype; 454 node->mn_refcount = 1; 455 vfs_timestamp(&node->mn_birth); 456 node->mn_ctime = node->mn_atime = node->mn_mtime 457 = node->mn_birth; 458 node->mn_uid = cred->cr_uid; 459 node->mn_gid = cred->cr_gid; 460 node->mn_mode = mode; 461 return (node); 462 } 463 464 /* 465 * Create a file 466 */ 467 static struct mqfs_node * 468 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen, 469 struct ucred *cred, int mode) 470 { 471 struct mqfs_node *node; 472 473 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file); 474 if (mqfs_add_node(parent, node) != 0) { 475 mqnode_free(node); 476 return (NULL); 477 } 478 return (node); 479 } 480 481 /* 482 * Add . and .. 
to a directory 483 */ 484 static int 485 mqfs_fixup_dir(struct mqfs_node *parent) 486 { 487 struct mqfs_node *dir; 488 489 dir = mqnode_alloc(); 490 dir->mn_name[0] = '.'; 491 dir->mn_type = mqfstype_this; 492 dir->mn_refcount = 1; 493 if (mqfs_add_node(parent, dir) != 0) { 494 mqnode_free(dir); 495 return (-1); 496 } 497 498 dir = mqnode_alloc(); 499 dir->mn_name[0] = dir->mn_name[1] = '.'; 500 dir->mn_type = mqfstype_parent; 501 dir->mn_refcount = 1; 502 503 if (mqfs_add_node(parent, dir) != 0) { 504 mqnode_free(dir); 505 return (-1); 506 } 507 508 return (0); 509 } 510 511 #ifdef notyet 512 513 /* 514 * Create a directory 515 */ 516 static struct mqfs_node * 517 mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen, 518 struct ucred *cred, int mode) 519 { 520 struct mqfs_node *node; 521 522 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir); 523 if (mqfs_add_node(parent, node) != 0) { 524 mqnode_free(node); 525 return (NULL); 526 } 527 528 if (mqfs_fixup_dir(node) != 0) { 529 mqfs_destroy(node); 530 return (NULL); 531 } 532 return (node); 533 } 534 535 /* 536 * Create a symlink 537 */ 538 static struct mqfs_node * 539 mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen, 540 struct ucred *cred, int mode) 541 { 542 struct mqfs_node *node; 543 544 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink); 545 if (mqfs_add_node(parent, node) != 0) { 546 mqnode_free(node); 547 return (NULL); 548 } 549 return (node); 550 } 551 552 #endif 553 554 /* 555 * Destroy a node or a tree of nodes 556 */ 557 static int 558 mqfs_destroy(struct mqfs_node *node) 559 { 560 struct mqfs_node *parent; 561 562 KASSERT(node != NULL, 563 ("%s(): node is NULL", __func__)); 564 KASSERT(node->mn_info != NULL, 565 ("%s(): node has no mn_info", __func__)); 566 567 /* destroy children */ 568 if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root) 569 while (! LIST_EMPTY(&node->mn_children)) 570 mqfs_destroy(LIST_FIRST(&node->mn_children)); 571 572 /* unlink from parent */ 573 if ((parent = node->mn_parent) != NULL) { 574 KASSERT(parent->mn_info == node->mn_info, 575 ("%s(): parent has different mn_info", __func__)); 576 LIST_REMOVE(node, mn_sibling); 577 } 578 579 if (node->mn_fileno != 0) 580 mqfs_fileno_free(node->mn_info, node); 581 if (node->mn_data != NULL) 582 mqueue_free(node->mn_data); 583 mqnode_free(node); 584 return (0); 585 } 586 587 /* 588 * Mount a mqfs instance 589 */ 590 static int 591 mqfs_mount(struct mount *mp) 592 { 593 struct statfs *sbp; 594 595 if (mp->mnt_flag & MNT_UPDATE) 596 return (EOPNOTSUPP); 597 598 mp->mnt_data = &mqfs_data; 599 MNT_ILOCK(mp); 600 mp->mnt_flag |= MNT_LOCAL; 601 MNT_IUNLOCK(mp); 602 vfs_getnewfsid(mp); 603 604 sbp = &mp->mnt_stat; 605 vfs_mountedfrom(mp, "mqueue"); 606 sbp->f_bsize = PAGE_SIZE; 607 sbp->f_iosize = PAGE_SIZE; 608 sbp->f_blocks = 1; 609 sbp->f_bfree = 0; 610 sbp->f_bavail = 0; 611 sbp->f_files = 1; 612 sbp->f_ffree = 0; 613 return (0); 614 } 615 616 /* 617 * Unmount a mqfs instance 618 */ 619 static int 620 mqfs_unmount(struct mount *mp, int mntflags) 621 { 622 int error; 623 624 error = vflush(mp, 0, (mntflags & MNT_FORCE) ? 
FORCECLOSE : 0, 625 curthread); 626 return (error); 627 } 628 629 /* 630 * Return a root vnode 631 */ 632 static int 633 mqfs_root(struct mount *mp, int flags, struct vnode **vpp) 634 { 635 struct mqfs_info *mqfs; 636 int ret; 637 638 mqfs = VFSTOMQFS(mp); 639 ret = mqfs_allocv(mp, vpp, mqfs->mi_root); 640 return (ret); 641 } 642 643 /* 644 * Return filesystem stats 645 */ 646 static int 647 mqfs_statfs(struct mount *mp, struct statfs *sbp) 648 { 649 /* XXX update statistics */ 650 return (0); 651 } 652 653 /* 654 * Initialize a mqfs instance 655 */ 656 static int 657 mqfs_init(struct vfsconf *vfc) 658 { 659 struct mqfs_node *root; 660 struct mqfs_info *mi; 661 osd_method_t methods[PR_MAXMETHOD] = { 662 [PR_METHOD_REMOVE] = mqfs_prison_remove, 663 }; 664 665 mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node), 666 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 667 mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue), 668 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 669 mvdata_zone = uma_zcreate("mvdata", 670 sizeof(struct mqfs_vdata), NULL, NULL, NULL, 671 NULL, UMA_ALIGN_PTR, 0); 672 mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier), 673 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 674 mi = &mqfs_data; 675 sx_init(&mi->mi_lock, "mqfs lock"); 676 /* set up the root diretory */ 677 root = mqfs_create_node("/", 1, curthread->td_ucred, 01777, 678 mqfstype_root); 679 root->mn_info = mi; 680 LIST_INIT(&root->mn_children); 681 LIST_INIT(&root->mn_vnodes); 682 mi->mi_root = root; 683 mqfs_fileno_init(mi); 684 mqfs_fileno_alloc(mi, root); 685 mqfs_fixup_dir(root); 686 exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL, 687 EVENTHANDLER_PRI_ANY); 688 mq_fdclose = mqueue_fdclose; 689 p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING); 690 mqfs_osd_jail_slot = osd_jail_register(NULL, methods); 691 return (0); 692 } 693 694 /* 695 * Destroy a mqfs instance 696 */ 697 static int 698 mqfs_uninit(struct vfsconf *vfc) 699 { 700 struct mqfs_info *mi; 701 702 if (!unloadable) 703 return (EOPNOTSUPP); 704 osd_jail_deregister(mqfs_osd_jail_slot); 705 EVENTHANDLER_DEREGISTER(process_exit, exit_tag); 706 mi = &mqfs_data; 707 mqfs_destroy(mi->mi_root); 708 mi->mi_root = NULL; 709 mqfs_fileno_uninit(mi); 710 sx_destroy(&mi->mi_lock); 711 uma_zdestroy(mqnode_zone); 712 uma_zdestroy(mqueue_zone); 713 uma_zdestroy(mvdata_zone); 714 uma_zdestroy(mqnoti_zone); 715 return (0); 716 } 717 718 /* 719 * task routine 720 */ 721 static void 722 do_recycle(void *context, int pending __unused) 723 { 724 struct vnode *vp = (struct vnode *)context; 725 726 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 727 vrecycle(vp); 728 VOP_UNLOCK(vp); 729 vdrop(vp); 730 } 731 732 /* 733 * Allocate a vnode 734 */ 735 static int 736 mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn) 737 { 738 struct mqfs_vdata *vd; 739 struct mqfs_info *mqfs; 740 struct vnode *newvpp; 741 int error; 742 743 mqfs = pn->mn_info; 744 *vpp = NULL; 745 sx_xlock(&mqfs->mi_lock); 746 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 747 if (vd->mv_vnode->v_mount == mp) { 748 vhold(vd->mv_vnode); 749 break; 750 } 751 } 752 753 if (vd != NULL) { 754 found: 755 *vpp = vd->mv_vnode; 756 sx_xunlock(&mqfs->mi_lock); 757 error = vget(*vpp, LK_RETRY | LK_EXCLUSIVE); 758 vdrop(*vpp); 759 return (error); 760 } 761 sx_xunlock(&mqfs->mi_lock); 762 763 error = getnewvnode("mqueue", mp, &mqfs_vnodeops, &newvpp); 764 if (error) 765 return (error); 766 vn_lock(newvpp, LK_EXCLUSIVE | LK_RETRY); 767 error = 
insmntque(newvpp, mp); 768 if (error != 0) 769 return (error); 770 771 sx_xlock(&mqfs->mi_lock); 772 /* 773 * Check if it has already been allocated 774 * while we were blocked. 775 */ 776 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 777 if (vd->mv_vnode->v_mount == mp) { 778 vhold(vd->mv_vnode); 779 sx_xunlock(&mqfs->mi_lock); 780 781 vgone(newvpp); 782 vput(newvpp); 783 goto found; 784 } 785 } 786 787 *vpp = newvpp; 788 789 vd = uma_zalloc(mvdata_zone, M_WAITOK); 790 (*vpp)->v_data = vd; 791 vd->mv_vnode = *vpp; 792 vd->mv_node = pn; 793 TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp); 794 LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link); 795 mqnode_addref(pn); 796 switch (pn->mn_type) { 797 case mqfstype_root: 798 (*vpp)->v_vflag = VV_ROOT; 799 /* fall through */ 800 case mqfstype_dir: 801 case mqfstype_this: 802 case mqfstype_parent: 803 (*vpp)->v_type = VDIR; 804 break; 805 case mqfstype_file: 806 (*vpp)->v_type = VREG; 807 break; 808 case mqfstype_symlink: 809 (*vpp)->v_type = VLNK; 810 break; 811 case mqfstype_none: 812 KASSERT(0, ("mqfs_allocf called for null node\n")); 813 default: 814 panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type); 815 } 816 sx_xunlock(&mqfs->mi_lock); 817 return (0); 818 } 819 820 /* 821 * Search a directory entry 822 */ 823 static struct mqfs_node * 824 mqfs_search(struct mqfs_node *pd, const char *name, int len, struct ucred *cred) 825 { 826 struct mqfs_node *pn; 827 const void *pr_root; 828 829 sx_assert(&pd->mn_info->mi_lock, SX_LOCKED); 830 pr_root = cred->cr_prison->pr_root; 831 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 832 /* Only match names within the same prison root directory */ 833 if ((pn->mn_pr_root == NULL || pn->mn_pr_root == pr_root) && 834 strncmp(pn->mn_name, name, len) == 0 && 835 pn->mn_name[len] == '\0') 836 return (pn); 837 } 838 return (NULL); 839 } 840 841 /* 842 * Look up a file or directory. 
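 * "." resolves to the directory itself and ".." to its parent without
 * consulting the child list; any other name is matched against the
 * directory's children under the mqfs lock, and only nodes created
 * under the caller's prison root are visible.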
843 */ 844 static int 845 mqfs_lookupx(struct vop_cachedlookup_args *ap) 846 { 847 struct componentname *cnp; 848 struct vnode *dvp, **vpp; 849 struct mqfs_node *pd; 850 struct mqfs_node *pn; 851 struct mqfs_info *mqfs; 852 int nameiop, flags, error, namelen; 853 char *pname; 854 struct thread *td; 855 856 cnp = ap->a_cnp; 857 vpp = ap->a_vpp; 858 dvp = ap->a_dvp; 859 pname = cnp->cn_nameptr; 860 namelen = cnp->cn_namelen; 861 td = cnp->cn_thread; 862 flags = cnp->cn_flags; 863 nameiop = cnp->cn_nameiop; 864 pd = VTON(dvp); 865 pn = NULL; 866 mqfs = pd->mn_info; 867 *vpp = NULLVP; 868 869 if (dvp->v_type != VDIR) 870 return (ENOTDIR); 871 872 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread); 873 if (error) 874 return (error); 875 876 /* shortcut: check if the name is too long */ 877 if (cnp->cn_namelen >= MQFS_NAMELEN) 878 return (ENOENT); 879 880 /* self */ 881 if (namelen == 1 && pname[0] == '.') { 882 if ((flags & ISLASTCN) && nameiop != LOOKUP) 883 return (EINVAL); 884 pn = pd; 885 *vpp = dvp; 886 VREF(dvp); 887 return (0); 888 } 889 890 /* parent */ 891 if (cnp->cn_flags & ISDOTDOT) { 892 if (dvp->v_vflag & VV_ROOT) 893 return (EIO); 894 if ((flags & ISLASTCN) && nameiop != LOOKUP) 895 return (EINVAL); 896 VOP_UNLOCK(dvp); 897 KASSERT(pd->mn_parent, ("non-root directory has no parent")); 898 pn = pd->mn_parent; 899 error = mqfs_allocv(dvp->v_mount, vpp, pn); 900 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); 901 return (error); 902 } 903 904 /* named node */ 905 sx_xlock(&mqfs->mi_lock); 906 pn = mqfs_search(pd, pname, namelen, cnp->cn_cred); 907 if (pn != NULL) 908 mqnode_addref(pn); 909 sx_xunlock(&mqfs->mi_lock); 910 911 /* found */ 912 if (pn != NULL) { 913 /* DELETE */ 914 if (nameiop == DELETE && (flags & ISLASTCN)) { 915 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 916 if (error) { 917 mqnode_release(pn); 918 return (error); 919 } 920 if (*vpp == dvp) { 921 VREF(dvp); 922 *vpp = dvp; 923 mqnode_release(pn); 924 return (0); 925 } 926 } 927 928 /* allocate vnode */ 929 error = mqfs_allocv(dvp->v_mount, vpp, pn); 930 mqnode_release(pn); 931 if (error == 0 && cnp->cn_flags & MAKEENTRY) 932 cache_enter(dvp, *vpp, cnp); 933 return (error); 934 } 935 936 /* not found */ 937 938 /* will create a new entry in the directory ? 
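	 * Creation is only attempted for the final component with the
	 * parent left locked; the pathname buffer is preserved
	 * (SAVENAME) and EJUSTRETURN tells the caller to proceed with
	 * VOP_CREATE().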
*/ 939 if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT) 940 && (flags & ISLASTCN)) { 941 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td); 942 if (error) 943 return (error); 944 cnp->cn_flags |= SAVENAME; 945 return (EJUSTRETURN); 946 } 947 return (ENOENT); 948 } 949 950 #if 0 951 struct vop_lookup_args { 952 struct vop_generic_args a_gen; 953 struct vnode *a_dvp; 954 struct vnode **a_vpp; 955 struct componentname *a_cnp; 956 }; 957 #endif 958 959 /* 960 * vnode lookup operation 961 */ 962 static int 963 mqfs_lookup(struct vop_cachedlookup_args *ap) 964 { 965 int rc; 966 967 rc = mqfs_lookupx(ap); 968 return (rc); 969 } 970 971 #if 0 972 struct vop_create_args { 973 struct vnode *a_dvp; 974 struct vnode **a_vpp; 975 struct componentname *a_cnp; 976 struct vattr *a_vap; 977 }; 978 #endif 979 980 /* 981 * vnode creation operation 982 */ 983 static int 984 mqfs_create(struct vop_create_args *ap) 985 { 986 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 987 struct componentname *cnp = ap->a_cnp; 988 struct mqfs_node *pd; 989 struct mqfs_node *pn; 990 struct mqueue *mq; 991 int error; 992 993 pd = VTON(ap->a_dvp); 994 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 995 return (ENOTDIR); 996 mq = mqueue_alloc(NULL); 997 if (mq == NULL) 998 return (EAGAIN); 999 sx_xlock(&mqfs->mi_lock); 1000 if ((cnp->cn_flags & HASBUF) == 0) 1001 panic("%s: no name", __func__); 1002 pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen, 1003 cnp->cn_cred, ap->a_vap->va_mode); 1004 if (pn == NULL) { 1005 sx_xunlock(&mqfs->mi_lock); 1006 error = ENOSPC; 1007 } else { 1008 mqnode_addref(pn); 1009 sx_xunlock(&mqfs->mi_lock); 1010 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 1011 mqnode_release(pn); 1012 if (error) 1013 mqfs_destroy(pn); 1014 else 1015 pn->mn_data = mq; 1016 } 1017 if (error) 1018 mqueue_free(mq); 1019 return (error); 1020 } 1021 1022 /* 1023 * Remove an entry 1024 */ 1025 static 1026 int do_unlink(struct mqfs_node *pn, struct ucred *ucred) 1027 { 1028 struct mqfs_node *parent; 1029 struct mqfs_vdata *vd; 1030 int error = 0; 1031 1032 sx_assert(&pn->mn_info->mi_lock, SX_LOCKED); 1033 1034 if (ucred->cr_uid != pn->mn_uid && 1035 (error = priv_check_cred(ucred, PRIV_MQ_ADMIN)) != 0) 1036 error = EACCES; 1037 else if (!pn->mn_deleted) { 1038 parent = pn->mn_parent; 1039 pn->mn_parent = NULL; 1040 pn->mn_deleted = 1; 1041 LIST_REMOVE(pn, mn_sibling); 1042 LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) { 1043 cache_purge(vd->mv_vnode); 1044 vhold(vd->mv_vnode); 1045 taskqueue_enqueue(taskqueue_thread, &vd->mv_task); 1046 } 1047 mqnode_release(pn); 1048 mqnode_release(parent); 1049 } else 1050 error = ENOENT; 1051 return (error); 1052 } 1053 1054 #if 0 1055 struct vop_remove_args { 1056 struct vnode *a_dvp; 1057 struct vnode *a_vp; 1058 struct componentname *a_cnp; 1059 }; 1060 #endif 1061 1062 /* 1063 * vnode removal operation 1064 */ 1065 static int 1066 mqfs_remove(struct vop_remove_args *ap) 1067 { 1068 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1069 struct mqfs_node *pn; 1070 int error; 1071 1072 if (ap->a_vp->v_type == VDIR) 1073 return (EPERM); 1074 pn = VTON(ap->a_vp); 1075 sx_xlock(&mqfs->mi_lock); 1076 error = do_unlink(pn, ap->a_cnp->cn_cred); 1077 sx_xunlock(&mqfs->mi_lock); 1078 return (error); 1079 } 1080 1081 #if 0 1082 struct vop_inactive_args { 1083 struct vnode *a_vp; 1084 struct thread *a_td; 1085 }; 1086 #endif 1087 1088 static int 1089 mqfs_inactive(struct vop_inactive_args *ap) 1090 { 1091 struct mqfs_node *pn = 
VTON(ap->a_vp); 1092 1093 if (pn->mn_deleted) 1094 vrecycle(ap->a_vp); 1095 return (0); 1096 } 1097 1098 #if 0 1099 struct vop_reclaim_args { 1100 struct vop_generic_args a_gen; 1101 struct vnode *a_vp; 1102 }; 1103 #endif 1104 1105 static int 1106 mqfs_reclaim(struct vop_reclaim_args *ap) 1107 { 1108 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount); 1109 struct vnode *vp = ap->a_vp; 1110 struct mqfs_node *pn; 1111 struct mqfs_vdata *vd; 1112 1113 vd = vp->v_data; 1114 pn = vd->mv_node; 1115 sx_xlock(&mqfs->mi_lock); 1116 vp->v_data = NULL; 1117 LIST_REMOVE(vd, mv_link); 1118 uma_zfree(mvdata_zone, vd); 1119 mqnode_release(pn); 1120 sx_xunlock(&mqfs->mi_lock); 1121 return (0); 1122 } 1123 1124 #if 0 1125 struct vop_open_args { 1126 struct vop_generic_args a_gen; 1127 struct vnode *a_vp; 1128 int a_mode; 1129 struct ucred *a_cred; 1130 struct thread *a_td; 1131 struct file *a_fp; 1132 }; 1133 #endif 1134 1135 static int 1136 mqfs_open(struct vop_open_args *ap) 1137 { 1138 return (0); 1139 } 1140 1141 #if 0 1142 struct vop_close_args { 1143 struct vop_generic_args a_gen; 1144 struct vnode *a_vp; 1145 int a_fflag; 1146 struct ucred *a_cred; 1147 struct thread *a_td; 1148 }; 1149 #endif 1150 1151 static int 1152 mqfs_close(struct vop_close_args *ap) 1153 { 1154 return (0); 1155 } 1156 1157 #if 0 1158 struct vop_access_args { 1159 struct vop_generic_args a_gen; 1160 struct vnode *a_vp; 1161 accmode_t a_accmode; 1162 struct ucred *a_cred; 1163 struct thread *a_td; 1164 }; 1165 #endif 1166 1167 /* 1168 * Verify permissions 1169 */ 1170 static int 1171 mqfs_access(struct vop_access_args *ap) 1172 { 1173 struct vnode *vp = ap->a_vp; 1174 struct vattr vattr; 1175 int error; 1176 1177 error = VOP_GETATTR(vp, &vattr, ap->a_cred); 1178 if (error) 1179 return (error); 1180 error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid, vattr.va_gid, 1181 ap->a_accmode, ap->a_cred); 1182 return (error); 1183 } 1184 1185 #if 0 1186 struct vop_getattr_args { 1187 struct vop_generic_args a_gen; 1188 struct vnode *a_vp; 1189 struct vattr *a_vap; 1190 struct ucred *a_cred; 1191 }; 1192 #endif 1193 1194 /* 1195 * Get file attributes 1196 */ 1197 static int 1198 mqfs_getattr(struct vop_getattr_args *ap) 1199 { 1200 struct vnode *vp = ap->a_vp; 1201 struct mqfs_node *pn = VTON(vp); 1202 struct vattr *vap = ap->a_vap; 1203 int error = 0; 1204 1205 vap->va_type = vp->v_type; 1206 vap->va_mode = pn->mn_mode; 1207 vap->va_nlink = 1; 1208 vap->va_uid = pn->mn_uid; 1209 vap->va_gid = pn->mn_gid; 1210 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; 1211 vap->va_fileid = pn->mn_fileno; 1212 vap->va_size = 0; 1213 vap->va_blocksize = PAGE_SIZE; 1214 vap->va_bytes = vap->va_size = 0; 1215 vap->va_atime = pn->mn_atime; 1216 vap->va_mtime = pn->mn_mtime; 1217 vap->va_ctime = pn->mn_ctime; 1218 vap->va_birthtime = pn->mn_birth; 1219 vap->va_gen = 0; 1220 vap->va_flags = 0; 1221 vap->va_rdev = NODEV; 1222 vap->va_bytes = 0; 1223 vap->va_filerev = 0; 1224 return (error); 1225 } 1226 1227 #if 0 1228 struct vop_setattr_args { 1229 struct vop_generic_args a_gen; 1230 struct vnode *a_vp; 1231 struct vattr *a_vap; 1232 struct ucred *a_cred; 1233 }; 1234 #endif 1235 /* 1236 * Set attributes 1237 */ 1238 static int 1239 mqfs_setattr(struct vop_setattr_args *ap) 1240 { 1241 struct mqfs_node *pn; 1242 struct vattr *vap; 1243 struct vnode *vp; 1244 struct thread *td; 1245 int c, error; 1246 uid_t uid; 1247 gid_t gid; 1248 1249 td = curthread; 1250 vap = ap->a_vap; 1251 vp = ap->a_vp; 1252 if ((vap->va_type != VNON) || 1253 
(vap->va_nlink != VNOVAL) || 1254 (vap->va_fsid != VNOVAL) || 1255 (vap->va_fileid != VNOVAL) || 1256 (vap->va_blocksize != VNOVAL) || 1257 (vap->va_flags != VNOVAL && vap->va_flags != 0) || 1258 (vap->va_rdev != VNOVAL) || 1259 ((int)vap->va_bytes != VNOVAL) || 1260 (vap->va_gen != VNOVAL)) { 1261 return (EINVAL); 1262 } 1263 1264 pn = VTON(vp); 1265 1266 error = c = 0; 1267 if (vap->va_uid == (uid_t)VNOVAL) 1268 uid = pn->mn_uid; 1269 else 1270 uid = vap->va_uid; 1271 if (vap->va_gid == (gid_t)VNOVAL) 1272 gid = pn->mn_gid; 1273 else 1274 gid = vap->va_gid; 1275 1276 if (uid != pn->mn_uid || gid != pn->mn_gid) { 1277 /* 1278 * To modify the ownership of a file, must possess VADMIN 1279 * for that file. 1280 */ 1281 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td))) 1282 return (error); 1283 1284 /* 1285 * XXXRW: Why is there a privilege check here: shouldn't the 1286 * check in VOP_ACCESS() be enough? Also, are the group bits 1287 * below definitely right? 1288 */ 1289 if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid || 1290 (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) && 1291 (error = priv_check(td, PRIV_MQ_ADMIN)) != 0) 1292 return (error); 1293 pn->mn_uid = uid; 1294 pn->mn_gid = gid; 1295 c = 1; 1296 } 1297 1298 if (vap->va_mode != (mode_t)VNOVAL) { 1299 if ((ap->a_cred->cr_uid != pn->mn_uid) && 1300 (error = priv_check(td, PRIV_MQ_ADMIN))) 1301 return (error); 1302 pn->mn_mode = vap->va_mode; 1303 c = 1; 1304 } 1305 1306 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { 1307 /* See the comment in ufs_vnops::ufs_setattr(). */ 1308 if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) && 1309 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || 1310 (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td)))) 1311 return (error); 1312 if (vap->va_atime.tv_sec != VNOVAL) { 1313 pn->mn_atime = vap->va_atime; 1314 } 1315 if (vap->va_mtime.tv_sec != VNOVAL) { 1316 pn->mn_mtime = vap->va_mtime; 1317 } 1318 c = 1; 1319 } 1320 if (c) { 1321 vfs_timestamp(&pn->mn_ctime); 1322 } 1323 return (0); 1324 } 1325 1326 #if 0 1327 struct vop_read_args { 1328 struct vop_generic_args a_gen; 1329 struct vnode *a_vp; 1330 struct uio *a_uio; 1331 int a_ioflag; 1332 struct ucred *a_cred; 1333 }; 1334 #endif 1335 1336 /* 1337 * Read from a file 1338 */ 1339 static int 1340 mqfs_read(struct vop_read_args *ap) 1341 { 1342 char buf[80]; 1343 struct vnode *vp = ap->a_vp; 1344 struct uio *uio = ap->a_uio; 1345 struct mqueue *mq; 1346 int len, error; 1347 1348 if (vp->v_type != VREG) 1349 return (EINVAL); 1350 1351 mq = VTOMQ(vp); 1352 snprintf(buf, sizeof(buf), 1353 "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n", 1354 mq->mq_totalbytes, 1355 mq->mq_maxmsg, 1356 mq->mq_curmsgs, 1357 mq->mq_msgsize); 1358 buf[sizeof(buf)-1] = '\0'; 1359 len = strlen(buf); 1360 error = uiomove_frombuf(buf, len, uio); 1361 return (error); 1362 } 1363 1364 #if 0 1365 struct vop_readdir_args { 1366 struct vop_generic_args a_gen; 1367 struct vnode *a_vp; 1368 struct uio *a_uio; 1369 struct ucred *a_cred; 1370 int *a_eofflag; 1371 int *a_ncookies; 1372 u_long **a_cookies; 1373 }; 1374 #endif 1375 1376 /* 1377 * Return directory entries. 
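 * Entries come straight from the in-memory child list: "." and ".."
 * are the mqfstype_this/mqfstype_parent nodes added by
 * mqfs_fixup_dir(), and children rooted in a different prison are
 * skipped so that a jail only sees its own queues.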
1378 */ 1379 static int 1380 mqfs_readdir(struct vop_readdir_args *ap) 1381 { 1382 struct vnode *vp; 1383 struct mqfs_info *mi; 1384 struct mqfs_node *pd; 1385 struct mqfs_node *pn; 1386 struct dirent entry; 1387 struct uio *uio; 1388 const void *pr_root; 1389 int *tmp_ncookies = NULL; 1390 off_t offset; 1391 int error, i; 1392 1393 vp = ap->a_vp; 1394 mi = VFSTOMQFS(vp->v_mount); 1395 pd = VTON(vp); 1396 uio = ap->a_uio; 1397 1398 if (vp->v_type != VDIR) 1399 return (ENOTDIR); 1400 1401 if (uio->uio_offset < 0) 1402 return (EINVAL); 1403 1404 if (ap->a_ncookies != NULL) { 1405 tmp_ncookies = ap->a_ncookies; 1406 *ap->a_ncookies = 0; 1407 ap->a_ncookies = NULL; 1408 } 1409 1410 error = 0; 1411 offset = 0; 1412 1413 pr_root = ap->a_cred->cr_prison->pr_root; 1414 sx_xlock(&mi->mi_lock); 1415 1416 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { 1417 entry.d_reclen = sizeof(entry); 1418 1419 /* 1420 * Only show names within the same prison root directory 1421 * (or not associated with a prison, e.g. "." and ".."). 1422 */ 1423 if (pn->mn_pr_root != NULL && pn->mn_pr_root != pr_root) 1424 continue; 1425 if (!pn->mn_fileno) 1426 mqfs_fileno_alloc(mi, pn); 1427 entry.d_fileno = pn->mn_fileno; 1428 for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i) 1429 entry.d_name[i] = pn->mn_name[i]; 1430 entry.d_namlen = i; 1431 switch (pn->mn_type) { 1432 case mqfstype_root: 1433 case mqfstype_dir: 1434 case mqfstype_this: 1435 case mqfstype_parent: 1436 entry.d_type = DT_DIR; 1437 break; 1438 case mqfstype_file: 1439 entry.d_type = DT_REG; 1440 break; 1441 case mqfstype_symlink: 1442 entry.d_type = DT_LNK; 1443 break; 1444 default: 1445 panic("%s has unexpected node type: %d", pn->mn_name, 1446 pn->mn_type); 1447 } 1448 dirent_terminate(&entry); 1449 if (entry.d_reclen > uio->uio_resid) 1450 break; 1451 if (offset >= uio->uio_offset) { 1452 error = vfs_read_dirent(ap, &entry, offset); 1453 if (error) 1454 break; 1455 } 1456 offset += entry.d_reclen; 1457 } 1458 sx_xunlock(&mi->mi_lock); 1459 1460 uio->uio_offset = offset; 1461 1462 if (tmp_ncookies != NULL) 1463 ap->a_ncookies = tmp_ncookies; 1464 1465 return (error); 1466 } 1467 1468 #ifdef notyet 1469 1470 #if 0 1471 struct vop_mkdir_args { 1472 struct vnode *a_dvp; 1473 struvt vnode **a_vpp; 1474 struvt componentname *a_cnp; 1475 struct vattr *a_vap; 1476 }; 1477 #endif 1478 1479 /* 1480 * Create a directory. 1481 */ 1482 static int 1483 mqfs_mkdir(struct vop_mkdir_args *ap) 1484 { 1485 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1486 struct componentname *cnp = ap->a_cnp; 1487 struct mqfs_node *pd = VTON(ap->a_dvp); 1488 struct mqfs_node *pn; 1489 int error; 1490 1491 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) 1492 return (ENOTDIR); 1493 sx_xlock(&mqfs->mi_lock); 1494 if ((cnp->cn_flags & HASBUF) == 0) 1495 panic("%s: no name", __func__); 1496 pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen, 1497 ap->a_vap->cn_cred, ap->a_vap->va_mode); 1498 if (pn != NULL) 1499 mqnode_addref(pn); 1500 sx_xunlock(&mqfs->mi_lock); 1501 if (pn == NULL) { 1502 error = ENOSPC; 1503 } else { 1504 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); 1505 mqnode_release(pn); 1506 } 1507 return (error); 1508 } 1509 1510 #if 0 1511 struct vop_rmdir_args { 1512 struct vnode *a_dvp; 1513 struct vnode *a_vp; 1514 struct componentname *a_cnp; 1515 }; 1516 #endif 1517 1518 /* 1519 * Remove a directory. 
1520 */ 1521 static int 1522 mqfs_rmdir(struct vop_rmdir_args *ap) 1523 { 1524 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); 1525 struct mqfs_node *pn = VTON(ap->a_vp); 1526 struct mqfs_node *pt; 1527 1528 if (pn->mn_type != mqfstype_dir) 1529 return (ENOTDIR); 1530 1531 sx_xlock(&mqfs->mi_lock); 1532 if (pn->mn_deleted) { 1533 sx_xunlock(&mqfs->mi_lock); 1534 return (ENOENT); 1535 } 1536 1537 pt = LIST_FIRST(&pn->mn_children); 1538 pt = LIST_NEXT(pt, mn_sibling); 1539 pt = LIST_NEXT(pt, mn_sibling); 1540 if (pt != NULL) { 1541 sx_xunlock(&mqfs->mi_lock); 1542 return (ENOTEMPTY); 1543 } 1544 pt = pn->mn_parent; 1545 pn->mn_parent = NULL; 1546 pn->mn_deleted = 1; 1547 LIST_REMOVE(pn, mn_sibling); 1548 mqnode_release(pn); 1549 mqnode_release(pt); 1550 sx_xunlock(&mqfs->mi_lock); 1551 cache_purge(ap->a_vp); 1552 return (0); 1553 } 1554 1555 #endif /* notyet */ 1556 1557 /* 1558 * See if this prison root is obsolete, and clean up associated queues if it is. 1559 */ 1560 static int 1561 mqfs_prison_remove(void *obj, void *data __unused) 1562 { 1563 const struct prison *pr = obj; 1564 const struct prison *tpr; 1565 struct mqfs_node *pn, *tpn; 1566 int found; 1567 1568 found = 0; 1569 TAILQ_FOREACH(tpr, &allprison, pr_list) { 1570 if (tpr->pr_root == pr->pr_root && tpr != pr && tpr->pr_ref > 0) 1571 found = 1; 1572 } 1573 if (!found) { 1574 /* 1575 * No jails are rooted in this directory anymore, 1576 * so no queues should be either. 1577 */ 1578 sx_xlock(&mqfs_data.mi_lock); 1579 LIST_FOREACH_SAFE(pn, &mqfs_data.mi_root->mn_children, 1580 mn_sibling, tpn) { 1581 if (pn->mn_pr_root == pr->pr_root) 1582 (void)do_unlink(pn, curthread->td_ucred); 1583 } 1584 sx_xunlock(&mqfs_data.mi_lock); 1585 } 1586 return (0); 1587 } 1588 1589 /* 1590 * Allocate a message queue 1591 */ 1592 static struct mqueue * 1593 mqueue_alloc(const struct mq_attr *attr) 1594 { 1595 struct mqueue *mq; 1596 1597 if (curmq >= maxmq) 1598 return (NULL); 1599 mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO); 1600 TAILQ_INIT(&mq->mq_msgq); 1601 if (attr != NULL) { 1602 mq->mq_maxmsg = attr->mq_maxmsg; 1603 mq->mq_msgsize = attr->mq_msgsize; 1604 } else { 1605 mq->mq_maxmsg = default_maxmsg; 1606 mq->mq_msgsize = default_msgsize; 1607 } 1608 mtx_init(&mq->mq_mutex, "mqueue lock", NULL, MTX_DEF); 1609 knlist_init_mtx(&mq->mq_rsel.si_note, &mq->mq_mutex); 1610 knlist_init_mtx(&mq->mq_wsel.si_note, &mq->mq_mutex); 1611 atomic_add_int(&curmq, 1); 1612 return (mq); 1613 } 1614 1615 /* 1616 * Destroy a message queue 1617 */ 1618 static void 1619 mqueue_free(struct mqueue *mq) 1620 { 1621 struct mqueue_msg *msg; 1622 1623 while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) { 1624 TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link); 1625 free(msg, M_MQUEUEDATA); 1626 } 1627 1628 mtx_destroy(&mq->mq_mutex); 1629 seldrain(&mq->mq_rsel); 1630 seldrain(&mq->mq_wsel); 1631 knlist_destroy(&mq->mq_rsel.si_note); 1632 knlist_destroy(&mq->mq_wsel.si_note); 1633 uma_zfree(mqueue_zone, mq); 1634 atomic_add_int(&curmq, -1); 1635 } 1636 1637 /* 1638 * Load a message from user space 1639 */ 1640 static struct mqueue_msg * 1641 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio) 1642 { 1643 struct mqueue_msg *msg; 1644 size_t len; 1645 int error; 1646 1647 len = sizeof(struct mqueue_msg) + msg_size; 1648 msg = malloc(len, M_MQUEUEDATA, M_WAITOK); 1649 error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg), 1650 msg_size); 1651 if (error) { 1652 free(msg, M_MQUEUEDATA); 1653 msg = NULL; 1654 } else { 1655 msg->msg_size = msg_size; 
		msg->msg_prio = msg_prio;
	}
	return (msg);
}

/*
 * Save a message to user space
 */
static int
mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio)
{
	int error;

	error = copyout(((char *)msg) + sizeof(*msg), msg_ptr,
	    msg->msg_size);
	if (error == 0 && msg_prio != NULL)
		error = copyout(&msg->msg_prio, msg_prio, sizeof(int));
	return (error);
}

/*
 * Free a message's memory
 */
static __inline void
mqueue_freemsg(struct mqueue_msg *msg)
{
	free(msg, M_MQUEUEDATA);
}

/*
 * Send a message.  If waitok is false, the thread will not be blocked
 * when the queue is full; otherwise the absolute timeout is checked on
 * each wakeup and the send is retried until it succeeds, fails, or
 * times out.
 */
int
mqueue_send(struct mqueue *mq, const char *msg_ptr,
	size_t msg_len, unsigned msg_prio, int waitok,
	const struct timespec *abs_timeout)
{
	struct mqueue_msg *msg;
	struct timespec ts, ts2;
	struct timeval tv;
	int error;

	if (msg_prio >= MQ_PRIO_MAX)
		return (EINVAL);
	if (msg_len > mq->mq_msgsize)
		return (EMSGSIZE);
	msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio);
	if (msg == NULL)
		return (EFAULT);

	/* O_NONBLOCK case */
	if (!waitok) {
		error = _mqueue_send(mq, msg, -1);
		if (error)
			goto bad;
		return (0);
	}

	/* we allow a null timeout (wait forever) */
	if (abs_timeout == NULL) {
		error = _mqueue_send(mq, msg, 0);
		if (error)
			goto bad;
		return (0);
	}

	/* try to send it before checking the time */
	error = _mqueue_send(mq, msg, -1);
	if (error == 0)
		return (0);

	if (error != EAGAIN)
		goto bad;

	if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
		error = EINVAL;
		goto bad;
	}
	for (;;) {
		getnanotime(&ts);
		timespecsub(abs_timeout, &ts, &ts2);
		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			error = ETIMEDOUT;
			break;
		}
		TIMESPEC_TO_TIMEVAL(&tv, &ts2);
		error = _mqueue_send(mq, msg, tvtohz(&tv));
		if (error != ETIMEDOUT)
			break;
	}
	if (error == 0)
		return (0);
bad:
	mqueue_freemsg(msg);
	return (error);
}

/*
 * Common routine to send a message
 */
static int
_mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
{
	struct mqueue_msg *msg2;
	int error = 0;

	mtx_lock(&mq->mq_mutex);
	while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) {
		if (timo < 0) {
			mtx_unlock(&mq->mq_mutex);
			return (EAGAIN);
		}
		mq->mq_senders++;
		error = msleep(&mq->mq_senders, &mq->mq_mutex,
		    PCATCH, "mqsend", timo);
		mq->mq_senders--;
		if (error == EAGAIN)
			error = ETIMEDOUT;
	}
	if (mq->mq_curmsgs >= mq->mq_maxmsg) {
		mtx_unlock(&mq->mq_mutex);
		return (error);
	}
	error = 0;
	/*
	 * Keep the queue sorted by descending priority; a new message is
	 * placed after existing messages of equal priority (FIFO within a
	 * priority level).
	 */
	if (TAILQ_EMPTY(&mq->mq_msgq)) {
		TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link);
	} else {
		if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) {
			TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link);
		} else {
			TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) {
				if (msg2->msg_prio < msg->msg_prio)
					break;
			}
			TAILQ_INSERT_BEFORE(msg2, msg, msg_link);
		}
	}
	mq->mq_curmsgs++;
	mq->mq_totalbytes += msg->msg_size;
	if (mq->mq_receivers)
		wakeup_one(&mq->mq_receivers);
	else if (mq->mq_notifier != NULL)
		mqueue_send_notification(mq);
	if (mq->mq_flags & MQ_RSEL) {
		mq->mq_flags &= ~MQ_RSEL;
		selwakeup(&mq->mq_rsel);
	}
	KNOTE_LOCKED(&mq->mq_rsel.si_note, 0);
	mtx_unlock(&mq->mq_mutex);
	return (0);
}

/*
 * Send a realtime signal to the process that registered itself
 * successfully with mq_notify().
 */
static void
mqueue_send_notification(struct mqueue *mq)
{
	struct mqueue_notifier *nt;
	struct thread *td;
	struct proc *p;
	int error;

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	nt = mq->mq_notifier;
	if (nt->nt_sigev.sigev_notify != SIGEV_NONE) {
		p = nt->nt_proc;
		error = sigev_findtd(p, &nt->nt_sigev, &td);
		if (error) {
			mq->mq_notifier = NULL;
			return;
		}
		if (!KSI_ONQ(&nt->nt_ksi)) {
			ksiginfo_set_sigev(&nt->nt_ksi, &nt->nt_sigev);
			tdsendsignal(p, td, nt->nt_ksi.ksi_signo, &nt->nt_ksi);
		}
		PROC_UNLOCK(p);
	}
	mq->mq_notifier = NULL;
}

/*
 * Get a message.  If waitok is false, the thread will not be blocked
 * when the queue is empty; otherwise the absolute timeout is checked on
 * each wakeup and the receive is retried until it succeeds, fails, or
 * times out.
 */
int
mqueue_receive(struct mqueue *mq, char *msg_ptr,
	size_t msg_len, unsigned *msg_prio, int waitok,
	const struct timespec *abs_timeout)
{
	struct mqueue_msg *msg;
	struct timespec ts, ts2;
	struct timeval tv;
	int error;

	if (msg_len < mq->mq_msgsize)
		return (EMSGSIZE);

	/* O_NONBLOCK case */
	if (!waitok) {
		error = _mqueue_recv(mq, &msg, -1);
		if (error)
			return (error);
		goto received;
	}

	/* we allow a null timeout (wait forever). */
	if (abs_timeout == NULL) {
		error = _mqueue_recv(mq, &msg, 0);
		if (error)
			return (error);
		goto received;
	}

	/* try to get a message before checking the time */
	error = _mqueue_recv(mq, &msg, -1);
	if (error == 0)
		goto received;

	if (error != EAGAIN)
		return (error);

	if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) {
		error = EINVAL;
		return (error);
	}

	for (;;) {
		getnanotime(&ts);
		timespecsub(abs_timeout, &ts, &ts2);
		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			error = ETIMEDOUT;
			return (error);
		}
		TIMESPEC_TO_TIMEVAL(&tv, &ts2);
		error = _mqueue_recv(mq, &msg, tvtohz(&tv));
		if (error == 0)
			break;
		if (error != ETIMEDOUT)
			return (error);
	}

received:
	error = mqueue_savemsg(msg, msg_ptr, msg_prio);
	if (error == 0) {
		curthread->td_retval[0] = msg->msg_size;
		curthread->td_retval[1] = 0;
	}
	mqueue_freemsg(msg);
	return (error);
}

/*
 * Common routine to receive a message
 */
static int
_mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo)
{
	int error = 0;

	mtx_lock(&mq->mq_mutex);
	while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) {
		if (timo < 0) {
			mtx_unlock(&mq->mq_mutex);
			return (EAGAIN);
		}
		mq->mq_receivers++;
		error = msleep(&mq->mq_receivers, &mq->mq_mutex,
		    PCATCH, "mqrecv", timo);
		mq->mq_receivers--;
		if (error == EAGAIN)
			error = ETIMEDOUT;
	}
	if (*msg != NULL) {
		error = 0;
		TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link);
		mq->mq_curmsgs--;
		mq->mq_totalbytes -=
(*msg)->msg_size; 1938 if (mq->mq_senders) 1939 wakeup_one(&mq->mq_senders); 1940 if (mq->mq_flags & MQ_WSEL) { 1941 mq->mq_flags &= ~MQ_WSEL; 1942 selwakeup(&mq->mq_wsel); 1943 } 1944 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0); 1945 } 1946 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 && 1947 !TAILQ_EMPTY(&mq->mq_msgq)) { 1948 mqueue_send_notification(mq); 1949 } 1950 mtx_unlock(&mq->mq_mutex); 1951 return (error); 1952 } 1953 1954 static __inline struct mqueue_notifier * 1955 notifier_alloc(void) 1956 { 1957 return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO)); 1958 } 1959 1960 static __inline void 1961 notifier_free(struct mqueue_notifier *p) 1962 { 1963 uma_zfree(mqnoti_zone, p); 1964 } 1965 1966 static struct mqueue_notifier * 1967 notifier_search(struct proc *p, int fd) 1968 { 1969 struct mqueue_notifier *nt; 1970 1971 LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) { 1972 if (nt->nt_ksi.ksi_mqd == fd) 1973 break; 1974 } 1975 return (nt); 1976 } 1977 1978 static __inline void 1979 notifier_insert(struct proc *p, struct mqueue_notifier *nt) 1980 { 1981 LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link); 1982 } 1983 1984 static __inline void 1985 notifier_delete(struct proc *p, struct mqueue_notifier *nt) 1986 { 1987 LIST_REMOVE(nt, nt_link); 1988 notifier_free(nt); 1989 } 1990 1991 static void 1992 notifier_remove(struct proc *p, struct mqueue *mq, int fd) 1993 { 1994 struct mqueue_notifier *nt; 1995 1996 mtx_assert(&mq->mq_mutex, MA_OWNED); 1997 PROC_LOCK(p); 1998 nt = notifier_search(p, fd); 1999 if (nt != NULL) { 2000 if (mq->mq_notifier == nt) 2001 mq->mq_notifier = NULL; 2002 sigqueue_take(&nt->nt_ksi); 2003 notifier_delete(p, nt); 2004 } 2005 PROC_UNLOCK(p); 2006 } 2007 2008 static int 2009 kern_kmq_open(struct thread *td, const char *upath, int flags, mode_t mode, 2010 const struct mq_attr *attr) 2011 { 2012 char path[MQFS_NAMELEN + 1]; 2013 struct mqfs_node *pn; 2014 struct pwddesc *pdp; 2015 struct file *fp; 2016 struct mqueue *mq; 2017 int fd, error, len, cmode; 2018 2019 AUDIT_ARG_FFLAGS(flags); 2020 AUDIT_ARG_MODE(mode); 2021 2022 pdp = td->td_proc->p_pd; 2023 cmode = (((mode & ~pdp->pd_cmask) & ALLPERMS) & ~S_ISTXT); 2024 mq = NULL; 2025 if ((flags & O_CREAT) != 0 && attr != NULL) { 2026 if (attr->mq_maxmsg <= 0 || attr->mq_maxmsg > maxmsg) 2027 return (EINVAL); 2028 if (attr->mq_msgsize <= 0 || attr->mq_msgsize > maxmsgsize) 2029 return (EINVAL); 2030 } 2031 2032 error = copyinstr(upath, path, MQFS_NAMELEN + 1, NULL); 2033 if (error) 2034 return (error); 2035 2036 /* 2037 * The first character of name must be a slash (/) character 2038 * and the remaining characters of name cannot include any slash 2039 * characters. 2040 */ 2041 len = strlen(path); 2042 if (len < 2 || path[0] != '/' || strchr(path + 1, '/') != NULL) 2043 return (EINVAL); 2044 /* 2045 * "." and ".." are magic directories, populated on the fly, and cannot 2046 * be opened as queues. 
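	 * A valid name is therefore a single leading '/' followed by one
	 * component, e.g. "/myqueue"; names such as "/a/b", "/." or "/.."
	 * are rejected with EINVAL.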
2047 */ 2048 if (strcmp(path, "/.") == 0 || strcmp(path, "/..") == 0) 2049 return (EINVAL); 2050 AUDIT_ARG_UPATH1_CANON(path); 2051 2052 error = falloc(td, &fp, &fd, O_CLOEXEC); 2053 if (error) 2054 return (error); 2055 2056 sx_xlock(&mqfs_data.mi_lock); 2057 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1, td->td_ucred); 2058 if (pn == NULL) { 2059 if (!(flags & O_CREAT)) { 2060 error = ENOENT; 2061 } else { 2062 mq = mqueue_alloc(attr); 2063 if (mq == NULL) { 2064 error = ENFILE; 2065 } else { 2066 pn = mqfs_create_file(mqfs_data.mi_root, 2067 path + 1, len - 1, td->td_ucred, 2068 cmode); 2069 if (pn == NULL) { 2070 error = ENOSPC; 2071 mqueue_free(mq); 2072 } 2073 } 2074 } 2075 2076 if (error == 0) { 2077 pn->mn_data = mq; 2078 } 2079 } else { 2080 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) { 2081 error = EEXIST; 2082 } else { 2083 accmode_t accmode = 0; 2084 2085 if (flags & FREAD) 2086 accmode |= VREAD; 2087 if (flags & FWRITE) 2088 accmode |= VWRITE; 2089 error = vaccess(VREG, pn->mn_mode, pn->mn_uid, 2090 pn->mn_gid, accmode, td->td_ucred); 2091 } 2092 } 2093 2094 if (error) { 2095 sx_xunlock(&mqfs_data.mi_lock); 2096 fdclose(td, fp, fd); 2097 fdrop(fp, td); 2098 return (error); 2099 } 2100 2101 mqnode_addref(pn); 2102 sx_xunlock(&mqfs_data.mi_lock); 2103 2104 finit(fp, flags & (FREAD | FWRITE | O_NONBLOCK), DTYPE_MQUEUE, pn, 2105 &mqueueops); 2106 2107 td->td_retval[0] = fd; 2108 fdrop(fp, td); 2109 return (0); 2110 } 2111 2112 /* 2113 * Syscall to open a message queue. 2114 */ 2115 int 2116 sys_kmq_open(struct thread *td, struct kmq_open_args *uap) 2117 { 2118 struct mq_attr attr; 2119 int flags, error; 2120 2121 if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC) 2122 return (EINVAL); 2123 flags = FFLAGS(uap->flags); 2124 if ((flags & O_CREAT) != 0 && uap->attr != NULL) { 2125 error = copyin(uap->attr, &attr, sizeof(attr)); 2126 if (error) 2127 return (error); 2128 } 2129 return (kern_kmq_open(td, uap->path, flags, uap->mode, 2130 uap->attr != NULL ? &attr : NULL)); 2131 } 2132 2133 /* 2134 * Syscall to unlink a message queue. 
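 * Unlinking removes the name at once; the queue itself is destroyed
 * only when the last reference (open descriptor or vnode) is released.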
2135 */ 2136 int 2137 sys_kmq_unlink(struct thread *td, struct kmq_unlink_args *uap) 2138 { 2139 char path[MQFS_NAMELEN+1]; 2140 struct mqfs_node *pn; 2141 int error, len; 2142 2143 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL); 2144 if (error) 2145 return (error); 2146 2147 len = strlen(path); 2148 if (len < 2 || path[0] != '/' || strchr(path + 1, '/') != NULL) 2149 return (EINVAL); 2150 if (strcmp(path, "/.") == 0 || strcmp(path, "/..") == 0) 2151 return (EINVAL); 2152 AUDIT_ARG_UPATH1_CANON(path); 2153 2154 sx_xlock(&mqfs_data.mi_lock); 2155 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1, td->td_ucred); 2156 if (pn != NULL) 2157 error = do_unlink(pn, td->td_ucred); 2158 else 2159 error = ENOENT; 2160 sx_xunlock(&mqfs_data.mi_lock); 2161 return (error); 2162 } 2163 2164 typedef int (*_fgetf)(struct thread *, int, cap_rights_t *, struct file **); 2165 2166 /* 2167 * Get message queue by giving file slot 2168 */ 2169 static int 2170 _getmq(struct thread *td, int fd, cap_rights_t *rightsp, _fgetf func, 2171 struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq) 2172 { 2173 struct mqfs_node *pn; 2174 int error; 2175 2176 error = func(td, fd, rightsp, fpp); 2177 if (error) 2178 return (error); 2179 if (&mqueueops != (*fpp)->f_ops) { 2180 fdrop(*fpp, td); 2181 return (EBADF); 2182 } 2183 pn = (*fpp)->f_data; 2184 if (ppn) 2185 *ppn = pn; 2186 if (pmq) 2187 *pmq = pn->mn_data; 2188 return (0); 2189 } 2190 2191 static __inline int 2192 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn, 2193 struct mqueue **pmq) 2194 { 2195 2196 return _getmq(td, fd, &cap_event_rights, fget, 2197 fpp, ppn, pmq); 2198 } 2199 2200 static __inline int 2201 getmq_read(struct thread *td, int fd, struct file **fpp, 2202 struct mqfs_node **ppn, struct mqueue **pmq) 2203 { 2204 2205 return _getmq(td, fd, &cap_read_rights, fget_read, 2206 fpp, ppn, pmq); 2207 } 2208 2209 static __inline int 2210 getmq_write(struct thread *td, int fd, struct file **fpp, 2211 struct mqfs_node **ppn, struct mqueue **pmq) 2212 { 2213 2214 return _getmq(td, fd, &cap_write_rights, fget_write, 2215 fpp, ppn, pmq); 2216 } 2217 2218 static int 2219 kern_kmq_setattr(struct thread *td, int mqd, const struct mq_attr *attr, 2220 struct mq_attr *oattr) 2221 { 2222 struct mqueue *mq; 2223 struct file *fp; 2224 u_int oflag, flag; 2225 int error; 2226 2227 AUDIT_ARG_FD(mqd); 2228 if (attr != NULL && (attr->mq_flags & ~O_NONBLOCK) != 0) 2229 return (EINVAL); 2230 error = getmq(td, mqd, &fp, NULL, &mq); 2231 if (error) 2232 return (error); 2233 oattr->mq_maxmsg = mq->mq_maxmsg; 2234 oattr->mq_msgsize = mq->mq_msgsize; 2235 oattr->mq_curmsgs = mq->mq_curmsgs; 2236 if (attr != NULL) { 2237 do { 2238 oflag = flag = fp->f_flag; 2239 flag &= ~O_NONBLOCK; 2240 flag |= (attr->mq_flags & O_NONBLOCK); 2241 } while (atomic_cmpset_int(&fp->f_flag, oflag, flag) == 0); 2242 } else 2243 oflag = fp->f_flag; 2244 oattr->mq_flags = (O_NONBLOCK & oflag); 2245 fdrop(fp, td); 2246 return (error); 2247 } 2248 2249 int 2250 sys_kmq_setattr(struct thread *td, struct kmq_setattr_args *uap) 2251 { 2252 struct mq_attr attr, oattr; 2253 int error; 2254 2255 if (uap->attr != NULL) { 2256 error = copyin(uap->attr, &attr, sizeof(attr)); 2257 if (error != 0) 2258 return (error); 2259 } 2260 error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? 

int
sys_kmq_setattr(struct thread *td, struct kmq_setattr_args *uap)
{
	struct mq_attr attr, oattr;
	int error;

	if (uap->attr != NULL) {
		error = copyin(uap->attr, &attr, sizeof(attr));
		if (error != 0)
			return (error);
	}
	error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ?
	    &attr : NULL, &oattr);
	if (error == 0 && uap->oattr != NULL) {
		bzero(oattr.__reserved, sizeof(oattr.__reserved));
		error = copyout(&oattr, uap->oattr, sizeof(oattr));
	}
	return (error);
}

int
sys_kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap)
{
	struct mqueue *mq;
	struct file *fp;
	struct timespec *abs_timeout, ets;
	int error;
	int waitok;

	AUDIT_ARG_FD(uap->mqd);
	error = getmq_read(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
	if (uap->abs_timeout != NULL) {
		error = copyin(uap->abs_timeout, &ets, sizeof(ets));
		if (error != 0)
			goto out;
		abs_timeout = &ets;
	} else
		abs_timeout = NULL;
	waitok = !(fp->f_flag & O_NONBLOCK);
	error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len,
	    uap->msg_prio, waitok, abs_timeout);
out:
	fdrop(fp, td);
	return (error);
}

int
sys_kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap)
{
	struct mqueue *mq;
	struct file *fp;
	struct timespec *abs_timeout, ets;
	int error, waitok;

	AUDIT_ARG_FD(uap->mqd);
	error = getmq_write(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
	if (uap->abs_timeout != NULL) {
		error = copyin(uap->abs_timeout, &ets, sizeof(ets));
		if (error != 0)
			goto out;
		abs_timeout = &ets;
	} else
		abs_timeout = NULL;
	waitok = !(fp->f_flag & O_NONBLOCK);
	error = mqueue_send(mq, uap->msg_ptr, uap->msg_len,
	    uap->msg_prio, waitok, abs_timeout);
out:
	fdrop(fp, td);
	return (error);
}
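
/*
 * Userland usage sketch (not part of the kernel build): both syscalls
 * above take an absolute timeout.  The conventional pattern with the
 * mq_timedreceive(3) wrapper is to add a relative delay to the current
 * CLOCK_REALTIME time; the five-second delay and buffer size below are
 * illustrative assumptions:
 *
 *	#include <err.h>
 *	#include <errno.h>
 *	#include <mqueue.h>
 *	#include <time.h>
 *
 *	char buf[128];
 *	unsigned int prio;
 *	struct timespec abstime;
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;
 *	if (mq_timedreceive(mqd, buf, sizeof(buf), &prio, &abstime) == -1 &&
 *	    errno == ETIMEDOUT)
 *		errx(1, "no message within five seconds");
 */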

static int
kern_kmq_notify(struct thread *td, int mqd, struct sigevent *sigev)
{
	struct filedesc *fdp;
	struct proc *p;
	struct mqueue *mq;
	struct file *fp, *fp2;
	struct mqueue_notifier *nt, *newnt = NULL;
	int error;

	AUDIT_ARG_FD(mqd);
	if (sigev != NULL) {
		if (sigev->sigev_notify != SIGEV_SIGNAL &&
		    sigev->sigev_notify != SIGEV_THREAD_ID &&
		    sigev->sigev_notify != SIGEV_NONE)
			return (EINVAL);
		if ((sigev->sigev_notify == SIGEV_SIGNAL ||
		    sigev->sigev_notify == SIGEV_THREAD_ID) &&
		    !_SIG_VALID(sigev->sigev_signo))
			return (EINVAL);
	}
	p = td->td_proc;
	fdp = td->td_proc->p_fd;
	error = getmq(td, mqd, &fp, NULL, &mq);
	if (error)
		return (error);
again:
	FILEDESC_SLOCK(fdp);
	fp2 = fget_locked(fdp, mqd);
	if (fp2 == NULL) {
		FILEDESC_SUNLOCK(fdp);
		error = EBADF;
		goto out;
	}
#ifdef CAPABILITIES
	error = cap_check(cap_rights(fdp, mqd), &cap_event_rights);
	if (error) {
		FILEDESC_SUNLOCK(fdp);
		goto out;
	}
#endif
	if (fp2 != fp) {
		FILEDESC_SUNLOCK(fdp);
		error = EBADF;
		goto out;
	}
	mtx_lock(&mq->mq_mutex);
	FILEDESC_SUNLOCK(fdp);
	if (sigev != NULL) {
		if (mq->mq_notifier != NULL) {
			error = EBUSY;
		} else {
			PROC_LOCK(p);
			nt = notifier_search(p, mqd);
			if (nt == NULL) {
				if (newnt == NULL) {
					PROC_UNLOCK(p);
					mtx_unlock(&mq->mq_mutex);
					newnt = notifier_alloc();
					goto again;
				}
			}

			if (nt != NULL) {
				sigqueue_take(&nt->nt_ksi);
				if (newnt != NULL) {
					notifier_free(newnt);
					newnt = NULL;
				}
			} else {
				nt = newnt;
				newnt = NULL;
				ksiginfo_init(&nt->nt_ksi);
				nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT;
				nt->nt_ksi.ksi_code = SI_MESGQ;
				nt->nt_proc = p;
				nt->nt_ksi.ksi_mqd = mqd;
				notifier_insert(p, nt);
			}
			nt->nt_sigev = *sigev;
			mq->mq_notifier = nt;
			PROC_UNLOCK(p);
			/*
			 * If there are no receivers and the message queue
			 * is not empty, send the notification as soon as
			 * possible.
			 */
			if (mq->mq_receivers == 0 &&
			    !TAILQ_EMPTY(&mq->mq_msgq))
				mqueue_send_notification(mq);
		}
	} else {
		notifier_remove(p, mq, mqd);
	}
	mtx_unlock(&mq->mq_mutex);

out:
	fdrop(fp, td);
	if (newnt != NULL)
		notifier_free(newnt);
	return (error);
}

int
sys_kmq_notify(struct thread *td, struct kmq_notify_args *uap)
{
	struct sigevent ev, *evp;
	int error;

	if (uap->sigev == NULL) {
		evp = NULL;
	} else {
		error = copyin(uap->sigev, &ev, sizeof(ev));
		if (error != 0)
			return (error);
		evp = &ev;
	}
	return (kern_kmq_notify(td, uap->mqd, evp));
}
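
/*
 * Userland usage sketch (not part of the kernel build): kern_kmq_notify()
 * accepts SIGEV_NONE, SIGEV_SIGNAL and SIGEV_THREAD_ID registrations.  A
 * minimal signal-based registration through mq_notify(3) could look like
 * this; the choice of SIGUSR1 is an illustrative assumption, and passing
 * a NULL sigevent later removes the registration:
 *
 *	#include <err.h>
 *	#include <mqueue.h>
 *	#include <signal.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo = SIGUSR1,
 *	};
 *	if (mq_notify(mqd, &sev) == -1)
 *		err(1, "mq_notify");
 */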

static void
mqueue_fdclose(struct thread *td, int fd, struct file *fp)
{
	struct mqueue *mq;
#ifdef INVARIANTS
	struct filedesc *fdp;

	fdp = td->td_proc->p_fd;
	FILEDESC_LOCK_ASSERT(fdp);
#endif

	if (fp->f_ops == &mqueueops) {
		mq = FPTOMQ(fp);
		mtx_lock(&mq->mq_mutex);
		notifier_remove(td->td_proc, mq, fd);

		/* Have to wake up selecting threads in the same process. */
		if (mq->mq_flags & MQ_RSEL) {
			mq->mq_flags &= ~MQ_RSEL;
			selwakeup(&mq->mq_rsel);
		}
		if (mq->mq_flags & MQ_WSEL) {
			mq->mq_flags &= ~MQ_WSEL;
			selwakeup(&mq->mq_wsel);
		}
		mtx_unlock(&mq->mq_mutex);
	}
}

static void
mq_proc_exit(void *arg __unused, struct proc *p)
{
	struct filedesc *fdp;
	struct file *fp;
	struct mqueue *mq;
	int i;

	fdp = p->p_fd;
	FILEDESC_SLOCK(fdp);
	for (i = 0; i < fdp->fd_nfiles; ++i) {
		fp = fget_locked(fdp, i);
		if (fp != NULL && fp->f_ops == &mqueueops) {
			mq = FPTOMQ(fp);
			mtx_lock(&mq->mq_mutex);
			notifier_remove(p, FPTOMQ(fp), i);
			mtx_unlock(&mq->mq_mutex);
		}
	}
	FILEDESC_SUNLOCK(fdp);
	KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left"));
}

static int
mqf_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct mqueue *mq = FPTOMQ(fp);
	int revents = 0;

	mtx_lock(&mq->mq_mutex);
	if (events & (POLLIN | POLLRDNORM)) {
		if (mq->mq_curmsgs) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			mq->mq_flags |= MQ_RSEL;
			selrecord(td, &mq->mq_rsel);
		}
	}
	if (events & POLLOUT) {
		if (mq->mq_curmsgs < mq->mq_maxmsg)
			revents |= POLLOUT;
		else {
			mq->mq_flags |= MQ_WSEL;
			selrecord(td, &mq->mq_wsel);
		}
	}
	mtx_unlock(&mq->mq_mutex);
	return (revents);
}

static int
mqf_close(struct file *fp, struct thread *td)
{
	struct mqfs_node *pn;

	fp->f_ops = &badfileops;
	pn = fp->f_data;
	fp->f_data = NULL;
	sx_xlock(&mqfs_data.mi_lock);
	mqnode_release(pn);
	sx_xunlock(&mqfs_data.mi_lock);
	return (0);
}

static int
mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
    struct thread *td)
{
	struct mqfs_node *pn = fp->f_data;

	bzero(st, sizeof *st);
	sx_xlock(&mqfs_data.mi_lock);
	st->st_atim = pn->mn_atime;
	st->st_mtim = pn->mn_mtime;
	st->st_ctim = pn->mn_ctime;
	st->st_birthtim = pn->mn_birth;
	st->st_uid = pn->mn_uid;
	st->st_gid = pn->mn_gid;
	st->st_mode = S_IFIFO | pn->mn_mode;
	sx_xunlock(&mqfs_data.mi_lock);
	return (0);
}

static int
mqf_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct mqfs_node *pn;
	int error;

	error = 0;
	pn = fp->f_data;
	sx_xlock(&mqfs_data.mi_lock);
	error = vaccess(VREG, pn->mn_mode, pn->mn_uid, pn->mn_gid, VADMIN,
	    active_cred);
	if (error != 0)
		goto out;
	pn->mn_mode = mode & ACCESSPERMS;
out:
	sx_xunlock(&mqfs_data.mi_lock);
	return (error);
}

static int
mqf_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct mqfs_node *pn;
	int error;

	error = 0;
	pn = fp->f_data;
	sx_xlock(&mqfs_data.mi_lock);
	if (uid == (uid_t)-1)
		uid = pn->mn_uid;
	if (gid == (gid_t)-1)
		gid = pn->mn_gid;
	if (((uid != pn->mn_uid && uid != active_cred->cr_uid) ||
	    (gid != pn->mn_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
		goto out;
	pn->mn_uid = uid;
	pn->mn_gid = gid;
out:
	sx_xunlock(&mqfs_data.mi_lock);
	return (error);
}

static int
mqf_kqfilter(struct file *fp, struct knote *kn)
{
	struct mqueue *mq = FPTOMQ(fp);
	int error = 0;

	if (kn->kn_filter == EVFILT_READ) {
		kn->kn_fop = &mq_rfiltops;
		knlist_add(&mq->mq_rsel.si_note, kn, 0);
	} else if (kn->kn_filter == EVFILT_WRITE) {
		kn->kn_fop = &mq_wfiltops;
		knlist_add(&mq->mq_wsel.si_note, kn, 0);
	} else
		error = EINVAL;
	return (error);
}

static void
filt_mqdetach(struct knote *kn)
{
	struct mqueue *mq = FPTOMQ(kn->kn_fp);

	if (kn->kn_filter == EVFILT_READ)
		knlist_remove(&mq->mq_rsel.si_note, kn, 0);
	else if (kn->kn_filter == EVFILT_WRITE)
		knlist_remove(&mq->mq_wsel.si_note, kn, 0);
	else
		panic("filt_mqdetach");
}

static int
filt_mqread(struct knote *kn, long hint)
{
	struct mqueue *mq = FPTOMQ(kn->kn_fp);

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	return (mq->mq_curmsgs != 0);
}

static int
filt_mqwrite(struct knote *kn, long hint)
{
	struct mqueue *mq = FPTOMQ(kn->kn_fp);

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	return (mq->mq_curmsgs < mq->mq_maxmsg);
}

static int
mqf_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{

	kif->kf_type = KF_TYPE_MQUEUE;
	return (0);
}

static struct fileops mqueueops = {
	.fo_read	= invfo_rdwr,
	.fo_write	= invfo_rdwr,
	.fo_truncate	= invfo_truncate,
	.fo_ioctl	= invfo_ioctl,
	.fo_poll	= mqf_poll,
	.fo_kqfilter	= mqf_kqfilter,
	.fo_stat	= mqf_stat,
	.fo_close	= mqf_close,
	.fo_chmod	= mqf_chmod,
	.fo_chown	= mqf_chown,
	.fo_sendfile	= invfo_sendfile,
	.fo_fill_kinfo	= mqf_fill_kinfo,
	.fo_flags	= DFLAG_PASSABLE,
};
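
/*
 * Userland usage sketch (not part of the kernel build): since a message
 * queue descriptor is an ordinary file descriptor backed by mqueueops, it
 * can be watched with kevent(2) through the EVFILT_READ/EVFILT_WRITE
 * filters above.  The mq_getfd_np() call used to obtain the descriptor
 * from an mqd_t is assumed here to be the FreeBSD-specific library
 * extension of that name:
 *
 *	#include <sys/event.h>
 *	#include <mqueue.h>
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, mq_getfd_np(mqd), EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 */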

static struct vop_vector mqfs_vnodeops = {
	.vop_default		= &default_vnodeops,
	.vop_access		= mqfs_access,
	.vop_cachedlookup	= mqfs_lookup,
	.vop_lookup		= vfs_cache_lookup,
	.vop_reclaim		= mqfs_reclaim,
	.vop_create		= mqfs_create,
	.vop_remove		= mqfs_remove,
	.vop_inactive		= mqfs_inactive,
	.vop_open		= mqfs_open,
	.vop_close		= mqfs_close,
	.vop_getattr		= mqfs_getattr,
	.vop_setattr		= mqfs_setattr,
	.vop_read		= mqfs_read,
	.vop_write		= VOP_EOPNOTSUPP,
	.vop_readdir		= mqfs_readdir,
	.vop_mkdir		= VOP_EOPNOTSUPP,
	.vop_rmdir		= VOP_EOPNOTSUPP
};
VFS_VOP_VECTOR_REGISTER(mqfs_vnodeops);

static struct vfsops mqfs_vfsops = {
	.vfs_init		= mqfs_init,
	.vfs_uninit		= mqfs_uninit,
	.vfs_mount		= mqfs_mount,
	.vfs_unmount		= mqfs_unmount,
	.vfs_root		= mqfs_root,
	.vfs_statfs		= mqfs_statfs,
};

static struct vfsconf mqueuefs_vfsconf = {
	.vfc_version = VFS_VERSION,
	.vfc_name = "mqueuefs",
	.vfc_vfsops = &mqfs_vfsops,
	.vfc_typenum = -1,
	.vfc_flags = VFCF_SYNTHETIC
};
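
/*
 * Administrative usage sketch: the synthetic file system registered above
 * lets queues be inspected and managed with ordinary file utilities.  A
 * typical sequence (the mount point is an illustrative assumption) is:
 *
 *	kldload mqueuefs
 *	mkdir -p /mnt/mqueue
 *	mount -t mqueuefs null /mnt/mqueue
 *
 * Mounting is optional; the kmq_*() syscalls above work without it.
 */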

static struct syscall_helper_data mq_syscalls[] = {
	SYSCALL_INIT_HELPER(kmq_open),
	SYSCALL_INIT_HELPER_F(kmq_setattr, SYF_CAPENABLED),
	SYSCALL_INIT_HELPER_F(kmq_timedsend, SYF_CAPENABLED),
	SYSCALL_INIT_HELPER_F(kmq_timedreceive, SYF_CAPENABLED),
	SYSCALL_INIT_HELPER_F(kmq_notify, SYF_CAPENABLED),
	SYSCALL_INIT_HELPER(kmq_unlink),
	SYSCALL_INIT_LAST
};

#ifdef COMPAT_FREEBSD32
#include <compat/freebsd32/freebsd32.h>
#include <compat/freebsd32/freebsd32_proto.h>
#include <compat/freebsd32/freebsd32_signal.h>
#include <compat/freebsd32/freebsd32_syscall.h>
#include <compat/freebsd32/freebsd32_util.h>

static void
mq_attr_from32(const struct mq_attr32 *from, struct mq_attr *to)
{

	to->mq_flags = from->mq_flags;
	to->mq_maxmsg = from->mq_maxmsg;
	to->mq_msgsize = from->mq_msgsize;
	to->mq_curmsgs = from->mq_curmsgs;
}

static void
mq_attr_to32(const struct mq_attr *from, struct mq_attr32 *to)
{

	to->mq_flags = from->mq_flags;
	to->mq_maxmsg = from->mq_maxmsg;
	to->mq_msgsize = from->mq_msgsize;
	to->mq_curmsgs = from->mq_curmsgs;
}

int
freebsd32_kmq_open(struct thread *td, struct freebsd32_kmq_open_args *uap)
{
	struct mq_attr attr;
	struct mq_attr32 attr32;
	int flags, error;

	if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC)
		return (EINVAL);
	flags = FFLAGS(uap->flags);
	if ((flags & O_CREAT) != 0 && uap->attr != NULL) {
		error = copyin(uap->attr, &attr32, sizeof(attr32));
		if (error)
			return (error);
		mq_attr_from32(&attr32, &attr);
	}
	return (kern_kmq_open(td, uap->path, flags, uap->mode,
	    uap->attr != NULL ? &attr : NULL));
}

int
freebsd32_kmq_setattr(struct thread *td, struct freebsd32_kmq_setattr_args *uap)
{
	struct mq_attr attr, oattr;
	struct mq_attr32 attr32, oattr32;
	int error;

	if (uap->attr != NULL) {
		error = copyin(uap->attr, &attr32, sizeof(attr32));
		if (error != 0)
			return (error);
		mq_attr_from32(&attr32, &attr);
	}
	error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ?
	    &attr : NULL, &oattr);
	if (error == 0 && uap->oattr != NULL) {
		mq_attr_to32(&oattr, &oattr32);
		bzero(oattr32.__reserved, sizeof(oattr32.__reserved));
		error = copyout(&oattr32, uap->oattr, sizeof(oattr32));
	}
	return (error);
}

int
freebsd32_kmq_timedsend(struct thread *td,
    struct freebsd32_kmq_timedsend_args *uap)
{
	struct mqueue *mq;
	struct file *fp;
	struct timespec32 ets32;
	struct timespec *abs_timeout, ets;
	int error;
	int waitok;

	AUDIT_ARG_FD(uap->mqd);
	error = getmq_write(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
	if (uap->abs_timeout != NULL) {
		error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
		if (error != 0)
			goto out;
		CP(ets32, ets, tv_sec);
		CP(ets32, ets, tv_nsec);
		abs_timeout = &ets;
	} else
		abs_timeout = NULL;
	waitok = !(fp->f_flag & O_NONBLOCK);
	error = mqueue_send(mq, uap->msg_ptr, uap->msg_len,
	    uap->msg_prio, waitok, abs_timeout);
out:
	fdrop(fp, td);
	return (error);
}

int
freebsd32_kmq_timedreceive(struct thread *td,
    struct freebsd32_kmq_timedreceive_args *uap)
{
	struct mqueue *mq;
	struct file *fp;
	struct timespec32 ets32;
	struct timespec *abs_timeout, ets;
	int error, waitok;

	AUDIT_ARG_FD(uap->mqd);
	error = getmq_read(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
	if (uap->abs_timeout != NULL) {
		error = copyin(uap->abs_timeout, &ets32, sizeof(ets32));
		if (error != 0)
			goto out;
		CP(ets32, ets, tv_sec);
		CP(ets32, ets, tv_nsec);
		abs_timeout = &ets;
	} else
		abs_timeout = NULL;
	waitok = !(fp->f_flag & O_NONBLOCK);
	error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len,
	    uap->msg_prio, waitok, abs_timeout);
out:
	fdrop(fp, td);
	return (error);
}

int
freebsd32_kmq_notify(struct thread *td, struct freebsd32_kmq_notify_args *uap)
{
	struct sigevent ev, *evp;
	struct sigevent32 ev32;
	int error;

	if (uap->sigev == NULL) {
		evp = NULL;
	} else {
		error = copyin(uap->sigev, &ev32, sizeof(ev32));
		if (error != 0)
			return (error);
		error = convert_sigevent32(&ev32, &ev);
		if (error != 0)
			return (error);
		evp = &ev;
	}
	return (kern_kmq_notify(td, uap->mqd, evp));
}

static struct syscall_helper_data mq32_syscalls[] = {
	SYSCALL32_INIT_HELPER(freebsd32_kmq_open),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_setattr, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedsend, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_timedreceive, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_F(freebsd32_kmq_notify, SYF_CAPENABLED),
	SYSCALL32_INIT_HELPER_COMPAT(kmq_unlink),
	SYSCALL_INIT_LAST
};
#endif

static int
mqinit(void)
{
	int error;

	error = syscall_helper_register(mq_syscalls, SY_THR_STATIC_KLD);
	if (error != 0)
		return (error);
#ifdef COMPAT_FREEBSD32
	error = syscall32_helper_register(mq32_syscalls, SY_THR_STATIC_KLD);
	if (error != 0)
		return (error);
#endif
	return (0);
}

static int
mqunload(void)
{

#ifdef COMPAT_FREEBSD32
	syscall32_helper_unregister(mq32_syscalls);
#endif
	syscall_helper_unregister(mq_syscalls);
	return (0);
}

static int
mq_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	error = vfs_modevent(module, cmd, arg);
	if (error != 0)
		return (error);

	switch (cmd) {
	case MOD_LOAD:
		error = mqinit();
		if (error != 0)
			mqunload();
		break;
	case MOD_UNLOAD:
		error = mqunload();
		break;
	default:
		break;
	}
	return (error);
}

static moduledata_t mqueuefs_mod = {
	"mqueuefs",
	mq_modload,
	&mqueuefs_vfsconf
};
DECLARE_MODULE(mqueuefs, mqueuefs_mod, SI_SUB_VFS, SI_ORDER_MIDDLE);
MODULE_VERSION(mqueuefs, 1);