/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
        struct ipc_namespace    *ipc_ns;
        bool                     newns; /* Set if newly created ipc namespace */
};

#define MQUEUE_MAGIC    0x19800202
#define DIRENT_SIZE     20
#define FILENT_SIZE     80

#define SEND            0
#define RECV            1

#define STATE_NONE      0
#define STATE_READY     1

struct posix_msg_tree_node {
        struct rb_node          rb_node;
        struct list_head        msg_list;
        int                     priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set
 * to STATE_READY with smp_store_release(), and it is read with READ_ONCE
 * followed by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe()
 * is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *                              Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *                              wake_q_add(A)
 *                              if (cmpxchg()) // success
 *                                 ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *                              get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *                              Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *                              state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;          // Access to stale data!
 *                              receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 */
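
/*
 * Illustrative sketch (not compiled code): the release/acquire pairing that
 * the MQ_BARRIER comment above describes, reduced to its bare pattern.  The
 * "waker"/"sleeper" labels are made up for this example; the real code lives
 * in __pipelined_op() and wq_sleep() below.
 *
 *      // waker, with info->lock held:
 *      this->msg = message;                    // publish payload first
 *      smp_store_release(&this->state, STATE_READY);
 *
 *      // sleeper, lockless exit path:
 *      if (READ_ONCE(wait.state) == STATE_READY) {
 *              smp_acquire__after_ctrl_dep();  // pairs with the release
 *              return wait.msg;                // payload is now visible
 *      }
 */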
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */

struct ext_wait_queue {         /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
        struct msg_msg *msg;    /* ptr of loaded message */
        int state;              /* one of STATE_* values */
};

struct mqueue_inode_info {
        spinlock_t lock;
        struct inode vfs_inode;
        wait_queue_head_t wait_q;

        struct rb_root msg_tree;
        struct rb_node *msg_tree_rightmost;
        struct posix_msg_tree_node *node_cache;
        struct mq_attr attr;

        struct sigevent notify;
        struct pid *notify_owner;
        u32 notify_self_exec_id;
        struct user_namespace *notify_user_ns;
        struct ucounts *ucounts;        /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;

        /* for tasks waiting for free space and messages, respectively */
        struct ext_wait_queue e_wait_q[2];

        unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
        return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
        return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
        struct ipc_namespace *ns;

        spin_lock(&mq_lock);
        ns = __get_ns_from_inode(inode);
        spin_unlock(&mq_lock);
        return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
        struct rb_node **p, *parent = NULL;
        struct posix_msg_tree_node *leaf;
        bool rightmost = true;

        p = &info->msg_tree.rb_node;
        while (*p) {
                parent = *p;
                leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

                if (likely(leaf->priority == msg->m_type))
                        goto insert_msg;
                else if (msg->m_type < leaf->priority) {
                        p = &(*p)->rb_left;
                        rightmost = false;
                } else
                        p = &(*p)->rb_right;
        }
        if (info->node_cache) {
                leaf = info->node_cache;
                info->node_cache = NULL;
        } else {
                leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
        }
        leaf->priority = msg->m_type;

        if (rightmost)
                info->msg_tree_rightmost = &leaf->rb_node;

        rb_link_node(&leaf->rb_node, parent, p);
        rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
        info->attr.mq_curmsgs++;
        info->qsize += msg->m_ts;
        list_add_tail(&msg->m_list, &leaf->msg_list);
        return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
                                  struct mqueue_inode_info *info)
{
        struct rb_node *node = &leaf->rb_node;

        if (info->msg_tree_rightmost == node)
                info->msg_tree_rightmost = rb_prev(node);

        rb_erase(node, &info->msg_tree);
        if (info->node_cache)
                kfree(leaf);
        else
                info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
        struct rb_node *parent = NULL;
        struct posix_msg_tree_node *leaf;
        struct msg_msg *msg;

try_again:
        /*
         * During insert, low priorities go to the left and high to the
         * right.  On receive, we want the highest priorities first, so
         * walk all the way to the right.
         */
        parent = info->msg_tree_rightmost;
        if (!parent) {
                if (info->attr.mq_curmsgs) {
                        pr_warn_once("Inconsistency in POSIX message queue, "
                                     "no tree element, but supposedly messages "
                                     "should exist!\n");
                        info->attr.mq_curmsgs = 0;
                }
                return NULL;
        }
        leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
        if (unlikely(list_empty(&leaf->msg_list))) {
                pr_warn_once("Inconsistency in POSIX message queue, "
                             "empty leaf node but we haven't implemented "
                             "lazy leaf delete!\n");
                msg_tree_erase(leaf, info);
                goto try_again;
        } else {
                msg = list_first_entry(&leaf->msg_list,
                                       struct msg_msg, m_list);
                list_del(&msg->m_list);
                if (list_empty(&leaf->msg_list)) {
                        msg_tree_erase(leaf, info);
                }
        }
        info->attr.mq_curmsgs--;
        info->qsize -= msg->m_ts;
        return msg;
}
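
/*
 * Illustrative userspace sketch (not part of this file): the rbtree above
 * keys message lists by priority, and msg_get() walks to the rightmost
 * node, so receives return the highest priority first, FIFO within a
 * priority.  The descriptor q is assumed to come from mq_open().
 *
 *      mq_send(q, "a", 1, 1);
 *      mq_send(q, "b", 1, 5);
 *      mq_send(q, "c", 1, 5);
 *      // mq_receive() now returns "b" (prio 5), then "c" (prio 5,
 *      // queued after "b"), then "a" (prio 1).
 */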
static struct inode *mqueue_get_inode(struct super_block *sb,
                struct ipc_namespace *ipc_ns, umode_t mode,
                struct mq_attr *attr)
{
        struct inode *inode;
        int ret = -ENOMEM;

        inode = new_inode(sb);
        if (!inode)
                goto err;

        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        simple_inode_init_ts(inode);

        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
                unsigned long mq_bytes, mq_treesize;

                inode->i_fop = &mqueue_file_operations;
                inode->i_size = FILENT_SIZE;
                /* mqueue specific info */
                info = MQUEUE_I(inode);
                spin_lock_init(&info->lock);
                init_waitqueue_head(&info->wait_q);
                INIT_LIST_HEAD(&info->e_wait_q[0].list);
                INIT_LIST_HEAD(&info->e_wait_q[1].list);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
                info->qsize = 0;
                info->ucounts = NULL;   /* set when all is ok */
                info->msg_tree = RB_ROOT;
                info->msg_tree_rightmost = NULL;
                info->node_cache = NULL;
                memset(&info->attr, 0, sizeof(info->attr));
                info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                           ipc_ns->mq_msg_default);
                info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                            ipc_ns->mq_msgsize_default);
                if (attr) {
                        info->attr.mq_maxmsg = attr->mq_maxmsg;
                        info->attr.mq_msgsize = attr->mq_msgsize;
                }
                /*
                 * We used to allocate a static array of pointers and account
                 * the size of that array as well as one msg_msg struct per
                 * possible message into the queue size. That's no longer
                 * accurate as the queue is now an rbtree and will grow and
                 * shrink depending on usage patterns.  We can, however, still
                 * account one msg_msg struct per message, but the nodes are
                 * allocated depending on priority usage, and most programs
                 * only use one, or a handful, of priorities.  However, since
                 * this is pinned memory, we need to assume worst case, so
                 * that means the min(mq_maxmsg, max_priorities) * struct
                 * posix_msg_tree_node.
                 */

                ret = -EINVAL;
                if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
                        goto out_inode;
                if (capable(CAP_SYS_RESOURCE)) {
                        if (info->attr.mq_maxmsg > HARD_MSGMAX ||
                            info->attr.mq_msgsize > HARD_MSGSIZEMAX)
                                goto out_inode;
                } else {
                        if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
                            info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
                                goto out_inode;
                }
                ret = -EOVERFLOW;
                /* check for overflow */
                if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
                        goto out_inode;
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);
                mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
                if (mq_bytes + mq_treesize < mq_bytes)
                        goto out_inode;
                mq_bytes += mq_treesize;
                info->ucounts = get_ucounts(current_ucounts());
                if (info->ucounts) {
                        long msgqueue;

                        spin_lock(&mq_lock);
                        msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
                        if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) {
                                dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
                                spin_unlock(&mq_lock);
                                put_ucounts(info->ucounts);
                                info->ucounts = NULL;
                                /* mqueue_evict_inode() releases info->messages */
                                ret = -EMFILE;
                                goto out_inode;
                        }
                        spin_unlock(&mq_lock);
                }
        } else if (S_ISDIR(mode)) {
                inc_nlink(inode);
                /* Some things misbehave if size == 0 on a directory */
                inode->i_size = 2 * DIRENT_SIZE;
                inode->i_op = &mqueue_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
        }

        return inode;
out_inode:
        iput(inode);
err:
        return ERR_PTR(ret);
}
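
/*
 * Worked example of the accounting above (struct sizes are illustrative
 * only; sizeof(struct msg_msg) and sizeof(struct posix_msg_tree_node)
 * depend on architecture and config).  With mq_maxmsg = 10 and
 * mq_msgsize = 8192, and assumed sizes of 48 and 56 bytes respectively:
 *
 *      mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 56
 *                  = 480 + 560 = 1040
 *      mq_bytes    = 10 * 8192 + 1040 = 82960
 *
 * i.e. roughly 83 KB is charged against RLIMIT_MSGQUEUE for such a queue.
 */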
static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct inode *inode;
        struct ipc_namespace *ns = sb->s_fs_info;

        sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = MQUEUE_MAGIC;
        sb->s_op = &mqueue_super_ops;

        inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        /*
         * With a newly created ipc namespace, we don't need to do a search
         * for an ipc namespace match, but we still need to set s_fs_info.
         */
        if (ctx->newns) {
                fc->s_fs_info = ctx->ipc_ns;
                return get_tree_nodev(fc, mqueue_fill_super);
        }
        return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        put_ipc_ns(ctx->ipc_ns);
        kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx;

        ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
        fc->fs_private = ctx;
        fc->ops = &mqueue_fs_context_ops;
        return 0;
}

/*
 * mq_init_ns() is currently the only caller of mq_create_mount().
 * So the ns parameter is always a newly created ipc namespace.
 */
static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
        struct mqueue_fs_context *ctx;
        struct fs_context *fc;
        struct vfsmount *mnt;

        fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
        if (IS_ERR(fc))
                return ERR_CAST(fc);

        ctx = fc->fs_private;
        ctx->newns = true;
        put_ipc_ns(ctx->ipc_ns);
        ctx->ipc_ns = get_ipc_ns(ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

        mnt = fc_mount(fc);
        put_fs_context(fc);
        return mnt;
}

static void init_once(void *foo)
{
        struct mqueue_inode_info *p = foo;

        inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
        struct mqueue_inode_info *ei;

        ei = alloc_inode_sb(sb, mqueue_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
        kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
        struct mqueue_inode_info *info;
        struct ipc_namespace *ipc_ns;
        struct msg_msg *msg, *nmsg;
        LIST_HEAD(tmp_msg);

        clear_inode(inode);

        if (S_ISDIR(inode->i_mode))
                return;

        ipc_ns = get_ns_from_inode(inode);
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
        while ((msg = msg_get(info)) != NULL)
                list_add_tail(&msg->m_list, &tmp_msg);
        kfree(info->node_cache);
        spin_unlock(&info->lock);

        list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
                list_del(&msg->m_list);
                free_msg(msg);
        }

        if (info->ucounts) {
                unsigned long mq_bytes, mq_treesize;

                /* Total amount of bytes accounted for the mqueue */
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);

                mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                          info->attr.mq_msgsize);

                spin_lock(&mq_lock);
                dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
                /*
                 * get_ns_from_inode() ensures that the
                 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
                 * to which we now hold a reference, or it is NULL.
                 * We can't put it here under mq_lock, though.
                 */
                if (ipc_ns)
                        ipc_ns->mq_queues_count--;
                spin_unlock(&mq_lock);
                put_ucounts(info->ucounts);
                info->ucounts = NULL;
        }
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
        struct inode *dir = dentry->d_parent->d_inode;
        struct inode *inode;
        struct mq_attr *attr = arg;
        int error;
        struct ipc_namespace *ipc_ns;

        spin_lock(&mq_lock);
        ipc_ns = __get_ns_from_inode(dir);
        if (!ipc_ns) {
                error = -EACCES;
                goto out_unlock;
        }

        if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
            !capable(CAP_SYS_RESOURCE)) {
                error = -ENOSPC;
                goto out_unlock;
        }
        ipc_ns->mq_queues_count++;
        spin_unlock(&mq_lock);

        inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
        if (IS_ERR(inode)) {
                error = PTR_ERR(inode);
                spin_lock(&mq_lock);
                ipc_ns->mq_queues_count--;
                goto out_unlock;
        }

        put_ipc_ns(ipc_ns);
        dir->i_size += DIRENT_SIZE;
        simple_inode_init_ts(dir);

        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
out_unlock:
        spin_unlock(&mq_lock);
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
        return error;
}

static int mqueue_create(struct mnt_idmap *idmap, struct inode *dir,
                         struct dentry *dentry, umode_t mode, bool excl)
{
        return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);

        simple_inode_init_ts(dir);
        dir->i_size -= DIRENT_SIZE;
        drop_nlink(inode);
        dput(dentry);
        return 0;
}

/*
 * This is the routine used when a queue file is read.  To avoid having to
 * implement some form of mq_receive here, we allow reading only the queue
 * size and the notification info (the only values that are interesting
 * from the user's point of view and aren't accessible through the standard
 * routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                                size_t count, loff_t *off)
{
        struct inode *inode = file_inode(filp);
        struct mqueue_inode_info *info = MQUEUE_I(inode);
        char buffer[FILENT_SIZE];
        ssize_t ret;

        spin_lock(&info->lock);
        snprintf(buffer, sizeof(buffer),
                 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
                 info->qsize,
                 info->notify_owner ? info->notify.sigev_notify : 0,
                 (info->notify_owner &&
                  info->notify.sigev_notify == SIGEV_SIGNAL) ?
                        info->notify.sigev_signo : 0,
                 pid_vnr(info->notify_owner));
        spin_unlock(&info->lock);
        buffer[sizeof(buffer)-1] = '\0';

        ret = simple_read_from_buffer(u_data, count, off, buffer,
                                      strlen(buffer));
        if (ret <= 0)
                return ret;

        inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
        return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

        spin_lock(&info->lock);
        if (task_tgid(current) == info->notify_owner)
                remove_notification(info);

        spin_unlock(&info->lock);
        return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        __poll_t retval = 0;

        poll_wait(filp, &info->wait_q, poll_tab);

        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs)
                retval = EPOLLIN | EPOLLRDNORM;

        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
                retval |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock(&info->lock);

        return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
                   struct ext_wait_queue *ewp)
{
        struct ext_wait_queue *walk;

        list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
                if (walk->task->prio <= current->prio) {
                        list_add_tail(&ewp->list, &walk->list);
                        return;
                }
        }
        list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
                    ktime_t *timeout, struct ext_wait_queue *ewp)
        __releases(&info->lock)
{
        int retval;
        signed long time;

        wq_add(info, sr, ewp);

        for (;;) {
                /* memory barrier not required, we hold info->lock */
                __set_current_state(TASK_INTERRUPTIBLE);

                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);

                if (READ_ONCE(ewp->state) == STATE_READY) {
                        /* see MQ_BARRIER for purpose/pairing */
                        smp_acquire__after_ctrl_dep();
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);

                /* we hold info->lock, so no memory barrier required */
                if (READ_ONCE(ewp->state) == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                if (time == 0) {
                        retval = -ETIMEDOUT;
                        break;
                }
        }
        list_del(&ewp->list);
out_unlock:
        spin_unlock(&info->lock);
out:
        return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
                struct mqueue_inode_info *info, int sr)
{
        struct list_head *ptr;

        ptr = info->e_wait_q[sr].list.prev;
        if (ptr == &info->e_wait_q[sr].list)
                return NULL;
        return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is split out only to keep sys_mq_timedsend() from
 * getting too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
        /*
         * Notification: invoked when a process has registered for
         * notification, no process is waiting synchronously for a message,
         * AND the queue changed from empty to non-empty.  Here we are sure
         * that no one is waiting synchronously.
         */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
                case SIGEV_SIGNAL: {
                        struct kernel_siginfo sig_i;
                        struct task_struct *task;

                        /* do_mq_notify() accepts sigev_signo == 0, why?? */
                        if (!info->notify.sigev_signo)
                                break;

                        clear_siginfo(&sig_i);
                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
                        rcu_read_lock();
                        /* map current pid/uid into info->owner's namespaces */
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
                        sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
                                                current_uid());
                        /*
                         * We can't use kill_pid_info(), this signal should
                         * bypass check_kill_permission(). It is from kernel
                         * but si_fromuser() can't know this.
                         * We do check the self_exec_id, to avoid sending
                         * signals to programs that don't expect them.
                         */
                        task = pid_task(info->notify_owner, PIDTYPE_TGID);
                        if (task && task->self_exec_id ==
                                                info->notify_self_exec_id) {
                                do_send_sig_info(info->notify.sigev_signo,
                                                &sig_i, task, PIDTYPE_TGID);
                        }
                        rcu_read_unlock();
                        break;
                }
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
                        break;
                }
                /* after notification unregisters process */
                put_pid(info->notify_owner);
                put_user_ns(info->notify_user_ns);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
        }
        wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
                           struct timespec64 *ts)
{
        if (get_timespec64(ts, u_abs_timeout))
                return -EFAULT;
        if (!timespec64_valid(ts))
                return -EINVAL;
        return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
        if (info->notify_owner != NULL &&
            info->notify.sigev_notify == SIGEV_THREAD) {
                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
                netlink_sendskb(info->notify_sock, info->notify_cookie);
        }
        put_pid(info->notify_owner);
        put_user_ns(info->notify_user_ns);
        info->notify_owner = NULL;
        info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
                        umode_t mode, struct filename *name,
                        struct mq_attr *attr)
{
        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };
        int acc;

        if (d_really_is_negative(dentry)) {
                if (!(oflag & O_CREAT))
                        return -ENOENT;
                if (ro)
                        return ro;
                audit_inode_parent_hidden(name, dentry->d_parent);
                return vfs_mkobj(dentry, mode & ~current_umask(),
                                 mqueue_create_attr, attr);
        }
        /* it already existed */
        audit_inode(name, dentry, 0);
        if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
                return -EEXIST;
        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
                return -EINVAL;
        acc = oflag2acc[oflag & O_ACCMODE];
        return inode_permission(&nop_mnt_idmap, d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
                      struct mq_attr *attr)
{
        struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
        struct dentry *root = mnt->mnt_root;
        struct filename *name;
        struct path path;
        int fd, error;
        int ro;

        audit_mq_open(oflag, mode, attr);

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                goto out_putname;

        ro = mnt_want_write(mnt);       /* we'll drop it in any case */
        inode_lock(d_inode(root));
        path.dentry = lookup_noperm(&QSTR(name->name), root);
        if (IS_ERR(path.dentry)) {
                error = PTR_ERR(path.dentry);
                goto out_putfd;
        }
        path.mnt = mntget(mnt);
        error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
        if (!error) {
                struct file *file = dentry_open(&path, oflag, current_cred());
                if (!IS_ERR(file))
                        fd_install(fd, file);
                else
                        error = PTR_ERR(file);
        }
        path_put(&path);
out_putfd:
        if (error) {
                put_unused_fd(fd);
                fd = error;
        }
        inode_unlock(d_inode(root));
        if (!ro)
                mnt_drop_write(mnt);
out_putname:
        putname(name);
        return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                struct mq_attr __user *, u_attr)
{
        struct mq_attr attr;
        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;

        return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
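
/*
 * Illustrative userspace sketch (not part of this file): creating and
 * opening a queue through the syscall above.  The name "/demo" and the
 * attribute values are made up for the example; passing attr == NULL
 * picks the namespace defaults chosen in mqueue_get_inode().
 *
 *      struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 8192 };
 *      mqd_t q = mq_open("/demo", O_CREAT | O_EXCL | O_RDWR, 0600, &attr);
 *      if (q == (mqd_t)-1)
 *              perror("mq_open");
 */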
SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
        int err;
        struct filename *name;
        struct dentry *dentry;
        struct inode *inode = NULL;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
        struct vfsmount *mnt = ipc_ns->mq_mnt;

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        audit_inode_parent_hidden(name, mnt->mnt_root);
        err = mnt_want_write(mnt);
        if (err)
                goto out_name;
        inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
        dentry = lookup_noperm(&QSTR(name->name), mnt->mnt_root);
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }

        inode = d_inode(dentry);
        if (!inode) {
                err = -ENOENT;
        } else {
                ihold(inode);
                err = vfs_unlink(&nop_mnt_idmap, d_inode(dentry->d_parent),
                                 dentry, NULL);
        }
        dput(dentry);

out_unlock:
        inode_unlock(d_inode(mnt->mnt_root));
        iput(inode);
        mnt_drop_write(mnt);
out_name:
        putname(name);

        return err;
}

/*
 * Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct ext_wait_queue *this)
{
        struct task_struct *task;

        list_del(&this->list);
        task = get_task_struct(this->task);

        /* see MQ_BARRIER for purpose/pairing */
        smp_store_release(&this->state, STATE_READY);
        wake_q_add_safe(wake_q, task);
}

/*
 * pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
{
        receiver->msg = message;
        __pipelined_op(wake_q, info, receiver);
}

/*
 * pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (there is guaranteed to be a
 * free slot).
 */
static inline void pipelined_receive(struct wake_q_head *wake_q,
                                     struct mqueue_inode_info *info)
{
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

        if (!sender) {
                /* for poll */
                wake_up_interruptible(&info->wait_q);
                return;
        }
        if (msg_insert(sender->msg, info))
                return;

        __pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
                size_t msg_len, unsigned int msg_prio,
                struct timespec64 *ts)
{
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
        struct msg_msg *msg_ptr;
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;
        int ret = 0;
        DEFINE_WAKE_Q(wake_q);

        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
                return -EINVAL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

        CLASS(fd, f)(mqdes);
        if (fd_empty(f))
                return -EBADF;

        inode = file_inode(fd_file(f));
        if (unlikely(fd_file(f)->f_op != &mqueue_file_operations))
                return -EBADF;
        info = MQUEUE_I(inode);
        audit_file(fd_file(f));

        if (unlikely(!(fd_file(f)->f_mode & FMODE_WRITE)))
                return -EBADF;

        if (unlikely(msg_len > info->attr.mq_msgsize))
                return -EMSGSIZE;

        /*
         * First try to allocate memory, before doing anything with
         * existing queues.
         */
        msg_ptr = load_msg(u_msg_ptr, msg_len);
        if (IS_ERR(msg_ptr))
                return PTR_ERR(msg_ptr);
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (fd_file(f)->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;

                        /* memory barrier not required, we hold info->lock */
                        WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, SEND, timeout, &wait);
                        /*
                         * wq_sleep must be called with info->lock held, and
                         * returns with the lock released
                         */
                        goto out_free;
                }
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(&wake_q, info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        ret = msg_insert(msg_ptr, info);
                        if (ret)
                                goto out_unlock;
                        __do_notify(info);
                }
                simple_inode_init_ts(inode);
        }
out_unlock:
        spin_unlock(&info->lock);
        wake_up_q(&wake_q);
out_free:
        if (ret)
                free_msg(msg_ptr);
        return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
                size_t msg_len, unsigned int __user *u_msg_prio,
                struct timespec64 *ts)
{
        ssize_t ret;
        struct msg_msg *msg_ptr;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, 0, ts);

        CLASS(fd, f)(mqdes);
        if (fd_empty(f))
                return -EBADF;

        inode = file_inode(fd_file(f));
        if (unlikely(fd_file(f)->f_op != &mqueue_file_operations))
                return -EBADF;
        info = MQUEUE_I(inode);
        audit_file(fd_file(f));

        if (unlikely(!(fd_file(f)->f_mode & FMODE_READ)))
                return -EBADF;

        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize))
                return -EMSGSIZE;

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == 0) {
                if (fd_file(f)->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;

                        /* memory barrier not required, we hold info->lock */
                        WRITE_ONCE(wait.state, STATE_NONE);
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
        } else {
                DEFINE_WAKE_Q(wake_q);

                msg_ptr = msg_get(info);

                simple_inode_init_ts(inode);

                /* There is now free space in queue. */
                pipelined_receive(&wake_q, info);
                spin_unlock(&info->lock);
                wake_up_q(&wake_q);
                ret = 0;
        }
        if (ret == 0) {
                ret = msg_ptr->m_ts;

                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
                                store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
                        ret = -EFAULT;
                }
                free_msg(msg_ptr);
        }
        return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int __user *, u_msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
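
/*
 * Illustrative userspace sketch (not part of this file): the u_abs_timeout
 * arguments above are absolute CLOCK_REALTIME times (see the
 * HRTIMER_MODE_ABS / CLOCK_REALTIME arguments in wq_sleep()), so a
 * "wait at most five seconds" send looks like:
 *
 *      struct timespec ts;
 *      clock_gettime(CLOCK_REALTIME, &ts);
 *      ts.tv_sec += 5;
 *      if (mq_timedsend(q, buf, len, 0, &ts) == -1 && errno == ETIMEDOUT)
 *              ...;  // queue stayed full for the whole window
 */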
/*
 * Note: the case where a user asks us to deregister (by passing a NULL
 * pointer) while not currently being the owner of the notification is
 * silently discarded.  POSIX does not explicitly define this behavior.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
        int ret;
        struct sock *sock;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct sk_buff *nc;

        audit_mq_notify(mqdes, notification);

        nc = NULL;
        sock = NULL;
        if (notification != NULL) {
                if (unlikely(notification->sigev_notify != SIGEV_NONE &&
                             notification->sigev_notify != SIGEV_SIGNAL &&
                             notification->sigev_notify != SIGEV_THREAD))
                        return -EINVAL;
                if (notification->sigev_notify == SIGEV_SIGNAL &&
                        !valid_signal(notification->sigev_signo)) {
                        return -EINVAL;
                }
                if (notification->sigev_notify == SIGEV_THREAD) {
                        long timeo;

                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
                        if (!nc)
                                return -ENOMEM;

                        if (copy_from_user(nc->data,
                                        notification->sigev_value.sival_ptr,
                                        NOTIFY_COOKIE_LEN)) {
                                kfree_skb(nc);
                                return -EFAULT;
                        }

                        /* TODO: add a header? */
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
retry:
                        sock = netlink_getsockbyfd(notification->sigev_signo);
                        if (IS_ERR(sock)) {
                                kfree_skb(nc);
                                return PTR_ERR(sock);
                        }

                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1)
                                goto retry;
                        if (ret)
                                return ret;
                }
        }

        CLASS(fd, f)(mqdes);
        if (fd_empty(f)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(fd_file(f));
        if (unlikely(fd_file(f)->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out;
        }
        info = MQUEUE_I(inode);

        ret = 0;
        spin_lock(&info->lock);
        if (notification == NULL) {
                if (info->notify_owner == task_tgid(current)) {
                        remove_notification(info);
                        inode_set_atime_to_ts(inode,
                                              inode_set_ctime_current(inode));
                }
        } else if (info->notify_owner != NULL) {
                ret = -EBUSY;
        } else {
                switch (notification->sigev_notify) {
                case SIGEV_NONE:
                        info->notify.sigev_notify = SIGEV_NONE;
                        break;
                case SIGEV_THREAD:
                        info->notify_sock = sock;
                        info->notify_cookie = nc;
                        sock = NULL;
                        nc = NULL;
                        info->notify.sigev_notify = SIGEV_THREAD;
                        break;
                case SIGEV_SIGNAL:
                        info->notify.sigev_signo = notification->sigev_signo;
                        info->notify.sigev_value = notification->sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
                        info->notify_self_exec_id = current->self_exec_id;
                        break;
                }

                info->notify_owner = get_pid(task_tgid(current));
                info->notify_user_ns = get_user_ns(current_user_ns());
                inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
        }
        spin_unlock(&info->lock);
out:
        if (sock)
                netlink_detachskb(sock, nc);
        return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
{
        struct sigevent n, *p = NULL;
        if (u_notification) {
                if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
                        return -EFAULT;
                p = &n;
        }
        return do_mq_notify(mqdes, p);
}
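
/*
 * Illustrative userspace sketch (not part of this file): registering for a
 * SIGEV_SIGNAL notification on queue q.  SIGUSR1 is an example choice;
 * __do_notify() above delivers the signal only on an empty-to-non-empty
 * transition with no synchronous reader, and then unregisters the owner.
 *
 *      struct sigevent sev = {
 *              .sigev_notify = SIGEV_SIGNAL,
 *              .sigev_signo  = SIGUSR1,
 *      };
 *      if (mq_notify(q, &sev) == -1 && errno == EBUSY)
 *              ...;  // another process already owns the registration
 */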
static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
        struct inode *inode;
        struct mqueue_inode_info *info;

        if (new && (new->mq_flags & (~O_NONBLOCK)))
                return -EINVAL;

        CLASS(fd, f)(mqdes);
        if (fd_empty(f))
                return -EBADF;

        if (unlikely(fd_file(f)->f_op != &mqueue_file_operations))
                return -EBADF;

        inode = file_inode(fd_file(f));
        info = MQUEUE_I(inode);

        spin_lock(&info->lock);

        if (old) {
                *old = info->attr;
                old->mq_flags = fd_file(f)->f_flags & O_NONBLOCK;
        }
        if (new) {
                audit_mq_getsetattr(mqdes, new);
                spin_lock(&fd_file(f)->f_lock);
                if (new->mq_flags & O_NONBLOCK)
                        fd_file(f)->f_flags |= O_NONBLOCK;
                else
                        fd_file(f)->f_flags &= ~O_NONBLOCK;
                spin_unlock(&fd_file(f)->f_lock);

                inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
        }

        spin_unlock(&info->lock);
        return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                const struct mq_attr __user *, u_mqstat,
                struct mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct mq_attr *new = NULL, *old = NULL;

        if (u_mqstat) {
                new = &mqstat;
                if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
                        return -EFAULT;
        }
        if (u_omqstat)
                old = &omqstat;

        ret = do_mq_getsetattr(mqdes, new, old);
        if (ret || !old)
                return ret;

        if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
                return -EFAULT;
        return 0;
}

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
        compat_long_t mq_flags;      /* message queue flags */
        compat_long_t mq_maxmsg;     /* maximum number of messages */
        compat_long_t mq_msgsize;    /* maximum message size */
        compat_long_t mq_curmsgs;    /* number of messages currently queued */
        compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
                        const struct compat_mq_attr __user *uattr)
{
        struct compat_mq_attr v;

        if (copy_from_user(&v, uattr, sizeof(*uattr)))
                return -EFAULT;

        memset(attr, 0, sizeof(*attr));
        attr->mq_flags = v.mq_flags;
        attr->mq_maxmsg = v.mq_maxmsg;
        attr->mq_msgsize = v.mq_msgsize;
        attr->mq_curmsgs = v.mq_curmsgs;
        return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
                        struct compat_mq_attr __user *uattr)
{
        struct compat_mq_attr v;

        memset(&v, 0, sizeof(v));
        v.mq_flags = attr->mq_flags;
        v.mq_maxmsg = attr->mq_maxmsg;
        v.mq_msgsize = attr->mq_msgsize;
        v.mq_curmsgs = attr->mq_curmsgs;
        if (copy_to_user(uattr, &v, sizeof(*uattr)))
                return -EFAULT;
        return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
                       int, oflag, compat_mode_t, mode,
                       struct compat_mq_attr __user *, u_attr)
{
        struct mq_attr attr, *p = NULL;
        if (u_attr && oflag & O_CREAT) {
                p = &attr;
                if (get_compat_mq_attr(&attr, u_attr))
                        return -EFAULT;
        }
        return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                       const struct compat_sigevent __user *, u_notification)
{
        struct sigevent n, *p = NULL;
        if (u_notification) {
                if (get_compat_sigevent(&n, u_notification))
                        return -EFAULT;
                if (n.sigev_notify == SIGEV_THREAD)
                        n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
                p = &n;
        }
        return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                       const struct compat_mq_attr __user *, u_mqstat,
                       struct compat_mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct mq_attr *new = NULL, *old = NULL;

        if (u_mqstat) {
                new = &mqstat;
                if (get_compat_mq_attr(new, u_mqstat))
                        return -EFAULT;
        }
        if (u_omqstat)
                old = &omqstat;

        ret = do_mq_getsetattr(mqdes, new, old);
        if (ret || !old)
                return ret;

        if (put_compat_mq_attr(old, u_omqstat))
                return -EFAULT;
        return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
                                  struct timespec64 *ts)
{
        if (get_old_timespec32(ts, p))
                return -EFAULT;
        if (!timespec64_valid(ts))
                return -EINVAL;
        return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
                const char __user *, u_msg_ptr,
                unsigned int, msg_len, unsigned int, msg_prio,
                const struct old_timespec32 __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = compat_prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
                char __user *, u_msg_ptr,
                unsigned int, msg_len, unsigned int __user *, u_msg_prio,
                const struct old_timespec32 __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = compat_prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
        .lookup = simple_lookup,
        .create = mqueue_create,
        .unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
        .flush = mqueue_flush_file,
        .poll = mqueue_poll_file,
        .read = mqueue_read_file,
        .llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
        .alloc_inode = mqueue_alloc_inode,
        .free_inode = mqueue_free_inode,
        .evict_inode = mqueue_evict_inode,
        .statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
        .free = mqueue_fs_context_free,
        .get_tree = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
        .name = "mqueue",
        .init_fs_context = mqueue_init_fs_context,
        .kill_sb = kill_litter_super,
        .fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
        struct vfsmount *m;

        ns->mq_queues_count = 0;
        ns->mq_queues_max = DFLT_QUEUESMAX;
        ns->mq_msg_max = DFLT_MSGMAX;
        ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
        ns->mq_msg_default = DFLT_MSG;
        ns->mq_msgsize_default = DFLT_MSGSIZE;

        m = mq_create_mount(ns);
        if (IS_ERR(m))
                return PTR_ERR(m);
        ns->mq_mnt = m;
        return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
        ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

static int __init init_mqueue_fs(void)
{
        int error;

        mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
                                sizeof(struct mqueue_inode_info), 0,
                                SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
        if (mqueue_inode_cachep == NULL)
                return -ENOMEM;

        if (!setup_mq_sysctls(&init_ipc_ns)) {
                pr_warn("sysctl registration failed\n");
                error = -ENOMEM;
                goto out_kmem;
        }

        error = register_filesystem(&mqueue_fs_type);
        if (error)
                goto out_sysctl;

        spin_lock_init(&mq_lock);

        error = mq_init_ns(&init_ipc_ns);
        if (error)
                goto out_filesystem;

        return 0;

out_filesystem:
        unregister_filesystem(&mqueue_fs_type);
out_sysctl:
        retire_mq_sysctls(&init_ipc_ns);
out_kmem:
        kmem_cache_destroy(mqueue_inode_cachep);
        return error;
}

device_initcall(init_mqueue_fs);