/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
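
/*
 * Illustration (compiled out, not part of this file): MQUEUE_I() above is
 * the usual container_of() pattern -- given a pointer to the embedded
 * vfs_inode, recover the enclosing mqueue_inode_info.  A minimal userspace
 * sketch of the same idiom, with hypothetical types:
 */
#if 0
#include <stddef.h>
#include <stdio.h>

struct outer {
	int cookie;
	struct inner { int x; } member;
};

/* the same computation container_of() performs: subtract the member offset */
#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct outer o = { .cookie = 42 };
	struct inner *ip = &o.member;
	struct outer *op = container_of_demo(ip, struct outer, member);

	printf("%d\n", op->cookie);	/* prints 42 */
	return 0;
}
#endif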

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to the
		 * right.  On receive, we want the highest priorities first, so
		 * walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
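
/*
 * Illustration (compiled out): the rbtree of per-priority FIFO lists built
 * by msg_insert() and drained by msg_get() gives the POSIX ordering --
 * highest priority first, FIFO within a priority.  A minimal userspace
 * sketch against a hypothetical queue "/demo" (link with -lrt; error
 * handling omitted for brevity):
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
	mqd_t q = mq_open("/demo", O_RDWR | O_CREAT, 0600, &attr);
	char buf[64];
	unsigned int prio;

	mq_send(q, "a", 1, 1);
	mq_send(q, "b", 1, 5);
	mq_send(q, "c", 1, 5);
	mq_send(q, "d", 1, 3);

	/* drains as: b(5), c(5), d(3), a(1) -- FIFO within priority 5 */
	for (int i = 0; i < 4; i++) {
		if (mq_receive(q, buf, sizeof(buf), &prio) > 0)
			printf("%c prio=%u\n", buf[0], prio);
	}

	mq_close(q);
	mq_unlink("/demo");
	return 0;
}
#endif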

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
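
/*
 * Worked example of the accounting above (struct sizes are illustrative;
 * the real ones depend on architecture and config): with mq_maxmsg = 10
 * and mq_msgsize = 8192, and assuming sizeof(struct msg_msg) == 48 and
 * sizeof(struct posix_msg_tree_node) == 40 on a 64-bit build:
 *
 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 40
 *		    = 480 + 400 = 880 bytes
 *	mq_bytes    = 10 * 8192 + 880 = 82800 bytes
 *
 * i.e. the full worst case is charged against RLIMIT_MSGQUEUE at create
 * time, even though tree nodes are only allocated as priorities are used.
 */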

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	struct ipc_namespace *ns;
	if (flags & SB_KERNMOUNT) {
		ns = data;
		data = NULL;
	} else {
		ns = current->nsproxy->ipc_ns;
	}
	return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine implements read(2) on a queue file.  Rather than emulating
 * some sort of mq_receive() here, we expose only the queue size and the
 * notification info -- the only values that are interesting from a user's
 * point of view and not accessible through the standard mq_* routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}
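
/*
 * Illustration (compiled out): with mqueue mounted at /dev/mqueue, the
 * format above can simply be read from the queue file.  Path and queue
 * name are hypothetical:
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/dev/mqueue/demo", "r");

	if (f && fgets(line, sizeof(line), f))
		fputs(line, stdout);  /* e.g. "QSIZE:129 NOTIFY:2 SIGNO:0 NOTIFY_PID:8260" */
	if (f)
		fclose(f);
	return 0;
}
#endif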

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when a process is registered for it, no
	 * process is waiting synchronously for a message, AND the queue's
	 * state changed from empty to non-empty.  At this point we are sure
	 * that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				 mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}
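
/*
 * Illustration (compiled out): the userspace view of the two syscalls
 * above.  Queue name and attributes are hypothetical; link with -lrt:
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
	mqd_t q;

	/* creates /demo in the caller's IPC namespace; fails if it exists */
	q = mq_open("/demo", O_RDWR | O_CREAT | O_EXCL, 0600, &attr);
	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	mq_close(q);

	/* the name disappears now; the queue itself once all fds are closed */
	mq_unlink("/demo");
	return 0;
}
#endif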

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add such
	 * that we can ensure that updating receiver->state is the last
	 * write operation: As once set, the receiver can continue,
	 * and if we don't have the reference count from the wake_q,
	 * yet, at that point we can later have a use-after-free
	 * condition and bogus wakeup.
	 */
	receiver->state = STATE_READY;
}

/*
 * pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (we are guaranteed one free
 * slot).
 */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
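
/*
 * Illustration (compiled out): the timeout passed to the two syscalls
 * above is *absolute* on CLOCK_REALTIME, since wq_sleep() arms its hrtimer
 * with HRTIMER_MODE_ABS.  A hypothetical userspace caller therefore
 * computes "now + delta" (queue name hypothetical; error handling mostly
 * omitted; assumes the queue's mq_msgsize is at most sizeof(buf)):
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	mqd_t q = mq_open("/demo", O_RDONLY);
	char buf[8192];
	struct timespec abs;

	clock_gettime(CLOCK_REALTIME, &abs);
	abs.tv_sec += 5;	/* give up five seconds from now */

	/* buf must be at least mq_msgsize bytes or this fails with EMSGSIZE */
	if (mq_timedreceive(q, buf, sizeof(buf), NULL, &abs) < 0)
		perror("mq_timedreceive");	/* ETIMEDOUT on expiry */
	mq_close(q);
	return 0;
}
#endif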

/*
 * Note: a request to deregister (with NULL as the notification pointer)
 * from a caller that is not the current owner of the notification is
 * silently ignored.  POSIX does not explicitly define this case.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}
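
/*
 * Illustration (compiled out): registering for SIGEV_SIGNAL notification
 * from userspace.  __do_notify() fires the signal once, when the queue
 * goes from empty to non-empty with no synchronous waiter, and then
 * unregisters.  Queue name and signal choice are hypothetical:
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_msg;

static void on_msg(int sig)
{
	(void)sig;
	got_msg = 1;
}

int main(void)
{
	mqd_t q = mq_open("/demo", O_RDONLY);
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGUSR1,
	};

	signal(SIGUSR1, on_msg);
	if (mq_notify(q, &sev) < 0)	/* EBUSY if another process owns it */
		perror("mq_notify");

	while (!got_msg)
		pause();	/* SIGUSR1 arrives on empty -> non-empty */
	puts("queue became non-empty");
	mq_close(q);
	return 0;
}
#endif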

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
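
/*
 * Illustration (compiled out): the only attribute mq_setattr() can change
 * is the O_NONBLOCK flag, as enforced by do_mq_getsetattr() above.  Queue
 * name hypothetical:
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	mqd_t q = mq_open("/demo", O_RDWR);
	struct mq_attr set = { .mq_flags = O_NONBLOCK }, old;

	/* switch the descriptor to non-blocking; "old" receives the
	 * previous attributes, including mq_curmsgs */
	if (mq_setattr(q, &set, &old) == 0)
		printf("was %sblocking, %ld msgs queued\n",
		       (old.mq_flags & O_NONBLOCK) ? "non-" : "",
		       old.mq_curmsgs);
	mq_close(q);
	return 0;
}
#endif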

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags		      */
	compat_long_t mq_maxmsg;     /* maximum number of messages	      */
	compat_long_t mq_msgsize;    /* maximum message size		      */
	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

static int compat_prepare_timeout(const struct compat_timespec __user *p,
				   struct timespec64 *ts)
{
	if (compat_get_timespec64(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
		       const char __user *, u_msg_ptr,
		       compat_size_t, msg_len, unsigned int, msg_prio,
		       const struct compat_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
		       char __user *, u_msg_ptr,
		       compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
		       const struct compat_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
	ns->mq_msg_default = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}
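
/*
 * Illustration (compiled out): besides the kernel-internal mount created
 * in mq_init_ns() above, the filesystem can be mounted from userspace to
 * make queues visible as files; the conventional mount point is
 * /dev/mqueue, and the caller needs CAP_SYS_ADMIN:
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* equivalent of: mount -t mqueue none /dev/mqueue */
	if (mount("none", "/dev/mqueue", "mqueue", 0, NULL) < 0)
		perror("mount");
	return 0;
}
#endif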

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);