1 /* 2 * POSIX message queues filesystem for Linux. 3 * 4 * Copyright (C) 2003,2004 Krzysztof Benedyczak (golbi@mat.uni.torun.pl) 5 * Michal Wronski (michal.wronski@gmail.com) 6 * 7 * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) 8 * Lockless receive & send, fd based notify: 9 * Manfred Spraul (manfred@colorfullife.com) 10 * 11 * Audit: George Wilson (ltcgcw@us.ibm.com) 12 * 13 * This file is released under the GPL. 14 */ 15 16 #include <linux/capability.h> 17 #include <linux/init.h> 18 #include <linux/pagemap.h> 19 #include <linux/file.h> 20 #include <linux/mount.h> 21 #include <linux/namei.h> 22 #include <linux/sysctl.h> 23 #include <linux/poll.h> 24 #include <linux/mqueue.h> 25 #include <linux/msg.h> 26 #include <linux/skbuff.h> 27 #include <linux/vmalloc.h> 28 #include <linux/netlink.h> 29 #include <linux/syscalls.h> 30 #include <linux/audit.h> 31 #include <linux/signal.h> 32 #include <linux/mutex.h> 33 #include <linux/nsproxy.h> 34 #include <linux/pid.h> 35 #include <linux/ipc_namespace.h> 36 #include <linux/user_namespace.h> 37 #include <linux/slab.h> 38 39 #include <net/sock.h> 40 #include "util.h" 41 42 #define MQUEUE_MAGIC 0x19800202 43 #define DIRENT_SIZE 20 44 #define FILENT_SIZE 80 45 46 #define SEND 0 47 #define RECV 1 48 49 #define STATE_NONE 0 50 #define STATE_PENDING 1 51 #define STATE_READY 2 52 53 struct posix_msg_tree_node { 54 struct rb_node rb_node; 55 struct list_head msg_list; 56 int priority; 57 }; 58 59 struct ext_wait_queue { /* queue of sleeping tasks */ 60 struct task_struct *task; 61 struct list_head list; 62 struct msg_msg *msg; /* ptr of loaded message */ 63 int state; /* one of STATE_* values */ 64 }; 65 66 struct mqueue_inode_info { 67 spinlock_t lock; 68 struct inode vfs_inode; 69 wait_queue_head_t wait_q; 70 71 struct rb_root msg_tree; 72 struct posix_msg_tree_node *node_cache; 73 struct mq_attr attr; 74 75 struct sigevent notify; 76 struct pid* notify_owner; 77 struct user_namespace *notify_user_ns; 78 struct user_struct *user; /* user who created, for accounting */ 79 struct sock *notify_sock; 80 struct sk_buff *notify_cookie; 81 82 /* for tasks waiting for free space and messages, respectively */ 83 struct ext_wait_queue e_wait_q[2]; 84 85 unsigned long qsize; /* size of queue in memory (sum of all msgs) */ 86 }; 87 88 static const struct inode_operations mqueue_dir_inode_operations; 89 static const struct file_operations mqueue_file_operations; 90 static const struct super_operations mqueue_super_ops; 91 static void remove_notification(struct mqueue_inode_info *info); 92 93 static struct kmem_cache *mqueue_inode_cachep; 94 95 static struct ctl_table_header * mq_sysctl_table; 96 97 static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) 98 { 99 return container_of(inode, struct mqueue_inode_info, vfs_inode); 100 } 101 102 /* 103 * This routine should be called with the mq_lock held. 104 */ 105 static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode) 106 { 107 return get_ipc_ns(inode->i_sb->s_fs_info); 108 } 109 110 static struct ipc_namespace *get_ns_from_inode(struct inode *inode) 111 { 112 struct ipc_namespace *ns; 113 114 spin_lock(&mq_lock); 115 ns = __get_ns_from_inode(inode); 116 spin_unlock(&mq_lock); 117 return ns; 118 } 119 120 /* Auxiliary functions to manipulate messages' list */ 121 static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) 122 { 123 struct rb_node **p, *parent = NULL; 124 struct posix_msg_tree_node *leaf; 125 126 p = &info->msg_tree.rb_node; 127 while (*p) { 128 parent = *p; 129 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); 130 131 if (likely(leaf->priority == msg->m_type)) 132 goto insert_msg; 133 else if (msg->m_type < leaf->priority) 134 p = &(*p)->rb_left; 135 else 136 p = &(*p)->rb_right; 137 } 138 if (info->node_cache) { 139 leaf = info->node_cache; 140 info->node_cache = NULL; 141 } else { 142 leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC); 143 if (!leaf) 144 return -ENOMEM; 145 INIT_LIST_HEAD(&leaf->msg_list); 146 info->qsize += sizeof(*leaf); 147 } 148 leaf->priority = msg->m_type; 149 rb_link_node(&leaf->rb_node, parent, p); 150 rb_insert_color(&leaf->rb_node, &info->msg_tree); 151 insert_msg: 152 info->attr.mq_curmsgs++; 153 info->qsize += msg->m_ts; 154 list_add_tail(&msg->m_list, &leaf->msg_list); 155 return 0; 156 } 157 158 static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) 159 { 160 struct rb_node **p, *parent = NULL; 161 struct posix_msg_tree_node *leaf; 162 struct msg_msg *msg; 163 164 try_again: 165 p = &info->msg_tree.rb_node; 166 while (*p) { 167 parent = *p; 168 /* 169 * During insert, low priorities go to the left and high to the 170 * right. On receive, we want the highest priorities first, so 171 * walk all the way to the right. 172 */ 173 p = &(*p)->rb_right; 174 } 175 if (!parent) { 176 if (info->attr.mq_curmsgs) { 177 pr_warn_once("Inconsistency in POSIX message queue, " 178 "no tree element, but supposedly messages " 179 "should exist!\n"); 180 info->attr.mq_curmsgs = 0; 181 } 182 return NULL; 183 } 184 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); 185 if (unlikely(list_empty(&leaf->msg_list))) { 186 pr_warn_once("Inconsistency in POSIX message queue, " 187 "empty leaf node but we haven't implemented " 188 "lazy leaf delete!\n"); 189 rb_erase(&leaf->rb_node, &info->msg_tree); 190 if (info->node_cache) { 191 info->qsize -= sizeof(*leaf); 192 kfree(leaf); 193 } else { 194 info->node_cache = leaf; 195 } 196 goto try_again; 197 } else { 198 msg = list_first_entry(&leaf->msg_list, 199 struct msg_msg, m_list); 200 list_del(&msg->m_list); 201 if (list_empty(&leaf->msg_list)) { 202 rb_erase(&leaf->rb_node, &info->msg_tree); 203 if (info->node_cache) { 204 info->qsize -= sizeof(*leaf); 205 kfree(leaf); 206 } else { 207 info->node_cache = leaf; 208 } 209 } 210 } 211 info->attr.mq_curmsgs--; 212 info->qsize -= msg->m_ts; 213 return msg; 214 } 215 216 static struct inode *mqueue_get_inode(struct super_block *sb, 217 struct ipc_namespace *ipc_ns, umode_t mode, 218 struct mq_attr *attr) 219 { 220 struct user_struct *u = current_user(); 221 struct inode *inode; 222 int ret = -ENOMEM; 223 224 inode = new_inode(sb); 225 if (!inode) 226 goto err; 227 228 inode->i_ino = get_next_ino(); 229 inode->i_mode = mode; 230 inode->i_uid = current_fsuid(); 231 inode->i_gid = current_fsgid(); 232 inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME; 233 234 if (S_ISREG(mode)) { 235 struct mqueue_inode_info *info; 236 unsigned long mq_bytes, mq_treesize; 237 238 inode->i_fop = &mqueue_file_operations; 239 inode->i_size = FILENT_SIZE; 240 /* mqueue specific info */ 241 info = MQUEUE_I(inode); 242 spin_lock_init(&info->lock); 243 init_waitqueue_head(&info->wait_q); 244 INIT_LIST_HEAD(&info->e_wait_q[0].list); 245 INIT_LIST_HEAD(&info->e_wait_q[1].list); 246 info->notify_owner = NULL; 247 info->notify_user_ns = NULL; 248 info->qsize = 0; 249 info->user = NULL; /* set when all is ok */ 250 info->msg_tree = RB_ROOT; 251 info->node_cache = NULL; 252 memset(&info->attr, 0, sizeof(info->attr)); 253 info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, 254 ipc_ns->mq_msg_default); 255 info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, 256 ipc_ns->mq_msgsize_default); 257 if (attr) { 258 info->attr.mq_maxmsg = attr->mq_maxmsg; 259 info->attr.mq_msgsize = attr->mq_msgsize; 260 } 261 /* 262 * We used to allocate a static array of pointers and account 263 * the size of that array as well as one msg_msg struct per 264 * possible message into the queue size. That's no longer 265 * accurate as the queue is now an rbtree and will grow and 266 * shrink depending on usage patterns. We can, however, still 267 * account one msg_msg struct per message, but the nodes are 268 * allocated depending on priority usage, and most programs 269 * only use one, or a handful, of priorities. However, since 270 * this is pinned memory, we need to assume worst case, so 271 * that means the min(mq_maxmsg, max_priorities) * struct 272 * posix_msg_tree_node. 273 */ 274 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + 275 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * 276 sizeof(struct posix_msg_tree_node); 277 278 mq_bytes = mq_treesize + (info->attr.mq_maxmsg * 279 info->attr.mq_msgsize); 280 281 spin_lock(&mq_lock); 282 if (u->mq_bytes + mq_bytes < u->mq_bytes || 283 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { 284 spin_unlock(&mq_lock); 285 /* mqueue_evict_inode() releases info->messages */ 286 ret = -EMFILE; 287 goto out_inode; 288 } 289 u->mq_bytes += mq_bytes; 290 spin_unlock(&mq_lock); 291 292 /* all is ok */ 293 info->user = get_uid(u); 294 } else if (S_ISDIR(mode)) { 295 inc_nlink(inode); 296 /* Some things misbehave if size == 0 on a directory */ 297 inode->i_size = 2 * DIRENT_SIZE; 298 inode->i_op = &mqueue_dir_inode_operations; 299 inode->i_fop = &simple_dir_operations; 300 } 301 302 return inode; 303 out_inode: 304 iput(inode); 305 err: 306 return ERR_PTR(ret); 307 } 308 309 static int mqueue_fill_super(struct super_block *sb, void *data, int silent) 310 { 311 struct inode *inode; 312 struct ipc_namespace *ns = data; 313 314 sb->s_blocksize = PAGE_CACHE_SIZE; 315 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 316 sb->s_magic = MQUEUE_MAGIC; 317 sb->s_op = &mqueue_super_ops; 318 319 inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); 320 if (IS_ERR(inode)) 321 return PTR_ERR(inode); 322 323 sb->s_root = d_make_root(inode); 324 if (!sb->s_root) 325 return -ENOMEM; 326 return 0; 327 } 328 329 static struct dentry *mqueue_mount(struct file_system_type *fs_type, 330 int flags, const char *dev_name, 331 void *data) 332 { 333 if (!(flags & MS_KERNMOUNT)) 334 data = current->nsproxy->ipc_ns; 335 return mount_ns(fs_type, flags, data, mqueue_fill_super); 336 } 337 338 static void init_once(void *foo) 339 { 340 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; 341 342 inode_init_once(&p->vfs_inode); 343 } 344 345 static struct inode *mqueue_alloc_inode(struct super_block *sb) 346 { 347 struct mqueue_inode_info *ei; 348 349 ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); 350 if (!ei) 351 return NULL; 352 return &ei->vfs_inode; 353 } 354 355 static void mqueue_i_callback(struct rcu_head *head) 356 { 357 struct inode *inode = container_of(head, struct inode, i_rcu); 358 kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); 359 } 360 361 static void mqueue_destroy_inode(struct inode *inode) 362 { 363 call_rcu(&inode->i_rcu, mqueue_i_callback); 364 } 365 366 static void mqueue_evict_inode(struct inode *inode) 367 { 368 struct mqueue_inode_info *info; 369 struct user_struct *user; 370 unsigned long mq_bytes, mq_treesize; 371 struct ipc_namespace *ipc_ns; 372 struct msg_msg *msg; 373 374 clear_inode(inode); 375 376 if (S_ISDIR(inode->i_mode)) 377 return; 378 379 ipc_ns = get_ns_from_inode(inode); 380 info = MQUEUE_I(inode); 381 spin_lock(&info->lock); 382 while ((msg = msg_get(info)) != NULL) 383 free_msg(msg); 384 kfree(info->node_cache); 385 spin_unlock(&info->lock); 386 387 /* Total amount of bytes accounted for the mqueue */ 388 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + 389 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * 390 sizeof(struct posix_msg_tree_node); 391 392 mq_bytes = mq_treesize + (info->attr.mq_maxmsg * 393 info->attr.mq_msgsize); 394 395 user = info->user; 396 if (user) { 397 spin_lock(&mq_lock); 398 user->mq_bytes -= mq_bytes; 399 /* 400 * get_ns_from_inode() ensures that the 401 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns 402 * to which we now hold a reference, or it is NULL. 403 * We can't put it here under mq_lock, though. 404 */ 405 if (ipc_ns) 406 ipc_ns->mq_queues_count--; 407 spin_unlock(&mq_lock); 408 free_uid(user); 409 } 410 if (ipc_ns) 411 put_ipc_ns(ipc_ns); 412 } 413 414 static int mqueue_create(struct inode *dir, struct dentry *dentry, 415 umode_t mode, bool excl) 416 { 417 struct inode *inode; 418 struct mq_attr *attr = dentry->d_fsdata; 419 int error; 420 struct ipc_namespace *ipc_ns; 421 422 spin_lock(&mq_lock); 423 ipc_ns = __get_ns_from_inode(dir); 424 if (!ipc_ns) { 425 error = -EACCES; 426 goto out_unlock; 427 } 428 if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX || 429 (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && 430 !capable(CAP_SYS_RESOURCE))) { 431 error = -ENOSPC; 432 goto out_unlock; 433 } 434 ipc_ns->mq_queues_count++; 435 spin_unlock(&mq_lock); 436 437 inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); 438 if (IS_ERR(inode)) { 439 error = PTR_ERR(inode); 440 spin_lock(&mq_lock); 441 ipc_ns->mq_queues_count--; 442 goto out_unlock; 443 } 444 445 put_ipc_ns(ipc_ns); 446 dir->i_size += DIRENT_SIZE; 447 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; 448 449 d_instantiate(dentry, inode); 450 dget(dentry); 451 return 0; 452 out_unlock: 453 spin_unlock(&mq_lock); 454 if (ipc_ns) 455 put_ipc_ns(ipc_ns); 456 return error; 457 } 458 459 static int mqueue_unlink(struct inode *dir, struct dentry *dentry) 460 { 461 struct inode *inode = dentry->d_inode; 462 463 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; 464 dir->i_size -= DIRENT_SIZE; 465 drop_nlink(inode); 466 dput(dentry); 467 return 0; 468 } 469 470 /* 471 * This is routine for system read from queue file. 472 * To avoid mess with doing here some sort of mq_receive we allow 473 * to read only queue size & notification info (the only values 474 * that are interesting from user point of view and aren't accessible 475 * through std routines) 476 */ 477 static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, 478 size_t count, loff_t *off) 479 { 480 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); 481 char buffer[FILENT_SIZE]; 482 ssize_t ret; 483 484 spin_lock(&info->lock); 485 snprintf(buffer, sizeof(buffer), 486 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n", 487 info->qsize, 488 info->notify_owner ? info->notify.sigev_notify : 0, 489 (info->notify_owner && 490 info->notify.sigev_notify == SIGEV_SIGNAL) ? 491 info->notify.sigev_signo : 0, 492 pid_vnr(info->notify_owner)); 493 spin_unlock(&info->lock); 494 buffer[sizeof(buffer)-1] = '\0'; 495 496 ret = simple_read_from_buffer(u_data, count, off, buffer, 497 strlen(buffer)); 498 if (ret <= 0) 499 return ret; 500 501 filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME; 502 return ret; 503 } 504 505 static int mqueue_flush_file(struct file *filp, fl_owner_t id) 506 { 507 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); 508 509 spin_lock(&info->lock); 510 if (task_tgid(current) == info->notify_owner) 511 remove_notification(info); 512 513 spin_unlock(&info->lock); 514 return 0; 515 } 516 517 static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) 518 { 519 struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); 520 int retval = 0; 521 522 poll_wait(filp, &info->wait_q, poll_tab); 523 524 spin_lock(&info->lock); 525 if (info->attr.mq_curmsgs) 526 retval = POLLIN | POLLRDNORM; 527 528 if (info->attr.mq_curmsgs < info->attr.mq_maxmsg) 529 retval |= POLLOUT | POLLWRNORM; 530 spin_unlock(&info->lock); 531 532 return retval; 533 } 534 535 /* Adds current to info->e_wait_q[sr] before element with smaller prio */ 536 static void wq_add(struct mqueue_inode_info *info, int sr, 537 struct ext_wait_queue *ewp) 538 { 539 struct ext_wait_queue *walk; 540 541 ewp->task = current; 542 543 list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { 544 if (walk->task->static_prio <= current->static_prio) { 545 list_add_tail(&ewp->list, &walk->list); 546 return; 547 } 548 } 549 list_add_tail(&ewp->list, &info->e_wait_q[sr].list); 550 } 551 552 /* 553 * Puts current task to sleep. Caller must hold queue lock. After return 554 * lock isn't held. 555 * sr: SEND or RECV 556 */ 557 static int wq_sleep(struct mqueue_inode_info *info, int sr, 558 ktime_t *timeout, struct ext_wait_queue *ewp) 559 { 560 int retval; 561 signed long time; 562 563 wq_add(info, sr, ewp); 564 565 for (;;) { 566 set_current_state(TASK_INTERRUPTIBLE); 567 568 spin_unlock(&info->lock); 569 time = schedule_hrtimeout_range_clock(timeout, 0, 570 HRTIMER_MODE_ABS, CLOCK_REALTIME); 571 572 while (ewp->state == STATE_PENDING) 573 cpu_relax(); 574 575 if (ewp->state == STATE_READY) { 576 retval = 0; 577 goto out; 578 } 579 spin_lock(&info->lock); 580 if (ewp->state == STATE_READY) { 581 retval = 0; 582 goto out_unlock; 583 } 584 if (signal_pending(current)) { 585 retval = -ERESTARTSYS; 586 break; 587 } 588 if (time == 0) { 589 retval = -ETIMEDOUT; 590 break; 591 } 592 } 593 list_del(&ewp->list); 594 out_unlock: 595 spin_unlock(&info->lock); 596 out: 597 return retval; 598 } 599 600 /* 601 * Returns waiting task that should be serviced first or NULL if none exists 602 */ 603 static struct ext_wait_queue *wq_get_first_waiter( 604 struct mqueue_inode_info *info, int sr) 605 { 606 struct list_head *ptr; 607 608 ptr = info->e_wait_q[sr].list.prev; 609 if (ptr == &info->e_wait_q[sr].list) 610 return NULL; 611 return list_entry(ptr, struct ext_wait_queue, list); 612 } 613 614 615 static inline void set_cookie(struct sk_buff *skb, char code) 616 { 617 ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code; 618 } 619 620 /* 621 * The next function is only to split too long sys_mq_timedsend 622 */ 623 static void __do_notify(struct mqueue_inode_info *info) 624 { 625 /* notification 626 * invoked when there is registered process and there isn't process 627 * waiting synchronously for message AND state of queue changed from 628 * empty to not empty. Here we are sure that no one is waiting 629 * synchronously. */ 630 if (info->notify_owner && 631 info->attr.mq_curmsgs == 1) { 632 struct siginfo sig_i; 633 switch (info->notify.sigev_notify) { 634 case SIGEV_NONE: 635 break; 636 case SIGEV_SIGNAL: 637 /* sends signal */ 638 639 sig_i.si_signo = info->notify.sigev_signo; 640 sig_i.si_errno = 0; 641 sig_i.si_code = SI_MESGQ; 642 sig_i.si_value = info->notify.sigev_value; 643 /* map current pid/uid into info->owner's namespaces */ 644 rcu_read_lock(); 645 sig_i.si_pid = task_tgid_nr_ns(current, 646 ns_of_pid(info->notify_owner)); 647 sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); 648 rcu_read_unlock(); 649 650 kill_pid_info(info->notify.sigev_signo, 651 &sig_i, info->notify_owner); 652 break; 653 case SIGEV_THREAD: 654 set_cookie(info->notify_cookie, NOTIFY_WOKENUP); 655 netlink_sendskb(info->notify_sock, info->notify_cookie); 656 break; 657 } 658 /* after notification unregisters process */ 659 put_pid(info->notify_owner); 660 put_user_ns(info->notify_user_ns); 661 info->notify_owner = NULL; 662 info->notify_user_ns = NULL; 663 } 664 wake_up(&info->wait_q); 665 } 666 667 static int prepare_timeout(const struct timespec __user *u_abs_timeout, 668 ktime_t *expires, struct timespec *ts) 669 { 670 if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) 671 return -EFAULT; 672 if (!timespec_valid(ts)) 673 return -EINVAL; 674 675 *expires = timespec_to_ktime(*ts); 676 return 0; 677 } 678 679 static void remove_notification(struct mqueue_inode_info *info) 680 { 681 if (info->notify_owner != NULL && 682 info->notify.sigev_notify == SIGEV_THREAD) { 683 set_cookie(info->notify_cookie, NOTIFY_REMOVED); 684 netlink_sendskb(info->notify_sock, info->notify_cookie); 685 } 686 put_pid(info->notify_owner); 687 put_user_ns(info->notify_user_ns); 688 info->notify_owner = NULL; 689 info->notify_user_ns = NULL; 690 } 691 692 static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) 693 { 694 int mq_treesize; 695 unsigned long total_size; 696 697 if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) 698 return -EINVAL; 699 if (capable(CAP_SYS_RESOURCE)) { 700 if (attr->mq_maxmsg > HARD_MSGMAX || 701 attr->mq_msgsize > HARD_MSGSIZEMAX) 702 return -EINVAL; 703 } else { 704 if (attr->mq_maxmsg > ipc_ns->mq_msg_max || 705 attr->mq_msgsize > ipc_ns->mq_msgsize_max) 706 return -EINVAL; 707 } 708 /* check for overflow */ 709 if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) 710 return -EOVERFLOW; 711 mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + 712 min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * 713 sizeof(struct posix_msg_tree_node); 714 total_size = attr->mq_maxmsg * attr->mq_msgsize; 715 if (total_size + mq_treesize < total_size) 716 return -EOVERFLOW; 717 return 0; 718 } 719 720 /* 721 * Invoked when creating a new queue via sys_mq_open 722 */ 723 static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, 724 struct path *path, int oflag, umode_t mode, 725 struct mq_attr *attr) 726 { 727 const struct cred *cred = current_cred(); 728 int ret; 729 730 if (attr) { 731 ret = mq_attr_ok(ipc_ns, attr); 732 if (ret) 733 return ERR_PTR(ret); 734 /* store for use during create */ 735 path->dentry->d_fsdata = attr; 736 } else { 737 struct mq_attr def_attr; 738 739 def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, 740 ipc_ns->mq_msg_default); 741 def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, 742 ipc_ns->mq_msgsize_default); 743 ret = mq_attr_ok(ipc_ns, &def_attr); 744 if (ret) 745 return ERR_PTR(ret); 746 } 747 748 mode &= ~current_umask(); 749 ret = vfs_create(dir, path->dentry, mode, true); 750 path->dentry->d_fsdata = NULL; 751 if (ret) 752 return ERR_PTR(ret); 753 return dentry_open(path, oflag, cred); 754 } 755 756 /* Opens existing queue */ 757 static struct file *do_open(struct path *path, int oflag) 758 { 759 static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, 760 MAY_READ | MAY_WRITE }; 761 int acc; 762 if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) 763 return ERR_PTR(-EINVAL); 764 acc = oflag2acc[oflag & O_ACCMODE]; 765 if (inode_permission(path->dentry->d_inode, acc)) 766 return ERR_PTR(-EACCES); 767 return dentry_open(path, oflag, current_cred()); 768 } 769 770 SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, 771 struct mq_attr __user *, u_attr) 772 { 773 struct path path; 774 struct file *filp; 775 struct filename *name; 776 struct mq_attr attr; 777 int fd, error; 778 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; 779 struct vfsmount *mnt = ipc_ns->mq_mnt; 780 struct dentry *root = mnt->mnt_root; 781 int ro; 782 783 if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) 784 return -EFAULT; 785 786 audit_mq_open(oflag, mode, u_attr ? &attr : NULL); 787 788 if (IS_ERR(name = getname(u_name))) 789 return PTR_ERR(name); 790 791 fd = get_unused_fd_flags(O_CLOEXEC); 792 if (fd < 0) 793 goto out_putname; 794 795 ro = mnt_want_write(mnt); /* we'll drop it in any case */ 796 error = 0; 797 mutex_lock(&root->d_inode->i_mutex); 798 path.dentry = lookup_one_len(name->name, root, strlen(name->name)); 799 if (IS_ERR(path.dentry)) { 800 error = PTR_ERR(path.dentry); 801 goto out_putfd; 802 } 803 path.mnt = mntget(mnt); 804 805 if (oflag & O_CREAT) { 806 if (path.dentry->d_inode) { /* entry already exists */ 807 audit_inode(name, path.dentry, 0); 808 if (oflag & O_EXCL) { 809 error = -EEXIST; 810 goto out; 811 } 812 filp = do_open(&path, oflag); 813 } else { 814 if (ro) { 815 error = ro; 816 goto out; 817 } 818 filp = do_create(ipc_ns, root->d_inode, 819 &path, oflag, mode, 820 u_attr ? &attr : NULL); 821 } 822 } else { 823 if (!path.dentry->d_inode) { 824 error = -ENOENT; 825 goto out; 826 } 827 audit_inode(name, path.dentry, 0); 828 filp = do_open(&path, oflag); 829 } 830 831 if (!IS_ERR(filp)) 832 fd_install(fd, filp); 833 else 834 error = PTR_ERR(filp); 835 out: 836 path_put(&path); 837 out_putfd: 838 if (error) { 839 put_unused_fd(fd); 840 fd = error; 841 } 842 mutex_unlock(&root->d_inode->i_mutex); 843 mnt_drop_write(mnt); 844 out_putname: 845 putname(name); 846 return fd; 847 } 848 849 SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) 850 { 851 int err; 852 struct filename *name; 853 struct dentry *dentry; 854 struct inode *inode = NULL; 855 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; 856 struct vfsmount *mnt = ipc_ns->mq_mnt; 857 858 name = getname(u_name); 859 if (IS_ERR(name)) 860 return PTR_ERR(name); 861 862 err = mnt_want_write(mnt); 863 if (err) 864 goto out_name; 865 mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); 866 dentry = lookup_one_len(name->name, mnt->mnt_root, 867 strlen(name->name)); 868 if (IS_ERR(dentry)) { 869 err = PTR_ERR(dentry); 870 goto out_unlock; 871 } 872 873 inode = dentry->d_inode; 874 if (!inode) { 875 err = -ENOENT; 876 } else { 877 ihold(inode); 878 err = vfs_unlink(dentry->d_parent->d_inode, dentry); 879 } 880 dput(dentry); 881 882 out_unlock: 883 mutex_unlock(&mnt->mnt_root->d_inode->i_mutex); 884 if (inode) 885 iput(inode); 886 mnt_drop_write(mnt); 887 out_name: 888 putname(name); 889 890 return err; 891 } 892 893 /* Pipelined send and receive functions. 894 * 895 * If a receiver finds no waiting message, then it registers itself in the 896 * list of waiting receivers. A sender checks that list before adding the new 897 * message into the message array. If there is a waiting receiver, then it 898 * bypasses the message array and directly hands the message over to the 899 * receiver. 900 * The receiver accepts the message and returns without grabbing the queue 901 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers 902 * are necessary. The same algorithm is used for sysv semaphores, see 903 * ipc/sem.c for more details. 904 * 905 * The same algorithm is used for senders. 906 */ 907 908 /* pipelined_send() - send a message directly to the task waiting in 909 * sys_mq_timedreceive() (without inserting message into a queue). 910 */ 911 static inline void pipelined_send(struct mqueue_inode_info *info, 912 struct msg_msg *message, 913 struct ext_wait_queue *receiver) 914 { 915 receiver->msg = message; 916 list_del(&receiver->list); 917 receiver->state = STATE_PENDING; 918 wake_up_process(receiver->task); 919 smp_wmb(); 920 receiver->state = STATE_READY; 921 } 922 923 /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() 924 * gets its message and put to the queue (we have one free place for sure). */ 925 static inline void pipelined_receive(struct mqueue_inode_info *info) 926 { 927 struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND); 928 929 if (!sender) { 930 /* for poll */ 931 wake_up_interruptible(&info->wait_q); 932 return; 933 } 934 if (msg_insert(sender->msg, info)) 935 return; 936 list_del(&sender->list); 937 sender->state = STATE_PENDING; 938 wake_up_process(sender->task); 939 smp_wmb(); 940 sender->state = STATE_READY; 941 } 942 943 SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, 944 size_t, msg_len, unsigned int, msg_prio, 945 const struct timespec __user *, u_abs_timeout) 946 { 947 struct fd f; 948 struct inode *inode; 949 struct ext_wait_queue wait; 950 struct ext_wait_queue *receiver; 951 struct msg_msg *msg_ptr; 952 struct mqueue_inode_info *info; 953 ktime_t expires, *timeout = NULL; 954 struct timespec ts; 955 struct posix_msg_tree_node *new_leaf = NULL; 956 int ret = 0; 957 958 if (u_abs_timeout) { 959 int res = prepare_timeout(u_abs_timeout, &expires, &ts); 960 if (res) 961 return res; 962 timeout = &expires; 963 } 964 965 if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) 966 return -EINVAL; 967 968 audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL); 969 970 f = fdget(mqdes); 971 if (unlikely(!f.file)) { 972 ret = -EBADF; 973 goto out; 974 } 975 976 inode = f.file->f_path.dentry->d_inode; 977 if (unlikely(f.file->f_op != &mqueue_file_operations)) { 978 ret = -EBADF; 979 goto out_fput; 980 } 981 info = MQUEUE_I(inode); 982 audit_inode(NULL, f.file->f_path.dentry, 0); 983 984 if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { 985 ret = -EBADF; 986 goto out_fput; 987 } 988 989 if (unlikely(msg_len > info->attr.mq_msgsize)) { 990 ret = -EMSGSIZE; 991 goto out_fput; 992 } 993 994 /* First try to allocate memory, before doing anything with 995 * existing queues. */ 996 msg_ptr = load_msg(u_msg_ptr, msg_len); 997 if (IS_ERR(msg_ptr)) { 998 ret = PTR_ERR(msg_ptr); 999 goto out_fput; 1000 } 1001 msg_ptr->m_ts = msg_len; 1002 msg_ptr->m_type = msg_prio; 1003 1004 /* 1005 * msg_insert really wants us to have a valid, spare node struct so 1006 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will 1007 * fall back to that if necessary. 1008 */ 1009 if (!info->node_cache) 1010 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); 1011 1012 spin_lock(&info->lock); 1013 1014 if (!info->node_cache && new_leaf) { 1015 /* Save our speculative allocation into the cache */ 1016 INIT_LIST_HEAD(&new_leaf->msg_list); 1017 info->node_cache = new_leaf; 1018 info->qsize += sizeof(*new_leaf); 1019 new_leaf = NULL; 1020 } else { 1021 kfree(new_leaf); 1022 } 1023 1024 if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { 1025 if (f.file->f_flags & O_NONBLOCK) { 1026 ret = -EAGAIN; 1027 } else { 1028 wait.task = current; 1029 wait.msg = (void *) msg_ptr; 1030 wait.state = STATE_NONE; 1031 ret = wq_sleep(info, SEND, timeout, &wait); 1032 /* 1033 * wq_sleep must be called with info->lock held, and 1034 * returns with the lock released 1035 */ 1036 goto out_free; 1037 } 1038 } else { 1039 receiver = wq_get_first_waiter(info, RECV); 1040 if (receiver) { 1041 pipelined_send(info, msg_ptr, receiver); 1042 } else { 1043 /* adds message to the queue */ 1044 ret = msg_insert(msg_ptr, info); 1045 if (ret) 1046 goto out_unlock; 1047 __do_notify(info); 1048 } 1049 inode->i_atime = inode->i_mtime = inode->i_ctime = 1050 CURRENT_TIME; 1051 } 1052 out_unlock: 1053 spin_unlock(&info->lock); 1054 out_free: 1055 if (ret) 1056 free_msg(msg_ptr); 1057 out_fput: 1058 fdput(f); 1059 out: 1060 return ret; 1061 } 1062 1063 SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, 1064 size_t, msg_len, unsigned int __user *, u_msg_prio, 1065 const struct timespec __user *, u_abs_timeout) 1066 { 1067 ssize_t ret; 1068 struct msg_msg *msg_ptr; 1069 struct fd f; 1070 struct inode *inode; 1071 struct mqueue_inode_info *info; 1072 struct ext_wait_queue wait; 1073 ktime_t expires, *timeout = NULL; 1074 struct timespec ts; 1075 struct posix_msg_tree_node *new_leaf = NULL; 1076 1077 if (u_abs_timeout) { 1078 int res = prepare_timeout(u_abs_timeout, &expires, &ts); 1079 if (res) 1080 return res; 1081 timeout = &expires; 1082 } 1083 1084 audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL); 1085 1086 f = fdget(mqdes); 1087 if (unlikely(!f.file)) { 1088 ret = -EBADF; 1089 goto out; 1090 } 1091 1092 inode = f.file->f_path.dentry->d_inode; 1093 if (unlikely(f.file->f_op != &mqueue_file_operations)) { 1094 ret = -EBADF; 1095 goto out_fput; 1096 } 1097 info = MQUEUE_I(inode); 1098 audit_inode(NULL, f.file->f_path.dentry, 0); 1099 1100 if (unlikely(!(f.file->f_mode & FMODE_READ))) { 1101 ret = -EBADF; 1102 goto out_fput; 1103 } 1104 1105 /* checks if buffer is big enough */ 1106 if (unlikely(msg_len < info->attr.mq_msgsize)) { 1107 ret = -EMSGSIZE; 1108 goto out_fput; 1109 } 1110 1111 /* 1112 * msg_insert really wants us to have a valid, spare node struct so 1113 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will 1114 * fall back to that if necessary. 1115 */ 1116 if (!info->node_cache) 1117 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); 1118 1119 spin_lock(&info->lock); 1120 1121 if (!info->node_cache && new_leaf) { 1122 /* Save our speculative allocation into the cache */ 1123 INIT_LIST_HEAD(&new_leaf->msg_list); 1124 info->node_cache = new_leaf; 1125 info->qsize += sizeof(*new_leaf); 1126 } else { 1127 kfree(new_leaf); 1128 } 1129 1130 if (info->attr.mq_curmsgs == 0) { 1131 if (f.file->f_flags & O_NONBLOCK) { 1132 spin_unlock(&info->lock); 1133 ret = -EAGAIN; 1134 } else { 1135 wait.task = current; 1136 wait.state = STATE_NONE; 1137 ret = wq_sleep(info, RECV, timeout, &wait); 1138 msg_ptr = wait.msg; 1139 } 1140 } else { 1141 msg_ptr = msg_get(info); 1142 1143 inode->i_atime = inode->i_mtime = inode->i_ctime = 1144 CURRENT_TIME; 1145 1146 /* There is now free space in queue. */ 1147 pipelined_receive(info); 1148 spin_unlock(&info->lock); 1149 ret = 0; 1150 } 1151 if (ret == 0) { 1152 ret = msg_ptr->m_ts; 1153 1154 if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) || 1155 store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) { 1156 ret = -EFAULT; 1157 } 1158 free_msg(msg_ptr); 1159 } 1160 out_fput: 1161 fdput(f); 1162 out: 1163 return ret; 1164 } 1165 1166 /* 1167 * Notes: the case when user wants us to deregister (with NULL as pointer) 1168 * and he isn't currently owner of notification, will be silently discarded. 1169 * It isn't explicitly defined in the POSIX. 1170 */ 1171 SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, 1172 const struct sigevent __user *, u_notification) 1173 { 1174 int ret; 1175 struct fd f; 1176 struct sock *sock; 1177 struct inode *inode; 1178 struct sigevent notification; 1179 struct mqueue_inode_info *info; 1180 struct sk_buff *nc; 1181 1182 if (u_notification) { 1183 if (copy_from_user(¬ification, u_notification, 1184 sizeof(struct sigevent))) 1185 return -EFAULT; 1186 } 1187 1188 audit_mq_notify(mqdes, u_notification ? ¬ification : NULL); 1189 1190 nc = NULL; 1191 sock = NULL; 1192 if (u_notification != NULL) { 1193 if (unlikely(notification.sigev_notify != SIGEV_NONE && 1194 notification.sigev_notify != SIGEV_SIGNAL && 1195 notification.sigev_notify != SIGEV_THREAD)) 1196 return -EINVAL; 1197 if (notification.sigev_notify == SIGEV_SIGNAL && 1198 !valid_signal(notification.sigev_signo)) { 1199 return -EINVAL; 1200 } 1201 if (notification.sigev_notify == SIGEV_THREAD) { 1202 long timeo; 1203 1204 /* create the notify skb */ 1205 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); 1206 if (!nc) { 1207 ret = -ENOMEM; 1208 goto out; 1209 } 1210 if (copy_from_user(nc->data, 1211 notification.sigev_value.sival_ptr, 1212 NOTIFY_COOKIE_LEN)) { 1213 ret = -EFAULT; 1214 goto out; 1215 } 1216 1217 /* TODO: add a header? */ 1218 skb_put(nc, NOTIFY_COOKIE_LEN); 1219 /* and attach it to the socket */ 1220 retry: 1221 f = fdget(notification.sigev_signo); 1222 if (!f.file) { 1223 ret = -EBADF; 1224 goto out; 1225 } 1226 sock = netlink_getsockbyfilp(f.file); 1227 fdput(f); 1228 if (IS_ERR(sock)) { 1229 ret = PTR_ERR(sock); 1230 sock = NULL; 1231 goto out; 1232 } 1233 1234 timeo = MAX_SCHEDULE_TIMEOUT; 1235 ret = netlink_attachskb(sock, nc, &timeo, NULL); 1236 if (ret == 1) 1237 goto retry; 1238 if (ret) { 1239 sock = NULL; 1240 nc = NULL; 1241 goto out; 1242 } 1243 } 1244 } 1245 1246 f = fdget(mqdes); 1247 if (!f.file) { 1248 ret = -EBADF; 1249 goto out; 1250 } 1251 1252 inode = f.file->f_path.dentry->d_inode; 1253 if (unlikely(f.file->f_op != &mqueue_file_operations)) { 1254 ret = -EBADF; 1255 goto out_fput; 1256 } 1257 info = MQUEUE_I(inode); 1258 1259 ret = 0; 1260 spin_lock(&info->lock); 1261 if (u_notification == NULL) { 1262 if (info->notify_owner == task_tgid(current)) { 1263 remove_notification(info); 1264 inode->i_atime = inode->i_ctime = CURRENT_TIME; 1265 } 1266 } else if (info->notify_owner != NULL) { 1267 ret = -EBUSY; 1268 } else { 1269 switch (notification.sigev_notify) { 1270 case SIGEV_NONE: 1271 info->notify.sigev_notify = SIGEV_NONE; 1272 break; 1273 case SIGEV_THREAD: 1274 info->notify_sock = sock; 1275 info->notify_cookie = nc; 1276 sock = NULL; 1277 nc = NULL; 1278 info->notify.sigev_notify = SIGEV_THREAD; 1279 break; 1280 case SIGEV_SIGNAL: 1281 info->notify.sigev_signo = notification.sigev_signo; 1282 info->notify.sigev_value = notification.sigev_value; 1283 info->notify.sigev_notify = SIGEV_SIGNAL; 1284 break; 1285 } 1286 1287 info->notify_owner = get_pid(task_tgid(current)); 1288 info->notify_user_ns = get_user_ns(current_user_ns()); 1289 inode->i_atime = inode->i_ctime = CURRENT_TIME; 1290 } 1291 spin_unlock(&info->lock); 1292 out_fput: 1293 fdput(f); 1294 out: 1295 if (sock) { 1296 netlink_detachskb(sock, nc); 1297 } else if (nc) { 1298 dev_kfree_skb(nc); 1299 } 1300 return ret; 1301 } 1302 1303 SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, 1304 const struct mq_attr __user *, u_mqstat, 1305 struct mq_attr __user *, u_omqstat) 1306 { 1307 int ret; 1308 struct mq_attr mqstat, omqstat; 1309 struct fd f; 1310 struct inode *inode; 1311 struct mqueue_inode_info *info; 1312 1313 if (u_mqstat != NULL) { 1314 if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr))) 1315 return -EFAULT; 1316 if (mqstat.mq_flags & (~O_NONBLOCK)) 1317 return -EINVAL; 1318 } 1319 1320 f = fdget(mqdes); 1321 if (!f.file) { 1322 ret = -EBADF; 1323 goto out; 1324 } 1325 1326 inode = f.file->f_path.dentry->d_inode; 1327 if (unlikely(f.file->f_op != &mqueue_file_operations)) { 1328 ret = -EBADF; 1329 goto out_fput; 1330 } 1331 info = MQUEUE_I(inode); 1332 1333 spin_lock(&info->lock); 1334 1335 omqstat = info->attr; 1336 omqstat.mq_flags = f.file->f_flags & O_NONBLOCK; 1337 if (u_mqstat) { 1338 audit_mq_getsetattr(mqdes, &mqstat); 1339 spin_lock(&f.file->f_lock); 1340 if (mqstat.mq_flags & O_NONBLOCK) 1341 f.file->f_flags |= O_NONBLOCK; 1342 else 1343 f.file->f_flags &= ~O_NONBLOCK; 1344 spin_unlock(&f.file->f_lock); 1345 1346 inode->i_atime = inode->i_ctime = CURRENT_TIME; 1347 } 1348 1349 spin_unlock(&info->lock); 1350 1351 ret = 0; 1352 if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat, 1353 sizeof(struct mq_attr))) 1354 ret = -EFAULT; 1355 1356 out_fput: 1357 fdput(f); 1358 out: 1359 return ret; 1360 } 1361 1362 static const struct inode_operations mqueue_dir_inode_operations = { 1363 .lookup = simple_lookup, 1364 .create = mqueue_create, 1365 .unlink = mqueue_unlink, 1366 }; 1367 1368 static const struct file_operations mqueue_file_operations = { 1369 .flush = mqueue_flush_file, 1370 .poll = mqueue_poll_file, 1371 .read = mqueue_read_file, 1372 .llseek = default_llseek, 1373 }; 1374 1375 static const struct super_operations mqueue_super_ops = { 1376 .alloc_inode = mqueue_alloc_inode, 1377 .destroy_inode = mqueue_destroy_inode, 1378 .evict_inode = mqueue_evict_inode, 1379 .statfs = simple_statfs, 1380 }; 1381 1382 static struct file_system_type mqueue_fs_type = { 1383 .name = "mqueue", 1384 .mount = mqueue_mount, 1385 .kill_sb = kill_litter_super, 1386 }; 1387 1388 int mq_init_ns(struct ipc_namespace *ns) 1389 { 1390 ns->mq_queues_count = 0; 1391 ns->mq_queues_max = DFLT_QUEUESMAX; 1392 ns->mq_msg_max = DFLT_MSGMAX; 1393 ns->mq_msgsize_max = DFLT_MSGSIZEMAX; 1394 ns->mq_msg_default = DFLT_MSG; 1395 ns->mq_msgsize_default = DFLT_MSGSIZE; 1396 1397 ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); 1398 if (IS_ERR(ns->mq_mnt)) { 1399 int err = PTR_ERR(ns->mq_mnt); 1400 ns->mq_mnt = NULL; 1401 return err; 1402 } 1403 return 0; 1404 } 1405 1406 void mq_clear_sbinfo(struct ipc_namespace *ns) 1407 { 1408 ns->mq_mnt->mnt_sb->s_fs_info = NULL; 1409 } 1410 1411 void mq_put_mnt(struct ipc_namespace *ns) 1412 { 1413 kern_unmount(ns->mq_mnt); 1414 } 1415 1416 static int __init init_mqueue_fs(void) 1417 { 1418 int error; 1419 1420 mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", 1421 sizeof(struct mqueue_inode_info), 0, 1422 SLAB_HWCACHE_ALIGN, init_once); 1423 if (mqueue_inode_cachep == NULL) 1424 return -ENOMEM; 1425 1426 /* ignore failures - they are not fatal */ 1427 mq_sysctl_table = mq_register_sysctl_table(); 1428 1429 error = register_filesystem(&mqueue_fs_type); 1430 if (error) 1431 goto out_sysctl; 1432 1433 spin_lock_init(&mq_lock); 1434 1435 error = mq_init_ns(&init_ipc_ns); 1436 if (error) 1437 goto out_filesystem; 1438 1439 return 0; 1440 1441 out_filesystem: 1442 unregister_filesystem(&mqueue_fs_type); 1443 out_sysctl: 1444 if (mq_sysctl_table) 1445 unregister_sysctl_table(mq_sysctl_table); 1446 kmem_cache_destroy(mqueue_inode_cachep); 1447 return error; 1448 } 1449 1450 __initcall(init_mqueue_fs); 1451