/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
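 * The mq_lock keeps sb->s_fs_info stable while we take a reference on it:
 * mq_clear_sbinfo() resets s_fs_info to NULL when the owning ipc namespace
 * goes away, so an unlocked caller could race with namespace teardown.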
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		rb_init_node(&leaf->rb_node);
		INIT_LIST_HEAD(&leaf->msg_list);
		info->qsize += sizeof(*leaf);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to
		 * the right.  On receive, we want the highest priorities
		 * first, so walk all the way to the right.
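		 * The rightmost node therefore holds the highest priority
		 * that has messages queued; within one node, messages of
		 * equal priority stay FIFO on msg_list.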
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			info->qsize -= sizeof(*leaf);
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				info->qsize -= sizeof(*leaf);
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
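		 * As an illustration (struct sizes vary by architecture):
		 * a queue with mq_maxmsg = 10 and mq_msgsize = 8192 pins
		 * 10 * 8192 bytes of message payload plus mq_treesize bytes
		 * of bookkeeping against RLIMIT_MSGQUEUE.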
		 */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return mount_ns(fs_type, flags, data, mqueue_fill_super);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *)foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}
	if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
	    (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	     !capable(CAP_SYS_RESOURCE))) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This is a helper for reads from a queue file.  To avoid doing some
 * sort of mq_receive here, we expose only the queue size and the
 * notification details: the only values of interest to user space that
 * aren't accessible through the standard message queue calls.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
		 info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime =
		filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
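 * The task is published on e_wait_q[sr] (ordered by static_prio via
 * wq_add()); after wakeup it may briefly spin while ->state is still
 * STATE_PENDING, because pipelined_send()/pipelined_receive() set
 * STATE_READY without holding info->lock.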
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend from growing too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Deliver a notification: invoked when a process has registered
	 * for notification, no process is waiting synchronously for a
	 * message, AND the queue has just gone from empty to non-empty.
	 * Here we can be sure that no one is waiting synchronously.
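	 *
	 * Depending on how the owner registered, this either queues a
	 * SIGEV_SIGNAL siginfo to the owning tgid, pokes the SIGEV_THREAD
	 * netlink cookie so the library side can run its thread, or does
	 * nothing for SIGEV_NONE; in every case the registration is then
	 * dropped, since POSIX notification is one-shot.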
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
						current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
		    attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
			struct dentry *dentry, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	struct file *result;
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			goto out;
		/* store for use during create */
		dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			goto out;
	}

	mode &= ~current_umask();
	ret = mnt_want_write(ipc_ns->mq_mnt);
	if (ret)
		goto out;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out_drop_write;

	result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(ipc_ns->mq_mnt);
	return result;

out_drop_write:
	mnt_drop_write(ipc_ns->mq_mnt);
out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct ipc_namespace *ipc_ns,
			    struct dentry *dentry, int oflag)
{
	int ret;
	const struct cred *cred = current_cred();

	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		ret = -EINVAL;
		goto err;
	}

	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
		ret = -EACCES;
		goto err;
	}

	return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);

err:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_putfd;
	}
	mntget(ipc_ns->mq_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(ipc_ns, dentry, oflag);
		} else {
			filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
						dentry, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (!dentry->d_inode) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, dentry);
		filp = do_open(ipc_ns, dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
out_putfd:
	put_unused_fd(fd);
	fd = error;
out_upsem:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		ihold(inode);
	err = mnt_want_write(ipc_ns->mq_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(ipc_ns->mq_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (there is guaranteed to be
 * room, since a message was just removed).
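 * The wake-up handshake mirrors pipelined_send(): wake the sender, then
 * publish STATE_READY after an smp_wmb() so the woken task observes its
 * message safely queued before wq_sleep() returns.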
 */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * First try to allocate memory, before doing anything with
	 * existing queues.
	 */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
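	 *
	 * Doing the allocation here, before taking info->lock, lets us use
	 * GFP_KERNEL and sleep if needed; inside the locked region only a
	 * GFP_ATOMIC attempt would be possible.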
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		rb_init_node(&new_leaf->rb_node);
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		info->qsize += sizeof(*new_leaf);
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
	}
out_unlock:
	spin_unlock(&info->lock);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fput(filp);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
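	 *
	 * On the receive side the cached node is for pipelined_receive(),
	 * which may need to insert a blocked sender's message while we
	 * still hold info->lock.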
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		rb_init_node(&new_leaf->rb_node);
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		info->qsize += sizeof(*new_leaf);
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}

/*
 * Notes: a request to deregister (a NULL notification pointer) from a task
 * that is not the current owner of the notification is silently ignored.
 * POSIX does not explicitly define this case.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header?
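			 * For now the cookie is a bare NOTIFY_COOKIE_LEN
			 * byte buffer; set_cookie() stores the wake-up code
			 * in its last byte.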
			 */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			if (!filp) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
					sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
	ns->mq_msg_default = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);