/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
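/*
 * Note: sb->s_fs_info points at the owning ipc_namespace.  It is cleared
 * (under mq_lock) by mq_clear_sbinfo() when the namespace is torn down, so
 * __get_ns_from_inode() below can legitimately return NULL and callers
 * must cope with that.
 */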
103 */ 104 static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode) 105 { 106 return get_ipc_ns(inode->i_sb->s_fs_info); 107 } 108 109 static struct ipc_namespace *get_ns_from_inode(struct inode *inode) 110 { 111 struct ipc_namespace *ns; 112 113 spin_lock(&mq_lock); 114 ns = __get_ns_from_inode(inode); 115 spin_unlock(&mq_lock); 116 return ns; 117 } 118 119 /* Auxiliary functions to manipulate messages' list */ 120 static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) 121 { 122 struct rb_node **p, *parent = NULL; 123 struct posix_msg_tree_node *leaf; 124 125 p = &info->msg_tree.rb_node; 126 while (*p) { 127 parent = *p; 128 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); 129 130 if (likely(leaf->priority == msg->m_type)) 131 goto insert_msg; 132 else if (msg->m_type < leaf->priority) 133 p = &(*p)->rb_left; 134 else 135 p = &(*p)->rb_right; 136 } 137 if (info->node_cache) { 138 leaf = info->node_cache; 139 info->node_cache = NULL; 140 } else { 141 leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC); 142 if (!leaf) 143 return -ENOMEM; 144 INIT_LIST_HEAD(&leaf->msg_list); 145 info->qsize += sizeof(*leaf); 146 } 147 leaf->priority = msg->m_type; 148 rb_link_node(&leaf->rb_node, parent, p); 149 rb_insert_color(&leaf->rb_node, &info->msg_tree); 150 insert_msg: 151 info->attr.mq_curmsgs++; 152 info->qsize += msg->m_ts; 153 list_add_tail(&msg->m_list, &leaf->msg_list); 154 return 0; 155 } 156 157 static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) 158 { 159 struct rb_node **p, *parent = NULL; 160 struct posix_msg_tree_node *leaf; 161 struct msg_msg *msg; 162 163 try_again: 164 p = &info->msg_tree.rb_node; 165 while (*p) { 166 parent = *p; 167 /* 168 * During insert, low priorities go to the left and high to the 169 * right. On receive, we want the highest priorities first, so 170 * walk all the way to the right. 
171 */ 172 p = &(*p)->rb_right; 173 } 174 if (!parent) { 175 if (info->attr.mq_curmsgs) { 176 pr_warn_once("Inconsistency in POSIX message queue, " 177 "no tree element, but supposedly messages " 178 "should exist!\n"); 179 info->attr.mq_curmsgs = 0; 180 } 181 return NULL; 182 } 183 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); 184 if (unlikely(list_empty(&leaf->msg_list))) { 185 pr_warn_once("Inconsistency in POSIX message queue, " 186 "empty leaf node but we haven't implemented " 187 "lazy leaf delete!\n"); 188 rb_erase(&leaf->rb_node, &info->msg_tree); 189 if (info->node_cache) { 190 info->qsize -= sizeof(*leaf); 191 kfree(leaf); 192 } else { 193 info->node_cache = leaf; 194 } 195 goto try_again; 196 } else { 197 msg = list_first_entry(&leaf->msg_list, 198 struct msg_msg, m_list); 199 list_del(&msg->m_list); 200 if (list_empty(&leaf->msg_list)) { 201 rb_erase(&leaf->rb_node, &info->msg_tree); 202 if (info->node_cache) { 203 info->qsize -= sizeof(*leaf); 204 kfree(leaf); 205 } else { 206 info->node_cache = leaf; 207 } 208 } 209 } 210 info->attr.mq_curmsgs--; 211 info->qsize -= msg->m_ts; 212 return msg; 213 } 214 215 static struct inode *mqueue_get_inode(struct super_block *sb, 216 struct ipc_namespace *ipc_ns, umode_t mode, 217 struct mq_attr *attr) 218 { 219 struct user_struct *u = current_user(); 220 struct inode *inode; 221 int ret = -ENOMEM; 222 223 inode = new_inode(sb); 224 if (!inode) 225 goto err; 226 227 inode->i_ino = get_next_ino(); 228 inode->i_mode = mode; 229 inode->i_uid = current_fsuid(); 230 inode->i_gid = current_fsgid(); 231 inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME; 232 233 if (S_ISREG(mode)) { 234 struct mqueue_inode_info *info; 235 unsigned long mq_bytes, mq_treesize; 236 237 inode->i_fop = &mqueue_file_operations; 238 inode->i_size = FILENT_SIZE; 239 /* mqueue specific info */ 240 info = MQUEUE_I(inode); 241 spin_lock_init(&info->lock); 242 init_waitqueue_head(&info->wait_q); 243 INIT_LIST_HEAD(&info->e_wait_q[0].list); 244 INIT_LIST_HEAD(&info->e_wait_q[1].list); 245 info->notify_owner = NULL; 246 info->notify_user_ns = NULL; 247 info->qsize = 0; 248 info->user = NULL; /* set when all is ok */ 249 info->msg_tree = RB_ROOT; 250 info->node_cache = NULL; 251 memset(&info->attr, 0, sizeof(info->attr)); 252 info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, 253 ipc_ns->mq_msg_default); 254 info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, 255 ipc_ns->mq_msgsize_default); 256 if (attr) { 257 info->attr.mq_maxmsg = attr->mq_maxmsg; 258 info->attr.mq_msgsize = attr->mq_msgsize; 259 } 260 /* 261 * We used to allocate a static array of pointers and account 262 * the size of that array as well as one msg_msg struct per 263 * possible message into the queue size. That's no longer 264 * accurate as the queue is now an rbtree and will grow and 265 * shrink depending on usage patterns. We can, however, still 266 * account one msg_msg struct per message, but the nodes are 267 * allocated depending on priority usage, and most programs 268 * only use one, or a handful, of priorities. However, since 269 * this is pinned memory, we need to assume worst case, so 270 * that means the min(mq_maxmsg, max_priorities) * struct 271 * posix_msg_tree_node. 
272 */ 273 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + 274 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * 275 sizeof(struct posix_msg_tree_node); 276 277 mq_bytes = mq_treesize + (info->attr.mq_maxmsg * 278 info->attr.mq_msgsize); 279 280 spin_lock(&mq_lock); 281 if (u->mq_bytes + mq_bytes < u->mq_bytes || 282 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { 283 spin_unlock(&mq_lock); 284 /* mqueue_evict_inode() releases info->messages */ 285 ret = -EMFILE; 286 goto out_inode; 287 } 288 u->mq_bytes += mq_bytes; 289 spin_unlock(&mq_lock); 290 291 /* all is ok */ 292 info->user = get_uid(u); 293 } else if (S_ISDIR(mode)) { 294 inc_nlink(inode); 295 /* Some things misbehave if size == 0 on a directory */ 296 inode->i_size = 2 * DIRENT_SIZE; 297 inode->i_op = &mqueue_dir_inode_operations; 298 inode->i_fop = &simple_dir_operations; 299 } 300 301 return inode; 302 out_inode: 303 iput(inode); 304 err: 305 return ERR_PTR(ret); 306 } 307 308 static int mqueue_fill_super(struct super_block *sb, void *data, int silent) 309 { 310 struct inode *inode; 311 struct ipc_namespace *ns = data; 312 313 sb->s_blocksize = PAGE_CACHE_SIZE; 314 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 315 sb->s_magic = MQUEUE_MAGIC; 316 sb->s_op = &mqueue_super_ops; 317 318 inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); 319 if (IS_ERR(inode)) 320 return PTR_ERR(inode); 321 322 sb->s_root = d_make_root(inode); 323 if (!sb->s_root) 324 return -ENOMEM; 325 return 0; 326 } 327 328 static struct dentry *mqueue_mount(struct file_system_type *fs_type, 329 int flags, const char *dev_name, 330 void *data) 331 { 332 if (!(flags & MS_KERNMOUNT)) { 333 struct ipc_namespace *ns = current->nsproxy->ipc_ns; 334 /* Don't allow mounting unless the caller has CAP_SYS_ADMIN 335 * over the ipc namespace. 
336 */ 337 if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) 338 return ERR_PTR(-EPERM); 339 340 data = ns; 341 } 342 return mount_ns(fs_type, flags, data, mqueue_fill_super); 343 } 344 345 static void init_once(void *foo) 346 { 347 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; 348 349 inode_init_once(&p->vfs_inode); 350 } 351 352 static struct inode *mqueue_alloc_inode(struct super_block *sb) 353 { 354 struct mqueue_inode_info *ei; 355 356 ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); 357 if (!ei) 358 return NULL; 359 return &ei->vfs_inode; 360 } 361 362 static void mqueue_i_callback(struct rcu_head *head) 363 { 364 struct inode *inode = container_of(head, struct inode, i_rcu); 365 kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); 366 } 367 368 static void mqueue_destroy_inode(struct inode *inode) 369 { 370 call_rcu(&inode->i_rcu, mqueue_i_callback); 371 } 372 373 static void mqueue_evict_inode(struct inode *inode) 374 { 375 struct mqueue_inode_info *info; 376 struct user_struct *user; 377 unsigned long mq_bytes, mq_treesize; 378 struct ipc_namespace *ipc_ns; 379 struct msg_msg *msg; 380 381 clear_inode(inode); 382 383 if (S_ISDIR(inode->i_mode)) 384 return; 385 386 ipc_ns = get_ns_from_inode(inode); 387 info = MQUEUE_I(inode); 388 spin_lock(&info->lock); 389 while ((msg = msg_get(info)) != NULL) 390 free_msg(msg); 391 kfree(info->node_cache); 392 spin_unlock(&info->lock); 393 394 /* Total amount of bytes accounted for the mqueue */ 395 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + 396 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * 397 sizeof(struct posix_msg_tree_node); 398 399 mq_bytes = mq_treesize + (info->attr.mq_maxmsg * 400 info->attr.mq_msgsize); 401 402 user = info->user; 403 if (user) { 404 spin_lock(&mq_lock); 405 user->mq_bytes -= mq_bytes; 406 /* 407 * get_ns_from_inode() ensures that the 408 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns 409 * to which we now hold a reference, or it is NULL. 410 * We can't put it here under mq_lock, though. 
411 */ 412 if (ipc_ns) 413 ipc_ns->mq_queues_count--; 414 spin_unlock(&mq_lock); 415 free_uid(user); 416 } 417 if (ipc_ns) 418 put_ipc_ns(ipc_ns); 419 } 420 421 static int mqueue_create(struct inode *dir, struct dentry *dentry, 422 umode_t mode, bool excl) 423 { 424 struct inode *inode; 425 struct mq_attr *attr = dentry->d_fsdata; 426 int error; 427 struct ipc_namespace *ipc_ns; 428 429 spin_lock(&mq_lock); 430 ipc_ns = __get_ns_from_inode(dir); 431 if (!ipc_ns) { 432 error = -EACCES; 433 goto out_unlock; 434 } 435 436 if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && 437 !capable(CAP_SYS_RESOURCE)) { 438 error = -ENOSPC; 439 goto out_unlock; 440 } 441 ipc_ns->mq_queues_count++; 442 spin_unlock(&mq_lock); 443 444 inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); 445 if (IS_ERR(inode)) { 446 error = PTR_ERR(inode); 447 spin_lock(&mq_lock); 448 ipc_ns->mq_queues_count--; 449 goto out_unlock; 450 } 451 452 put_ipc_ns(ipc_ns); 453 dir->i_size += DIRENT_SIZE; 454 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; 455 456 d_instantiate(dentry, inode); 457 dget(dentry); 458 return 0; 459 out_unlock: 460 spin_unlock(&mq_lock); 461 if (ipc_ns) 462 put_ipc_ns(ipc_ns); 463 return error; 464 } 465 466 static int mqueue_unlink(struct inode *dir, struct dentry *dentry) 467 { 468 struct inode *inode = d_inode(dentry); 469 470 dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; 471 dir->i_size -= DIRENT_SIZE; 472 drop_nlink(inode); 473 dput(dentry); 474 return 0; 475 } 476 477 /* 478 * This is routine for system read from queue file. 479 * To avoid mess with doing here some sort of mq_receive we allow 480 * to read only queue size & notification info (the only values 481 * that are interesting from user point of view and aren't accessible 482 * through std routines) 483 */ 484 static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, 485 size_t count, loff_t *off) 486 { 487 struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); 488 char buffer[FILENT_SIZE]; 489 ssize_t ret; 490 491 spin_lock(&info->lock); 492 snprintf(buffer, sizeof(buffer), 493 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n", 494 info->qsize, 495 info->notify_owner ? info->notify.sigev_notify : 0, 496 (info->notify_owner && 497 info->notify.sigev_notify == SIGEV_SIGNAL) ? 
/*
 * This is the routine for reads from a queue file.  To avoid having to do
 * some sort of mq_receive here, we allow reading only the queue size and
 * the notification info (the only values that are interesting from the
 * user's point of view and not accessible through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME;
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep.  Caller must hold queue lock.  After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is used only to split up an overly long
 * sys_mq_timedsend.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when there is a registered process, no
	 * process is waiting synchronously for the message, AND the state
	 * of the queue changed from empty to not empty.  Here we are sure
	 * that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
			struct path *path, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			return ERR_PTR(ret);
		/* store for use during create */
		path->dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			return ERR_PTR(ret);
	}

	mode &= ~current_umask();
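	/*
	 * vfs_create() ends up in mqueue_create() via the directory inode's
	 * ->create op, which picks the attributes back up from
	 * path->dentry->d_fsdata as set above.
	 */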
	ret = vfs_create(dir, path->dentry, mode, true);
	path->dentry->d_fsdata = NULL;
	if (ret)
		return ERR_PTR(ret);
	return dentry_open(path, oflag, cred);
}

/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);
	acc = oflag2acc[oflag & O_ACCMODE];
	if (inode_permission(d_inode(path->dentry), acc))
		return ERR_PTR(-EACCES);
	return dentry_open(path, oflag, current_cred());
}
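/*
 * Typical userspace entry point is the mq_open(3) wrapper; an illustrative
 * call (a NULL attr means the per-namespace defaults apply):
 *
 *	mqd_t mqd = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, NULL);
 */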
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct path path;
	struct file *filp;
	struct filename *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	int ro;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	error = 0;
	mutex_lock(&d_inode(root)->i_mutex);
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);

	if (oflag & O_CREAT) {
		if (d_really_is_positive(path.dentry)) {	/* entry already exists */
			audit_inode(name, path.dentry, 0);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(&path, oflag);
		} else {
			if (ro) {
				error = ro;
				goto out;
			}
			audit_inode_parent_hidden(name, root);
			filp = do_create(ipc_ns, d_inode(root),
						&path, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (d_really_is_negative(path.dentry)) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, path.dentry, 0);
		filp = do_open(&path, oflag);
	}

	if (!IS_ERR(filp))
		fd_install(fd, filp);
	else
		error = PTR_ERR(filp);
out:
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	mutex_unlock(&d_inode(root)->i_mutex);
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	mutex_lock_nested(&d_inode(mnt->mnt_root)->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	mutex_unlock(&d_inode(mnt->mnt_root)->i_mutex);
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add such
	 * that we can ensure that updating receiver->state is the last
	 * write operation: As once set, the receiver can continue,
	 * and if we don't have the reference count from the wake_q,
	 * yet, at that point we can later have a use-after-free
	 * condition and bogus wakeup.
	 */
	receiver->state = STATE_READY;
}
937 */ 938 receiver->state = STATE_READY; 939 } 940 941 /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() 942 * gets its message and put to the queue (we have one free place for sure). */ 943 static inline void pipelined_receive(struct wake_q_head *wake_q, 944 struct mqueue_inode_info *info) 945 { 946 struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND); 947 948 if (!sender) { 949 /* for poll */ 950 wake_up_interruptible(&info->wait_q); 951 return; 952 } 953 if (msg_insert(sender->msg, info)) 954 return; 955 956 list_del(&sender->list); 957 wake_q_add(wake_q, sender->task); 958 sender->state = STATE_READY; 959 } 960 961 SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, 962 size_t, msg_len, unsigned int, msg_prio, 963 const struct timespec __user *, u_abs_timeout) 964 { 965 struct fd f; 966 struct inode *inode; 967 struct ext_wait_queue wait; 968 struct ext_wait_queue *receiver; 969 struct msg_msg *msg_ptr; 970 struct mqueue_inode_info *info; 971 ktime_t expires, *timeout = NULL; 972 struct timespec ts; 973 struct posix_msg_tree_node *new_leaf = NULL; 974 int ret = 0; 975 WAKE_Q(wake_q); 976 977 if (u_abs_timeout) { 978 int res = prepare_timeout(u_abs_timeout, &expires, &ts); 979 if (res) 980 return res; 981 timeout = &expires; 982 } 983 984 if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) 985 return -EINVAL; 986 987 audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL); 988 989 f = fdget(mqdes); 990 if (unlikely(!f.file)) { 991 ret = -EBADF; 992 goto out; 993 } 994 995 inode = file_inode(f.file); 996 if (unlikely(f.file->f_op != &mqueue_file_operations)) { 997 ret = -EBADF; 998 goto out_fput; 999 } 1000 info = MQUEUE_I(inode); 1001 audit_file(f.file); 1002 1003 if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { 1004 ret = -EBADF; 1005 goto out_fput; 1006 } 1007 1008 if (unlikely(msg_len > info->attr.mq_msgsize)) { 1009 ret = -EMSGSIZE; 1010 goto out_fput; 1011 } 1012 1013 /* First try to allocate memory, before doing anything with 1014 * existing queues. */ 1015 msg_ptr = load_msg(u_msg_ptr, msg_len); 1016 if (IS_ERR(msg_ptr)) { 1017 ret = PTR_ERR(msg_ptr); 1018 goto out_fput; 1019 } 1020 msg_ptr->m_ts = msg_len; 1021 msg_ptr->m_type = msg_prio; 1022 1023 /* 1024 * msg_insert really wants us to have a valid, spare node struct so 1025 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will 1026 * fall back to that if necessary. 
1027 */ 1028 if (!info->node_cache) 1029 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); 1030 1031 spin_lock(&info->lock); 1032 1033 if (!info->node_cache && new_leaf) { 1034 /* Save our speculative allocation into the cache */ 1035 INIT_LIST_HEAD(&new_leaf->msg_list); 1036 info->node_cache = new_leaf; 1037 info->qsize += sizeof(*new_leaf); 1038 new_leaf = NULL; 1039 } else { 1040 kfree(new_leaf); 1041 } 1042 1043 if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { 1044 if (f.file->f_flags & O_NONBLOCK) { 1045 ret = -EAGAIN; 1046 } else { 1047 wait.task = current; 1048 wait.msg = (void *) msg_ptr; 1049 wait.state = STATE_NONE; 1050 ret = wq_sleep(info, SEND, timeout, &wait); 1051 /* 1052 * wq_sleep must be called with info->lock held, and 1053 * returns with the lock released 1054 */ 1055 goto out_free; 1056 } 1057 } else { 1058 receiver = wq_get_first_waiter(info, RECV); 1059 if (receiver) { 1060 pipelined_send(&wake_q, info, msg_ptr, receiver); 1061 } else { 1062 /* adds message to the queue */ 1063 ret = msg_insert(msg_ptr, info); 1064 if (ret) 1065 goto out_unlock; 1066 __do_notify(info); 1067 } 1068 inode->i_atime = inode->i_mtime = inode->i_ctime = 1069 CURRENT_TIME; 1070 } 1071 out_unlock: 1072 spin_unlock(&info->lock); 1073 wake_up_q(&wake_q); 1074 out_free: 1075 if (ret) 1076 free_msg(msg_ptr); 1077 out_fput: 1078 fdput(f); 1079 out: 1080 return ret; 1081 } 1082 1083 SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, 1084 size_t, msg_len, unsigned int __user *, u_msg_prio, 1085 const struct timespec __user *, u_abs_timeout) 1086 { 1087 ssize_t ret; 1088 struct msg_msg *msg_ptr; 1089 struct fd f; 1090 struct inode *inode; 1091 struct mqueue_inode_info *info; 1092 struct ext_wait_queue wait; 1093 ktime_t expires, *timeout = NULL; 1094 struct timespec ts; 1095 struct posix_msg_tree_node *new_leaf = NULL; 1096 1097 if (u_abs_timeout) { 1098 int res = prepare_timeout(u_abs_timeout, &expires, &ts); 1099 if (res) 1100 return res; 1101 timeout = &expires; 1102 } 1103 1104 audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL); 1105 1106 f = fdget(mqdes); 1107 if (unlikely(!f.file)) { 1108 ret = -EBADF; 1109 goto out; 1110 } 1111 1112 inode = file_inode(f.file); 1113 if (unlikely(f.file->f_op != &mqueue_file_operations)) { 1114 ret = -EBADF; 1115 goto out_fput; 1116 } 1117 info = MQUEUE_I(inode); 1118 audit_file(f.file); 1119 1120 if (unlikely(!(f.file->f_mode & FMODE_READ))) { 1121 ret = -EBADF; 1122 goto out_fput; 1123 } 1124 1125 /* checks if buffer is big enough */ 1126 if (unlikely(msg_len < info->attr.mq_msgsize)) { 1127 ret = -EMSGSIZE; 1128 goto out_fput; 1129 } 1130 1131 /* 1132 * msg_insert really wants us to have a valid, spare node struct so 1133 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will 1134 * fall back to that if necessary. 
1135 */ 1136 if (!info->node_cache) 1137 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); 1138 1139 spin_lock(&info->lock); 1140 1141 if (!info->node_cache && new_leaf) { 1142 /* Save our speculative allocation into the cache */ 1143 INIT_LIST_HEAD(&new_leaf->msg_list); 1144 info->node_cache = new_leaf; 1145 info->qsize += sizeof(*new_leaf); 1146 } else { 1147 kfree(new_leaf); 1148 } 1149 1150 if (info->attr.mq_curmsgs == 0) { 1151 if (f.file->f_flags & O_NONBLOCK) { 1152 spin_unlock(&info->lock); 1153 ret = -EAGAIN; 1154 } else { 1155 wait.task = current; 1156 wait.state = STATE_NONE; 1157 ret = wq_sleep(info, RECV, timeout, &wait); 1158 msg_ptr = wait.msg; 1159 } 1160 } else { 1161 WAKE_Q(wake_q); 1162 1163 msg_ptr = msg_get(info); 1164 1165 inode->i_atime = inode->i_mtime = inode->i_ctime = 1166 CURRENT_TIME; 1167 1168 /* There is now free space in queue. */ 1169 pipelined_receive(&wake_q, info); 1170 spin_unlock(&info->lock); 1171 wake_up_q(&wake_q); 1172 ret = 0; 1173 } 1174 if (ret == 0) { 1175 ret = msg_ptr->m_ts; 1176 1177 if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) || 1178 store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) { 1179 ret = -EFAULT; 1180 } 1181 free_msg(msg_ptr); 1182 } 1183 out_fput: 1184 fdput(f); 1185 out: 1186 return ret; 1187 } 1188 1189 /* 1190 * Notes: the case when user wants us to deregister (with NULL as pointer) 1191 * and he isn't currently owner of notification, will be silently discarded. 1192 * It isn't explicitly defined in the POSIX. 1193 */ 1194 SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, 1195 const struct sigevent __user *, u_notification) 1196 { 1197 int ret; 1198 struct fd f; 1199 struct sock *sock; 1200 struct inode *inode; 1201 struct sigevent notification; 1202 struct mqueue_inode_info *info; 1203 struct sk_buff *nc; 1204 1205 if (u_notification) { 1206 if (copy_from_user(¬ification, u_notification, 1207 sizeof(struct sigevent))) 1208 return -EFAULT; 1209 } 1210 1211 audit_mq_notify(mqdes, u_notification ? ¬ification : NULL); 1212 1213 nc = NULL; 1214 sock = NULL; 1215 if (u_notification != NULL) { 1216 if (unlikely(notification.sigev_notify != SIGEV_NONE && 1217 notification.sigev_notify != SIGEV_SIGNAL && 1218 notification.sigev_notify != SIGEV_THREAD)) 1219 return -EINVAL; 1220 if (notification.sigev_notify == SIGEV_SIGNAL && 1221 !valid_signal(notification.sigev_signo)) { 1222 return -EINVAL; 1223 } 1224 if (notification.sigev_notify == SIGEV_THREAD) { 1225 long timeo; 1226 1227 /* create the notify skb */ 1228 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); 1229 if (!nc) { 1230 ret = -ENOMEM; 1231 goto out; 1232 } 1233 if (copy_from_user(nc->data, 1234 notification.sigev_value.sival_ptr, 1235 NOTIFY_COOKIE_LEN)) { 1236 ret = -EFAULT; 1237 goto out; 1238 } 1239 1240 /* TODO: add a header? 
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification.sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = f.file->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&f.file->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
					sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fdput(f);
out:
	return ret;
}
static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);