/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h>		/* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h>		/* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h>	/* roundup() */
#include <linux/magic.h>	/* superblock magic number */
#include <linux/mount.h>	/* mntget */
#include <linux/namei.h>	/* LOOKUP_FOLLOW */
#include <linux/path.h>		/* struct path */
#include <linux/sched.h>	/* struct user */
#include <linux/slab.h>		/* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>

static struct vfsmount *inotify_mnt __read_mostly;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
/*
 * When inotify registers a new group it increments this and uses that
 * value as an offset to set the fsnotify group "name" and priority.
 */
static atomic_t inotify_grp_num;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* every mask should accept its own ignored and care about children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* pull the event bits (and IN_ONESHOT) out of the userspace arg */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

/*
 * Get the next fsnotify event if one exists and is small enough to
 * fit in "count".  Return an error pointer if "count" is too small to
 * hold the event.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	if (event->name_len)
		event_size += roundup(event->name_len + 1, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}
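/*
 * A worked example of the sizing above (illustrative, not from the original
 * source): sizeof(struct inotify_event) is 16 bytes (four 32-bit fields), so
 * an event for a child named "foo.c" (name_len == 5) pads the name out to
 * roundup(5 + 1, 16) == 16 bytes, and the full record handed to userspace is
 * 16 + 16 == 32 bytes.  A read() with count < 32 would get -EINVAL for this
 * event rather than a truncated record.
 */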
/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len = 0;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/*
	 * round up event->name_len + 1 (the extra byte is for the
	 * terminating '\0') so it is a multiple of event_size.
	 */
	if (event->name_len)
		name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad that pathname out to a multiple of
	 * sizeof(inotify_event) with zeros.  The zeros come from
	 * clear_user() below.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
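/*
 * For orientation, a minimal userspace sketch (illustrative only, not part
 * of the original file) of how the variable-length records built by
 * copy_event_to_user() are walked on the other side of this read().  The
 * watched path is a placeholder and error handling is abridged:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/inotify.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096], *p;
 *		ssize_t n;
 *		int fd = inotify_init();
 *
 *		inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *
 *		n = read(fd, buf, sizeof(buf));	// one or more whole records
 *		for (p = buf; p < buf + n; ) {
 *			struct inotify_event *ev = (struct inotify_event *)p;
 *			// ev->len is the zero-padded name length, possibly 0
 *			printf("wd=%d mask=%x name=%s\n", ev->wd, ev->mask,
 *			       ev->len ? ev->name : "");
 *			p += sizeof(*ev) + ev->len;
 *		}
 *		return 0;
 *	}
 */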
static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct user_struct *user = group->inotify_data.user;

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	atomic_dec(&user->inotify_devs);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			if (event->name_len)
				send_len += roundup(event->name_len + 1,
						    sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark_entry *ientry)
{
	struct idr *idr;
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *found_ientry;
	int wd;

	spin_lock(&group->inotify_data.idr_lock);
	idr = &group->inotify_data.idr;
	wd = ientry->wd;

	if (wd == -1)
		goto out;

	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry))
		goto out;

	found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
	if (unlikely(found_ientry != ientry)) {
		/* We found an entry in the idr with the right wd, but it's
		 * not the entry we were told to remove.  That is a serious
		 * bug somewhere. */
		WARN_ON(1);
		ientry->wd = -1;
		goto out;
	}

	/* One ref for being in the idr, one ref held by the caller */
	BUG_ON(atomic_read(&entry->refcnt) < 2);

	idr_remove(idr, wd);
	ientry->wd = -1;

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(entry);
out:
	spin_unlock(&group->inotify_data.idr_lock);
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct fsnotify_event *ignored_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		return;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
	if (ret)
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

	/* matches the reference taken when the event was created */
	fsnotify_put_event(ignored_event);

	/* remove this entry from the idr */
	inotify_remove_from_idr(group, ientry);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return -ENOENT;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	spin_lock(&entry->lock);

	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	/* return the wd */
	ret = ientry->wd;

	/* match the get from fsnotify_find_mark_entry() */
	fsnotify_put_mark(entry);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark_entry *tmp_ientry;
	__u32 mask;
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_ientry))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
	tmp_ientry->fsn_entry.mask = mask;
	tmp_ientry->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;
retry:
	ret = -ENOMEM;
	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
		goto out_err;

	spin_lock(&group->inotify_data.idr_lock);
	ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
				group->inotify_data.last_wd,
				&tmp_ientry->wd);
	spin_unlock(&group->inotify_data.idr_lock);
	if (ret) {
		/* idr was out of memory, allocate and try again */
		if (ret == -EAGAIN)
			goto retry;
		goto out_err;
	}

	/* we put the mark on the idr, take a reference */
	fsnotify_get_mark(&tmp_ientry->fsn_entry);

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_ientry);
		goto out_err;
	}

	/* update the idr hint, who cares about races, it's just a hint */
	group->inotify_data.last_wd = tmp_ientry->wd;

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new entry */
	ret = tmp_ientry->wd;

	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_ientry->fsn_entry);

	/* if this mark added a new event update the group mask */
	if (mask & ~group->mask)
		fsnotify_recalc_group_mask(group);

out_err:
	if (ret < 0)
		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

retry:
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	/*
	 * inotify_new_watch() could race with another thread which did an
	 * inotify_new_watch() between our update_existing and add-watch
	 * attempts; go back and try to update the existing mark again.
	 */
	if (ret == -EEXIST)
		goto retry;

	return ret;
}
619 */ 620 if (ret == -EEXIST) 621 goto retry; 622 623 return ret; 624 } 625 626 static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events) 627 { 628 struct fsnotify_group *group; 629 unsigned int grp_num; 630 631 /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ 632 grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num)); 633 group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops); 634 if (IS_ERR(group)) 635 return group; 636 637 group->max_events = max_events; 638 639 spin_lock_init(&group->inotify_data.idr_lock); 640 idr_init(&group->inotify_data.idr); 641 group->inotify_data.last_wd = 1; 642 group->inotify_data.user = user; 643 group->inotify_data.fa = NULL; 644 645 return group; 646 } 647 648 649 /* inotify syscalls */ 650 SYSCALL_DEFINE1(inotify_init1, int, flags) 651 { 652 struct fsnotify_group *group; 653 struct user_struct *user; 654 struct file *filp; 655 int fd, ret; 656 657 /* Check the IN_* constants for consistency. */ 658 BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); 659 BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK); 660 661 if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) 662 return -EINVAL; 663 664 fd = get_unused_fd_flags(flags & O_CLOEXEC); 665 if (fd < 0) 666 return fd; 667 668 filp = get_empty_filp(); 669 if (!filp) { 670 ret = -ENFILE; 671 goto out_put_fd; 672 } 673 674 user = get_current_user(); 675 if (unlikely(atomic_read(&user->inotify_devs) >= 676 inotify_max_user_instances)) { 677 ret = -EMFILE; 678 goto out_free_uid; 679 } 680 681 /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ 682 group = inotify_new_group(user, inotify_max_queued_events); 683 if (IS_ERR(group)) { 684 ret = PTR_ERR(group); 685 goto out_free_uid; 686 } 687 688 filp->f_op = &inotify_fops; 689 filp->f_path.mnt = mntget(inotify_mnt); 690 filp->f_path.dentry = dget(inotify_mnt->mnt_root); 691 filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; 692 filp->f_mode = FMODE_READ; 693 filp->f_flags = O_RDONLY | (flags & O_NONBLOCK); 694 filp->private_data = group; 695 696 atomic_inc(&user->inotify_devs); 697 698 fd_install(fd, filp); 699 700 return fd; 701 702 out_free_uid: 703 free_uid(user); 704 put_filp(filp); 705 out_put_fd: 706 put_unused_fd(fd); 707 return ret; 708 } 709 710 SYSCALL_DEFINE0(inotify_init) 711 { 712 return sys_inotify_init1(0); 713 } 714 715 SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, 716 u32, mask) 717 { 718 struct fsnotify_group *group; 719 struct inode *inode; 720 struct path path; 721 struct file *filp; 722 int ret, fput_needed; 723 unsigned flags = 0; 724 725 filp = fget_light(fd, &fput_needed); 726 if (unlikely(!filp)) 727 return -EBADF; 728 729 /* verify that this is indeed an inotify instance */ 730 if (unlikely(filp->f_op != &inotify_fops)) { 731 ret = -EINVAL; 732 goto fput_and_out; 733 } 734 735 if (!(mask & IN_DONT_FOLLOW)) 736 flags |= LOOKUP_FOLLOW; 737 if (mask & IN_ONLYDIR) 738 flags |= LOOKUP_DIRECTORY; 739 740 ret = inotify_find_inode(pathname, &path, flags); 741 if (ret) 742 goto fput_and_out; 743 744 /* inode held in place by reference to path; group by fget on fd */ 745 inode = path.dentry->d_inode; 746 group = filp->private_data; 747 748 /* create/update an inode mark */ 749 ret = inotify_update_watch(group, inode, mask); 750 if (unlikely(ret)) 751 goto path_put_and_out; 752 753 path_put_and_out: 754 path_put(&path); 755 fput_and_out: 756 fput_light(filp, fput_needed); 757 return ret; 
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct fsnotify_mark_entry *entry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	group = filp->private_data;

	spin_lock(&group->inotify_data.idr_lock);
	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry)) {
		spin_unlock(&group->inotify_data.idr_lock);
		ret = -EINVAL;
		goto out;
	}
	fsnotify_get_mark(entry);
	spin_unlock(&group->inotify_data.idr_lock);

	fsnotify_destroy_mark_by_entry(entry);
	fsnotify_put_mark(entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}

static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);
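/*
 * The three limits initialized in inotify_user_setup() are runtime-tunable
 * through procfs via the sysctl table near the top of this file, e.g.
 * /proc/sys/fs/inotify/max_user_watches.  A minimal userspace sketch for
 * reading one of them (illustrative only; error handling abridged):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int max;
 *		FILE *f = fopen("/proc/sys/fs/inotify/max_user_watches", "r");
 *
 *		if (f) {
 *			if (fscanf(f, "%d", &max) == 1)
 *				printf("max watches per user: %d\n", max);
 *			fclose(f);
 *		}
 *		return 0;
 *	}
 */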