/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/dcache.h> /* d_unlinked */
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include <linux/sched.h>

#include "inotify.h"

/*
 * Check if 2 events contain the same information.  We do not compare private
 * data, but at this moment that isn't a problem for any known fsnotify
 * listeners.
 */
static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
{
	if ((old->mask == new->mask) &&
	    (old->to_tell == new->to_tell) &&
	    (old->data_type == new->data_type) &&
	    (old->name_len == new->name_len)) {
		switch (old->data_type) {
		case (FSNOTIFY_EVENT_INODE):
			/* remember, after old was put on the wait_q we aren't
			 * allowed to look at the inode any more, only thing
			 * left to check was if the file_name is the same */
			if (!old->name_len ||
			    !strcmp(old->file_name, new->file_name))
				return true;
			break;
		case (FSNOTIFY_EVENT_PATH):
			if ((old->path.mnt == new->path.mnt) &&
			    (old->path.dentry == new->path.dentry))
				return true;
			break;
		case (FSNOTIFY_EVENT_NONE):
			/* overflow events always merge (so at most one sits on
			 * the queue); FS_IN_IGNORED events are never merged
			 * away */
			if (old->mask & FS_Q_OVERFLOW)
				return true;
			else if (old->mask & FS_IN_IGNORED)
				return false;
			return true;
		};
		/* NOTE(review): the ';' after the switch brace above is
		 * redundant, and data types not handled by the switch fall
		 * through to "return false" below */
	}
	return false;
}

/*
 * Try to coalesce @event with the event currently at the tail of @list.
 * Returns the tail event (with an extra reference taken for the caller) when
 * the two carry the same information, NULL when no merge is possible.
 */
static struct fsnotify_event *inotify_merge(struct list_head *list,
					    struct fsnotify_event *event)
{
	struct fsnotify_event_holder *last_holder;
	struct fsnotify_event *last_event;

	/* and the list better be locked by something too */
	spin_lock(&event->lock);

	last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
	last_event = last_holder->event;
	if (event_compare(last_event, event))
		fsnotify_get_event(last_event);
	else
		last_event = NULL;

	spin_unlock(&event->lock);

	return last_event;
}

/*
 * Deliver @event to an inotify group: tag it with the watch descriptor of the
 * inode mark it hit and queue it on the group's notification list, merging
 * with the queue tail when possible.  An IN_ONESHOT mark is destroyed after
 * delivering its first event.  Returns 0 on success or a negative errno.
 */
static int inotify_handle_event(struct fsnotify_group *group,
				struct fsnotify_mark *inode_mark,
				struct fsnotify_mark *vfsmount_mark,
				struct fsnotify_event *event)
{
	struct inotify_inode_mark *i_mark;
	struct inode *to_tell;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	struct fsnotify_event *added_event;
	int wd, ret = 0;

	/* inotify watches inodes, never mounts */
	BUG_ON(vfsmount_mark);

	pr_debug("%s: group=%p event=%p to_tell=%p mask=%x\n", __func__, group,
		 event, event->to_tell, event->mask);

	to_tell = event->to_tell;

	i_mark = container_of(inode_mark, struct inotify_inode_mark,
			      fsn_mark);
	wd = i_mark->wd;

	/* private data carries the wd that userspace will read back */
	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
	if (unlikely(!event_priv))
		return -ENOMEM;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = wd;

	added_event = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge);
	if (added_event) {
		/* non-NULL means our private data was not consumed: the event
		 * was merged with the queue tail, or the add failed */
		inotify_free_event_priv(fsn_event_priv);
		if (!IS_ERR(added_event))
			/* drop the reference inotify_merge took for us */
			fsnotify_put_event(added_event);
		else
			ret = PTR_ERR(added_event);
	}

	if (inode_mark->mask & IN_ONESHOT)
		fsnotify_destroy_mark(inode_mark);

	return ret;
}

/*
 * ->freeing_mark() callback: sends the IN_IGNORED event and removes the
 * mark's watch descriptor from the group's idr.
 */
static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(fsn_mark, group);
}

/*
 * Event filter: when the watch was added with FS_EXCL_UNLINK, suppress path
 * events for objects that have already been unlinked from their directory.
 */
static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode,
				      struct fsnotify_mark *inode_mark,
				      struct fsnotify_mark *vfsmount_mark,
				      __u32 mask, void *data, int data_type)
{
	if ((inode_mark->mask & FS_EXCL_UNLINK) &&
	    (data_type == FSNOTIFY_EVENT_PATH)) {
		struct path *path = data;

		if (d_unlinked(path->dentry))
			return false;
	}

	return true;
}

/*
 * This is NEVER supposed to be called.  Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down.  This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	static bool warned = false;

	/* only complain about the first leaked mark we stumble across */
	if (warned)
		return 0;

	warned = true;
	fsn_mark = p;
	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
		"idr. Probably leaking memory\n", id, p, data);

	/*
	 * I'm taking the liberty of assuming that the mark in question is a
	 * valid address and I'm dereferencing it.  This might help to figure
	 * out why we got here and the panic is no worse than the original
	 * BUG() that was here.
	 */
	if (fsn_mark)
		printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n",
		       fsn_mark->group, fsn_mark->i.inode, i_mark->wd);
	return 0;
}

/*
 * Tear down the inotify-specific state of @group: flush and destroy the watch
 * descriptor idr (warning about any marks unexpectedly still in it), then
 * release the per-user instance accounting.
 */
static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the BUG in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_remove_all(&group->inotify_data.idr);
	idr_destroy(&group->inotify_data.idr);
	atomic_dec(&group->inotify_data.user->inotify_devs);
	free_uid(group->inotify_data.user);
}

/*
 * Free the inotify per-event private data (which holds the watch descriptor)
 * back to its slab cache.
 */
void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
{
	struct inotify_event_private_data *event_priv;

	event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
				  fsnotify_event_priv_data);

	kmem_cache_free(event_priv_cachep, event_priv);
}

/* fsnotify backend operations implementing inotify semantics */
const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_event = inotify_handle_event,
	.should_send_event = inotify_should_send_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event_priv = inotify_free_event_priv,
	.freeing_mark = inotify_freeing_mark,
};