xref: /linux/fs/notify/inotify/inotify_fsnotify.c (revision 5d4a2e29fba5b2bef95b96a46b338ec4d76fa4fd)
1 /*
2  * fs/inotify_user.c - inotify support for userspace
3  *
4  * Authors:
5  *	John McCutchan	<ttb@tentacle.dhs.org>
6  *	Robert Love	<rml@novell.com>
7  *
8  * Copyright (C) 2005 John McCutchan
9  * Copyright 2006 Hewlett-Packard Development Company, L.P.
10  *
11  * Copyright (C) 2009 Eric Paris <Red Hat Inc>
12  * inotify was largely rewritten to make use of the fsnotify infrastructure
13  *
14  * This program is free software; you can redistribute it and/or modify it
15  * under the terms of the GNU General Public License as published by the
16  * Free Software Foundation; either version 2, or (at your option) any
17  * later version.
18  *
19  * This program is distributed in the hope that it will be useful, but
20  * WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22  * General Public License for more details.
23  */
24 
25 #include <linux/fs.h> /* struct inode */
26 #include <linux/fsnotify_backend.h>
27 #include <linux/inotify.h>
28 #include <linux/path.h> /* struct path */
29 #include <linux/slab.h> /* kmem_* */
30 #include <linux/types.h>
31 #include <linux/sched.h>
32 
33 #include "inotify.h"
34 
/*
 * Deliver one fsnotify event to an inotify group: look up the group's mark
 * on the target inode, tag the event with the watch descriptor (wd), and
 * queue it on the group's notification list.
 *
 * Returns 0 on success (including the benign races noted below) or -ENOMEM
 * if the per-event private data could not be allocated.
 */
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	struct inode *to_tell;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int wd, ret;

	to_tell = event->to_tell;

	/* i_lock protects the inode's mark list during the lookup */
	spin_lock(&to_tell->i_lock);
	entry = fsnotify_find_mark_entry(group, to_tell);
	spin_unlock(&to_tell->i_lock);
	/* race with watch removal?  We already passed should_send */
	if (unlikely(!entry))
		return 0;
	ientry = container_of(entry, struct inotify_inode_mark_entry,
			      fsn_entry);
	/* wd is what userspace uses to identify this watch */
	wd = ientry->wd;

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
	if (unlikely(!event_priv))
		return -ENOMEM;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = wd;

	ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
	if (ret) {
		/* queueing failed, so the private data is ours to free */
		inotify_free_event_priv(fsn_event_priv);
		/* EEXIST says we tail matched, EOVERFLOW isn't something
		 * to report up the stack. */
		if ((ret == -EEXIST) ||
		    (ret == -EOVERFLOW))
			ret = 0;
	}

	/*
	 * If we hold the entry until after the event is on the queue
	 * IN_IGNORED won't be able to pass this event in the queue
	 */
	fsnotify_put_mark(entry);

	return ret;
}
83 
/*
 * fsnotify "freeing_mark" hook: a mark on this group is going away, so send
 * IN_IGNORED to userspace and drop the wd from the group's idr.
 */
static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(entry, group);
}
88 
89 static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
90 {
91 	struct fsnotify_mark_entry *entry;
92 	bool send;
93 
94 	spin_lock(&inode->i_lock);
95 	entry = fsnotify_find_mark_entry(group, inode);
96 	spin_unlock(&inode->i_lock);
97 	if (!entry)
98 		return false;
99 
100 	mask = (mask & ~FS_EVENT_ON_CHILD);
101 	send = (entry->mask & mask);
102 
103 	/* find took a reference */
104 	fsnotify_put_mark(entry);
105 
106 	return send;
107 }
108 
109 /*
110  * This is NEVER supposed to be called.  Inotify marks should either have been
111  * removed from the idr when the watch was removed or in the
112  * fsnotify_destroy_mark_by_group() call when the inotify instance was being
113  * torn down.  This is only called if the idr is about to be freed but there
114  * are still marks in it.
115  */
116 static int idr_callback(int id, void *p, void *data)
117 {
118 	struct fsnotify_mark_entry *entry;
119 	struct inotify_inode_mark_entry *ientry;
120 	static bool warned = false;
121 
122 	if (warned)
123 		return 0;
124 
125 	warned = true;
126 	entry = p;
127 	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
128 
129 	WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
130 		"idr.  Probably leaking memory\n", id, p, data);
131 
132 	/*
133 	 * I'm taking the liberty of assuming that the mark in question is a
134 	 * valid address and I'm dereferencing it.  This might help to figure
135 	 * out why we got here and the panic is no worse than the original
136 	 * BUG() that was here.
137 	 */
138 	if (entry)
139 		printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
140 			entry->group, entry->inode, ientry->wd);
141 	return 0;
142 }
143 
/*
 * fsnotify "free_group_priv" hook: tear down the inotify-specific state
 * (the wd idr and the accounted user) when the group is destroyed.
 */
static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the WARN in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_remove_all(&group->inotify_data.idr);
	idr_destroy(&group->inotify_data.idr);
	free_uid(group->inotify_data.user);
}
152 
153 void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
154 {
155 	struct inotify_event_private_data *event_priv;
156 
157 
158 	event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
159 				  fsnotify_event_priv_data);
160 
161 	kmem_cache_free(event_priv_cachep, event_priv);
162 }
163 
/* fsnotify backend operations implemented by inotify */
const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_event = inotify_handle_event,
	.should_send_event = inotify_should_send_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event_priv = inotify_free_event_priv,
	.freeing_mark = inotify_freeing_mark,
};
171