xref: /linux/fs/notify/fsnotify.c (revision c31f4aa8fed048fa70e742c4bb49bb48dc489ab3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
4  */
5 
6 #include <linux/dcache.h>
7 #include <linux/fs.h>
8 #include <linux/gfp.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/mount.h>
12 #include <linux/srcu.h>
13 
14 #include <linux/fsnotify_backend.h>
15 #include "fsnotify.h"
16 
17 /*
18  * Clear all of the marks on an inode when it is being evicted from core
19  */
20 void __fsnotify_inode_delete(struct inode *inode)
21 {
22 	fsnotify_clear_marks_by_inode(inode);
23 }
24 EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);
25 
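/*
 * Clear all of the marks on a mount when it is being destroyed
 */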
26 void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
27 {
28 	fsnotify_clear_marks_by_mount(mnt);
29 }
30 
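/*
 * Clear all of the marks on a mount namespace when it is being deleted
 */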
31 void __fsnotify_mntns_delete(struct mnt_namespace *mntns)
32 {
33 	fsnotify_clear_marks_by_mntns(mntns);
34 }
35 
36 /**
37  * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
38  * @sb: superblock being unmounted.
39  *
40  * Called during unmount with no locks held, so needs to be safe against
41  * concurrent modifiers. We temporarily drop sb->s_inode_list_lock and CAN block.
42  */
43 static void fsnotify_unmount_inodes(struct super_block *sb)
44 {
45 	struct inode *inode, *iput_inode = NULL;
46 
47 	spin_lock(&sb->s_inode_list_lock);
48 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
49 		/*
50 		 * We cannot __iget() an inode in state I_FREEING,
51 		 * I_WILL_FREE, or I_NEW, which is fine because by that point
52 		 * the inode cannot have any associated watches.
53 		 */
54 		spin_lock(&inode->i_lock);
55 		if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW)) {
56 			spin_unlock(&inode->i_lock);
57 			continue;
58 		}
59 
60 		/*
61 		 * If i_count is zero, the inode cannot have any watches and
62 		 * doing an __iget/iput with SB_ACTIVE clear would actually
63 		 * evict all inodes with zero i_count from icache which is
64 		 * unnecessarily violent and may in fact be illegal to do.
65 		 * However, we should have been called /after/ evict_inodes
66 		 * removed all zero refcount inodes, in any case.  Test to
67 		 * be sure.
68 		 */
69 		if (!icount_read(inode)) {
70 			spin_unlock(&inode->i_lock);
71 			continue;
72 		}
73 
74 		__iget(inode);
75 		spin_unlock(&inode->i_lock);
76 		spin_unlock(&sb->s_inode_list_lock);
77 
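		/*
		 * Drop the reference taken on the inode in the previous loop
		 * iteration now that all spinlocks are released; iput() may
		 * block.
		 */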
78 		iput(iput_inode);
79 
80 		/* for each watch, send FS_UNMOUNT and then remove it */
81 		fsnotify_inode(inode, FS_UNMOUNT);
82 
83 		fsnotify_inode_delete(inode);
84 
85 		iput_inode = inode;
86 
87 		cond_resched();
88 		spin_lock(&sb->s_inode_list_lock);
89 	}
90 	spin_unlock(&sb->s_inode_list_lock);
91 
92 	iput(iput_inode);
93 }
94 
95 void fsnotify_sb_delete(struct super_block *sb)
96 {
97 	struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb);
98 
99 	/* Were any marks ever added to any object on this sb? */
100 	if (!sbinfo)
101 		return;
102 
103 	fsnotify_unmount_inodes(sb);
104 	fsnotify_clear_marks_by_sb(sb);
105 	/* Wait for outstanding object references from connectors */
106 	wait_var_event(fsnotify_sb_watched_objects(sb),
107 		       !atomic_long_read(fsnotify_sb_watched_objects(sb)));
108 	WARN_ON(fsnotify_sb_has_priority_watchers(sb, FSNOTIFY_PRIO_CONTENT));
109 	WARN_ON(fsnotify_sb_has_priority_watchers(sb,
110 						  FSNOTIFY_PRIO_PRE_CONTENT));
111 }
112 
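/*
 * Free the fsnotify info, if any, that was allocated for this superblock when
 * the first mark was attached to one of its objects.
 */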
113 void fsnotify_sb_free(struct super_block *sb)
114 {
115 	kfree(sb->s_fsnotify_info);
116 }
117 
118 /*
119  * Given an inode, first check if we care what happens to our children.  Inotify
120  * and dnotify both tell their parents about events.  If we care about any event
121  * on a child we run all of our children and set a dentry flag saying that the
122  * parent cares.  Thus when an event happens on a child it can quickly tell
123  * if there is a need to find a parent and send the event to the parent.
124  */
125 void fsnotify_set_children_dentry_flags(struct inode *inode)
126 {
127 	struct dentry *alias;
128 
129 	if (!S_ISDIR(inode->i_mode))
130 		return;
131 
132 	spin_lock(&inode->i_lock);
133 	/* run all of the dentries associated with this inode.  Since this is a
134 	 * directory, there damn well better only be one item on this list */
135 	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
136 		struct dentry *child;
137 
138 		/* run all of the children of the original inode and fix their
139 		 * d_flags to indicate parental interest (their parent is the
140 		 * original inode) */
141 		spin_lock(&alias->d_lock);
142 		hlist_for_each_entry(child, &alias->d_children, d_sib) {
143 			if (!child->d_inode)
144 				continue;
145 
146 			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
147 			child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
148 			spin_unlock(&child->d_lock);
149 		}
150 		spin_unlock(&alias->d_lock);
151 	}
152 	spin_unlock(&inode->i_lock);
153 }
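
/*
 * The DCACHE_FSNOTIFY_PARENT_WATCHED flag set above is what lets
 * __fsnotify_parent() below take its early-return fast path without
 * dereferencing the parent dentry when nobody is watching.
 */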
154 
155 /*
156  * Lazily clear false positive PARENT_WATCHED flag for child whose parent had
157  * stopped watching children.
158  */
159 static void fsnotify_clear_child_dentry_flag(struct inode *pinode,
160 					     struct dentry *dentry)
161 {
162 	spin_lock(&dentry->d_lock);
163 	/*
164 	 * d_lock is a sufficient barrier to prevent observing a non-watched
165 	 * parent state from before the fsnotify_set_children_dentry_flags()
166 	 * or fsnotify_update_flags() call that had set PARENT_WATCHED.
167 	 */
168 	if (!fsnotify_inode_watches_children(pinode))
169 		dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
170 	spin_unlock(&dentry->d_lock);
171 }
172 
173 /* Are inode/sb/mount interested in parent and name info with this event? */
174 static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask,
175 					__u32 mask)
176 {
177 	__u32 marks_mask = 0;
178 
179 	/* We only send parent/name to inode/sb/mount for events on non-dir */
180 	if (mask & FS_ISDIR)
181 		return false;
182 
183 	/*
184 	 * All events that are possible on child can also be reported with
185 	 * parent/name info to inode/sb/mount.  Otherwise, a watching parent
186 	 * could result in events reported with unexpected name info to sb/mount.
187 	 */
188 	BUILD_BUG_ON(FS_EVENTS_POSS_ON_CHILD & ~FS_EVENTS_POSS_TO_PARENT);
189 
190 	/* Did either inode/sb/mount subscribe for events with parent/name? */
191 	marks_mask |= fsnotify_parent_needed_mask(
192 				READ_ONCE(inode->i_fsnotify_mask));
193 	marks_mask |= fsnotify_parent_needed_mask(
194 				READ_ONCE(inode->i_sb->s_fsnotify_mask));
195 	marks_mask |= fsnotify_parent_needed_mask(mnt_mask);
196 
197 	/* Did they subscribe for this event with parent/name info? */
198 	return mask & marks_mask;
199 }
200 
201 /* Are there any inode/mount/sb objects that watch for these events? */
202 static inline __u32 fsnotify_object_watched(struct inode *inode, __u32 mnt_mask,
203 					    __u32 mask)
204 {
205 	__u32 marks_mask = READ_ONCE(inode->i_fsnotify_mask) | mnt_mask |
206 			   READ_ONCE(inode->i_sb->s_fsnotify_mask);
207 
208 	return mask & marks_mask & ALL_FSNOTIFY_EVENTS;
209 }
210 
211 /* Report pre-content event with optional range info */
212 int fsnotify_pre_content(const struct path *path, const loff_t *ppos,
213 			 size_t count)
214 {
215 	struct file_range range;
216 
217 	/* Report page aligned range only when pos is known */
218 	if (!ppos)
219 		return fsnotify_path(path, FS_PRE_ACCESS);
220 
221 	range.path = path;
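	/*
	 * Round the range out to page boundaries, e.g. with 4K pages a
	 * 100 byte access at offset 5000 is reported as pos 4096, count 4096.
	 */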
222 	range.pos = PAGE_ALIGN_DOWN(*ppos);
223 	range.count = PAGE_ALIGN(*ppos + count) - range.pos;
224 
225 	return fsnotify_parent(path->dentry, FS_PRE_ACCESS, &range,
226 			       FSNOTIFY_EVENT_FILE_RANGE);
227 }
228 
229 /*
230  * Notify this dentry's parent about a child's events with child name info
231  * if parent is watching or if inode/sb/mount are interested in events with
232  * parent and name info.
233  *
234  * Notify only the child without name info if parent is not watching and
235  * inode/sb/mount are not interested in events with parent and name info.
236  */
237 int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
238 		      int data_type)
239 {
240 	const struct path *path = fsnotify_data_path(data, data_type);
241 	__u32 mnt_mask = path ?
242 		READ_ONCE(real_mount(path->mnt)->mnt_fsnotify_mask) : 0;
243 	struct inode *inode = d_inode(dentry);
244 	struct dentry *parent;
245 	bool parent_watched = dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED;
246 	bool parent_needed, parent_interested;
247 	__u32 p_mask;
248 	struct inode *p_inode = NULL;
249 	struct name_snapshot name;
250 	struct qstr *file_name = NULL;
251 	int ret = 0;
252 
253 	/* Optimize the likely case of nobody watching this path */
254 	if (likely(!parent_watched &&
255 		   !fsnotify_object_watched(inode, mnt_mask, mask)))
256 		return 0;
257 
258 	parent = NULL;
259 	parent_needed = fsnotify_event_needs_parent(inode, mnt_mask, mask);
260 	if (!parent_watched && !parent_needed)
261 		goto notify;
262 
263 	/* Does parent inode care about events on children? */
264 	parent = dget_parent(dentry);
265 	p_inode = parent->d_inode;
266 	p_mask = fsnotify_inode_watches_children(p_inode);
267 	if (unlikely(parent_watched && !p_mask))
268 		fsnotify_clear_child_dentry_flag(p_inode, dentry);
269 
270 	/*
271 	 * Include parent/name in the notification either if some notification
272 	 * groups require parent info or if the parent is interested in this event.
273 	 * The parent interest in ACCESS/MODIFY events does not apply to special
274 	 * files, where read/write are not on the filesystem of the parent and
275 	 * events can provide an undesirable side-channel for information
276 	 * exfiltration.
277 	 */
278 	parent_interested = mask & p_mask & ALL_FSNOTIFY_EVENTS &&
279 			    !(data_type == FSNOTIFY_EVENT_PATH &&
280 			      d_is_special(dentry) &&
281 			      (mask & (FS_ACCESS | FS_MODIFY)));
282 	if (parent_needed || parent_interested) {
283 		/* When notifying parent, child should be passed as data */
284 		WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type));
285 
286 		/* Notify both parent and child with child name info */
287 		take_dentry_name_snapshot(&name, dentry);
288 		file_name = &name.name;
289 		if (parent_interested)
290 			mask |= FS_EVENT_ON_CHILD;
291 	}
292 
293 notify:
294 	ret = fsnotify(mask, data, data_type, p_inode, file_name, inode, 0);
295 
296 	if (file_name)
297 		release_dentry_name_snapshot(&name);
298 	dput(parent);
299 
300 	return ret;
301 }
302 EXPORT_SYMBOL_GPL(__fsnotify_parent);
303 
304 static int fsnotify_handle_inode_event(struct fsnotify_group *group,
305 				       struct fsnotify_mark *inode_mark,
306 				       u32 mask, const void *data, int data_type,
307 				       struct inode *dir, const struct qstr *name,
308 				       u32 cookie)
309 {
310 	const struct path *path = fsnotify_data_path(data, data_type);
311 	struct inode *inode = fsnotify_data_inode(data, data_type);
312 	const struct fsnotify_ops *ops = group->ops;
313 
314 	if (WARN_ON_ONCE(!ops->handle_inode_event))
315 		return 0;
316 
317 	if (WARN_ON_ONCE(!inode && !dir))
318 		return 0;
319 
320 	if ((inode_mark->flags & FSNOTIFY_MARK_FLAG_EXCL_UNLINK) &&
321 	    path && d_unlinked(path->dentry))
322 		return 0;
323 
324 	/* Check interest of this mark in case event was sent with two marks */
325 	if (!(mask & inode_mark->mask & ALL_FSNOTIFY_EVENTS))
326 		return 0;
327 
328 	return ops->handle_inode_event(inode_mark, mask, inode, dir, name, cookie);
329 }
330 
331 static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask,
332 				 const void *data, int data_type,
333 				 struct inode *dir, const struct qstr *name,
334 				 u32 cookie, struct fsnotify_iter_info *iter_info)
335 {
336 	struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
337 	struct fsnotify_mark *parent_mark = fsnotify_iter_parent_mark(iter_info);
338 	int ret;
339 
340 	if (WARN_ON_ONCE(fsnotify_iter_sb_mark(iter_info)) ||
341 	    WARN_ON_ONCE(fsnotify_iter_vfsmount_mark(iter_info)))
342 		return 0;
343 
344 	/*
345 	 * For FS_RENAME, 'dir' is old dir and 'data' is new dentry.
346 	 * The only ->handle_inode_event() backend that supports FS_RENAME is
347 	 * dnotify, where it means file was renamed within same parent.
348 	 */
349 	if (mask & FS_RENAME) {
350 		struct dentry *moved = fsnotify_data_dentry(data, data_type);
351 
352 		if (dir != moved->d_parent->d_inode)
353 			return 0;
354 	}
355 
356 	if (parent_mark) {
357 		ret = fsnotify_handle_inode_event(group, parent_mark, mask,
358 						  data, data_type, dir, name, 0);
359 		if (ret)
360 			return ret;
361 	}
362 
363 	if (!inode_mark)
364 		return 0;
365 
366 	/*
367 	 * Some events can be sent on both parent dir and child marks (e.g.
368 	 * FS_ATTRIB).  If both parent dir and child are watching, report the
369 	 * event once to parent dir with name (if interested) and once to child
370 	 * without name (if interested).
371 	 *
372 	 * In any case, regardless of whether the parent is watching or not, the
373 	 * child watcher is expecting an event without the FS_EVENT_ON_CHILD
374 	 * flag. The file name is expected if and only if this is a directory
375 	 * event.
376 	 */
377 	mask &= ~FS_EVENT_ON_CHILD;
378 	if (!(mask & ALL_FSNOTIFY_DIRENT_EVENTS)) {
379 		dir = NULL;
380 		name = NULL;
381 	}
382 
383 	return fsnotify_handle_inode_event(group, inode_mark, mask, data, data_type,
384 					   dir, name, cookie);
385 }
386 
387 static int send_to_group(__u32 mask, const void *data, int data_type,
388 			 struct inode *dir, const struct qstr *file_name,
389 			 u32 cookie, struct fsnotify_iter_info *iter_info)
390 {
391 	struct fsnotify_group *group = NULL;
392 	__u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS);
393 	__u32 marks_mask = 0;
394 	__u32 marks_ignore_mask = 0;
395 	bool is_dir = mask & FS_ISDIR;
396 	struct fsnotify_mark *mark;
397 	int type;
398 
399 	if (!iter_info->report_mask)
400 		return 0;
401 
402 	/* clear ignored on inode modification */
403 	if (mask & FS_MODIFY) {
404 		fsnotify_foreach_iter_mark_type(iter_info, mark, type) {
405 			if (!(mark->flags &
406 			      FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
407 				mark->ignore_mask = 0;
408 		}
409 	}
410 
411 	/* Are any of the group marks interested in this event? */
412 	fsnotify_foreach_iter_mark_type(iter_info, mark, type) {
413 		group = mark->group;
414 		marks_mask |= mark->mask;
415 		marks_ignore_mask |=
416 			fsnotify_effective_ignore_mask(mark, is_dir, type);
417 	}
418 
419 	pr_debug("%s: group=%p mask=%x marks_mask=%x marks_ignore_mask=%x data=%p data_type=%d dir=%p cookie=%d\n",
420 		 __func__, group, mask, marks_mask, marks_ignore_mask,
421 		 data, data_type, dir, cookie);
422 
423 	if (!(test_mask & marks_mask & ~marks_ignore_mask))
424 		return 0;
425 
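	/*
	 * Groups that provide a ->handle_event() backend get the event with
	 * the full iterator info; otherwise fall back to the simplified
	 * per-mark ->handle_inode_event() interface.
	 */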
426 	if (group->ops->handle_event) {
427 		return group->ops->handle_event(group, mask, data, data_type, dir,
428 						file_name, cookie, iter_info);
429 	}
430 
431 	return fsnotify_handle_event(group, mask, data, data_type, dir,
432 				     file_name, cookie, iter_info);
433 }
434 
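/*
 * SRCU protected helpers for walking the list of marks attached to an object
 * via its mark connector.
 */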
435 static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector *const *connp)
436 {
437 	struct fsnotify_mark_connector *conn;
438 	struct hlist_node *node = NULL;
439 
440 	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
441 	if (conn)
442 		node = srcu_dereference(conn->list.first, &fsnotify_mark_srcu);
443 
444 	return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
445 }
446 
447 static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
448 {
449 	struct hlist_node *node = NULL;
450 
451 	if (mark)
452 		node = srcu_dereference(mark->obj_list.next,
453 					&fsnotify_mark_srcu);
454 
455 	return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
456 }
457 
458 /*
459  * iter_info is a multi head priority queue of marks.
460  * Pick a subset of marks from queue heads, all with the same group
461  * and set the report_mask to a subset of the selected marks.
462  * Returns false if there are no more groups to iterate.
463  */
464 static bool fsnotify_iter_select_report_types(
465 		struct fsnotify_iter_info *iter_info)
466 {
467 	struct fsnotify_group *max_prio_group = NULL;
468 	struct fsnotify_mark *mark;
469 	int type;
470 
471 	/* Choose max prio group among groups of all queue heads */
472 	fsnotify_foreach_iter_type(type) {
473 		mark = iter_info->marks[type];
474 		if (mark &&
475 		    fsnotify_compare_groups(max_prio_group, mark->group) > 0)
476 			max_prio_group = mark->group;
477 	}
478 
479 	if (!max_prio_group)
480 		return false;
481 
482 	/* Set the report mask for marks from same group as max prio group */
483 	iter_info->current_group = max_prio_group;
484 	iter_info->report_mask = 0;
485 	fsnotify_foreach_iter_type(type) {
486 		mark = iter_info->marks[type];
487 		if (mark && mark->group == iter_info->current_group) {
488 			/*
489 			 * FSNOTIFY_ITER_TYPE_PARENT indicates that this inode
490 			 * is watching children and interested in this event,
491 			 * which is an event possible on child.
492 			 * But is *this mark* watching children?
493 			 */
494 			if (type == FSNOTIFY_ITER_TYPE_PARENT &&
495 			    !(mark->mask & FS_EVENT_ON_CHILD) &&
496 			    !(fsnotify_ignore_mask(mark) & FS_EVENT_ON_CHILD))
497 				continue;
498 
499 			fsnotify_iter_set_report_type(iter_info, type);
500 		}
501 	}
502 
503 	return true;
504 }
505 
506 /*
507  * Pop from iter_info multi head queue, the marks that belong to the group of
508  * current iteration step.
509  */
510 static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info)
511 {
512 	struct fsnotify_mark *mark;
513 	int type;
514 
515 	/*
516 	 * We cannot use fsnotify_foreach_iter_mark_type() here because we
517 	 * may need to advance a mark of type X that belongs to current_group
518 	 * but was not selected for reporting.
519 	 */
520 	fsnotify_foreach_iter_type(type) {
521 		mark = iter_info->marks[type];
522 		if (mark && mark->group == iter_info->current_group)
523 			iter_info->marks[type] =
524 				fsnotify_next_mark(iter_info->marks[type]);
525 	}
526 }
527 
528 /*
529  * fsnotify - This is the main call to fsnotify.
530  *
531  * The VFS calls into hook specific functions in linux/fsnotify.h.
532  * Those functions then in turn call here.  This function calls out to all of the
533  * registered fsnotify_groups.  Those groups can then use the notification event
534  * in whatever way they see fit.
535  *
536  * @mask:	event type and flags
537  * @data:	object that event happened on
538  * @data_type:	type of object for fsnotify_data_XXX() accessors
539  * @dir:	optional directory associated with event -
540  *		if @file_name is not NULL, this is the directory that
541  *		@file_name is relative to
542  * @file_name:	optional file name associated with event
543  * @inode:	optional inode associated with event -
544  *		If @dir and @inode are both non-NULL, event may be
545  *		reported to both.
546  * @cookie:	inotify rename cookie
547  */
548 int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
549 	     const struct qstr *file_name, struct inode *inode, u32 cookie)
550 {
551 	const struct path *path = fsnotify_data_path(data, data_type);
552 	struct super_block *sb = fsnotify_data_sb(data, data_type);
553 	const struct fsnotify_mnt *mnt_data = fsnotify_data_mnt(data, data_type);
554 	struct fsnotify_sb_info *sbinfo = sb ? fsnotify_sb_info(sb) : NULL;
555 	struct fsnotify_iter_info iter_info = {};
556 	struct mount *mnt = NULL;
557 	struct inode *inode2 = NULL;
558 	struct dentry *moved;
559 	int inode2_type;
560 	int ret = 0;
561 	__u32 test_mask, marks_mask = 0;
562 
563 	if (path)
564 		mnt = real_mount(path->mnt);
565 
566 	if (!inode) {
567 		/* Dirent event - report on TYPE_INODE to dir */
568 		inode = dir;
569 		/* For FS_RENAME, inode is old_dir and inode2 is new_dir */
570 		if (mask & FS_RENAME) {
571 			moved = fsnotify_data_dentry(data, data_type);
572 			inode2 = moved->d_parent->d_inode;
573 			inode2_type = FSNOTIFY_ITER_TYPE_INODE2;
574 		}
575 	} else if (mask & FS_EVENT_ON_CHILD) {
576 		/*
577 		 * Event on child - report on TYPE_PARENT to dir if it is
578 		 * watching children and on TYPE_INODE to child.
579 		 */
580 		inode2 = dir;
581 		inode2_type = FSNOTIFY_ITER_TYPE_PARENT;
582 	}
583 
584 	/*
585 	 * Optimization: srcu_read_lock() has a memory barrier which can
586 	 * be expensive.  It protects walking the *_fsnotify_marks lists.
587 	 * However, if we do not walk the lists, we do not have to do
588 	 * SRCU because we have no references to any objects and do not
589 	 * need SRCU to keep them "alive".
590 	 */
591 	if ((!sbinfo || !sbinfo->sb_marks) &&
592 	    (!mnt || !mnt->mnt_fsnotify_marks) &&
593 	    (!inode || !inode->i_fsnotify_marks) &&
594 	    (!inode2 || !inode2->i_fsnotify_marks) &&
595 	    (!mnt_data || !mnt_data->ns->n_fsnotify_marks))
596 		return 0;
597 
598 	if (sb)
599 		marks_mask |= READ_ONCE(sb->s_fsnotify_mask);
600 	if (mnt)
601 		marks_mask |= READ_ONCE(mnt->mnt_fsnotify_mask);
602 	if (inode)
603 		marks_mask |= READ_ONCE(inode->i_fsnotify_mask);
604 	if (inode2)
605 		marks_mask |= READ_ONCE(inode2->i_fsnotify_mask);
606 	if (mnt_data)
607 		marks_mask |= READ_ONCE(mnt_data->ns->n_fsnotify_mask);
608 
609 	/*
610 	 * If this is a modify event we may need to clear some ignore masks.
611 	 * In that case, the object with ignore masks will have the FS_MODIFY
612 	 * event in its mask.
613 	 * Otherwise, return if none of the marks care about this type of event.
614 	 */
615 	test_mask = (mask & ALL_FSNOTIFY_EVENTS);
616 	if (!(test_mask & marks_mask))
617 		return 0;
618 
619 	iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
620 
621 	if (sbinfo) {
622 		iter_info.marks[FSNOTIFY_ITER_TYPE_SB] =
623 			fsnotify_first_mark(&sbinfo->sb_marks);
624 	}
625 	if (mnt) {
626 		iter_info.marks[FSNOTIFY_ITER_TYPE_VFSMOUNT] =
627 			fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
628 	}
629 	if (inode) {
630 		iter_info.marks[FSNOTIFY_ITER_TYPE_INODE] =
631 			fsnotify_first_mark(&inode->i_fsnotify_marks);
632 	}
633 	if (inode2) {
634 		iter_info.marks[inode2_type] =
635 			fsnotify_first_mark(&inode2->i_fsnotify_marks);
636 	}
637 	if (mnt_data) {
638 		iter_info.marks[FSNOTIFY_ITER_TYPE_MNTNS] =
639 			fsnotify_first_mark(&mnt_data->ns->n_fsnotify_marks);
640 	}
641 
642 	/*
643 	 * We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark
644 	 * ignore masks are properly reflected for mount/sb mark notifications.
645 	 * That's why this traversal is so complicated...
646 	 */
647 	while (fsnotify_iter_select_report_types(&iter_info)) {
648 		ret = send_to_group(mask, data, data_type, dir, file_name,
649 				    cookie, &iter_info);
650 
651 		if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
652 			goto out;
653 
654 		fsnotify_iter_next(&iter_info);
655 	}
656 	ret = 0;
657 out:
658 	srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx);
659 
660 	return ret;
661 }
662 EXPORT_SYMBOL_GPL(fsnotify);
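
/*
 * For illustration, a thin wrapper such as the fsnotify_inode() helper used
 * above is expected to call into fsnotify() roughly as in the following
 * sketch (the authoritative definition lives in include/linux/fsnotify.h):
 *
 *	static inline void fsnotify_inode(struct inode *inode, __u32 mask)
 *	{
 *		if (S_ISDIR(inode->i_mode))
 *			mask |= FS_ISDIR;
 *
 *		fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0);
 *	}
 */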
663 
664 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
665 /*
666  * At open time we check fsnotify_sb_has_priority_watchers(), call the open perm
667  * hook and set the FMODE_NONOTIFY_ mode bits accordingly.
668  * Later, the fsnotify permission hooks do not check whether there are permission
669  * event watchers now, only whether there were any at open time.
670  */
671 int fsnotify_open_perm_and_set_mode(struct file *file)
672 {
673 	struct dentry *dentry = file->f_path.dentry, *parent;
674 	struct super_block *sb = dentry->d_sb;
675 	__u32 mnt_mask, p_mask = 0;
676 
677 	/* Is it a file opened by fanotify? */
678 	if (FMODE_FSNOTIFY_NONE(file->f_mode))
679 		return 0;
680 
681 	/*
682 	 * Permission events are a superset of pre-content events, so if there
683 	 * are no permission event watchers, there are also no pre-content event
684 	 * watchers and this is implied from the single FMODE_NONOTIFY_PERM bit.
685 	 */
686 	if (likely(!fsnotify_sb_has_priority_watchers(sb,
687 						FSNOTIFY_PRIO_CONTENT))) {
688 		file_set_fsnotify_mode(file, FMODE_NONOTIFY_PERM);
689 		return 0;
690 	}
691 
692 	/*
693 	 * OK, there are some permission event watchers. Check if anybody is
694 	 * watching for permission events on *this* file.
695 	 */
696 	mnt_mask = READ_ONCE(real_mount(file->f_path.mnt)->mnt_fsnotify_mask);
697 	p_mask = fsnotify_object_watched(d_inode(dentry), mnt_mask,
698 					 ALL_FSNOTIFY_PERM_EVENTS);
699 	if (dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED) {
700 		parent = dget_parent(dentry);
701 		p_mask |= fsnotify_inode_watches_children(d_inode(parent));
702 		dput(parent);
703 	}
704 
705 	/*
706 	 * Legacy FAN_ACCESS_PERM events have very high performance overhead,
707 	 * so unlikely to be used in the wild. If they are used there will be
708 	 * so they are unlikely to be used in the wild. If they are used, there will be
709 	 */
710 	if (unlikely(p_mask & FS_ACCESS_PERM)) {
711 		/* Enable all permission and pre-content events */
712 		file_set_fsnotify_mode(file, 0);
713 		goto open_perm;
714 	}
715 
716 	/*
717 	 * Pre-content events are only supported on regular files.
718 	 * If there are pre-content event watchers and no permission access
719 	 * watchers, set FMODE_NONOTIFY | FMODE_NONOTIFY_PERM to indicate that.
720 	 * That is the common case with HSM service.
721 	 */
722 	if (d_is_reg(dentry) && (p_mask & FSNOTIFY_PRE_CONTENT_EVENTS)) {
723 		file_set_fsnotify_mode(file, FMODE_NONOTIFY |
724 					     FMODE_NONOTIFY_PERM);
725 		goto open_perm;
726 	}
727 
728 	/* Nobody watching permission and pre-content events on this file */
729 	file_set_fsnotify_mode(file, FMODE_NONOTIFY_PERM);
730 
731 open_perm:
732 	/*
733 	 * Send open perm events depending on object masks and regardless of
734 	 * FMODE_NONOTIFY_PERM.
735 	 */
736 	if (file->f_flags & __FMODE_EXEC && p_mask & FS_OPEN_EXEC_PERM) {
737 		int ret = fsnotify_path(&file->f_path, FS_OPEN_EXEC_PERM);
738 
739 		if (ret)
740 			return ret;
741 	}
742 
743 	if (p_mask & FS_OPEN_PERM)
744 		return fsnotify_path(&file->f_path, FS_OPEN_PERM);
745 
746 	return 0;
747 }
748 #endif
749 
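/*
 * Notify marks in the mount namespace about an event on a mount, which is
 * identified to listeners by its unique mount id.
 */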
750 void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt)
751 {
752 	struct fsnotify_mnt data = {
753 		.ns = ns,
754 		.mnt_id = real_mount(mnt)->mnt_id_unique,
755 	};
756 
757 	if (WARN_ON_ONCE(!ns))
758 		return;
759 
760 	/*
761 	 * This is an optimization as well as making sure fsnotify_init() has
762 	 * been called.
763 	 */
764 	if (!ns->n_fsnotify_marks)
765 		return;
766 
767 	fsnotify(mask, &data, FSNOTIFY_EVENT_MNT, NULL, NULL, NULL, 0);
768 }
769 
770 static __init int fsnotify_init(void)
771 {
772 	int ret;
773 
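	/*
	 * Compile-time reminder to update this count whenever a new FS_*
	 * event bit is added to ALL_FSNOTIFY_BITS.
	 */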
774 	BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 26);
775 
776 	ret = init_srcu_struct(&fsnotify_mark_srcu);
777 	if (ret)
778 		panic("initializing fsnotify_mark_srcu");
779 
780 	fsnotify_mark_connector_cachep = KMEM_CACHE(fsnotify_mark_connector,
781 						    SLAB_PANIC);
782 
783 	return 0;
784 }
785 core_initcall(fsnotify_init);
786