xref: /linux/fs/notify/inotify/inotify_user.c (revision 2277ab4a1df50e05bc732fe9488d4e902bb8399a)
1 /*
2  * fs/inotify_user.c - inotify support for userspace
3  *
4  * Authors:
5  *	John McCutchan	<ttb@tentacle.dhs.org>
6  *	Robert Love	<rml@novell.com>
7  *
8  * Copyright (C) 2005 John McCutchan
9  * Copyright 2006 Hewlett-Packard Development Company, L.P.
10  *
11  * Copyright (C) 2009 Eric Paris <Red Hat Inc>
12  * inotify was largely rewritten to make use of the fsnotify infrastructure
13  *
14  * This program is free software; you can redistribute it and/or modify it
15  * under the terms of the GNU General Public License as published by the
16  * Free Software Foundation; either version 2, or (at your option) any
17  * later version.
18  *
19  * This program is distributed in the hope that it will be useful, but
20  * WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22  * General Public License for more details.
23  */
24 
25 #include <linux/file.h>
26 #include <linux/fs.h> /* struct inode */
27 #include <linux/fsnotify_backend.h>
28 #include <linux/idr.h>
29 #include <linux/init.h> /* module_init */
30 #include <linux/inotify.h>
31 #include <linux/kernel.h> /* roundup() */
32 #include <linux/magic.h> /* superblock magic number */
33 #include <linux/mount.h> /* mntget */
34 #include <linux/namei.h> /* LOOKUP_FOLLOW */
35 #include <linux/path.h> /* struct path */
36 #include <linux/sched.h> /* struct user */
37 #include <linux/slab.h> /* struct kmem_cache */
38 #include <linux/syscalls.h>
39 #include <linux/types.h>
40 #include <linux/uaccess.h>
41 #include <linux/poll.h>
42 #include <linux/wait.h>
43 
44 #include "inotify.h"
45 
46 #include <asm/ioctls.h>
47 
48 static struct vfsmount *inotify_mnt __read_mostly;
49 
50 /* this just sits here wasting global memory; it is used only to pad userspace messages with zeros */
51 static struct inotify_event nul_inotify_event;
52 
53 /* these are configurable via /proc/sys/fs/inotify/ */
54 static int inotify_max_user_instances __read_mostly;
55 static int inotify_max_queued_events __read_mostly;
56 int inotify_max_user_watches __read_mostly;
57 
58 static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
59 struct kmem_cache *event_priv_cachep __read_mostly;
60 static struct fsnotify_event *inotify_ignored_event;
61 
62 /*
63  * When inotify registers a new group it increments this and uses that
64  * value as an offset to set the fsnotify group "name" and priority.
65  */
66 static atomic_t inotify_grp_num;
67 
68 #ifdef CONFIG_SYSCTL
69 
70 #include <linux/sysctl.h>
71 
72 static int zero;
73 
74 ctl_table inotify_table[] = {
75 	{
76 		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
77 		.procname	= "max_user_instances",
78 		.data		= &inotify_max_user_instances,
79 		.maxlen		= sizeof(int),
80 		.mode		= 0644,
81 		.proc_handler	= &proc_dointvec_minmax,
82 		.strategy	= &sysctl_intvec,
83 		.extra1		= &zero,
84 	},
85 	{
86 		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
87 		.procname	= "max_user_watches",
88 		.data		= &inotify_max_user_watches,
89 		.maxlen		= sizeof(int),
90 		.mode		= 0644,
91 		.proc_handler	= &proc_dointvec_minmax,
92 		.strategy	= &sysctl_intvec,
93 		.extra1		= &zero,
94 	},
95 	{
96 		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
97 		.procname	= "max_queued_events",
98 		.data		= &inotify_max_queued_events,
99 		.maxlen		= sizeof(int),
100 		.mode		= 0644,
101 		.proc_handler	= &proc_dointvec_minmax,
102 		.strategy	= &sysctl_intvec,
103 		.extra1		= &zero
104 	},
105 	{ .ctl_name = 0 }
106 };
107 #endif /* CONFIG_SYSCTL */
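/*
 * Illustrative userspace sketch, not part of this file: the three
 * tunables registered above appear under /proc/sys/fs/inotify/ as
 * max_user_instances, max_user_watches and max_queued_events.  The
 * helper name below is hypothetical.
 */
#include <stdio.h>

static long read_inotify_limit(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/fs/inotify/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}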
108 
109 static inline __u32 inotify_arg_to_mask(u32 arg)
110 {
111 	__u32 mask;
112 
113 	/* every mark should accept its own ignored event and care about children */
114 	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);
115 
116 	/* mask off the flags used to open the fd */
117 	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));
118 
119 	return mask;
120 }
121 
122 static inline u32 inotify_mask_to_arg(__u32 mask)
123 {
124 	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
125 		       IN_Q_OVERFLOW);
126 }
127 
128 /* inotify userspace file descriptor functions */
129 static unsigned int inotify_poll(struct file *file, poll_table *wait)
130 {
131 	struct fsnotify_group *group = file->private_data;
132 	int ret = 0;
133 
134 	poll_wait(file, &group->notification_waitq, wait);
135 	mutex_lock(&group->notification_mutex);
136 	if (!fsnotify_notify_queue_is_empty(group))
137 		ret = POLLIN | POLLRDNORM;
138 	mutex_unlock(&group->notification_mutex);
139 
140 	return ret;
141 }
142 
143 /*
144  * Get an fsnotify_event if one exists and is small
145  * enough to fit in "count". Return an error pointer if
146  * the buffer is not large enough.
147  *
148  * Called with the group->notification_mutex held.
149  */
150 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
151 					    size_t count)
152 {
153 	size_t event_size = sizeof(struct inotify_event);
154 	struct fsnotify_event *event;
155 
156 	if (fsnotify_notify_queue_is_empty(group))
157 		return NULL;
158 
159 	event = fsnotify_peek_notify_event(group);
160 
161 	event_size += roundup(event->name_len, event_size);
162 
163 	if (event_size > count)
164 		return ERR_PTR(-EINVAL);
165 
166 	/* held the notification_mutex the whole time, so this is the
167 	 * same event we peeked above */
168 	fsnotify_remove_notify_event(group);
169 
170 	return event;
171 }
172 
173 /*
174  * Copy an event to user space, returning how much we copied.
175  *
176  * We already checked that the event size is smaller than the
177  * buffer we had in "get_one_event()" above.
178  */
179 static ssize_t copy_event_to_user(struct fsnotify_group *group,
180 				  struct fsnotify_event *event,
181 				  char __user *buf)
182 {
183 	struct inotify_event inotify_event;
184 	struct fsnotify_event_private_data *fsn_priv;
185 	struct inotify_event_private_data *priv;
186 	size_t event_size = sizeof(struct inotify_event);
187 	size_t name_len;
188 
189 	/* we get the inotify watch descriptor from the event private data */
190 	spin_lock(&event->lock);
191 	fsn_priv = fsnotify_remove_priv_from_event(group, event);
192 	spin_unlock(&event->lock);
193 
194 	if (!fsn_priv)
195 		inotify_event.wd = -1;
196 	else {
197 		priv = container_of(fsn_priv, struct inotify_event_private_data,
198 				    fsnotify_event_priv_data);
199 		inotify_event.wd = priv->wd;
200 		inotify_free_event_priv(fsn_priv);
201 	}
202 
203 	/* round up event->name_len so it is a multiple of event_size */
204 	name_len = roundup(event->name_len, event_size);
205 	inotify_event.len = name_len;
206 
207 	inotify_event.mask = inotify_mask_to_arg(event->mask);
208 	inotify_event.cookie = event->sync_cookie;
209 
210 	/* send the main event */
211 	if (copy_to_user(buf, &inotify_event, event_size))
212 		return -EFAULT;
213 
214 	buf += event_size;
215 
216 	/*
217 	 * fsnotify only stores the file name, so here we have to send the name
218 	 * and then pad it out to a multiple of sizeof(struct inotify_event)
219 	 * with zeros taken from nul_inotify_event.
220 	 */
221 	if (name_len) {
222 		unsigned int len_to_zero = name_len - event->name_len;
223 		/* copy the path name */
224 		if (copy_to_user(buf, event->file_name, event->name_len))
225 			return -EFAULT;
226 		buf += event->name_len;
227 
228 		/* fill userspace with 0's from nul_inotify_event */
229 		if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
230 			return -EFAULT;
231 		buf += len_to_zero;
232 		event_size += name_len;
233 	}
234 
235 	return event_size;
236 }
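/*
 * Illustrative userspace sketch, not part of this file: because of the
 * padding above, each record in the read() buffer is a struct
 * inotify_event followed by event->len bytes of NUL-padded name, so a
 * reader advances by sizeof(struct inotify_event) + len per record.
 * The helper name is hypothetical; buf must come straight from read()
 * on an inotify fd (and be suitably aligned).
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/inotify.h>

static void walk_events(const char *buf, ssize_t len)
{
	const char *p = buf;

	while (p < buf + len) {
		const struct inotify_event *ev =
			(const struct inotify_event *)p;

		printf("wd=%d mask=%#x cookie=%u name=%s\n",
		       ev->wd, (unsigned int)ev->mask,
		       (unsigned int)ev->cookie,
		       ev->len ? ev->name : "");
		p += sizeof(struct inotify_event) + ev->len;
	}
}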
237 
238 static ssize_t inotify_read(struct file *file, char __user *buf,
239 			    size_t count, loff_t *pos)
240 {
241 	struct fsnotify_group *group;
242 	struct fsnotify_event *kevent;
243 	char __user *start;
244 	int ret;
245 	DEFINE_WAIT(wait);
246 
247 	start = buf;
248 	group = file->private_data;
249 
250 	while (1) {
251 		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
252 
253 		mutex_lock(&group->notification_mutex);
254 		kevent = get_one_event(group, count);
255 		mutex_unlock(&group->notification_mutex);
256 
257 		if (kevent) {
258 			ret = PTR_ERR(kevent);
259 			if (IS_ERR(kevent))
260 				break;
261 			ret = copy_event_to_user(group, kevent, buf);
262 			fsnotify_put_event(kevent);
263 			if (ret < 0)
264 				break;
265 			buf += ret;
266 			count -= ret;
267 			continue;
268 		}
269 
270 		ret = -EAGAIN;
271 		if (file->f_flags & O_NONBLOCK)
272 			break;
273 		ret = -EINTR;
274 		if (signal_pending(current))
275 			break;
276 
277 		if (start != buf)
278 			break;
279 
280 		schedule();
281 	}
282 
283 	finish_wait(&group->notification_waitq, &wait);
284 	if (start != buf && ret != -EFAULT)
285 		ret = buf - start;
286 	return ret;
287 }
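/*
 * Illustrative userspace sketch, not part of this file: the loop above
 * sleeps until an event or a signal arrives, unless the fd was opened
 * with IN_NONBLOCK, in which case an empty queue yields EAGAIN.  The
 * helper name is hypothetical.
 */
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>

static ssize_t read_some_events(int inotify_fd, char *buf, size_t bufsize)
{
	ssize_t len = read(inotify_fd, buf, bufsize);

	if (len < 0 && errno == EAGAIN)
		return 0;	/* queue empty on a non-blocking fd */
	return len;		/* > 0: bytes of packed events, < 0: error */
}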
288 
289 static int inotify_fasync(int fd, struct file *file, int on)
290 {
291 	struct fsnotify_group *group = file->private_data;
292 
293 	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
294 }
295 
296 static int inotify_release(struct inode *ignored, struct file *file)
297 {
298 	struct fsnotify_group *group = file->private_data;
299 	struct user_struct *user = group->inotify_data.user;
300 
301 	fsnotify_clear_marks_by_group(group);
302 
303 	/* free this group; the matching get was in inotify_init->fsnotify_obtain_group */
304 	fsnotify_put_group(group);
305 
306 	atomic_dec(&user->inotify_devs);
307 
308 	return 0;
309 }
310 
311 static long inotify_ioctl(struct file *file, unsigned int cmd,
312 			  unsigned long arg)
313 {
314 	struct fsnotify_group *group;
315 	struct fsnotify_event_holder *holder;
316 	struct fsnotify_event *event;
317 	void __user *p;
318 	int ret = -ENOTTY;
319 	size_t send_len = 0;
320 
321 	group = file->private_data;
322 	p = (void __user *) arg;
323 
324 	switch (cmd) {
325 	case FIONREAD:
326 		mutex_lock(&group->notification_mutex);
327 		list_for_each_entry(holder, &group->notification_list, event_list) {
328 			event = holder->event;
329 			send_len += sizeof(struct inotify_event);
330 			send_len += roundup(event->name_len,
331 					     sizeof(struct inotify_event));
332 		}
333 		mutex_unlock(&group->notification_mutex);
334 		ret = put_user(send_len, (int __user *) p);
335 		break;
336 	}
337 
338 	return ret;
339 }
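/*
 * Illustrative userspace sketch, not part of this file: FIONREAD, as
 * computed above, reports how many bytes the queued events would
 * occupy, which lets a reader size its buffer before calling read().
 * The helper name is hypothetical.
 */
#include <sys/ioctl.h>

static int pending_event_bytes(int inotify_fd)
{
	int bytes = 0;

	if (ioctl(inotify_fd, FIONREAD, &bytes) < 0)
		return -1;
	return bytes;
}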
340 
341 static const struct file_operations inotify_fops = {
342 	.poll		= inotify_poll,
343 	.read		= inotify_read,
344 	.fasync		= inotify_fasync,
345 	.release	= inotify_release,
346 	.unlocked_ioctl	= inotify_ioctl,
347 	.compat_ioctl	= inotify_ioctl,
348 };
349 
350 
351 /*
352  * inotify_find_inode - resolve a user-given path to a specific inode
353  */
354 static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
355 {
356 	int error;
357 
358 	error = user_path_at(AT_FDCWD, dirname, flags, path);
359 	if (error)
360 		return error;
361 	/* you can only watch an inode if you have read permissions on it */
362 	error = inode_permission(path->dentry->d_inode, MAY_READ);
363 	if (error)
364 		path_put(path);
365 	return error;
366 }
367 
368 /*
369  * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
370  * internal reference held on the mark because it is in the idr.
371  */
372 void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
373 				    struct fsnotify_group *group)
374 {
375 	struct inotify_inode_mark_entry *ientry;
376 	struct inotify_event_private_data *event_priv;
377 	struct fsnotify_event_private_data *fsn_event_priv;
378 	struct idr *idr;
379 
380 	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
381 
382 	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
383 	if (unlikely(!event_priv))
384 		goto skip_send_ignore;
385 
386 	fsn_event_priv = &event_priv->fsnotify_event_priv_data;
387 
388 	fsn_event_priv->group = group;
389 	event_priv->wd = ientry->wd;
390 
391 	fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);
392 
393 	/* did the private data get added? */
394 	if (list_empty(&fsn_event_priv->event_list))
395 		inotify_free_event_priv(fsn_event_priv);
396 
397 skip_send_ignore:
398 
399 	/* remove this entry from the idr */
400 	spin_lock(&group->inotify_data.idr_lock);
401 	idr = &group->inotify_data.idr;
402 	idr_remove(idr, ientry->wd);
403 	spin_unlock(&group->inotify_data.idr_lock);
404 
405 	/* removed from idr, drop that reference */
406 	fsnotify_put_mark(entry);
407 }
408 
409 /* ding dong the mark is dead */
410 static void inotify_free_mark(struct fsnotify_mark_entry *entry)
411 {
412 	struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;
413 
414 	kmem_cache_free(inotify_inode_mark_cachep, ientry);
415 }
416 
417 static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
418 {
419 	struct fsnotify_mark_entry *entry = NULL;
420 	struct inotify_inode_mark_entry *ientry;
421 	int ret = 0;
422 	int add = (arg & IN_MASK_ADD);
423 	__u32 mask;
424 	__u32 old_mask, new_mask;
425 
426 	/* don't allow invalid bits: we don't want flags set */
427 	mask = inotify_arg_to_mask(arg);
428 	if (unlikely(!mask))
429 		return -EINVAL;
430 
431 	ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
432 	if (unlikely(!ientry))
433 		return -ENOMEM;
434 	/* we set the mask at the end after attaching it */
435 	fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
436 	ientry->wd = 0;
437 
438 find_entry:
439 	spin_lock(&inode->i_lock);
440 	entry = fsnotify_find_mark_entry(group, inode);
441 	spin_unlock(&inode->i_lock);
442 	if (entry) {
443 		kmem_cache_free(inotify_inode_mark_cachep, ientry);
444 		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
445 	} else {
446 		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
447 			ret = -ENOSPC;
448 			goto out_err;
449 		}
450 
451 		ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
452 		if (ret == -EEXIST)
453 			goto find_entry;
454 		else if (ret)
455 			goto out_err;
456 
457 		entry = &ientry->fsn_entry;
458 retry:
459 		ret = -ENOMEM;
460 		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
461 			goto out_err;
462 
463 		spin_lock(&group->inotify_data.idr_lock);
464 		/* if entry is added to the idr we keep the reference obtained
465 		 * through fsnotify_add_mark.  remember to drop this reference
466 		 * when entry is removed from the idr */
467 		ret = idr_get_new_above(&group->inotify_data.idr, entry,
468 					++group->inotify_data.last_wd,
469 					&ientry->wd);
470 		spin_unlock(&group->inotify_data.idr_lock);
471 		if (ret) {
472 			if (ret == -EAGAIN)
473 				goto retry;
474 			goto out_err;
475 		}
476 		atomic_inc(&group->inotify_data.user->inotify_watches);
477 	}
478 
479 	spin_lock(&entry->lock);
480 
481 	old_mask = entry->mask;
482 	if (add) {
483 		entry->mask |= mask;
484 		new_mask = entry->mask;
485 	} else {
486 		entry->mask = mask;
487 		new_mask = entry->mask;
488 	}
489 
490 	spin_unlock(&entry->lock);
491 
492 	if (old_mask != new_mask) {
493 		/* more bits in old than in new? */
494 		int dropped = (old_mask & ~new_mask);
495 		/* more bits in this entry than the inode's mask? */
496 		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
497 		/* more bits in this entry than the group? */
498 		int do_group = (new_mask & ~group->mask);
499 
500 		/* update the inode with this new entry */
501 		if (dropped || do_inode)
502 			fsnotify_recalc_inode_mask(inode);
503 
504 		/* update the group mask with the new mask */
505 		if (dropped || do_group)
506 			fsnotify_recalc_group_mask(group);
507 	}
508 
509 	return ientry->wd;
510 
511 out_err:
512 	/* this isn't supposed to happen; just kill the watch */
513 	if (entry) {
514 		fsnotify_destroy_mark_by_entry(entry);
515 		fsnotify_put_mark(entry);
516 	}
517 	return ret;
518 }
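/*
 * Illustrative userspace sketch, not part of this file: the add/replace
 * logic above is what gives inotify_add_watch(2) its IN_MASK_ADD
 * semantics.  A second call on the same inode returns the same wd;
 * without IN_MASK_ADD it replaces the event mask, with it the new bits
 * are ORed in.  The helper name is hypothetical.
 */
#include <sys/inotify.h>

static int watch_create_then_also_modify(int inotify_fd, const char *path)
{
	int wd;

	/* watch for names created under path */
	wd = inotify_add_watch(inotify_fd, path, IN_CREATE);
	if (wd < 0)
		return -1;

	/* keep IN_CREATE and additionally watch for modifications */
	return inotify_add_watch(inotify_fd, path, IN_MODIFY | IN_MASK_ADD);
}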
519 
520 static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
521 {
522 	struct fsnotify_group *group;
523 	unsigned int grp_num;
524 
525 	/* fsnotify_obtain_group takes a reference to the group; we drop it when the file is finally released */
526 	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
527 	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
528 	if (IS_ERR(group))
529 		return group;
530 
531 	group->max_events = max_events;
532 
533 	spin_lock_init(&group->inotify_data.idr_lock);
534 	idr_init(&group->inotify_data.idr);
535 	group->inotify_data.last_wd = 0;
536 	group->inotify_data.user = user;
537 	group->inotify_data.fa = NULL;
538 
539 	return group;
540 }
541 
542 
543 /* inotify syscalls */
544 SYSCALL_DEFINE1(inotify_init1, int, flags)
545 {
546 	struct fsnotify_group *group;
547 	struct user_struct *user;
548 	struct file *filp;
549 	int fd, ret;
550 
551 	/* Check the IN_* constants for consistency.  */
552 	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
553 	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
554 
555 	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
556 		return -EINVAL;
557 
558 	fd = get_unused_fd_flags(flags & O_CLOEXEC);
559 	if (fd < 0)
560 		return fd;
561 
562 	filp = get_empty_filp();
563 	if (!filp) {
564 		ret = -ENFILE;
565 		goto out_put_fd;
566 	}
567 
568 	user = get_current_user();
569 	if (unlikely(atomic_read(&user->inotify_devs) >=
570 			inotify_max_user_instances)) {
571 		ret = -EMFILE;
572 		goto out_free_uid;
573 	}
574 
575 	/* fsnotify_obtain_group takes a reference to the group; we drop it when the file is finally released */
576 	group = inotify_new_group(user, inotify_max_queued_events);
577 	if (IS_ERR(group)) {
578 		ret = PTR_ERR(group);
579 		goto out_free_uid;
580 	}
581 
582 	filp->f_op = &inotify_fops;
583 	filp->f_path.mnt = mntget(inotify_mnt);
584 	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
585 	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
586 	filp->f_mode = FMODE_READ;
587 	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
588 	filp->private_data = group;
589 
590 	atomic_inc(&user->inotify_devs);
591 
592 	fd_install(fd, filp);
593 
594 	return fd;
595 
596 out_free_uid:
597 	free_uid(user);
598 	put_filp(filp);
599 out_put_fd:
600 	put_unused_fd(fd);
601 	return ret;
602 }
603 
604 SYSCALL_DEFINE0(inotify_init)
605 {
606 	return sys_inotify_init1(0);
607 }
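/*
 * Illustrative userspace sketch, not part of this file: creating an
 * instance with the flags validated by inotify_init1() above.  The
 * BUILD_BUG_ONs guarantee IN_NONBLOCK and IN_CLOEXEC equal O_NONBLOCK
 * and O_CLOEXEC.  The helper name is hypothetical; glibc exposes
 * inotify_init1() via <sys/inotify.h>.
 */
#include <sys/inotify.h>

static int open_inotify_instance(void)
{
	/* fails with EMFILE once max_user_instances is reached */
	return inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
}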
608 
609 SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
610 		u32, mask)
611 {
612 	struct fsnotify_group *group;
613 	struct inode *inode;
614 	struct path path;
615 	struct file *filp;
616 	int ret, fput_needed;
617 	unsigned flags = 0;
618 
619 	filp = fget_light(fd, &fput_needed);
620 	if (unlikely(!filp))
621 		return -EBADF;
622 
623 	/* verify that this is indeed an inotify instance */
624 	if (unlikely(filp->f_op != &inotify_fops)) {
625 		ret = -EINVAL;
626 		goto fput_and_out;
627 	}
628 
629 	if (!(mask & IN_DONT_FOLLOW))
630 		flags |= LOOKUP_FOLLOW;
631 	if (mask & IN_ONLYDIR)
632 		flags |= LOOKUP_DIRECTORY;
633 
634 	ret = inotify_find_inode(pathname, &path, flags);
635 	if (ret)
636 		goto fput_and_out;
637 
638 	/* inode held in place by reference to path; group by fget on fd */
639 	inode = path.dentry->d_inode;
640 	group = filp->private_data;
641 
642 	/* create/update an inode mark */
643 	ret = inotify_update_watch(group, inode, mask);
644 	if (unlikely(ret))
645 		goto path_put_and_out;
646 
647 path_put_and_out:
648 	path_put(&path);
649 fput_and_out:
650 	fput_light(filp, fput_needed);
651 	return ret;
652 }
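/*
 * Illustrative userspace sketch, not part of this file: the lookup
 * flags handled above mean IN_ONLYDIR fails with ENOTDIR unless
 * pathname is a directory, and IN_DONT_FOLLOW refuses to dereference a
 * trailing symlink.  The helper name is hypothetical.
 */
#include <sys/inotify.h>

static int watch_directory_only(int inotify_fd, const char *dirpath)
{
	return inotify_add_watch(inotify_fd, dirpath,
				 IN_CREATE | IN_DELETE |
				 IN_ONLYDIR | IN_DONT_FOLLOW);
}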
653 
654 SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
655 {
656 	struct fsnotify_group *group;
657 	struct fsnotify_mark_entry *entry;
658 	struct file *filp;
659 	int ret = 0, fput_needed;
660 
661 	filp = fget_light(fd, &fput_needed);
662 	if (unlikely(!filp))
663 		return -EBADF;
664 
665 	/* verify that this is indeed an inotify instance */
666 	if (unlikely(filp->f_op != &inotify_fops)) {
667 		ret = -EINVAL;
668 		goto out;
669 	}
670 
671 	group = filp->private_data;
672 
673 	spin_lock(&group->inotify_data.idr_lock);
674 	entry = idr_find(&group->inotify_data.idr, wd);
675 	if (unlikely(!entry)) {
676 		spin_unlock(&group->inotify_data.idr_lock);
677 		ret = -EINVAL;
678 		goto out;
679 	}
680 	fsnotify_get_mark(entry);
681 	spin_unlock(&group->inotify_data.idr_lock);
682 
683 	fsnotify_destroy_mark_by_entry(entry);
684 	fsnotify_put_mark(entry);
685 
686 out:
687 	fput_light(filp, fput_needed);
688 	return ret;
689 }
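/*
 * Illustrative userspace sketch, not part of this file: removing a
 * watch via the syscall above queues one final IN_IGNORED event for
 * that wd (see inotify_ignored_and_remove_idr() earlier in this file)
 * before the wd disappears from the idr.  The helper name is
 * hypothetical.
 */
#include <sys/inotify.h>

static int drop_watch(int inotify_fd, int wd)
{
	/* fails with EINVAL if wd was never handed out by this instance */
	return inotify_rm_watch(inotify_fd, wd);
}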
690 
691 static int
692 inotify_get_sb(struct file_system_type *fs_type, int flags,
693 	       const char *dev_name, void *data, struct vfsmount *mnt)
694 {
695 	return get_sb_pseudo(fs_type, "inotify", NULL,
696 			INOTIFYFS_SUPER_MAGIC, mnt);
697 }
698 
699 static struct file_system_type inotify_fs_type = {
700     .name	= "inotifyfs",
701     .get_sb	= inotify_get_sb,
702     .kill_sb	= kill_anon_super,
703 };
704 
705 /*
706  * inotify_user_setup - Our initialization function.  Note that we cannot return
707  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
708  * must result in panic().
709  */
710 static int __init inotify_user_setup(void)
711 {
712 	int ret;
713 
714 	ret = register_filesystem(&inotify_fs_type);
715 	if (unlikely(ret))
716 		panic("inotify: register_filesystem returned %d!\n", ret);
717 
718 	inotify_mnt = kern_mount(&inotify_fs_type);
719 	if (IS_ERR(inotify_mnt))
720 		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
721 
722 	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
723 	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
724 	inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
725 	if (!inotify_ignored_event)
726 		panic("unable to allocate the inotify ignored event\n");
727 
728 	inotify_max_queued_events = 16384;
729 	inotify_max_user_instances = 128;
730 	inotify_max_user_watches = 8192;
731 
732 	return 0;
733 }
734 module_init(inotify_user_setup);
735