xref: /linux/fs/notify/fanotify/fanotify_user.c (revision 95e9fd10f06cb5642028b6b851e32b8c8afb4571)
1 #include <linux/fanotify.h>
2 #include <linux/fcntl.h>
3 #include <linux/file.h>
4 #include <linux/fs.h>
5 #include <linux/anon_inodes.h>
6 #include <linux/fsnotify_backend.h>
7 #include <linux/init.h>
8 #include <linux/mount.h>
9 #include <linux/namei.h>
10 #include <linux/poll.h>
11 #include <linux/security.h>
12 #include <linux/syscalls.h>
13 #include <linux/slab.h>
14 #include <linux/types.h>
15 #include <linux/uaccess.h>
16 
17 #include <asm/ioctls.h>
18 
19 #include "../../mount.h"
20 
21 #define FANOTIFY_DEFAULT_MAX_EVENTS	16384
22 #define FANOTIFY_DEFAULT_MAX_MARKS	8192
23 #define FANOTIFY_DEFAULT_MAX_LISTENERS	128
24 
25 extern const struct fsnotify_ops fanotify_fsnotify_ops;
26 
27 static struct kmem_cache *fanotify_mark_cache __read_mostly;
28 static struct kmem_cache *fanotify_response_event_cache __read_mostly;
29 
30 struct fanotify_response_event {
31 	struct list_head list;
32 	__s32 fd;
33 	struct fsnotify_event *event;
34 };
35 
36 /*
37  * Get an fsnotify notification event if one exists and is small
38  * enough to fit in "count". Return an error pointer if the count
39  * is not large enough.
40  *
41  * Called with the group->notification_mutex held.
42  */
43 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
44 					    size_t count)
45 {
46 	BUG_ON(!mutex_is_locked(&group->notification_mutex));
47 
48 	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
49 
50 	if (fsnotify_notify_queue_is_empty(group))
51 		return NULL;
52 
53 	if (FAN_EVENT_METADATA_LEN > count)
54 		return ERR_PTR(-EINVAL);
55 
56 	/* the notification_mutex has been held the whole time, so this is
57 	 * the same event whose presence we checked above */
58 	return fsnotify_remove_notify_event(group);
59 }
60 
61 static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
62 {
63 	int client_fd;
64 	struct file *new_file;
65 
66 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
67 
68 	client_fd = get_unused_fd();
69 	if (client_fd < 0)
70 		return client_fd;
71 
72 	if (event->data_type != FSNOTIFY_EVENT_PATH) {
73 		WARN_ON(1);
74 		put_unused_fd(client_fd);
75 		return -EINVAL;
76 	}
77 
78 	/*
79 	 * We need a new file handle for the userspace program so it can read
80 	 * the file even if it was originally opened O_WRONLY.
81 	 */
82 	/* It's possible this event was an overflow event.  In that case dentry
83 	 * and mnt are NULL.  That's fine, just don't call dentry_open() */
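	/*
	 * FMODE_NONOTIFY below keeps accesses through the fd we hand to the
	 * listener from generating further fanotify events (and event loops).
	 */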
84 	if (event->path.dentry && event->path.mnt)
85 		new_file = dentry_open(&event->path,
86 				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
87 				       current_cred());
88 	else
89 		new_file = ERR_PTR(-EOVERFLOW);
90 	if (IS_ERR(new_file)) {
91 		/*
92 		 * We still send an event even if we can't open the file.  This
93 		 * can happen when, say, a task is gone and we try to open its
94 		 * /proc files, or when we try to open a write-only file such
95 		 * as one in sysfs.  We just send the errno to userspace since
96 		 * there isn't much else we can do.
97 		 */
98 		put_unused_fd(client_fd);
99 		client_fd = PTR_ERR(new_file);
100 	} else {
101 		fd_install(client_fd, new_file);
102 	}
103 
104 	return client_fd;
105 }
106 
107 static int fill_event_metadata(struct fsnotify_group *group,
108 				   struct fanotify_event_metadata *metadata,
109 				   struct fsnotify_event *event)
110 {
111 	int ret = 0;
112 
113 	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
114 		 group, metadata, event);
115 
116 	metadata->event_len = FAN_EVENT_METADATA_LEN;
117 	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
118 	metadata->vers = FANOTIFY_METADATA_VERSION;
119 	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
120 	metadata->pid = pid_vnr(event->tgid);
121 	if (unlikely(event->mask & FAN_Q_OVERFLOW))
122 		metadata->fd = FAN_NOFD;
123 	else {
124 		metadata->fd = create_fd(group, event);
125 		if (metadata->fd < 0)
126 			ret = metadata->fd;
127 	}
128 
129 	return ret;
130 }
131 
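/*
 * Summary of what userspace sees per record (all derived from the code above):
 * event_len and metadata_len are both the fixed FAN_EVENT_METADATA_LEN, mask
 * is filtered through FAN_ALL_OUTGOING_EVENTS, pid is the event's tgid
 * translated into the reader's pid namespace via pid_vnr(), and fd is either a
 * freshly created descriptor from create_fd() or FAN_NOFD on queue overflow.
 */
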
132 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
133 static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
134 						  __s32 fd)
135 {
136 	struct fanotify_response_event *re, *return_re = NULL;
137 
138 	mutex_lock(&group->fanotify_data.access_mutex);
139 	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
140 		if (re->fd != fd)
141 			continue;
142 
143 		list_del_init(&re->list);
144 		return_re = re;
145 		break;
146 	}
147 	mutex_unlock(&group->fanotify_data.access_mutex);
148 
149 	pr_debug("%s: found return_re=%p\n", __func__, return_re);
150 
151 	return return_re;
152 }
153 
154 static int process_access_response(struct fsnotify_group *group,
155 				   struct fanotify_response *response_struct)
156 {
157 	struct fanotify_response_event *re;
158 	__s32 fd = response_struct->fd;
159 	__u32 response = response_struct->response;
160 
161 	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
162 		 fd, response);
163 	/*
164 	 * Make sure the response is valid.  If it isn't, we do nothing:
165 	 * userspace can still send a valid response, or the entry will be
166 	 * cleaned up after the timeout.
167 	 */
168 	switch (response) {
169 	case FAN_ALLOW:
170 	case FAN_DENY:
171 		break;
172 	default:
173 		return -EINVAL;
174 	}
175 
176 	if (fd < 0)
177 		return -EINVAL;
178 
179 	re = dequeue_re(group, fd);
180 	if (!re)
181 		return -ENOENT;
182 
183 	re->event->response = response;
184 
185 	wake_up(&group->fanotify_data.access_waitq);
186 
187 	kmem_cache_free(fanotify_response_event_cache, re);
188 
189 	return 0;
190 }
191 
192 static int prepare_for_access_response(struct fsnotify_group *group,
193 				       struct fsnotify_event *event,
194 				       __s32 fd)
195 {
196 	struct fanotify_response_event *re;
197 
198 	if (!(event->mask & FAN_ALL_PERM_EVENTS))
199 		return 0;
200 
201 	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
202 	if (!re)
203 		return -ENOMEM;
204 
205 	re->event = event;
206 	re->fd = fd;
207 
208 	mutex_lock(&group->fanotify_data.access_mutex);
209 
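	/*
	 * bypass_perm is set once the group is being torn down (see
	 * fanotify_release()); in that case don't queue the entry, just
	 * auto-allow the access.
	 */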
210 	if (atomic_read(&group->fanotify_data.bypass_perm)) {
211 		mutex_unlock(&group->fanotify_data.access_mutex);
212 		kmem_cache_free(fanotify_response_event_cache, re);
213 		event->response = FAN_ALLOW;
214 		return 0;
215 	}
216 
217 	list_add_tail(&re->list, &group->fanotify_data.access_list);
218 	mutex_unlock(&group->fanotify_data.access_mutex);
219 
220 	return 0;
221 }
222 
223 static void remove_access_response(struct fsnotify_group *group,
224 				   struct fsnotify_event *event,
225 				   __s32 fd)
226 {
227 	struct fanotify_response_event *re;
228 
229 	if (!(event->mask & FAN_ALL_PERM_EVENTS))
230 		return;
231 
232 	re = dequeue_re(group, fd);
233 	if (!re)
234 		return;
235 
236 	BUG_ON(re->event != event);
237 
238 	kmem_cache_free(fanotify_response_event_cache, re);
239 
240 	return;
241 }
242 #else
243 static int prepare_for_access_response(struct fsnotify_group *group,
244 				       struct fsnotify_event *event,
245 				       __s32 fd)
246 {
247 	return 0;
248 }
249 
250 static void remove_access_response(struct fsnotify_group *group,
251 				   struct fsnotify_event *event,
252 				   __s32 fd)
253 {
254 	return;
255 }
256 #endif
257 
258 static ssize_t copy_event_to_user(struct fsnotify_group *group,
259 				  struct fsnotify_event *event,
260 				  char __user *buf)
261 {
262 	struct fanotify_event_metadata fanotify_event_metadata;
263 	int fd, ret;
264 
265 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
266 
267 	ret = fill_event_metadata(group, &fanotify_event_metadata, event);
268 	if (ret < 0)
269 		goto out;
270 
271 	fd = fanotify_event_metadata.fd;
272 	ret = prepare_for_access_response(group, event, fd);
273 	if (ret)
274 		goto out_close_fd;
275 
276 	ret = -EFAULT;
277 	if (copy_to_user(buf, &fanotify_event_metadata,
278 			 fanotify_event_metadata.event_len))
279 		goto out_kill_access_response;
280 
281 	return fanotify_event_metadata.event_len;
282 
283 out_kill_access_response:
284 	remove_access_response(group, event, fd);
285 out_close_fd:
286 	if (fd != FAN_NOFD)
287 		sys_close(fd);
288 out:
289 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
290 	if (event->mask & FAN_ALL_PERM_EVENTS) {
291 		event->response = FAN_DENY;
292 		wake_up(&group->fanotify_data.access_waitq);
293 	}
294 #endif
295 	return ret;
296 }
297 
298 /* fanotify userspace file descriptor functions */
299 static unsigned int fanotify_poll(struct file *file, poll_table *wait)
300 {
301 	struct fsnotify_group *group = file->private_data;
302 	int ret = 0;
303 
304 	poll_wait(file, &group->notification_waitq, wait);
305 	mutex_lock(&group->notification_mutex);
306 	if (!fsnotify_notify_queue_is_empty(group))
307 		ret = POLLIN | POLLRDNORM;
308 	mutex_unlock(&group->notification_mutex);
309 
310 	return ret;
311 }
312 
313 static ssize_t fanotify_read(struct file *file, char __user *buf,
314 			     size_t count, loff_t *pos)
315 {
316 	struct fsnotify_group *group;
317 	struct fsnotify_event *kevent;
318 	char __user *start;
319 	int ret;
320 	DEFINE_WAIT(wait);
321 
322 	start = buf;
323 	group = file->private_data;
324 
325 	pr_debug("%s: group=%p\n", __func__, group);
326 
327 	while (1) {
328 		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
329 
330 		mutex_lock(&group->notification_mutex);
331 		kevent = get_one_event(group, count);
332 		mutex_unlock(&group->notification_mutex);
333 
334 		if (kevent) {
335 			ret = PTR_ERR(kevent);
336 			if (IS_ERR(kevent))
337 				break;
338 			ret = copy_event_to_user(group, kevent, buf);
339 			fsnotify_put_event(kevent);
340 			if (ret < 0)
341 				break;
342 			buf += ret;
343 			count -= ret;
344 			continue;
345 		}
346 
347 		ret = -EAGAIN;
348 		if (file->f_flags & O_NONBLOCK)
349 			break;
350 		ret = -ERESTARTSYS;
351 		if (signal_pending(current))
352 			break;
353 
354 		if (start != buf)
355 			break;
356 
357 		schedule();
358 	}
359 
360 	finish_wait(&group->notification_waitq, &wait);
361 	if (start != buf && ret != -EFAULT)
362 		ret = buf - start;
363 	return ret;
364 }
365 
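/*
 * A minimal userspace read loop (illustrative sketch only, not part of this
 * file) consuming the fixed-size fanotify_event_metadata records produced by
 * copy_event_to_user() above.  It assumes the <sys/fanotify.h> userspace
 * header and its FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers; the buffer size and
 * the printf() format are arbitrary example choices.
 */
#include <sys/fanotify.h>
#include <unistd.h>
#include <stdio.h>

static void example_read_loop(int fanfd)
{
	struct fanotify_event_metadata buf[64], *md;
	ssize_t len;

	while ((len = read(fanfd, buf, sizeof(buf))) > 0) {
		md = buf;
		while (FAN_EVENT_OK(md, len)) {
			printf("mask=%llx pid=%d fd=%d\n",
			       (unsigned long long)md->mask,
			       (int)md->pid, (int)md->fd);
			/* close the fd installed by create_fd() above */
			if (md->fd >= 0)
				close(md->fd);
			md = FAN_EVENT_NEXT(md, len);
		}
	}
}
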
366 static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
367 {
368 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
369 	struct fanotify_response response = { .fd = -1, .response = -1 };
370 	struct fsnotify_group *group;
371 	int ret;
372 
373 	group = file->private_data;
374 
375 	if (count > sizeof(response))
376 		count = sizeof(response);
377 
378 	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
379 
380 	if (copy_from_user(&response, buf, count))
381 		return -EFAULT;
382 
383 	ret = process_access_response(group, &response);
384 	if (ret < 0)
385 		count = ret;
386 
387 	return count;
388 #else
389 	return -EINVAL;
390 #endif
391 }
392 
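/*
 * A sketch (illustrative only, not part of this file) of the userspace side
 * of the permission protocol handled by process_access_response() above: the
 * listener writes back a struct fanotify_response carrying the event's fd and
 * either FAN_ALLOW or FAN_DENY.  Answering FAN_ALLOW unconditionally is just
 * for illustration.
 */
#include <sys/fanotify.h>
#include <unistd.h>

static int example_allow(int fanfd, const struct fanotify_event_metadata *md)
{
	struct fanotify_response resp;

	/* only permission events (FAN_ALL_PERM_EVENTS) expect a reply */
	resp.fd = md->fd;
	resp.response = FAN_ALLOW;	/* or FAN_DENY to refuse the access */

	return write(fanfd, &resp, sizeof(resp)) == sizeof(resp) ? 0 : -1;
}
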
393 static int fanotify_release(struct inode *ignored, struct file *file)
394 {
395 	struct fsnotify_group *group = file->private_data;
396 
397 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
398 	struct fanotify_response_event *re, *lre;
399 
400 	mutex_lock(&group->fanotify_data.access_mutex);
401 
402 	atomic_inc(&group->fanotify_data.bypass_perm);
403 
404 	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
405 		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
406 			 re, re->event);
407 
408 		list_del_init(&re->list);
409 		re->event->response = FAN_ALLOW;
410 
411 		kmem_cache_free(fanotify_response_event_cache, re);
412 	}
413 	mutex_unlock(&group->fanotify_data.access_mutex);
414 
415 	wake_up(&group->fanotify_data.access_waitq);
416 #endif
417 	/* matches the fanotify_init->fsnotify_alloc_group */
418 	fsnotify_put_group(group);
419 
420 	return 0;
421 }
422 
423 static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
424 {
425 	struct fsnotify_group *group;
426 	struct fsnotify_event_holder *holder;
427 	void __user *p;
428 	int ret = -ENOTTY;
429 	size_t send_len = 0;
430 
431 	group = file->private_data;
432 
433 	p = (void __user *) arg;
434 
435 	switch (cmd) {
436 	case FIONREAD:
437 		mutex_lock(&group->notification_mutex);
438 		list_for_each_entry(holder, &group->notification_list, event_list)
439 			send_len += FAN_EVENT_METADATA_LEN;
440 		mutex_unlock(&group->notification_mutex);
441 		ret = put_user(send_len, (int __user *) p);
442 		break;
443 	}
444 
445 	return ret;
446 }
447 
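/*
 * A small userspace sketch (illustrative only, not part of this file) of the
 * FIONREAD query implemented above: it reports the pending queue size in
 * bytes, i.e. the number of queued events times FAN_EVENT_METADATA_LEN.
 */
#include <sys/ioctl.h>
#include <stdio.h>

static void example_pending(int fanfd)
{
	int bytes = 0;

	if (ioctl(fanfd, FIONREAD, &bytes) == 0)
		printf("%d bytes of queued events\n", bytes);
}
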
448 static const struct file_operations fanotify_fops = {
449 	.poll		= fanotify_poll,
450 	.read		= fanotify_read,
451 	.write		= fanotify_write,
452 	.fasync		= NULL,
453 	.release	= fanotify_release,
454 	.unlocked_ioctl	= fanotify_ioctl,
455 	.compat_ioctl	= fanotify_ioctl,
456 	.llseek		= noop_llseek,
457 };
458 
459 static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
460 {
461 	kmem_cache_free(fanotify_mark_cache, fsn_mark);
462 }
463 
464 static int fanotify_find_path(int dfd, const char __user *filename,
465 			      struct path *path, unsigned int flags)
466 {
467 	int ret;
468 
469 	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
470 		 dfd, filename, flags);
471 
472 	if (filename == NULL) {
473 		struct file *file;
474 		int fput_needed;
475 
476 		ret = -EBADF;
477 		file = fget_light(dfd, &fput_needed);
478 		if (!file)
479 			goto out;
480 
481 		ret = -ENOTDIR;
482 		if ((flags & FAN_MARK_ONLYDIR) &&
483 		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
484 			fput_light(file, fput_needed);
485 			goto out;
486 		}
487 
488 		*path = file->f_path;
489 		path_get(path);
490 		fput_light(file, fput_needed);
491 	} else {
492 		unsigned int lookup_flags = 0;
493 
494 		if (!(flags & FAN_MARK_DONT_FOLLOW))
495 			lookup_flags |= LOOKUP_FOLLOW;
496 		if (flags & FAN_MARK_ONLYDIR)
497 			lookup_flags |= LOOKUP_DIRECTORY;
498 
499 		ret = user_path_at(dfd, filename, lookup_flags, path);
500 		if (ret)
501 			goto out;
502 	}
503 
504 	/* you can only watch an inode if you have read permissions on it */
505 	ret = inode_permission(path->dentry->d_inode, MAY_READ);
506 	if (ret)
507 		path_put(path);
508 out:
509 	return ret;
510 }
511 
512 static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
513 					    __u32 mask,
514 					    unsigned int flags)
515 {
516 	__u32 oldmask;
517 
518 	spin_lock(&fsn_mark->lock);
519 	if (!(flags & FAN_MARK_IGNORED_MASK)) {
520 		oldmask = fsn_mark->mask;
521 		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
522 	} else {
523 		oldmask = fsn_mark->ignored_mask;
524 		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
525 	}
526 	spin_unlock(&fsn_mark->lock);
527 
528 	if (!(oldmask & ~mask))
529 		fsnotify_destroy_mark(fsn_mark);
530 
531 	return mask & oldmask;
532 }
533 
534 static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
535 					 struct vfsmount *mnt, __u32 mask,
536 					 unsigned int flags)
537 {
538 	struct fsnotify_mark *fsn_mark = NULL;
539 	__u32 removed;
540 
541 	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
542 	if (!fsn_mark)
543 		return -ENOENT;
544 
545 	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
546 	fsnotify_put_mark(fsn_mark);
547 	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
548 		fsnotify_recalc_vfsmount_mask(mnt);
549 
550 	return 0;
551 }
552 
553 static int fanotify_remove_inode_mark(struct fsnotify_group *group,
554 				      struct inode *inode, __u32 mask,
555 				      unsigned int flags)
556 {
557 	struct fsnotify_mark *fsn_mark = NULL;
558 	__u32 removed;
559 
560 	fsn_mark = fsnotify_find_inode_mark(group, inode);
561 	if (!fsn_mark)
562 		return -ENOENT;
563 
564 	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
565 	/* matches the fsnotify_find_inode_mark() */
566 	fsnotify_put_mark(fsn_mark);
567 	if (removed & inode->i_fsnotify_mask)
568 		fsnotify_recalc_inode_mask(inode);
569 
570 	return 0;
571 }
572 
573 static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
574 				       __u32 mask,
575 				       unsigned int flags)
576 {
577 	__u32 oldmask = -1;
578 
579 	spin_lock(&fsn_mark->lock);
580 	if (!(flags & FAN_MARK_IGNORED_MASK)) {
581 		oldmask = fsn_mark->mask;
582 		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
583 	} else {
584 		__u32 tmask = fsn_mark->ignored_mask | mask;
585 		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
586 		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
587 			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
588 	}
589 
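	/*
	 * Marks added without FAN_MARK_ONDIR implicitly suppress events on
	 * directories: FAN_ONDIR is folded into the mark's ignored mask here.
	 */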
590 	if (!(flags & FAN_MARK_ONDIR)) {
591 		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
592 		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
593 	}
594 
595 	spin_unlock(&fsn_mark->lock);
596 
597 	return mask & ~oldmask;
598 }
599 
600 static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
601 				      struct vfsmount *mnt, __u32 mask,
602 				      unsigned int flags)
603 {
604 	struct fsnotify_mark *fsn_mark;
605 	__u32 added;
606 	int ret = 0;
607 
608 	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
609 	if (!fsn_mark) {
610 		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
611 			return -ENOSPC;
612 
613 		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
614 		if (!fsn_mark)
615 			return -ENOMEM;
616 
617 		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
618 		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
619 		if (ret)
620 			goto err;
621 	}
622 	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
623 
624 	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
625 		fsnotify_recalc_vfsmount_mask(mnt);
626 err:
627 	fsnotify_put_mark(fsn_mark);
628 	return ret;
629 }
630 
631 static int fanotify_add_inode_mark(struct fsnotify_group *group,
632 				   struct inode *inode, __u32 mask,
633 				   unsigned int flags)
634 {
635 	struct fsnotify_mark *fsn_mark;
636 	__u32 added;
637 	int ret = 0;
638 
639 	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
640 
641 	/*
642 	 * If some other task has this inode open for write we should not add
643 	 * an ignored mark, unless that ignored mark is supposed to survive
644 	 * modification changes anyway.
645 	 */
646 	if ((flags & FAN_MARK_IGNORED_MASK) &&
647 	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
648 	    (atomic_read(&inode->i_writecount) > 0))
649 		return 0;
650 
651 	fsn_mark = fsnotify_find_inode_mark(group, inode);
652 	if (!fsn_mark) {
653 		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
654 			return -ENOSPC;
655 
656 		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
657 		if (!fsn_mark)
658 			return -ENOMEM;
659 
660 		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
661 		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
662 		if (ret)
663 			goto err;
664 	}
665 	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
666 
667 	if (added & ~inode->i_fsnotify_mask)
668 		fsnotify_recalc_inode_mask(inode);
669 err:
670 	fsnotify_put_mark(fsn_mark);
671 	return ret;
672 }
673 
674 /* fanotify syscalls */
675 SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
676 {
677 	struct fsnotify_group *group;
678 	int f_flags, fd;
679 	struct user_struct *user;
680 
681 	pr_debug("%s: flags=%d event_f_flags=%d\n",
682 		__func__, flags, event_f_flags);
683 
684 	if (!capable(CAP_SYS_ADMIN))
685 		return -EPERM;
686 
687 	if (flags & ~FAN_ALL_INIT_FLAGS)
688 		return -EINVAL;
689 
690 	user = get_current_user();
691 	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
692 		free_uid(user);
693 		return -EMFILE;
694 	}
695 
696 	f_flags = O_RDWR | FMODE_NONOTIFY;
697 	if (flags & FAN_CLOEXEC)
698 		f_flags |= O_CLOEXEC;
699 	if (flags & FAN_NONBLOCK)
700 		f_flags |= O_NONBLOCK;
701 
702 	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
703 	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
704 	if (IS_ERR(group)) {
705 		free_uid(user);
706 		return PTR_ERR(group);
707 	}
708 
709 	group->fanotify_data.user = user;
710 	atomic_inc(&user->fanotify_listeners);
711 
712 	group->fanotify_data.f_flags = event_f_flags;
713 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
714 	mutex_init(&group->fanotify_data.access_mutex);
715 	init_waitqueue_head(&group->fanotify_data.access_waitq);
716 	INIT_LIST_HEAD(&group->fanotify_data.access_list);
717 	atomic_set(&group->fanotify_data.bypass_perm, 0);
718 #endif
719 	switch (flags & FAN_ALL_CLASS_BITS) {
720 	case FAN_CLASS_NOTIF:
721 		group->priority = FS_PRIO_0;
722 		break;
723 	case FAN_CLASS_CONTENT:
724 		group->priority = FS_PRIO_1;
725 		break;
726 	case FAN_CLASS_PRE_CONTENT:
727 		group->priority = FS_PRIO_2;
728 		break;
729 	default:
730 		fd = -EINVAL;
731 		goto out_put_group;
732 	}
733 
734 	if (flags & FAN_UNLIMITED_QUEUE) {
735 		fd = -EPERM;
736 		if (!capable(CAP_SYS_ADMIN))
737 			goto out_put_group;
738 		group->max_events = UINT_MAX;
739 	} else {
740 		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
741 	}
742 
743 	if (flags & FAN_UNLIMITED_MARKS) {
744 		fd = -EPERM;
745 		if (!capable(CAP_SYS_ADMIN))
746 			goto out_put_group;
747 		group->fanotify_data.max_marks = UINT_MAX;
748 	} else {
749 		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
750 	}
751 
752 	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
753 	if (fd < 0)
754 		goto out_put_group;
755 
756 	return fd;
757 
758 out_put_group:
759 	fsnotify_put_group(group);
760 	return fd;
761 }
762 
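/*
 * A minimal userspace sketch (illustrative only, not part of this file) of
 * calling the syscall above through the glibc wrapper in <sys/fanotify.h>.
 * FAN_CLASS_CONTENT, FAN_CLOEXEC and O_RDONLY are example choices; the call
 * needs CAP_SYS_ADMIN and fails with EPERM otherwise.
 */
#include <sys/fanotify.h>
#include <fcntl.h>
#include <stdio.h>

static int example_init(void)
{
	int fanfd = fanotify_init(FAN_CLASS_CONTENT | FAN_CLOEXEC, O_RDONLY);

	if (fanfd < 0)
		perror("fanotify_init");
	return fanfd;
}
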
763 SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
764 			      __u64 mask, int dfd,
765 			      const char  __user * pathname)
766 {
767 	struct inode *inode = NULL;
768 	struct vfsmount *mnt = NULL;
769 	struct fsnotify_group *group;
770 	struct file *filp;
771 	struct path path;
772 	int ret, fput_needed;
773 
774 	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
775 		 __func__, fanotify_fd, flags, dfd, pathname, mask);
776 
777 	/* for now we only use the lower 32 bits of the mask */
778 	if (mask & ((__u64)0xffffffff << 32))
779 		return -EINVAL;
780 
781 	if (flags & ~FAN_ALL_MARK_FLAGS)
782 		return -EINVAL;
783 	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
784 	case FAN_MARK_ADD:		/* fallthrough */
785 	case FAN_MARK_REMOVE:
786 		if (!mask)
787 			return -EINVAL;
788 	case FAN_MARK_FLUSH:
789 		break;
790 	default:
791 		return -EINVAL;
792 	}
793 
794 	if (mask & FAN_ONDIR) {
795 		flags |= FAN_MARK_ONDIR;
796 		mask &= ~FAN_ONDIR;
797 	}
798 
799 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
800 	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
801 #else
802 	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
803 #endif
804 		return -EINVAL;
805 
806 	filp = fget_light(fanotify_fd, &fput_needed);
807 	if (unlikely(!filp))
808 		return -EBADF;
809 
810 	/* verify that this is indeed an fanotify instance */
811 	ret = -EINVAL;
812 	if (unlikely(filp->f_op != &fanotify_fops))
813 		goto fput_and_out;
814 	group = filp->private_data;
815 
816 	/*
817 	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These groups are
818 	 * not allowed to request permission events.
819 	 */
820 	ret = -EINVAL;
821 	if (mask & FAN_ALL_PERM_EVENTS &&
822 	    group->priority == FS_PRIO_0)
823 		goto fput_and_out;
824 
825 	ret = fanotify_find_path(dfd, pathname, &path, flags);
826 	if (ret)
827 		goto fput_and_out;
828 
829 	/* inode held in place by reference to path; group by fget on fd */
830 	if (!(flags & FAN_MARK_MOUNT))
831 		inode = path.dentry->d_inode;
832 	else
833 		mnt = path.mnt;
834 
835 	/* add/remove/flush an inode or vfsmount mark */
836 	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
837 	case FAN_MARK_ADD:
838 		if (flags & FAN_MARK_MOUNT)
839 			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
840 		else
841 			ret = fanotify_add_inode_mark(group, inode, mask, flags);
842 		break;
843 	case FAN_MARK_REMOVE:
844 		if (flags & FAN_MARK_MOUNT)
845 			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
846 		else
847 			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
848 		break;
849 	case FAN_MARK_FLUSH:
850 		if (flags & FAN_MARK_MOUNT)
851 			fsnotify_clear_vfsmount_marks_by_group(group);
852 		else
853 			fsnotify_clear_inode_marks_by_group(group);
854 		break;
855 	default:
856 		ret = -EINVAL;
857 	}
858 
859 	path_put(&path);
860 fput_and_out:
861 	fput_light(filp, fput_needed);
862 	return ret;
863 }
864 
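/*
 * A companion userspace sketch (illustrative only, not part of this file) for
 * the syscall above: add a mount mark that asks for open-permission events.
 * The "/" path and FAN_OPEN_PERM mask are example choices; permission events
 * require a FAN_CLASS_CONTENT or FAN_CLASS_PRE_CONTENT group, since FS_PRIO_0
 * groups are rejected above.
 */
#include <sys/fanotify.h>
#include <fcntl.h>
#include <stdio.h>

static int example_mark(int fanfd)
{
	int ret = fanotify_mark(fanfd, FAN_MARK_ADD | FAN_MARK_MOUNT,
				FAN_OPEN_PERM, AT_FDCWD, "/");

	if (ret < 0)
		perror("fanotify_mark");
	return ret;
}
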
865 #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
866 asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
867 				  long dfd, long pathname)
868 {
869 	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
870 				  mask, (int) dfd,
871 				  (const char  __user *) pathname);
872 }
873 SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
874 #endif
875 
876 /*
877  * fanotify_user_setup - Our initialization function.  Note that we cannot return
878  * an error because we have compiled-in VFS hooks.  So an (unlikely) failure here
879  * must result in panic().
880  */
881 static int __init fanotify_user_setup(void)
882 {
883 	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
884 	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
885 						   SLAB_PANIC);
886 
887 	return 0;
888 }
889 device_initcall(fanotify_user_setup);
890