/* fs/notify/fanotify/fanotify_user.c (revision f49f4ab95c301dbccad0efe85296d908b8ae7ad4) */
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/ioctls.h>

#include "../../mount.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

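/*
 * One entry per permission event that has been handed to userspace and is
 * still waiting for a FAN_ALLOW/FAN_DENY reply, keyed by the fd we installed
 * for that event.
 */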
struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We have held the notification_mutex the whole time, so this is the
	 * same event we checked for above.
	 */
	return fsnotify_remove_notify_event(group);
}

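/*
 * Open a new struct file for the event's path and reserve an unused fd for
 * the listening process.  Returns the fd (or a negative errno) and sets
 * *file so the caller can fd_install() it once the metadata copy succeeds.
 */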
static int create_fd(struct fsnotify_group *group,
			struct fsnotify_event *event,
			struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * We need a new file handle for the userspace program so it can read
	 * even if the file was originally opened O_WRONLY.
	 *
	 * This may also be an overflow event, in which case dentry and mnt
	 * are NULL; that's fine, just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen when, say, tasks are gone and we try to
		 * open their /proc files, or we try to open a WRONLY file
		 * like in sysfs.  We just send the errno to userspace since
		 * there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

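/*
 * Fill in the fanotify_event_metadata that will be copied to userspace,
 * opening an fd for the event unless it is an overflow event (FAN_NOFD).
 */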
static int fill_event_metadata(struct fsnotify_group *group,
				   struct fanotify_event_metadata *metadata,
				   struct fsnotify_event *event,
				   struct file **file)
{
	int ret = 0;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	*file = NULL;
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
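/*
 * Find, unlink and return the pending response event matching the fd the
 * listener replied about, or NULL if no such event is outstanding.
 */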
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}

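/*
 * Handle a FAN_ALLOW/FAN_DENY reply written by the listener: attach the
 * response to the matching event and wake up whoever is waiting on it.
 */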
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid.  If it is not, we do nothing;
	 * userspace can send a valid response later, or the entry will be
	 * cleaned up when the fanotify fd is closed.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}

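/*
 * For permission events, queue a fanotify_response_event so the reply that
 * userspace later writes can be matched back to this event.  If the group is
 * already being torn down (bypass_perm), auto-allow instead.
 */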
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	if (atomic_read(&group->fanotify_data.bypass_perm)) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

#endif

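/*
 * Format one event for userspace: fill in the metadata, copy it to the user
 * buffer and only then install the event fd.  On failure, a permission event
 * is denied so the triggering task is not left blocked forever.
 */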
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		goto out;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		event->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

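/*
 * Copy as many whole events as fit into the user buffer.  Blocks until at
 * least one event has been returned unless O_NONBLOCK is set, in which case
 * an empty queue yields -EAGAIN.
 */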
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

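/*
 * write() on the fanotify fd is how a listener answers permission events:
 * userspace writes a struct fanotify_response naming the event fd and
 * FAN_ALLOW or FAN_DENY.
 */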
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

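/*
 * On the last close of the fanotify fd, allow every outstanding permission
 * event (and, via bypass_perm, any that arrive afterwards) so blocked tasks
 * are released, then drop the group reference taken in fanotify_init().
 */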
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif
	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}

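/*
 * FIONREAD reports how many bytes a read() would need to drain the whole
 * notification queue (every fanotify event is FAN_EVENT_METADATA_LEN bytes).
 */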
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

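/*
 * Resolve dfd/filename into a struct path, honouring FAN_MARK_DONT_FOLLOW
 * and FAN_MARK_ONLYDIR, and require read permission on the resulting inode.
 */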
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(f.file->f_path.dentry->d_inode->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

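/*
 * Clear the given bits from the mark's event mask or ignored mask; destroy
 * the mark if nothing is left in the affected mask.  Returns the bits that
 * were actually cleared so the caller can decide whether the object's
 * aggregate mask needs recalculating.
 */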
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

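/*
 * OR the given bits into the mark's event mask or ignored mask (and, when
 * FAN_MARK_ONDIR was not requested, ignore events on the directory itself).
 * Returns the bits that are newly set so the caller can recalculate the
 * object's aggregate mask.
 */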
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret = 0;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret)
			goto err;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);
err:
	fsnotify_put_mark(fsn_mark);
	return ret;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret = 0;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret)
			goto err;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
err:
	fsnotify_put_mark(fsn_mark);
	return ret;
}

/* fanotify syscalls */
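/*
 * Typical userspace usage of the two syscalls below (an illustrative sketch,
 * not part of this file):
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/");
 *
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (void *)buf;
 *	for (; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
 *		handle md->mask, md->pid and md->fd, then close(md->fd);
 *	}
 */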
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_put_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}

SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char  __user * pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed a fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permission events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update/remove/flush the requested mark(s) */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char  __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);