xref: /linux/fs/notify/fanotify/fanotify_user.c (revision c6bd5bcc4983f1a2d2f87a3769bf309482ee8c04)
1 #include <linux/fanotify.h>
2 #include <linux/fcntl.h>
3 #include <linux/file.h>
4 #include <linux/fs.h>
5 #include <linux/anon_inodes.h>
6 #include <linux/fsnotify_backend.h>
7 #include <linux/init.h>
8 #include <linux/mount.h>
9 #include <linux/namei.h>
10 #include <linux/poll.h>
11 #include <linux/security.h>
12 #include <linux/syscalls.h>
13 #include <linux/slab.h>
14 #include <linux/types.h>
15 #include <linux/uaccess.h>
16 
17 #include <asm/ioctls.h>
18 
19 #include "../../mount.h"
20 
21 #define FANOTIFY_DEFAULT_MAX_EVENTS	16384
22 #define FANOTIFY_DEFAULT_MAX_MARKS	8192
23 #define FANOTIFY_DEFAULT_MAX_LISTENERS	128
24 
25 extern const struct fsnotify_ops fanotify_fsnotify_ops;
26 
27 static struct kmem_cache *fanotify_mark_cache __read_mostly;
28 static struct kmem_cache *fanotify_response_event_cache __read_mostly;
29 
30 struct fanotify_response_event {
31 	struct list_head list;
32 	__s32 fd;
33 	struct fsnotify_event *event;
34 };
35 
36 /*
37  * Get an fsnotify notification event if one exists and is small
38  * enough to fit in "count". Return an error pointer if the count
39  * is not large enough.
40  *
41  * Called with the group->notification_mutex held.
42  */
43 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
44 					    size_t count)
45 {
46 	BUG_ON(!mutex_is_locked(&group->notification_mutex));
47 
48 	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
49 
50 	if (fsnotify_notify_queue_is_empty(group))
51 		return NULL;
52 
53 	if (FAN_EVENT_METADATA_LEN > count)
54 		return ERR_PTR(-EINVAL);
55 
56 	/* the notification_mutex has been held across the checks above,
57 	 * so the queue is still in the state we just examined */
58 	return fsnotify_remove_notify_event(group);
59 }
60 
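/*
 * Reserve an unused fd and open a new struct file on the event's path so
 * the listener gets its own handle to the object (readable even if the
 * original open was O_WRONLY).  The file is handed back through @file and
 * installed into the fd table later by the caller.  Returns the fd number
 * or a negative errno (e.g. -EOVERFLOW for queue-overflow events).
 */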
61 static int create_fd(struct fsnotify_group *group,
62 			struct fsnotify_event *event,
63 			struct file **file)
64 {
65 	int client_fd;
66 	struct file *new_file;
67 
68 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
69 
70 	client_fd = get_unused_fd();
71 	if (client_fd < 0)
72 		return client_fd;
73 
74 	if (event->data_type != FSNOTIFY_EVENT_PATH) {
75 		WARN_ON(1);
76 		put_unused_fd(client_fd);
77 		return -EINVAL;
78 	}
79 
80 	/*
81 	 * We need a new file handle for the userspace program so it can
82 	 * read even if the file was originally opened O_WRONLY.  Note that
83 	 * this may have been an overflow event, in which case the dentry
84 	 * and mnt are NULL; that's fine, just don't call dentry_open().
85 	 */
86 	if (event->path.dentry && event->path.mnt)
87 		new_file = dentry_open(&event->path,
88 				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
89 				       current_cred());
90 	else
91 		new_file = ERR_PTR(-EOVERFLOW);
92 	if (IS_ERR(new_file)) {
93 		/*
94 		 * We still send an event even if we can't open the file.  This
95 		 * can happen, say, when a task is gone and we try to open one
96 		 * of its /proc files, or when we try to open a WRONLY file
97 		 * such as one in sysfs.  We just send the errno to userspace
98 		 * since there isn't much else we can do.
99 		 */
100 		put_unused_fd(client_fd);
101 		client_fd = PTR_ERR(new_file);
102 	} else {
103 		*file = new_file;
104 	}
105 
106 	return client_fd;
107 }
108 
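/*
 * Fill in the fanotify_event_metadata that will be copied to userspace for
 * @event.  Queue-overflow events are reported with fd == FAN_NOFD; all
 * other events get a freshly created fd for the object they refer to.
 */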
109 static int fill_event_metadata(struct fsnotify_group *group,
110 				   struct fanotify_event_metadata *metadata,
111 				   struct fsnotify_event *event,
112 				   struct file **file)
113 {
114 	int ret = 0;
115 
116 	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
117 		 group, metadata, event);
118 
119 	*file = NULL;
120 	metadata->event_len = FAN_EVENT_METADATA_LEN;
121 	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
122 	metadata->vers = FANOTIFY_METADATA_VERSION;
123 	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
124 	metadata->pid = pid_vnr(event->tgid);
125 	if (unlikely(event->mask & FAN_Q_OVERFLOW))
126 		metadata->fd = FAN_NOFD;
127 	else {
128 		metadata->fd = create_fd(group, event, file);
129 		if (metadata->fd < 0)
130 			ret = metadata->fd;
131 	}
132 
133 	return ret;
134 }
135 
136 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
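/*
 * Find the pending response event whose fd matches @fd, remove it from the
 * group's access_list and return it (or NULL if no such entry exists).
 */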
137 static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
138 						  __s32 fd)
139 {
140 	struct fanotify_response_event *re, *return_re = NULL;
141 
142 	mutex_lock(&group->fanotify_data.access_mutex);
143 	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
144 		if (re->fd != fd)
145 			continue;
146 
147 		list_del_init(&re->list);
148 		return_re = re;
149 		break;
150 	}
151 	mutex_unlock(&group->fanotify_data.access_mutex);
152 
153 	pr_debug("%s: found return_re=%p\n", __func__, return_re);
154 
155 	return return_re;
156 }
157 
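/*
 * Handle a struct fanotify_response written by userspace: validate the
 * FAN_ALLOW/FAN_DENY verdict, match it to the pending permission event by
 * fd, record the response and wake up whoever is waiting on the event.
 */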
158 static int process_access_response(struct fsnotify_group *group,
159 				   struct fanotify_response *response_struct)
160 {
161 	struct fanotify_response_event *re;
162 	__s32 fd = response_struct->fd;
163 	__u32 response = response_struct->response;
164 
165 	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
166 		 fd, response);
167 	/*
168 	 * Make sure the response is valid.  If it is invalid we do nothing;
169 	 * either userspace can send a valid response or we will clean it up
170 	 * after the timeout.
171 	 */
172 	switch (response) {
173 	case FAN_ALLOW:
174 	case FAN_DENY:
175 		break;
176 	default:
177 		return -EINVAL;
178 	}
179 
180 	if (fd < 0)
181 		return -EINVAL;
182 
183 	re = dequeue_re(group, fd);
184 	if (!re)
185 		return -ENOENT;
186 
187 	re->event->response = response;
188 
189 	wake_up(&group->fanotify_data.access_waitq);
190 
191 	kmem_cache_free(fanotify_response_event_cache, re);
192 
193 	return 0;
194 }
195 
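/*
 * For permission events, remember the fd we just handed to userspace on the
 * group's access_list so that a later write() can be matched back to this
 * event.  If the listener is already going away (bypass_perm is set) the
 * event is auto-allowed instead.
 */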
196 static int prepare_for_access_response(struct fsnotify_group *group,
197 				       struct fsnotify_event *event,
198 				       __s32 fd)
199 {
200 	struct fanotify_response_event *re;
201 
202 	if (!(event->mask & FAN_ALL_PERM_EVENTS))
203 		return 0;
204 
205 	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
206 	if (!re)
207 		return -ENOMEM;
208 
209 	re->event = event;
210 	re->fd = fd;
211 
212 	mutex_lock(&group->fanotify_data.access_mutex);
213 
214 	if (atomic_read(&group->fanotify_data.bypass_perm)) {
215 		mutex_unlock(&group->fanotify_data.access_mutex);
216 		kmem_cache_free(fanotify_response_event_cache, re);
217 		event->response = FAN_ALLOW;
218 		return 0;
219 	}
220 
221 	list_add_tail(&re->list, &group->fanotify_data.access_list);
222 	mutex_unlock(&group->fanotify_data.access_mutex);
223 
224 	return 0;
225 }
226 
227 #else
228 static int prepare_for_access_response(struct fsnotify_group *group,
229 				       struct fsnotify_event *event,
230 				       __s32 fd)
231 {
232 	return 0;
233 }
234 
235 #endif
236 
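/*
 * Turn one in-kernel event into fanotify_event_metadata and copy it to the
 * user buffer.  The event fd is only installed into the fd table once the
 * copy has succeeded, so userspace never sees an fd for an event it did not
 * receive.  If anything fails, a pending permission event is denied and its
 * waiter woken so it does not hang forever.
 */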
237 static ssize_t copy_event_to_user(struct fsnotify_group *group,
238 				  struct fsnotify_event *event,
239 				  char __user *buf)
240 {
241 	struct fanotify_event_metadata fanotify_event_metadata;
242 	struct file *f;
243 	int fd, ret;
244 
245 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
246 
247 	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
248 	if (ret < 0)
249 		goto out;
250 
251 	fd = fanotify_event_metadata.fd;
252 	ret = -EFAULT;
253 	if (copy_to_user(buf, &fanotify_event_metadata,
254 			 fanotify_event_metadata.event_len))
255 		goto out_close_fd;
256 
257 	ret = prepare_for_access_response(group, event, fd);
258 	if (ret)
259 		goto out_close_fd;
260 
261 	if (fd != FAN_NOFD)
262 		fd_install(fd, f);
263 	return fanotify_event_metadata.event_len;
264 
265 out_close_fd:
266 	if (fd != FAN_NOFD) {
267 		put_unused_fd(fd);
268 		fput(f);
269 	}
270 out:
271 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
272 	if (event->mask & FAN_ALL_PERM_EVENTS) {
273 		event->response = FAN_DENY;
274 		wake_up(&group->fanotify_data.access_waitq);
275 	}
276 #endif
277 	return ret;
278 }
279 
280 /* fanotify userspace file descriptor functions */
281 static unsigned int fanotify_poll(struct file *file, poll_table *wait)
282 {
283 	struct fsnotify_group *group = file->private_data;
284 	int ret = 0;
285 
286 	poll_wait(file, &group->notification_waitq, wait);
287 	mutex_lock(&group->notification_mutex);
288 	if (!fsnotify_notify_queue_is_empty(group))
289 		ret = POLLIN | POLLRDNORM;
290 	mutex_unlock(&group->notification_mutex);
291 
292 	return ret;
293 }
294 
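/*
 * Copy queued events to userspace one at a time until the queue is empty or
 * the buffer cannot hold another event.  With nothing copied yet and an
 * empty queue we block, unless the descriptor is O_NONBLOCK, in which case
 * -EAGAIN is returned.
 */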
295 static ssize_t fanotify_read(struct file *file, char __user *buf,
296 			     size_t count, loff_t *pos)
297 {
298 	struct fsnotify_group *group;
299 	struct fsnotify_event *kevent;
300 	char __user *start;
301 	int ret;
302 	DEFINE_WAIT(wait);
303 
304 	start = buf;
305 	group = file->private_data;
306 
307 	pr_debug("%s: group=%p\n", __func__, group);
308 
309 	while (1) {
310 		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
311 
312 		mutex_lock(&group->notification_mutex);
313 		kevent = get_one_event(group, count);
314 		mutex_unlock(&group->notification_mutex);
315 
316 		if (kevent) {
317 			ret = PTR_ERR(kevent);
318 			if (IS_ERR(kevent))
319 				break;
320 			ret = copy_event_to_user(group, kevent, buf);
321 			fsnotify_put_event(kevent);
322 			if (ret < 0)
323 				break;
324 			buf += ret;
325 			count -= ret;
326 			continue;
327 		}
328 
329 		ret = -EAGAIN;
330 		if (file->f_flags & O_NONBLOCK)
331 			break;
332 		ret = -ERESTARTSYS;
333 		if (signal_pending(current))
334 			break;
335 
336 		if (start != buf)
337 			break;
338 
339 		schedule();
340 	}
341 
342 	finish_wait(&group->notification_waitq, &wait);
343 	if (start != buf && ret != -EFAULT)
344 		ret = buf - start;
345 	return ret;
346 }
347 
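/*
 * Writes are how userspace answers permission events: each write supplies a
 * struct fanotify_response pairing an event fd with FAN_ALLOW or FAN_DENY.
 * Without CONFIG_FANOTIFY_ACCESS_PERMISSIONS writing is rejected.
 */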
348 static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
349 {
350 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
351 	struct fanotify_response response = { .fd = -1, .response = -1 };
352 	struct fsnotify_group *group;
353 	int ret;
354 
355 	group = file->private_data;
356 
357 	if (count > sizeof(response))
358 		count = sizeof(response);
359 
360 	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
361 
362 	if (copy_from_user(&response, buf, count))
363 		return -EFAULT;
364 
365 	ret = process_access_response(group, &response);
366 	if (ret < 0)
367 		count = ret;
368 
369 	return count;
370 #else
371 	return -EINVAL;
372 #endif
373 }
374 
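/*
 * When the fanotify fd is closed for the last time, switch the group into
 * bypass mode and allow every permission event still waiting for a response
 * so blocked tasks do not hang, then drop the group reference taken in
 * fanotify_init().
 */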
375 static int fanotify_release(struct inode *ignored, struct file *file)
376 {
377 	struct fsnotify_group *group = file->private_data;
378 
379 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
380 	struct fanotify_response_event *re, *lre;
381 
382 	mutex_lock(&group->fanotify_data.access_mutex);
383 
384 	atomic_inc(&group->fanotify_data.bypass_perm);
385 
386 	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
387 		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
388 			 re, re->event);
389 
390 		list_del_init(&re->list);
391 		re->event->response = FAN_ALLOW;
392 
393 		kmem_cache_free(fanotify_response_event_cache, re);
394 	}
395 	mutex_unlock(&group->fanotify_data.access_mutex);
396 
397 	wake_up(&group->fanotify_data.access_waitq);
398 #endif
399 	/* matches the fanotify_init->fsnotify_alloc_group */
400 	fsnotify_put_group(group);
401 
402 	return 0;
403 }
404 
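/*
 * Only FIONREAD is implemented: it reports the number of bytes a read()
 * would currently return, i.e. FAN_EVENT_METADATA_LEN for every event
 * sitting in the notification queue.
 */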
405 static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
406 {
407 	struct fsnotify_group *group;
408 	struct fsnotify_event_holder *holder;
409 	void __user *p;
410 	int ret = -ENOTTY;
411 	size_t send_len = 0;
412 
413 	group = file->private_data;
414 
415 	p = (void __user *) arg;
416 
417 	switch (cmd) {
418 	case FIONREAD:
419 		mutex_lock(&group->notification_mutex);
420 		list_for_each_entry(holder, &group->notification_list, event_list)
421 			send_len += FAN_EVENT_METADATA_LEN;
422 		mutex_unlock(&group->notification_mutex);
423 		ret = put_user(send_len, (int __user *) p);
424 		break;
425 	}
426 
427 	return ret;
428 }
429 
430 static const struct file_operations fanotify_fops = {
431 	.poll		= fanotify_poll,
432 	.read		= fanotify_read,
433 	.write		= fanotify_write,
434 	.fasync		= NULL,
435 	.release	= fanotify_release,
436 	.unlocked_ioctl	= fanotify_ioctl,
437 	.compat_ioctl	= fanotify_ioctl,
438 	.llseek		= noop_llseek,
439 };
440 
441 static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
442 {
443 	kmem_cache_free(fanotify_mark_cache, fsn_mark);
444 }
445 
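/*
 * Resolve @dfd and @filename to the path that should be marked.  A NULL
 * filename means the target is @dfd itself (which must be a directory if
 * FAN_MARK_ONLYDIR was given); otherwise the name is looked up relative to
 * @dfd.  The caller must have read permission on the resulting inode.
 */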
446 static int fanotify_find_path(int dfd, const char __user *filename,
447 			      struct path *path, unsigned int flags)
448 {
449 	int ret;
450 
451 	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
452 		 dfd, filename, flags);
453 
454 	if (filename == NULL) {
455 		struct fd f = fdget(dfd);
456 
457 		ret = -EBADF;
458 		if (!f.file)
459 			goto out;
460 
461 		ret = -ENOTDIR;
462 		if ((flags & FAN_MARK_ONLYDIR) &&
463 		    !(S_ISDIR(f.file->f_path.dentry->d_inode->i_mode))) {
464 			fdput(f);
465 			goto out;
466 		}
467 
468 		*path = f.file->f_path;
469 		path_get(path);
470 		fdput(f);
471 	} else {
472 		unsigned int lookup_flags = 0;
473 
474 		if (!(flags & FAN_MARK_DONT_FOLLOW))
475 			lookup_flags |= LOOKUP_FOLLOW;
476 		if (flags & FAN_MARK_ONLYDIR)
477 			lookup_flags |= LOOKUP_DIRECTORY;
478 
479 		ret = user_path_at(dfd, filename, lookup_flags, path);
480 		if (ret)
481 			goto out;
482 	}
483 
484 	/* you can only watch an inode if you have read permissions on it */
485 	ret = inode_permission(path->dentry->d_inode, MAY_READ);
486 	if (ret)
487 		path_put(path);
488 out:
489 	return ret;
490 }
491 
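/*
 * Clear @mask from either the event mask or the ignored mask of @fsn_mark
 * (depending on FAN_MARK_IGNORED_MASK).  If no bits of that mask remain,
 * the mark is destroyed.  Returns the bits that were actually cleared.
 */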
492 static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
493 					    __u32 mask,
494 					    unsigned int flags)
495 {
496 	__u32 oldmask;
497 
498 	spin_lock(&fsn_mark->lock);
499 	if (!(flags & FAN_MARK_IGNORED_MASK)) {
500 		oldmask = fsn_mark->mask;
501 		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
502 	} else {
503 		oldmask = fsn_mark->ignored_mask;
504 		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
505 	}
506 	spin_unlock(&fsn_mark->lock);
507 
508 	if (!(oldmask & ~mask))
509 		fsnotify_destroy_mark(fsn_mark);
510 
511 	return mask & oldmask;
512 }
513 
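/*
 * Remove @mask from this group's mark on @mnt and recompute the mount's
 * combined fsnotify mask if any of the removed bits were contributing to it.
 */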
514 static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
515 					 struct vfsmount *mnt, __u32 mask,
516 					 unsigned int flags)
517 {
518 	struct fsnotify_mark *fsn_mark = NULL;
519 	__u32 removed;
520 
521 	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
522 	if (!fsn_mark)
523 		return -ENOENT;
524 
525 	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
526 	fsnotify_put_mark(fsn_mark);
527 	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
528 		fsnotify_recalc_vfsmount_mask(mnt);
529 
530 	return 0;
531 }
532 
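/*
 * Remove @mask from this group's mark on @inode and recompute the inode's
 * combined fsnotify mask if any of the removed bits were contributing to it.
 */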
533 static int fanotify_remove_inode_mark(struct fsnotify_group *group,
534 				      struct inode *inode, __u32 mask,
535 				      unsigned int flags)
536 {
537 	struct fsnotify_mark *fsn_mark = NULL;
538 	__u32 removed;
539 
540 	fsn_mark = fsnotify_find_inode_mark(group, inode);
541 	if (!fsn_mark)
542 		return -ENOENT;
543 
544 	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
545 	/* matches the fsnotify_find_inode_mark() */
546 	fsnotify_put_mark(fsn_mark);
547 	if (removed & inode->i_fsnotify_mask)
548 		fsnotify_recalc_inode_mask(inode);
549 
550 	return 0;
551 }
552 
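/*
 * OR @mask into either the event mask or the ignored mask of @fsn_mark.
 * Unless FAN_MARK_ONDIR was requested, FAN_ONDIR is added to the ignored
 * mask so events on directories are suppressed.  Returns the event-mask
 * bits that are newly set (0 for pure ignored-mask updates).
 */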
553 static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
554 				       __u32 mask,
555 				       unsigned int flags)
556 {
557 	__u32 oldmask = -1;
558 
559 	spin_lock(&fsn_mark->lock);
560 	if (!(flags & FAN_MARK_IGNORED_MASK)) {
561 		oldmask = fsn_mark->mask;
562 		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
563 	} else {
564 		__u32 tmask = fsn_mark->ignored_mask | mask;
565 		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
566 		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
567 			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
568 	}
569 
570 	if (!(flags & FAN_MARK_ONDIR)) {
571 		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
572 		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
573 	}
574 
575 	spin_unlock(&fsn_mark->lock);
576 
577 	return mask & ~oldmask;
578 }
579 
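/*
 * Find this group's mark on @mnt, allocating and attaching a new one if
 * necessary (subject to the per-group mark limit), add @mask to it and
 * recompute the mount's combined mask when new bits appear.
 */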
580 static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
581 				      struct vfsmount *mnt, __u32 mask,
582 				      unsigned int flags)
583 {
584 	struct fsnotify_mark *fsn_mark;
585 	__u32 added;
586 	int ret = 0;
587 
588 	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
589 	if (!fsn_mark) {
590 		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
591 			return -ENOSPC;
592 
593 		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
594 		if (!fsn_mark)
595 			return -ENOMEM;
596 
597 		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
598 		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
599 		if (ret)
600 			goto err;
601 	}
602 	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
603 
604 	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
605 		fsnotify_recalc_vfsmount_mask(mnt);
606 err:
607 	fsnotify_put_mark(fsn_mark);
608 	return ret;
609 }
610 
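/*
 * Same as above, but for a mark on @inode.  Ignored marks that do not
 * survive modification are not added while someone holds the inode open
 * for write.
 */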
611 static int fanotify_add_inode_mark(struct fsnotify_group *group,
612 				   struct inode *inode, __u32 mask,
613 				   unsigned int flags)
614 {
615 	struct fsnotify_mark *fsn_mark;
616 	__u32 added;
617 	int ret = 0;
618 
619 	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
620 
621 	/*
622 	 * If some other task has this inode open for write we should not add
623 	 * an ignored mark, unless that ignored mark is supposed to survive
624 	 * modification changes anyway.
625 	 */
626 	if ((flags & FAN_MARK_IGNORED_MASK) &&
627 	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
628 	    (atomic_read(&inode->i_writecount) > 0))
629 		return 0;
630 
631 	fsn_mark = fsnotify_find_inode_mark(group, inode);
632 	if (!fsn_mark) {
633 		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
634 			return -ENOSPC;
635 
636 		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
637 		if (!fsn_mark)
638 			return -ENOMEM;
639 
640 		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
641 		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
642 		if (ret)
643 			goto err;
644 	}
645 	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
646 
647 	if (added & ~inode->i_fsnotify_mask)
648 		fsnotify_recalc_inode_mask(inode);
649 err:
650 	fsnotify_put_mark(fsn_mark);
651 	return ret;
652 }
653 
654 /* fanotify syscalls */
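/*
 * fanotify_init() creates a new notification group, picks its priority
 * class and resource limits from @flags, and returns an anonymous-inode fd
 * through which events are read and (for permission events) answered.
 *
 * A minimal, illustrative userspace sequence (not part of this file) might
 * look like:
 *
 *	int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/");
 *	struct fanotify_event_metadata buf[16];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 */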
655 SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
656 {
657 	struct fsnotify_group *group;
658 	int f_flags, fd;
659 	struct user_struct *user;
660 
661 	pr_debug("%s: flags=%d event_f_flags=%d\n",
662 		__func__, flags, event_f_flags);
663 
664 	if (!capable(CAP_SYS_ADMIN))
665 		return -EPERM;
666 
667 	if (flags & ~FAN_ALL_INIT_FLAGS)
668 		return -EINVAL;
669 
670 	user = get_current_user();
671 	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
672 		free_uid(user);
673 		return -EMFILE;
674 	}
675 
676 	f_flags = O_RDWR | FMODE_NONOTIFY;
677 	if (flags & FAN_CLOEXEC)
678 		f_flags |= O_CLOEXEC;
679 	if (flags & FAN_NONBLOCK)
680 		f_flags |= O_NONBLOCK;
681 
682 	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
683 	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
684 	if (IS_ERR(group)) {
685 		free_uid(user);
686 		return PTR_ERR(group);
687 	}
688 
689 	group->fanotify_data.user = user;
690 	atomic_inc(&user->fanotify_listeners);
691 
692 	group->fanotify_data.f_flags = event_f_flags;
693 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
694 	mutex_init(&group->fanotify_data.access_mutex);
695 	init_waitqueue_head(&group->fanotify_data.access_waitq);
696 	INIT_LIST_HEAD(&group->fanotify_data.access_list);
697 	atomic_set(&group->fanotify_data.bypass_perm, 0);
698 #endif
699 	switch (flags & FAN_ALL_CLASS_BITS) {
700 	case FAN_CLASS_NOTIF:
701 		group->priority = FS_PRIO_0;
702 		break;
703 	case FAN_CLASS_CONTENT:
704 		group->priority = FS_PRIO_1;
705 		break;
706 	case FAN_CLASS_PRE_CONTENT:
707 		group->priority = FS_PRIO_2;
708 		break;
709 	default:
710 		fd = -EINVAL;
711 		goto out_put_group;
712 	}
713 
714 	if (flags & FAN_UNLIMITED_QUEUE) {
715 		fd = -EPERM;
716 		if (!capable(CAP_SYS_ADMIN))
717 			goto out_put_group;
718 		group->max_events = UINT_MAX;
719 	} else {
720 		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
721 	}
722 
723 	if (flags & FAN_UNLIMITED_MARKS) {
724 		fd = -EPERM;
725 		if (!capable(CAP_SYS_ADMIN))
726 			goto out_put_group;
727 		group->fanotify_data.max_marks = UINT_MAX;
728 	} else {
729 		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
730 	}
731 
732 	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
733 	if (fd < 0)
734 		goto out_put_group;
735 
736 	return fd;
737 
738 out_put_group:
739 	fsnotify_put_group(group);
740 	return fd;
741 }
742 
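/*
 * Add, remove or flush marks for the fanotify group behind @fanotify_fd on
 * the inode or mount named by @dfd/@pathname, adjusting which events in
 * @mask the group will receive.
 */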
743 SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
744 			      __u64 mask, int dfd,
745 			      const char  __user * pathname)
746 {
747 	struct inode *inode = NULL;
748 	struct vfsmount *mnt = NULL;
749 	struct fsnotify_group *group;
750 	struct fd f;
751 	struct path path;
752 	int ret;
753 
754 	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
755 		 __func__, fanotify_fd, flags, dfd, pathname, mask);
756 
757 	/* for now we only use the lower 32 bits of the mask */
758 	if (mask & ((__u64)0xffffffff << 32))
759 		return -EINVAL;
760 
761 	if (flags & ~FAN_ALL_MARK_FLAGS)
762 		return -EINVAL;
763 	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
764 	case FAN_MARK_ADD:		/* fallthrough */
765 	case FAN_MARK_REMOVE:
766 		if (!mask)
767 			return -EINVAL;
768 	case FAN_MARK_FLUSH:
769 		break;
770 	default:
771 		return -EINVAL;
772 	}
773 
774 	if (mask & FAN_ONDIR) {
775 		flags |= FAN_MARK_ONDIR;
776 		mask &= ~FAN_ONDIR;
777 	}
778 
779 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
780 	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
781 #else
782 	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
783 #endif
784 		return -EINVAL;
785 
786 	f = fdget(fanotify_fd);
787 	if (unlikely(!f.file))
788 		return -EBADF;
789 
790 	/* verify that this is indeed an fanotify instance */
791 	ret = -EINVAL;
792 	if (unlikely(f.file->f_op != &fanotify_fops))
793 		goto fput_and_out;
794 	group = f.file->private_data;
795 
796 	/*
797 	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
798 	 * allowed to set permission events.
799 	 */
800 	ret = -EINVAL;
801 	if (mask & FAN_ALL_PERM_EVENTS &&
802 	    group->priority == FS_PRIO_0)
803 		goto fput_and_out;
804 
805 	ret = fanotify_find_path(dfd, pathname, &path, flags);
806 	if (ret)
807 		goto fput_and_out;
808 
809 	/* inode held in place by reference to path; group by fget on fd */
810 	if (!(flags & FAN_MARK_MOUNT))
811 		inode = path.dentry->d_inode;
812 	else
813 		mnt = path.mnt;
814 
815 	/* create/update/remove the mark on the inode or vfsmount */
816 	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
817 	case FAN_MARK_ADD:
818 		if (flags & FAN_MARK_MOUNT)
819 			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
820 		else
821 			ret = fanotify_add_inode_mark(group, inode, mask, flags);
822 		break;
823 	case FAN_MARK_REMOVE:
824 		if (flags & FAN_MARK_MOUNT)
825 			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
826 		else
827 			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
828 		break;
829 	case FAN_MARK_FLUSH:
830 		if (flags & FAN_MARK_MOUNT)
831 			fsnotify_clear_vfsmount_marks_by_group(group);
832 		else
833 			fsnotify_clear_inode_marks_by_group(group);
834 		break;
835 	default:
836 		ret = -EINVAL;
837 	}
838 
839 	path_put(&path);
840 fput_and_out:
841 	fdput(f);
842 	return ret;
843 }
844 
845 #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
846 asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
847 				  long dfd, long pathname)
848 {
849 	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
850 				  mask, (int) dfd,
851 				  (const char  __user *) pathname);
852 }
853 SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
854 #endif
855 
856 /*
857  * fanotify_user_setup - Our initialization function.  Note that we cannot
858  * return an error because we have compiled-in VFS hooks.  So an (unlikely)
859  * failure here must result in panic().
860  */
861 static int __init fanotify_user_setup(void)
862 {
863 	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
864 	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
865 						   SLAB_PANIC);
866 
867 	return 0;
868 }
869 device_initcall(fanotify_user_setup);
870