1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/fanotify.h> 3 #include <linux/fcntl.h> 4 #include <linux/file.h> 5 #include <linux/fs.h> 6 #include <linux/anon_inodes.h> 7 #include <linux/fsnotify_backend.h> 8 #include <linux/init.h> 9 #include <linux/mount.h> 10 #include <linux/namei.h> 11 #include <linux/poll.h> 12 #include <linux/security.h> 13 #include <linux/syscalls.h> 14 #include <linux/slab.h> 15 #include <linux/types.h> 16 #include <linux/uaccess.h> 17 #include <linux/compat.h> 18 #include <linux/sched/signal.h> 19 20 #include <asm/ioctls.h> 21 22 #include "../../mount.h" 23 #include "../fdinfo.h" 24 #include "fanotify.h" 25 26 #define FANOTIFY_DEFAULT_MAX_EVENTS 16384 27 #define FANOTIFY_DEFAULT_MAX_MARKS 8192 28 #define FANOTIFY_DEFAULT_MAX_LISTENERS 128 29 30 /* 31 * All flags that may be specified in parameter event_f_flags of fanotify_init. 32 * 33 * Internal and external open flags are stored together in field f_flags of 34 * struct file. Only external open flags shall be allowed in event_f_flags. 35 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be 36 * excluded. 37 */ 38 #define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \ 39 O_ACCMODE | O_APPEND | O_NONBLOCK | \ 40 __O_SYNC | O_DSYNC | O_CLOEXEC | \ 41 O_LARGEFILE | O_NOATIME ) 42 43 extern const struct fsnotify_ops fanotify_fsnotify_ops; 44 45 struct kmem_cache *fanotify_mark_cache __read_mostly; 46 struct kmem_cache *fanotify_event_cachep __read_mostly; 47 struct kmem_cache *fanotify_perm_event_cachep __read_mostly; 48 49 /* 50 * Get an fsnotify notification event if one exists and is small 51 * enough to fit in "count". Return an error pointer if the count 52 * is not large enough. 53 * 54 * Called with the group->notification_lock held. 55 */ 56 static struct fsnotify_event *get_one_event(struct fsnotify_group *group, 57 size_t count) 58 { 59 assert_spin_locked(&group->notification_lock); 60 61 pr_debug("%s: group=%p count=%zd\n", __func__, group, count); 62 63 if (fsnotify_notify_queue_is_empty(group)) 64 return NULL; 65 66 if (FAN_EVENT_METADATA_LEN > count) 67 return ERR_PTR(-EINVAL); 68 69 /* held the notification_lock the whole time, so this is the 70 * same event we peeked above */ 71 return fsnotify_remove_first_event(group); 72 } 73 74 static int create_fd(struct fsnotify_group *group, 75 struct fanotify_event_info *event, 76 struct file **file) 77 { 78 int client_fd; 79 struct file *new_file; 80 81 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 82 83 client_fd = get_unused_fd_flags(group->fanotify_data.f_flags); 84 if (client_fd < 0) 85 return client_fd; 86 87 /* 88 * we need a new file handle for the userspace program so it can read even if it was 89 * originally opened O_WRONLY. 90 */ 91 /* it's possible this event was an overflow event. in that case dentry and mnt 92 * are NULL; That's fine, just don't call dentry open */ 93 if (event->path.dentry && event->path.mnt) 94 new_file = dentry_open(&event->path, 95 group->fanotify_data.f_flags | FMODE_NONOTIFY, 96 current_cred()); 97 else 98 new_file = ERR_PTR(-EOVERFLOW); 99 if (IS_ERR(new_file)) { 100 /* 101 * we still send an event even if we can't open the file. this 102 * can happen when say tasks are gone and we try to open their 103 * /proc files or we try to open a WRONLY file like in sysfs 104 * we just send the errno to userspace since there isn't much 105 * else we can do. 106 */ 107 put_unused_fd(client_fd); 108 client_fd = PTR_ERR(new_file); 109 } else { 110 *file = new_file; 111 } 112 113 return client_fd; 114 } 115 116 static int fill_event_metadata(struct fsnotify_group *group, 117 struct fanotify_event_metadata *metadata, 118 struct fsnotify_event *fsn_event, 119 struct file **file) 120 { 121 int ret = 0; 122 struct fanotify_event_info *event; 123 124 pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, 125 group, metadata, fsn_event); 126 127 *file = NULL; 128 event = container_of(fsn_event, struct fanotify_event_info, fse); 129 metadata->event_len = FAN_EVENT_METADATA_LEN; 130 metadata->metadata_len = FAN_EVENT_METADATA_LEN; 131 metadata->vers = FANOTIFY_METADATA_VERSION; 132 metadata->reserved = 0; 133 metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS; 134 metadata->pid = pid_vnr(event->tgid); 135 if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW)) 136 metadata->fd = FAN_NOFD; 137 else { 138 metadata->fd = create_fd(group, event, file); 139 if (metadata->fd < 0) 140 ret = metadata->fd; 141 } 142 143 return ret; 144 } 145 146 static struct fanotify_perm_event_info *dequeue_event( 147 struct fsnotify_group *group, int fd) 148 { 149 struct fanotify_perm_event_info *event, *return_e = NULL; 150 151 spin_lock(&group->notification_lock); 152 list_for_each_entry(event, &group->fanotify_data.access_list, 153 fae.fse.list) { 154 if (event->fd != fd) 155 continue; 156 157 list_del_init(&event->fae.fse.list); 158 return_e = event; 159 break; 160 } 161 spin_unlock(&group->notification_lock); 162 163 pr_debug("%s: found return_re=%p\n", __func__, return_e); 164 165 return return_e; 166 } 167 168 static int process_access_response(struct fsnotify_group *group, 169 struct fanotify_response *response_struct) 170 { 171 struct fanotify_perm_event_info *event; 172 int fd = response_struct->fd; 173 int response = response_struct->response; 174 175 pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group, 176 fd, response); 177 /* 178 * make sure the response is valid, if invalid we do nothing and either 179 * userspace can send a valid response or we will clean it up after the 180 * timeout 181 */ 182 switch (response & ~FAN_AUDIT) { 183 case FAN_ALLOW: 184 case FAN_DENY: 185 break; 186 default: 187 return -EINVAL; 188 } 189 190 if (fd < 0) 191 return -EINVAL; 192 193 if ((response & FAN_AUDIT) && !group->fanotify_data.audit) 194 return -EINVAL; 195 196 event = dequeue_event(group, fd); 197 if (!event) 198 return -ENOENT; 199 200 event->response = response; 201 wake_up(&group->fanotify_data.access_waitq); 202 203 return 0; 204 } 205 206 static ssize_t copy_event_to_user(struct fsnotify_group *group, 207 struct fsnotify_event *event, 208 char __user *buf) 209 { 210 struct fanotify_event_metadata fanotify_event_metadata; 211 struct file *f; 212 int fd, ret; 213 214 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 215 216 ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f); 217 if (ret < 0) 218 return ret; 219 220 fd = fanotify_event_metadata.fd; 221 ret = -EFAULT; 222 if (copy_to_user(buf, &fanotify_event_metadata, 223 fanotify_event_metadata.event_len)) 224 goto out_close_fd; 225 226 if (fanotify_is_perm_event(event->mask)) 227 FANOTIFY_PE(event)->fd = fd; 228 229 if (fd != FAN_NOFD) 230 fd_install(fd, f); 231 return fanotify_event_metadata.event_len; 232 233 out_close_fd: 234 if (fd != FAN_NOFD) { 235 put_unused_fd(fd); 236 fput(f); 237 } 238 return ret; 239 } 240 241 /* intofiy userspace file descriptor functions */ 242 static unsigned int fanotify_poll(struct file *file, poll_table *wait) 243 { 244 struct fsnotify_group *group = file->private_data; 245 int ret = 0; 246 247 poll_wait(file, &group->notification_waitq, wait); 248 spin_lock(&group->notification_lock); 249 if (!fsnotify_notify_queue_is_empty(group)) 250 ret = POLLIN | POLLRDNORM; 251 spin_unlock(&group->notification_lock); 252 253 return ret; 254 } 255 256 static ssize_t fanotify_read(struct file *file, char __user *buf, 257 size_t count, loff_t *pos) 258 { 259 struct fsnotify_group *group; 260 struct fsnotify_event *kevent; 261 char __user *start; 262 int ret; 263 DEFINE_WAIT_FUNC(wait, woken_wake_function); 264 265 start = buf; 266 group = file->private_data; 267 268 pr_debug("%s: group=%p\n", __func__, group); 269 270 add_wait_queue(&group->notification_waitq, &wait); 271 while (1) { 272 spin_lock(&group->notification_lock); 273 kevent = get_one_event(group, count); 274 spin_unlock(&group->notification_lock); 275 276 if (IS_ERR(kevent)) { 277 ret = PTR_ERR(kevent); 278 break; 279 } 280 281 if (!kevent) { 282 ret = -EAGAIN; 283 if (file->f_flags & O_NONBLOCK) 284 break; 285 286 ret = -ERESTARTSYS; 287 if (signal_pending(current)) 288 break; 289 290 if (start != buf) 291 break; 292 293 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 294 continue; 295 } 296 297 ret = copy_event_to_user(group, kevent, buf); 298 if (unlikely(ret == -EOPENSTALE)) { 299 /* 300 * We cannot report events with stale fd so drop it. 301 * Setting ret to 0 will continue the event loop and 302 * do the right thing if there are no more events to 303 * read (i.e. return bytes read, -EAGAIN or wait). 304 */ 305 ret = 0; 306 } 307 308 /* 309 * Permission events get queued to wait for response. Other 310 * events can be destroyed now. 311 */ 312 if (!fanotify_is_perm_event(kevent->mask)) { 313 fsnotify_destroy_event(group, kevent); 314 } else { 315 if (ret <= 0) { 316 FANOTIFY_PE(kevent)->response = FAN_DENY; 317 wake_up(&group->fanotify_data.access_waitq); 318 } else { 319 spin_lock(&group->notification_lock); 320 list_add_tail(&kevent->list, 321 &group->fanotify_data.access_list); 322 spin_unlock(&group->notification_lock); 323 } 324 } 325 if (ret < 0) 326 break; 327 buf += ret; 328 count -= ret; 329 } 330 remove_wait_queue(&group->notification_waitq, &wait); 331 332 if (start != buf && ret != -EFAULT) 333 ret = buf - start; 334 return ret; 335 } 336 337 static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) 338 { 339 struct fanotify_response response = { .fd = -1, .response = -1 }; 340 struct fsnotify_group *group; 341 int ret; 342 343 if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) 344 return -EINVAL; 345 346 group = file->private_data; 347 348 if (count > sizeof(response)) 349 count = sizeof(response); 350 351 pr_debug("%s: group=%p count=%zu\n", __func__, group, count); 352 353 if (copy_from_user(&response, buf, count)) 354 return -EFAULT; 355 356 ret = process_access_response(group, &response); 357 if (ret < 0) 358 count = ret; 359 360 return count; 361 } 362 363 static int fanotify_release(struct inode *ignored, struct file *file) 364 { 365 struct fsnotify_group *group = file->private_data; 366 struct fanotify_perm_event_info *event, *next; 367 struct fsnotify_event *fsn_event; 368 369 /* 370 * Stop new events from arriving in the notification queue. since 371 * userspace cannot use fanotify fd anymore, no event can enter or 372 * leave access_list by now either. 373 */ 374 fsnotify_group_stop_queueing(group); 375 376 /* 377 * Process all permission events on access_list and notification queue 378 * and simulate reply from userspace. 379 */ 380 spin_lock(&group->notification_lock); 381 list_for_each_entry_safe(event, next, &group->fanotify_data.access_list, 382 fae.fse.list) { 383 pr_debug("%s: found group=%p event=%p\n", __func__, group, 384 event); 385 386 list_del_init(&event->fae.fse.list); 387 event->response = FAN_ALLOW; 388 } 389 390 /* 391 * Destroy all non-permission events. For permission events just 392 * dequeue them and set the response. They will be freed once the 393 * response is consumed and fanotify_get_response() returns. 394 */ 395 while (!fsnotify_notify_queue_is_empty(group)) { 396 fsn_event = fsnotify_remove_first_event(group); 397 if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) { 398 spin_unlock(&group->notification_lock); 399 fsnotify_destroy_event(group, fsn_event); 400 spin_lock(&group->notification_lock); 401 } else { 402 FANOTIFY_PE(fsn_event)->response = FAN_ALLOW; 403 } 404 } 405 spin_unlock(&group->notification_lock); 406 407 /* Response for all permission events it set, wakeup waiters */ 408 wake_up(&group->fanotify_data.access_waitq); 409 410 /* matches the fanotify_init->fsnotify_alloc_group */ 411 fsnotify_destroy_group(group); 412 413 return 0; 414 } 415 416 static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 417 { 418 struct fsnotify_group *group; 419 struct fsnotify_event *fsn_event; 420 void __user *p; 421 int ret = -ENOTTY; 422 size_t send_len = 0; 423 424 group = file->private_data; 425 426 p = (void __user *) arg; 427 428 switch (cmd) { 429 case FIONREAD: 430 spin_lock(&group->notification_lock); 431 list_for_each_entry(fsn_event, &group->notification_list, list) 432 send_len += FAN_EVENT_METADATA_LEN; 433 spin_unlock(&group->notification_lock); 434 ret = put_user(send_len, (int __user *) p); 435 break; 436 } 437 438 return ret; 439 } 440 441 static const struct file_operations fanotify_fops = { 442 .show_fdinfo = fanotify_show_fdinfo, 443 .poll = fanotify_poll, 444 .read = fanotify_read, 445 .write = fanotify_write, 446 .fasync = NULL, 447 .release = fanotify_release, 448 .unlocked_ioctl = fanotify_ioctl, 449 .compat_ioctl = fanotify_ioctl, 450 .llseek = noop_llseek, 451 }; 452 453 static int fanotify_find_path(int dfd, const char __user *filename, 454 struct path *path, unsigned int flags) 455 { 456 int ret; 457 458 pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__, 459 dfd, filename, flags); 460 461 if (filename == NULL) { 462 struct fd f = fdget(dfd); 463 464 ret = -EBADF; 465 if (!f.file) 466 goto out; 467 468 ret = -ENOTDIR; 469 if ((flags & FAN_MARK_ONLYDIR) && 470 !(S_ISDIR(file_inode(f.file)->i_mode))) { 471 fdput(f); 472 goto out; 473 } 474 475 *path = f.file->f_path; 476 path_get(path); 477 fdput(f); 478 } else { 479 unsigned int lookup_flags = 0; 480 481 if (!(flags & FAN_MARK_DONT_FOLLOW)) 482 lookup_flags |= LOOKUP_FOLLOW; 483 if (flags & FAN_MARK_ONLYDIR) 484 lookup_flags |= LOOKUP_DIRECTORY; 485 486 ret = user_path_at(dfd, filename, lookup_flags, path); 487 if (ret) 488 goto out; 489 } 490 491 /* you can only watch an inode if you have read permissions on it */ 492 ret = inode_permission(path->dentry->d_inode, MAY_READ); 493 if (ret) 494 path_put(path); 495 out: 496 return ret; 497 } 498 499 static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, 500 __u32 mask, 501 unsigned int flags, 502 int *destroy) 503 { 504 __u32 oldmask = 0; 505 506 spin_lock(&fsn_mark->lock); 507 if (!(flags & FAN_MARK_IGNORED_MASK)) { 508 __u32 tmask = fsn_mark->mask & ~mask; 509 510 if (flags & FAN_MARK_ONDIR) 511 tmask &= ~FAN_ONDIR; 512 513 oldmask = fsn_mark->mask; 514 fsn_mark->mask = tmask; 515 } else { 516 __u32 tmask = fsn_mark->ignored_mask & ~mask; 517 if (flags & FAN_MARK_ONDIR) 518 tmask &= ~FAN_ONDIR; 519 fsn_mark->ignored_mask = tmask; 520 } 521 *destroy = !(fsn_mark->mask | fsn_mark->ignored_mask); 522 spin_unlock(&fsn_mark->lock); 523 524 return mask & oldmask; 525 } 526 527 static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, 528 struct vfsmount *mnt, __u32 mask, 529 unsigned int flags) 530 { 531 struct fsnotify_mark *fsn_mark = NULL; 532 __u32 removed; 533 int destroy_mark; 534 535 mutex_lock(&group->mark_mutex); 536 fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks, 537 group); 538 if (!fsn_mark) { 539 mutex_unlock(&group->mark_mutex); 540 return -ENOENT; 541 } 542 543 removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, 544 &destroy_mark); 545 if (removed & real_mount(mnt)->mnt_fsnotify_mask) 546 fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks); 547 if (destroy_mark) 548 fsnotify_detach_mark(fsn_mark); 549 mutex_unlock(&group->mark_mutex); 550 if (destroy_mark) 551 fsnotify_free_mark(fsn_mark); 552 553 fsnotify_put_mark(fsn_mark); 554 return 0; 555 } 556 557 static int fanotify_remove_inode_mark(struct fsnotify_group *group, 558 struct inode *inode, __u32 mask, 559 unsigned int flags) 560 { 561 struct fsnotify_mark *fsn_mark = NULL; 562 __u32 removed; 563 int destroy_mark; 564 565 mutex_lock(&group->mark_mutex); 566 fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group); 567 if (!fsn_mark) { 568 mutex_unlock(&group->mark_mutex); 569 return -ENOENT; 570 } 571 572 removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, 573 &destroy_mark); 574 if (removed & inode->i_fsnotify_mask) 575 fsnotify_recalc_mask(inode->i_fsnotify_marks); 576 if (destroy_mark) 577 fsnotify_detach_mark(fsn_mark); 578 mutex_unlock(&group->mark_mutex); 579 if (destroy_mark) 580 fsnotify_free_mark(fsn_mark); 581 582 /* matches the fsnotify_find_mark() */ 583 fsnotify_put_mark(fsn_mark); 584 585 return 0; 586 } 587 588 static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, 589 __u32 mask, 590 unsigned int flags) 591 { 592 __u32 oldmask = -1; 593 594 spin_lock(&fsn_mark->lock); 595 if (!(flags & FAN_MARK_IGNORED_MASK)) { 596 __u32 tmask = fsn_mark->mask | mask; 597 598 if (flags & FAN_MARK_ONDIR) 599 tmask |= FAN_ONDIR; 600 601 oldmask = fsn_mark->mask; 602 fsn_mark->mask = tmask; 603 } else { 604 __u32 tmask = fsn_mark->ignored_mask | mask; 605 if (flags & FAN_MARK_ONDIR) 606 tmask |= FAN_ONDIR; 607 608 fsn_mark->ignored_mask = tmask; 609 if (flags & FAN_MARK_IGNORED_SURV_MODIFY) 610 fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; 611 } 612 spin_unlock(&fsn_mark->lock); 613 614 return mask & ~oldmask; 615 } 616 617 static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, 618 struct inode *inode, 619 struct vfsmount *mnt) 620 { 621 struct fsnotify_mark *mark; 622 int ret; 623 624 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) 625 return ERR_PTR(-ENOSPC); 626 627 mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); 628 if (!mark) 629 return ERR_PTR(-ENOMEM); 630 631 fsnotify_init_mark(mark, group); 632 ret = fsnotify_add_mark_locked(mark, inode, mnt, 0); 633 if (ret) { 634 fsnotify_put_mark(mark); 635 return ERR_PTR(ret); 636 } 637 638 return mark; 639 } 640 641 642 static int fanotify_add_vfsmount_mark(struct fsnotify_group *group, 643 struct vfsmount *mnt, __u32 mask, 644 unsigned int flags) 645 { 646 struct fsnotify_mark *fsn_mark; 647 __u32 added; 648 649 mutex_lock(&group->mark_mutex); 650 fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks, 651 group); 652 if (!fsn_mark) { 653 fsn_mark = fanotify_add_new_mark(group, NULL, mnt); 654 if (IS_ERR(fsn_mark)) { 655 mutex_unlock(&group->mark_mutex); 656 return PTR_ERR(fsn_mark); 657 } 658 } 659 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 660 if (added & ~real_mount(mnt)->mnt_fsnotify_mask) 661 fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks); 662 mutex_unlock(&group->mark_mutex); 663 664 fsnotify_put_mark(fsn_mark); 665 return 0; 666 } 667 668 static int fanotify_add_inode_mark(struct fsnotify_group *group, 669 struct inode *inode, __u32 mask, 670 unsigned int flags) 671 { 672 struct fsnotify_mark *fsn_mark; 673 __u32 added; 674 675 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); 676 677 /* 678 * If some other task has this inode open for write we should not add 679 * an ignored mark, unless that ignored mark is supposed to survive 680 * modification changes anyway. 681 */ 682 if ((flags & FAN_MARK_IGNORED_MASK) && 683 !(flags & FAN_MARK_IGNORED_SURV_MODIFY) && 684 (atomic_read(&inode->i_writecount) > 0)) 685 return 0; 686 687 mutex_lock(&group->mark_mutex); 688 fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group); 689 if (!fsn_mark) { 690 fsn_mark = fanotify_add_new_mark(group, inode, NULL); 691 if (IS_ERR(fsn_mark)) { 692 mutex_unlock(&group->mark_mutex); 693 return PTR_ERR(fsn_mark); 694 } 695 } 696 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 697 if (added & ~inode->i_fsnotify_mask) 698 fsnotify_recalc_mask(inode->i_fsnotify_marks); 699 mutex_unlock(&group->mark_mutex); 700 701 fsnotify_put_mark(fsn_mark); 702 return 0; 703 } 704 705 /* fanotify syscalls */ 706 SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) 707 { 708 struct fsnotify_group *group; 709 int f_flags, fd; 710 struct user_struct *user; 711 struct fanotify_event_info *oevent; 712 713 pr_debug("%s: flags=%d event_f_flags=%d\n", 714 __func__, flags, event_f_flags); 715 716 if (!capable(CAP_SYS_ADMIN)) 717 return -EPERM; 718 719 #ifdef CONFIG_AUDITSYSCALL 720 if (flags & ~(FAN_ALL_INIT_FLAGS | FAN_ENABLE_AUDIT)) 721 #else 722 if (flags & ~FAN_ALL_INIT_FLAGS) 723 #endif 724 return -EINVAL; 725 726 if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS) 727 return -EINVAL; 728 729 switch (event_f_flags & O_ACCMODE) { 730 case O_RDONLY: 731 case O_RDWR: 732 case O_WRONLY: 733 break; 734 default: 735 return -EINVAL; 736 } 737 738 user = get_current_user(); 739 if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) { 740 free_uid(user); 741 return -EMFILE; 742 } 743 744 f_flags = O_RDWR | FMODE_NONOTIFY; 745 if (flags & FAN_CLOEXEC) 746 f_flags |= O_CLOEXEC; 747 if (flags & FAN_NONBLOCK) 748 f_flags |= O_NONBLOCK; 749 750 /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */ 751 group = fsnotify_alloc_group(&fanotify_fsnotify_ops); 752 if (IS_ERR(group)) { 753 free_uid(user); 754 return PTR_ERR(group); 755 } 756 757 group->fanotify_data.user = user; 758 atomic_inc(&user->fanotify_listeners); 759 760 oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL); 761 if (unlikely(!oevent)) { 762 fd = -ENOMEM; 763 goto out_destroy_group; 764 } 765 group->overflow_event = &oevent->fse; 766 767 if (force_o_largefile()) 768 event_f_flags |= O_LARGEFILE; 769 group->fanotify_data.f_flags = event_f_flags; 770 init_waitqueue_head(&group->fanotify_data.access_waitq); 771 INIT_LIST_HEAD(&group->fanotify_data.access_list); 772 switch (flags & FAN_ALL_CLASS_BITS) { 773 case FAN_CLASS_NOTIF: 774 group->priority = FS_PRIO_0; 775 break; 776 case FAN_CLASS_CONTENT: 777 group->priority = FS_PRIO_1; 778 break; 779 case FAN_CLASS_PRE_CONTENT: 780 group->priority = FS_PRIO_2; 781 break; 782 default: 783 fd = -EINVAL; 784 goto out_destroy_group; 785 } 786 787 if (flags & FAN_UNLIMITED_QUEUE) { 788 fd = -EPERM; 789 if (!capable(CAP_SYS_ADMIN)) 790 goto out_destroy_group; 791 group->max_events = UINT_MAX; 792 } else { 793 group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS; 794 } 795 796 if (flags & FAN_UNLIMITED_MARKS) { 797 fd = -EPERM; 798 if (!capable(CAP_SYS_ADMIN)) 799 goto out_destroy_group; 800 group->fanotify_data.max_marks = UINT_MAX; 801 } else { 802 group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS; 803 } 804 805 if (flags & FAN_ENABLE_AUDIT) { 806 fd = -EPERM; 807 if (!capable(CAP_AUDIT_WRITE)) 808 goto out_destroy_group; 809 group->fanotify_data.audit = true; 810 } 811 812 fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags); 813 if (fd < 0) 814 goto out_destroy_group; 815 816 return fd; 817 818 out_destroy_group: 819 fsnotify_destroy_group(group); 820 return fd; 821 } 822 823 SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags, 824 __u64, mask, int, dfd, 825 const char __user *, pathname) 826 { 827 struct inode *inode = NULL; 828 struct vfsmount *mnt = NULL; 829 struct fsnotify_group *group; 830 struct fd f; 831 struct path path; 832 u32 valid_mask = FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD; 833 int ret; 834 835 pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", 836 __func__, fanotify_fd, flags, dfd, pathname, mask); 837 838 /* we only use the lower 32 bits as of right now. */ 839 if (mask & ((__u64)0xffffffff << 32)) 840 return -EINVAL; 841 842 if (flags & ~FAN_ALL_MARK_FLAGS) 843 return -EINVAL; 844 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { 845 case FAN_MARK_ADD: /* fallthrough */ 846 case FAN_MARK_REMOVE: 847 if (!mask) 848 return -EINVAL; 849 break; 850 case FAN_MARK_FLUSH: 851 if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH)) 852 return -EINVAL; 853 break; 854 default: 855 return -EINVAL; 856 } 857 858 if (mask & FAN_ONDIR) { 859 flags |= FAN_MARK_ONDIR; 860 mask &= ~FAN_ONDIR; 861 } 862 863 if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) 864 valid_mask |= FAN_ALL_PERM_EVENTS; 865 866 if (mask & ~valid_mask) 867 return -EINVAL; 868 869 f = fdget(fanotify_fd); 870 if (unlikely(!f.file)) 871 return -EBADF; 872 873 /* verify that this is indeed an fanotify instance */ 874 ret = -EINVAL; 875 if (unlikely(f.file->f_op != &fanotify_fops)) 876 goto fput_and_out; 877 group = f.file->private_data; 878 879 /* 880 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not 881 * allowed to set permissions events. 882 */ 883 ret = -EINVAL; 884 if (mask & FAN_ALL_PERM_EVENTS && 885 group->priority == FS_PRIO_0) 886 goto fput_and_out; 887 888 if (flags & FAN_MARK_FLUSH) { 889 ret = 0; 890 if (flags & FAN_MARK_MOUNT) 891 fsnotify_clear_vfsmount_marks_by_group(group); 892 else 893 fsnotify_clear_inode_marks_by_group(group); 894 goto fput_and_out; 895 } 896 897 ret = fanotify_find_path(dfd, pathname, &path, flags); 898 if (ret) 899 goto fput_and_out; 900 901 /* inode held in place by reference to path; group by fget on fd */ 902 if (!(flags & FAN_MARK_MOUNT)) 903 inode = path.dentry->d_inode; 904 else 905 mnt = path.mnt; 906 907 /* create/update an inode mark */ 908 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) { 909 case FAN_MARK_ADD: 910 if (flags & FAN_MARK_MOUNT) 911 ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags); 912 else 913 ret = fanotify_add_inode_mark(group, inode, mask, flags); 914 break; 915 case FAN_MARK_REMOVE: 916 if (flags & FAN_MARK_MOUNT) 917 ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags); 918 else 919 ret = fanotify_remove_inode_mark(group, inode, mask, flags); 920 break; 921 default: 922 ret = -EINVAL; 923 } 924 925 path_put(&path); 926 fput_and_out: 927 fdput(f); 928 return ret; 929 } 930 931 #ifdef CONFIG_COMPAT 932 COMPAT_SYSCALL_DEFINE6(fanotify_mark, 933 int, fanotify_fd, unsigned int, flags, 934 __u32, mask0, __u32, mask1, int, dfd, 935 const char __user *, pathname) 936 { 937 return sys_fanotify_mark(fanotify_fd, flags, 938 #ifdef __BIG_ENDIAN 939 ((__u64)mask0 << 32) | mask1, 940 #else 941 ((__u64)mask1 << 32) | mask0, 942 #endif 943 dfd, pathname); 944 } 945 #endif 946 947 /* 948 * fanotify_user_setup - Our initialization function. Note that we cannot return 949 * error because we have compiled-in VFS hooks. So an (unlikely) failure here 950 * must result in panic(). 951 */ 952 static int __init fanotify_user_setup(void) 953 { 954 fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC); 955 fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC); 956 if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) { 957 fanotify_perm_event_cachep = 958 KMEM_CACHE(fanotify_perm_event_info, SLAB_PANIC); 959 } 960 961 return 0; 962 } 963 device_initcall(fanotify_user_setup); 964