// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>

#include "fanotify.h"

/* Two paths are equal iff they refer to the same mount and the same dentry. */
static bool fanotify_path_equal(struct path *p1, struct path *p2)
{
	return p1->mnt == p2->mnt && p1->dentry == p2->dentry;
}

static inline bool fanotify_fsid_equal(__kernel_fsid_t *fsid1,
				       __kernel_fsid_t *fsid2)
{
	return fsid1->val[0] == fsid2->val[0] && fsid1->val[1] == fsid2->val[1];
}

/*
 * Compare two encoded file handles.  Handles with different type or length
 * never match, and a handle that failed encoding (FILEID_INVALID) matches
 * nothing, so such events are never merged.
 */
static bool fanotify_fh_equal(struct fanotify_fh *fh1,
			      struct fanotify_fh *fh2)
{
	if (fh1->type != fh2->type || fh1->len != fh2->len)
		return false;

	/* Do not merge events if we failed to encode fh */
	if (fh1->type == FILEID_INVALID)
		return false;

	return !fh1->len ||
		!memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len);
}

static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1,
				     struct fanotify_fid_event *ffe2)
{
	/* Do not merge fid events without object fh */
	if (!ffe1->object_fh.len)
		return false;

	return fanotify_fsid_equal(&ffe1->fsid, &ffe2->fsid) &&
		fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh);
}

static bool fanotify_name_event_equal(struct fanotify_name_event *fne1,
				      struct fanotify_name_event *fne2)
{
	/*
	 * Do not merge name events without dir fh.
	 * FAN_DIR_MODIFY does not encode object fh, so it may be empty.
	 */
	if (!fne1->dir_fh.len)
		return false;

	if (fne1->name_len != fne2->name_len ||
	    !fanotify_fh_equal(&fne1->dir_fh, &fne2->dir_fh))
		return false;

	return !memcmp(fne1->name, fne2->name, fne1->name_len);
}

/*
 * Decide whether a newly reported event may be merged into an already
 * queued one.  Events must have the same queue id (objectid), type and
 * reporting pid, and then compare equal under the type-specific test.
 */
static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->objectid != new_fsn->objectid ||
	    old->type != new->type || old->pid != new->pid)
		return false;

	switch (old->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		return fanotify_path_equal(fanotify_event_path(old),
					   fanotify_event_path(new));
	case FANOTIFY_EVENT_TYPE_FID:
		/*
		 * We want to merge many dirent events in the same dir (i.e.
		 * creates/unlinks/renames), but we do not want to merge dirent
		 * events referring to subdirs with dirent events referring to
		 * non subdirs, otherwise, user won't be able to tell from a
		 * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+
		 * unlink pair or rmdir+create pair of events.
		 */
		if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR))
			return false;

		return fanotify_fid_event_equal(FANOTIFY_FE(old),
						FANOTIFY_FE(new));
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		return fanotify_name_event_equal(FANOTIFY_NE(old),
						 FANOTIFY_NE(new));
	default:
		WARN_ON_ONCE(1);
	}

	return false;
}

/*
 * Try to merge @event into an event already queued on @list by OR-ing the
 * masks together.  Returns 1 if the event was merged, 0 otherwise.
 * The notification list must be locked by the caller.
 */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;
	struct fanotify_event *new;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
	new = FANOTIFY_E(event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(new->mask))
		return 0;

	/* Most recently queued events are the most likely merge candidates. */
	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			FANOTIFY_E(test_event)->mask |= new->mask;
			return 1;
		}
	}

	return 0;
}

/*
 * Wait for response to permission event. The function also takes care of
 * freeing the permission event (or offloads that in case the wait is canceled
 * by a signal). The function returns 0 in case access got allowed by userspace,
 * -EPERM in case userspace disallowed the access, and -ERESTARTSYS in case
 * the wait got interrupted by a signal.
 */
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = wait_event_killable(group->fanotify_data.access_waitq,
				  event->state == FAN_EVENT_ANSWERED);
	/* Signal pending? */
	if (ret < 0) {
		spin_lock(&group->notification_lock);
		/* Event reported to userspace and no answer yet? */
		if (event->state == FAN_EVENT_REPORTED) {
			/* Event will get freed once userspace answers to it */
			event->state = FAN_EVENT_CANCELED;
			spin_unlock(&group->notification_lock);
			return ret;
		}
		/* Event not yet reported? Just remove it. */
		if (event->state == FAN_EVENT_INIT)
			fsnotify_remove_queued_event(group, &event->fae.fse);
		/*
		 * Event may be also answered in case signal delivery raced
		 * with wakeup. In that case we have nothing to do besides
		 * freeing the event and reporting error.
		 */
		spin_unlock(&group->notification_lock);
		goto out;
	}

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);
out:
	fsnotify_destroy_event(group, &event->fae.fse);

	return ret;
}

/*
 * This function returns a mask for an event that only contains the flags
 * that have been specifically requested by the user. Flags that may have
 * been included within the event mask, but have not been explicitly
 * requested by the user, will not be present in the returned mask.
 */
static u32 fanotify_group_event_mask(struct fsnotify_group *group,
				     struct fsnotify_iter_info *iter_info,
				     u32 event_mask, const void *data,
				     int data_type)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	__u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS;
	const struct path *path = fsnotify_data_path(data, data_type);
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	if (!FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Do we have path to open a file descriptor? */
		if (!path)
			return 0;
		/* Path type events are only relevant for files and dirs */
		if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
			return 0;
	}

	/* Accumulate interest and ignore masks over all marks in the iter. */
	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/*
		 * If the event is on dir and this mark doesn't care about
		 * events on dir, don't send it!
		 */
		if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR))
			continue;

		/*
		 * If the event is for a child and this mark doesn't care about
		 * events on a child, don't send it!
		 */
		if (event_mask & FS_EVENT_ON_CHILD &&
		    (type != FSNOTIFY_OBJ_TYPE_INODE ||
		     !(mark->mask & FS_EVENT_ON_CHILD)))
			continue;

		marks_mask |= mark->mask;
		marks_ignored_mask |= mark->ignored_mask;
	}

	test_mask = event_mask & marks_mask & ~marks_ignored_mask;

	/*
	 * For dirent modification events (create/delete/move) that do not carry
	 * the child entry name information, we report FAN_ONDIR for mkdir/rmdir
	 * so user can differentiate them from creat/unlink.
	 *
	 * For backward compatibility and consistency, do not report FAN_ONDIR
	 * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR
	 * to user in FAN_REPORT_FID mode for all event types.
	 */
	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Do not report FAN_ONDIR without any event */
		if (!(test_mask & ~FAN_ONDIR))
			return 0;
	} else {
		user_mask &= ~FAN_ONDIR;
	}

	return test_mask & user_mask;
}

/*
 * Encode a file handle for @inode into @fh, using the inline buffer when the
 * handle fits and a kmalloc'ed external buffer otherwise.  On any encoding or
 * allocation failure the event is still reported, just without an identifier:
 * fh->type is set to FILEID_INVALID and fh->len to 0.
 */
static void fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
			       gfp_t gfp)
{
	int dwords, type, bytes = 0;
	char *ext_buf = NULL;
	void *buf = fh->buf;
	int err;

	if (!inode)
		goto out;

	/* First call with NULL buf just queries the required size (dwords). */
	dwords = 0;
	err = -ENOENT;
	type = exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);
	if (!dwords)
		goto out_err;

	bytes = dwords << 2;
	if (bytes > FANOTIFY_INLINE_FH_LEN) {
		/* Treat failure to allocate fh as failure to allocate event */
		err = -ENOMEM;
		ext_buf = kmalloc(bytes, gfp);
		if (!ext_buf)
			goto out_err;

		*fanotify_fh_ext_buf_ptr(fh) = ext_buf;
		buf = ext_buf;
	}

	type = exportfs_encode_inode_fh(inode, buf, &dwords, NULL);
	err = -EINVAL;
	if (!type || type == FILEID_INVALID || bytes != dwords << 2)
		goto out_err;

	fh->type = type;
	fh->len = bytes;

	return;

out_err:
	pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n",
			    type, bytes, err);
	kfree(ext_buf);
	*fanotify_fh_ext_buf_ptr(fh) = NULL;
out:
	/* Report the event without a file identifier on encode error */
	fh->type = FILEID_INVALID;
	fh->len = 0;
}

/*
 * The inode to use as identifier when reporting fid depends on the event.
 * Report the modified directory inode on dirent modification events.
 * Report the "victim" inode otherwise.
 * For example:
 * FS_ATTRIB reports the child inode even if reported on a watched parent.
 * FS_CREATE reports the modified dir inode and not the created inode.
 */
static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask,
					const void *data, int data_type)
{
	if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return to_tell;

	return (struct inode *)fsnotify_data_inode(data, data_type);
}

/*
 * Allocate and initialize the event variant matching the event mask and the
 * group flags: a permission event, a name event (FAN_DIR_MODIFY), a fid event
 * (FAN_REPORT_FID) or a plain path event.  Returns NULL on allocation failure.
 */
struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
					    struct inode *inode, u32 mask,
					    const void *data, int data_type,
					    const struct qstr *file_name,
					    __kernel_fsid_t *fsid)
{
	struct fanotify_event *event = NULL;
	struct fanotify_fid_event *ffe = NULL;
	struct fanotify_name_event *fne = NULL;
	gfp_t gfp = GFP_KERNEL_ACCOUNT;
	struct inode *id = fanotify_fid_inode(inode, mask, data, data_type);
	const struct path *path = fsnotify_data_path(data, data_type);

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short. For the limited size queues, avoid OOM killer in the
	 * target monitoring memcg as it may have security repercussion.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;
	else
		gfp |= __GFP_RETRY_MAYFAIL;

	/* Whoever is interested in the event, pays for the allocation. */
	memalloc_use_memcg(group->memcg);

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			goto out;

		event = &pevent->fae;
		event->type = FANOTIFY_EVENT_TYPE_PATH_PERM;
		pevent->response = 0;
		pevent->state = FAN_EVENT_INIT;
		goto init;
	}

	/*
	 * For FAN_DIR_MODIFY event, we report the fid of the directory and
	 * the name of the modified entry.
	 * Allocate an fanotify_name_event struct and copy the name.
	 */
	if (mask & FAN_DIR_MODIFY && !(WARN_ON_ONCE(!file_name))) {
		fne = kmalloc(sizeof(*fne) + file_name->len + 1, gfp);
		if (!fne)
			goto out;

		event = &fne->fae;
		event->type = FANOTIFY_EVENT_TYPE_FID_NAME;
		fne->name_len = file_name->len;
		strcpy(fne->name, file_name->name);
		goto init;
	}

	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp);
		if (!ffe)
			goto out;

		event = &ffe->fae;
		event->type = FANOTIFY_EVENT_TYPE_FID;
	} else {
		struct fanotify_path_event *pevent;

		pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp);
		if (!pevent)
			goto out;

		event = &pevent->fae;
		event->type = FANOTIFY_EVENT_TYPE_PATH;
	}

init:
	/*
	 * Use the victim inode instead of the watching inode as the id for
	 * event queue, so event reported on parent is merged with event
	 * reported on child when both directory and child watches exist.
	 */
	fsnotify_init_event(&event->fse, (unsigned long)id);
	event->mask = mask;
	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		event->pid = get_pid(task_pid(current));
	else
		event->pid = get_pid(task_tgid(current));

	if (fsid && fanotify_event_fsid(event))
		*fanotify_event_fsid(event) = *fsid;

	if (fanotify_event_object_fh(event))
		fanotify_encode_fh(fanotify_event_object_fh(event), id, gfp);

	if (fanotify_event_dir_fh(event))
		fanotify_encode_fh(fanotify_event_dir_fh(event), id, gfp);

	if (fanotify_event_has_path(event)) {
		struct path *p = fanotify_event_path(event);

		if (path) {
			*p = *path;
			path_get(path);
		} else {
			p->mnt = NULL;
			p->dentry = NULL;
		}
	}
out:
	memalloc_unuse_memcg();
	return event;
}

/*
 * Get cached fsid of the filesystem containing the object from any connector.
 * All connectors are supposed to have the same fsid, but we do not verify that
 * here.
 */
static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
{
	int type;
	__kernel_fsid_t fsid = {};

	fsnotify_foreach_obj_type(type) {
		struct fsnotify_mark_connector *conn;

		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;

		conn = READ_ONCE(iter_info->marks[type]->connector);
		/* Mark is just getting destroyed or created? */
		if (!conn)
			continue;
		if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID))
			continue;
		/* Pairs with smp_wmb() in fsnotify_add_mark_list() */
		smp_rmb();
		fsid = conn->fsid;
		/* A zero fsid with HAS_FSID set is unexpected; try next mark */
		if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
			continue;
		return fsid;
	}

	return fsid;
}

/*
 * Main fsnotify backend callback: filter the event mask against the group's
 * marks, allocate and queue (or merge) a fanotify event, and for permission
 * events wait for the userspace response.
 */
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 u32 mask, const void *data, int data_type,
				 const struct qstr *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event *event;
	struct fsnotify_event *fsn_event;
	__kernel_fsid_t fsid = {};

	/* fanotify API bits must mirror the fsnotify bits 1:1. */
	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(FAN_CREATE != FS_CREATE);
	BUILD_BUG_ON(FAN_DELETE != FS_DELETE);
	BUILD_BUG_ON(FAN_DIR_MODIFY != FS_DIR_MODIFY);
	BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
	BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
	BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);

	BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 20);

	mask = fanotify_group_event_mask(group, iter_info, mask, data,
					 data_type);
	if (!mask)
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		fsid = fanotify_get_fsid(iter_info);
		/* Racing with mark destruction or creation? */
		if (!fsid.val[0] && !fsid.val[1])
			return 0;
	}

	event = fanotify_alloc_event(group, inode, mask, data, data_type,
				     file_name, &fsid);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PERM(event),
					    iter_info);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

/* Drop the per-user listener accounting taken at group creation time. */
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

static void fanotify_free_path_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_path_event_cachep, FANOTIFY_PE(event));
}

static void fanotify_free_perm_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_perm_event_cachep, FANOTIFY_PERM(event));
}

static void fanotify_free_fid_event(struct fanotify_event *event)
{
	struct fanotify_fid_event *ffe = FANOTIFY_FE(event);

	/* Free the external fh buffer if one was allocated by encode_fh */
	if (fanotify_fh_has_ext_buf(&ffe->object_fh))
		kfree(fanotify_fh_ext_buf(&ffe->object_fh));
	kmem_cache_free(fanotify_fid_event_cachep, ffe);
}

static void fanotify_free_name_event(struct fanotify_event *event)
{
	struct fanotify_name_event *fne = FANOTIFY_NE(event);

	/* Free the external fh buffer if one was allocated by encode_fh */
	if (fanotify_fh_has_ext_buf(&fne->dir_fh))
		kfree(fanotify_fh_ext_buf(&fne->dir_fh));
	kfree(fne);
}

/* Release an event's pid reference and free the type-specific struct. */
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event *event;

	event = FANOTIFY_E(fsn_event);
	put_pid(event->pid);
	switch (event->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		fanotify_free_path_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_PATH_PERM:
		fanotify_free_perm_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID:
		fanotify_free_fid_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		fanotify_free_name_event(event);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};