// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>

#include "fanotify.h"

static bool fanotify_path_equal(struct path *p1, struct path *p2)
{
	return p1->mnt == p2->mnt && p1->dentry == p2->dentry;
}

static inline bool fanotify_fsid_equal(__kernel_fsid_t *fsid1,
				       __kernel_fsid_t *fsid2)
{
	return fsid1->val[0] == fsid2->val[0] && fsid1->val[1] == fsid2->val[1];
}

static bool fanotify_fh_equal(struct fanotify_fh *fh1,
			      struct fanotify_fh *fh2)
{
	if (fh1->type != fh2->type || fh1->len != fh2->len)
		return false;

	return !fh1->len ||
		!memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len);
}

static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1,
				     struct fanotify_fid_event *ffe2)
{
	/* Do not merge fid events without object fh */
	if (!ffe1->object_fh.len)
		return false;

	return fanotify_fsid_equal(&ffe1->fsid, &ffe2->fsid) &&
		fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh);
}

static bool fanotify_info_equal(struct fanotify_info *info1,
				struct fanotify_info *info2)
{
	if (info1->dir_fh_totlen != info2->dir_fh_totlen ||
	    info1->file_fh_totlen != info2->file_fh_totlen ||
	    info1->name_len != info2->name_len)
		return false;

	if (info1->dir_fh_totlen &&
	    !fanotify_fh_equal(fanotify_info_dir_fh(info1),
			       fanotify_info_dir_fh(info2)))
		return false;

	if (info1->file_fh_totlen &&
	    !fanotify_fh_equal(fanotify_info_file_fh(info1),
			       fanotify_info_file_fh(info2)))
		return false;

	return !info1->name_len ||
		!memcmp(fanotify_info_name(info1), fanotify_info_name(info2),
			info1->name_len);
}

static bool fanotify_name_event_equal(struct fanotify_name_event *fne1,
				      struct fanotify_name_event *fne2)
{
	struct fanotify_info *info1 = &fne1->info;
	struct fanotify_info *info2 = &fne2->info;

	/* Do not merge name events without dir fh */
	if (!info1->dir_fh_totlen)
		return false;

	if (!fanotify_fsid_equal(&fne1->fsid, &fne2->fsid))
		return false;

	return fanotify_info_equal(info1, info2);
}

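/*
 * Events are merged only if they carry the same queue object id, event
 * type and pid, and their type specific payload (path, fid or dir fid +
 * name) compares equal.
 */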
static bool fanotify_should_merge(struct fsnotify_event *old_fsn,
				  struct fsnotify_event *new_fsn)
{
	struct fanotify_event *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->objectid != new_fsn->objectid ||
	    old->type != new->type || old->pid != new->pid)
		return false;

	/*
	 * We want to merge many dirent events in the same dir (i.e.
	 * creates/unlinks/renames), but we do not want to merge dirent
	 * events referring to subdirs with dirent events referring to
	 * non subdirs, otherwise, user won't be able to tell from a
	 * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+
	 * unlink pair or rmdir+create pair of events.
	 */
	if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR))
		return false;

	switch (old->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		return fanotify_path_equal(fanotify_event_path(old),
					   fanotify_event_path(new));
	case FANOTIFY_EVENT_TYPE_FID:
		return fanotify_fid_event_equal(FANOTIFY_FE(old),
						FANOTIFY_FE(new));
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		return fanotify_name_event_equal(FANOTIFY_NE(old),
						 FANOTIFY_NE(new));
	default:
		WARN_ON_ONCE(1);
	}

	return false;
}

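/*
 * Try to merge the new event into an event already on the notification
 * list.  Returns 1 if it was merged (only the event mask is ORed into the
 * existing event and the caller frees the new one), 0 otherwise.
 */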
/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;
	struct fanotify_event *new;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
	new = FANOTIFY_E(event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(new->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (fanotify_should_merge(test_event, event)) {
			FANOTIFY_E(test_event)->mask |= new->mask;
			return 1;
		}
	}

	return 0;
}

/*
 * Wait for response to permission event. The function also takes care of
 * freeing the permission event (or offloads that in case the wait is canceled
 * by a signal). The function returns 0 in case access got allowed by userspace,
 * -EPERM in case userspace disallowed the access, and -ERESTARTSYS in case
 * the wait got interrupted by a signal.
 */
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = wait_event_killable(group->fanotify_data.access_waitq,
				  event->state == FAN_EVENT_ANSWERED);
	/* Signal pending? */
	if (ret < 0) {
		spin_lock(&group->notification_lock);
		/* Event reported to userspace and no answer yet? */
		if (event->state == FAN_EVENT_REPORTED) {
			/* Event will get freed once userspace answers to it */
			event->state = FAN_EVENT_CANCELED;
			spin_unlock(&group->notification_lock);
			return ret;
		}
		/* Event not yet reported? Just remove it. */
		if (event->state == FAN_EVENT_INIT)
			fsnotify_remove_queued_event(group, &event->fae.fse);
		/*
		 * The event may also have been answered in case signal
		 * delivery raced with the wakeup. In that case we have nothing
		 * to do besides freeing the event and reporting the error.
		 */
		spin_unlock(&group->notification_lock);
		goto out;
	}

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);
out:
	fsnotify_destroy_event(group, &event->fae.fse);

	return ret;
}

/*
 * This function returns a mask for an event that only contains the flags
 * that have been specifically requested by the user. Flags that may have
 * been included within the event mask, but have not been explicitly
 * requested by the user, will not be present in the returned mask.
 */
static u32 fanotify_group_event_mask(struct fsnotify_group *group,
				     struct fsnotify_iter_info *iter_info,
				     u32 event_mask, const void *data,
				     int data_type, struct inode *dir)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	__u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS |
				     FANOTIFY_EVENT_FLAGS;
	const struct path *path = fsnotify_data_path(data, data_type);
	unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	if (!fid_mode) {
		/* Do we have a path to open a file descriptor? */
		if (!path)
			return 0;
		/* Path type events are only relevant for files and dirs */
		if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
			return 0;
	} else if (!(fid_mode & FAN_REPORT_FID)) {
		/* Do we have a directory inode to report? */
		if (!dir && !(event_mask & FS_ISDIR))
			return 0;
	}

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];

		/* Apply ignore mask regardless of ISDIR and ON_CHILD flags */
		marks_ignored_mask |= mark->ignored_mask;

		/*
		 * If the event is on dir and this mark doesn't care about
		 * events on dir, don't send it!
		 */
		if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR))
			continue;

		/*
		 * If the event is on a child and this mark is on a parent not
		 * watching children, don't send it!
		 */
		if (type == FSNOTIFY_OBJ_TYPE_PARENT &&
		    !(mark->mask & FS_EVENT_ON_CHILD))
			continue;

		marks_mask |= mark->mask;
	}

	test_mask = event_mask & marks_mask & ~marks_ignored_mask;

	/*
	 * For dirent modification events (create/delete/move) that do not carry
	 * the child entry name information, we report FAN_ONDIR for mkdir/rmdir
	 * so user can differentiate them from creat/unlink.
	 *
	 * For backward compatibility and consistency, do not report FAN_ONDIR
	 * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR
	 * to user in fid mode for all event types.
	 *
	 * We never report FAN_EVENT_ON_CHILD to user, but we do pass it in to
	 * fanotify_alloc_event() when group is reporting fid as indication
	 * that event happened on child.
	 */
	if (fid_mode) {
		/* Do not report event flags without any event */
		if (!(test_mask & ~FANOTIFY_EVENT_FLAGS))
			return 0;
	} else {
		user_mask &= ~FANOTIFY_EVENT_FLAGS;
	}

	return test_mask & user_mask;
}

/*
 * Check size needed to encode fanotify_fh.
 *
 * Return size of encoded fh without fanotify_fh header.
 * Return 0 on failure to encode.
 */
static int fanotify_encode_fh_len(struct inode *inode)
{
	int dwords = 0;

	if (!inode)
		return 0;

	exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);

	return dwords << 2;
}

/*
 * Encode fanotify_fh.
 *
 * Return total size of encoded fh including fanotify_fh header.
 * Return 0 on failure to encode.
 */
static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
			      unsigned int fh_len, gfp_t gfp)
{
	int dwords, type = 0;
	char *ext_buf = NULL;
	void *buf = fh->buf;
	int err;

	fh->type = FILEID_ROOT;
	fh->len = 0;
	fh->flags = 0;
	if (!inode)
		return 0;

	/*
	 * !gfp means preallocated variable size fh, but fh_len could
	 * be zero in that case if encoding fh len failed.
	 */
	err = -ENOENT;
	if (fh_len < 4 || WARN_ON_ONCE(fh_len % 4))
		goto out_err;

	/* No external buffer in a variable size allocated fh */
	if (gfp && fh_len > FANOTIFY_INLINE_FH_LEN) {
		/* Treat failure to allocate fh as failure to encode fh */
		err = -ENOMEM;
		ext_buf = kmalloc(fh_len, gfp);
		if (!ext_buf)
			goto out_err;

		*fanotify_fh_ext_buf_ptr(fh) = ext_buf;
		buf = ext_buf;
		fh->flags |= FANOTIFY_FH_FLAG_EXT_BUF;
	}

	dwords = fh_len >> 2;
	type = exportfs_encode_inode_fh(inode, buf, &dwords, NULL);
	err = -EINVAL;
	if (!type || type == FILEID_INVALID || fh_len != dwords << 2)
		goto out_err;

	fh->type = type;
	fh->len = fh_len;

	return FANOTIFY_FH_HDR_LEN + fh_len;

out_err:
	pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n",
			    type, fh_len, err);
	kfree(ext_buf);
	*fanotify_fh_ext_buf_ptr(fh) = NULL;
	/* Report the event without a file identifier on encode error */
	fh->type = FILEID_INVALID;
	fh->len = 0;
	return 0;
}

/*
 * The inode to use as identifier when reporting fid depends on the event.
 * Report the modified directory inode on dirent modification events.
 * Report the "victim" inode otherwise.
 * For example:
 * FS_ATTRIB reports the child inode even if reported on a watched parent.
 * FS_CREATE reports the modified dir inode and not the created inode.
 */
static struct inode *fanotify_fid_inode(u32 event_mask, const void *data,
					int data_type, struct inode *dir)
{
	if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return dir;

	return fsnotify_data_inode(data, data_type);
}

/*
 * The inode to use as identifier when reporting dir fid depends on the event.
 * Report the modified directory inode on dirent modification events.
 * Report the "victim" inode if "victim" is a directory.
 * Report the parent inode if "victim" is not a directory and event is
 * reported to parent.
 * Otherwise, do not report dir fid.
 */
static struct inode *fanotify_dfid_inode(u32 event_mask, const void *data,
					 int data_type, struct inode *dir)
{
	struct inode *inode = fsnotify_data_inode(data, data_type);

	if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return dir;

	if (S_ISDIR(inode->i_mode))
		return inode;

	return dir;
}

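/*
 * Allocation helpers for the different event types.  Path and permission
 * events take a reference on the reported path; fid and name events store
 * encoded file handles (and, for name events, optionally a child fh and a
 * file name) instead.
 */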
static struct fanotify_event *fanotify_alloc_path_event(const struct path *path,
							 gfp_t gfp)
{
	struct fanotify_path_event *pevent;

	pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp);
	if (!pevent)
		return NULL;

	pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH;
	pevent->path = *path;
	path_get(path);

	return &pevent->fae;
}

static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path,
							 gfp_t gfp)
{
	struct fanotify_perm_event *pevent;

	pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
	if (!pevent)
		return NULL;

	pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH_PERM;
	pevent->response = 0;
	pevent->state = FAN_EVENT_INIT;
	pevent->path = *path;
	path_get(path);

	return &pevent->fae;
}

static struct fanotify_event *fanotify_alloc_fid_event(struct inode *id,
							__kernel_fsid_t *fsid,
							gfp_t gfp)
{
	struct fanotify_fid_event *ffe;

	ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp);
	if (!ffe)
		return NULL;

	ffe->fae.type = FANOTIFY_EVENT_TYPE_FID;
	ffe->fsid = *fsid;
	fanotify_encode_fh(&ffe->object_fh, id, fanotify_encode_fh_len(id),
			   gfp);

	return &ffe->fae;
}

static struct fanotify_event *fanotify_alloc_name_event(struct inode *id,
							 __kernel_fsid_t *fsid,
							 const struct qstr *file_name,
							 struct inode *child,
							 gfp_t gfp)
{
	struct fanotify_name_event *fne;
	struct fanotify_info *info;
	struct fanotify_fh *dfh, *ffh;
	unsigned int dir_fh_len = fanotify_encode_fh_len(id);
	unsigned int child_fh_len = fanotify_encode_fh_len(child);
	unsigned int size;

	size = sizeof(*fne) + FANOTIFY_FH_HDR_LEN + dir_fh_len;
	if (child_fh_len)
		size += FANOTIFY_FH_HDR_LEN + child_fh_len;
	if (file_name)
		size += file_name->len + 1;
	fne = kmalloc(size, gfp);
	if (!fne)
		return NULL;

	fne->fae.type = FANOTIFY_EVENT_TYPE_FID_NAME;
	fne->fsid = *fsid;
	info = &fne->info;
	fanotify_info_init(info);
	dfh = fanotify_info_dir_fh(info);
	info->dir_fh_totlen = fanotify_encode_fh(dfh, id, dir_fh_len, 0);
	if (child_fh_len) {
		ffh = fanotify_info_file_fh(info);
		info->file_fh_totlen = fanotify_encode_fh(ffh, child, child_fh_len, 0);
	}
	if (file_name)
		fanotify_info_copy_name(info, file_name);

	pr_debug("%s: ino=%lu size=%u dir_fh_len=%u child_fh_len=%u name_len=%u name='%.*s'\n",
		 __func__, id->i_ino, size, dir_fh_len, child_fh_len,
		 info->name_len, info->name_len, fanotify_info_name(info));

	return &fne->fae;
}

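/*
 * Allocate an event of the type matching the event mask and group flags:
 * a permission event for permission event masks, a variable size name
 * event when reporting a directory fid with child fid and/or name, a fid
 * event for other fid reporting groups and a path event otherwise.  The
 * allocation is charged to the memcg of the listener group.
 */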
static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
						   u32 mask, const void *data,
						   int data_type, struct inode *dir,
						   const struct qstr *file_name,
						   __kernel_fsid_t *fsid)
{
	struct fanotify_event *event = NULL;
	gfp_t gfp = GFP_KERNEL_ACCOUNT;
	struct inode *id = fanotify_fid_inode(mask, data, data_type, dir);
	struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir);
	const struct path *path = fsnotify_data_path(data, data_type);
	unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
	struct mem_cgroup *old_memcg;
	struct inode *child = NULL;
	bool name_event = false;

	if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) {
		/*
		 * With both flags FAN_REPORT_DIR_FID and FAN_REPORT_FID, we
		 * report the child fid for events reported on a non-dir child
		 * in addition to reporting the parent fid and maybe child name.
		 */
		if ((fid_mode & FAN_REPORT_FID) &&
		    id != dirid && !(mask & FAN_ONDIR))
			child = id;

		id = dirid;

		/*
		 * We record file name only in a group with FAN_REPORT_NAME
		 * and when we have a directory inode to report.
		 *
		 * For directory entry modification event, we record the fid of
		 * the directory and the name of the modified entry.
		 *
		 * For event on non-directory that is reported to parent, we
		 * record the fid of the parent and the name of the child.
		 *
		 * Even if not reporting name, we need a variable length
		 * fanotify_name_event if reporting both parent and child fids.
		 */
		if (!(fid_mode & FAN_REPORT_NAME)) {
			name_event = !!child;
			file_name = NULL;
		} else if ((mask & ALL_FSNOTIFY_DIRENT_EVENTS) ||
			   !(mask & FAN_ONDIR)) {
			name_event = true;
		}
	}

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short. For the limited size queues, avoid OOM killer in the
	 * target monitoring memcg as it may have security repercussions.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;
	else
		gfp |= __GFP_RETRY_MAYFAIL;

	/* Whoever is interested in the event pays for the allocation. */
	old_memcg = set_active_memcg(group->memcg);

	if (fanotify_is_perm_event(mask)) {
		event = fanotify_alloc_perm_event(path, gfp);
	} else if (name_event && (file_name || child)) {
		event = fanotify_alloc_name_event(id, fsid, file_name, child,
						  gfp);
	} else if (fid_mode) {
		event = fanotify_alloc_fid_event(id, fsid, gfp);
	} else {
		event = fanotify_alloc_path_event(path, gfp);
	}

	if (!event)
		goto out;

	/*
	 * Use the victim inode instead of the watching inode as the id for
	 * event queue, so event reported on parent is merged with event
	 * reported on child when both directory and child watches exist.
	 */
	fanotify_init_event(event, (unsigned long)id, mask);
	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		event->pid = get_pid(task_pid(current));
	else
		event->pid = get_pid(task_tgid(current));

out:
	set_active_memcg(old_memcg);
	return event;
}

/*
 * Get cached fsid of the filesystem containing the object from any connector.
 * All connectors are supposed to have the same fsid, but we do not verify that
 * here.
 */
static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
{
	int type;
	__kernel_fsid_t fsid = {};

	fsnotify_foreach_obj_type(type) {
		struct fsnotify_mark_connector *conn;

		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;

		conn = READ_ONCE(iter_info->marks[type]->connector);
		/* Mark is just getting destroyed or created? */
		if (!conn)
			continue;
		if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID))
			continue;
		/* Pairs with smp_wmb() in fsnotify_add_mark_list() */
		smp_rmb();
		fsid = conn->fsid;
		if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
			continue;
		return fsid;
	}

	return fsid;
}

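/*
 * Main event handler called by the fsnotify backend.  Filters the event
 * against the group and mark masks, allocates and queues the event and,
 * for permission events, waits for the userspace response.
 */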
static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
				 const void *data, int data_type,
				 struct inode *dir,
				 const struct qstr *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event *event;
	struct fsnotify_event *fsn_event;
	__kernel_fsid_t fsid = {};

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(FAN_CREATE != FS_CREATE);
	BUILD_BUG_ON(FAN_DELETE != FS_DELETE);
	BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
	BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
	BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);

	BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19);

	mask = fanotify_group_event_mask(group, iter_info, mask, data,
					 data_type, dir);
	if (!mask)
		return 0;

	pr_debug("%s: group=%p mask=%x\n", __func__, group, mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) {
		fsid = fanotify_get_fsid(iter_info);
		/* Racing with mark destruction or creation? */
		if (!fsid.val[0] && !fsid.val[1])
			return 0;
	}

	event = fanotify_alloc_event(group, mask, data, data_type, dir,
				     file_name, &fsid);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PERM(event),
					    iter_info);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

static void fanotify_free_path_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_path_event_cachep, FANOTIFY_PE(event));
}

static void fanotify_free_perm_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_perm_event_cachep, FANOTIFY_PERM(event));
}

static void fanotify_free_fid_event(struct fanotify_event *event)
{
	struct fanotify_fid_event *ffe = FANOTIFY_FE(event);

	if (fanotify_fh_has_ext_buf(&ffe->object_fh))
		kfree(fanotify_fh_ext_buf(&ffe->object_fh));
	kmem_cache_free(fanotify_fid_event_cachep, ffe);
}

static void fanotify_free_name_event(struct fanotify_event *event)
{
	kfree(FANOTIFY_NE(event));
}

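/*
 * Free an event after it has been handed to userspace or dropped: release
 * the pid reference and the type specific resources (path references or
 * externally allocated file handle buffers).
 */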
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event *event;

	event = FANOTIFY_E(fsn_event);
	put_pid(event->pid);
	switch (event->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		fanotify_free_path_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_PATH_PERM:
		fanotify_free_perm_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID:
		fanotify_free_fid_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		fanotify_free_name_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_OVERFLOW:
		kfree(event);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

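/* fsnotify backend operations implemented by fanotify */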
const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};