// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Novell Inc.
 * Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/hex.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/exportfs.h>
#include "overlayfs.h"

/* State carried through a multi-layer lookup; one instance per ovl_lookup */
struct ovl_lookup_data {
	struct super_block *sb;
	/* The overlay dentry being looked up */
	struct dentry *dentry;
	/* Layer currently being searched */
	const struct ovl_layer *layer;
	/* Name to look up in the current layer; replaced when following a redirect */
	struct qstr name;
	bool is_dir;
	bool opaque;
	bool xwhiteouts;
	/* Stop lookup in lower layers */
	bool stop;
	/* This is the last layer to be searched */
	bool last;
	/* Redirect path collected so far (kfree'd when replaced) */
	char *redirect;
	char *upperredirect;
	/* Non-zero result of ovl_check_metacopy_xattr() if a metacopy xattr was found */
	int metacopy;
	/* Referring to last redirect xattr */
	bool absolute_redirect;
};

/*
 * Check for a redirect xattr on @path and, if found, rewrite d->name to the
 * redirected path with @post appended.  A relative redirect keeps the current
 * name prefix of length @prelen; an absolute ("/"-prefixed) redirect restarts
 * lookup from the layer root and forces lookup to continue in lower layers.
 * Returns 0 also when there is no redirect xattr (d->name left unchanged).
 */
static int ovl_check_redirect(const struct path *path, struct ovl_lookup_data *d,
			      size_t prelen, const char *post)
{
	int res;
	char *buf;
	struct ovl_fs *ofs = OVL_FS(d->sb);

	d->absolute_redirect = false;
	/* Buffer is allocated with padding for prelen + strlen(post) */
	buf = ovl_get_redirect_xattr(ofs, path, prelen + strlen(post));
	if (IS_ERR_OR_NULL(buf))
		return PTR_ERR(buf);

	if (buf[0] == '/') {
		d->absolute_redirect = true;
		/*
		 * One of the ancestor path elements in an absolute path
		 * lookup in ovl_lookup_layer() could have been opaque and
		 * that will stop further lookup in lower layers (d->stop=true)
		 * But we have found an absolute redirect in descendant path
		 * element and that should force continue lookup in lower
		 * layers (reset d->stop).
		 */
		d->stop = false;
	} else {
		/* Relative redirect: splice the parent prefix before it */
		res = strlen(buf) + 1;
		memmove(buf + prelen, buf, res);
		memcpy(buf, d->name.name, prelen);
	}

	strcat(buf, post);
	kfree(d->redirect);
	d->redirect = buf;
	d->name.name = d->redirect;
	d->name.len = strlen(d->redirect);

	return 0;
}

/* exportfs acceptable() callback; @ctx is the layer's vfsmount */
static int ovl_acceptable(void *ctx, struct dentry *dentry)
{
	/*
	 * A non-dir origin may be disconnected, which is fine, because
	 * we only need it for its unique inode number.
	 */
	if (!d_is_dir(dentry))
		return 1;

	/* Don't decode a deleted empty directory */
	if (d_unhashed(dentry))
		return 0;

	/* Check if directory belongs to the layer we are decoding from */
	return is_subdir(dentry, ((struct vfsmount *)ctx)->mnt_root);
}

/*
 * Check validity of an overlay file handle buffer.
 *
 * Return 0 for a valid file handle.
 * Return -ENODATA for "origin unknown".
 * Return <0 for an invalid file handle.
 */
int ovl_check_fb_len(struct ovl_fb *fb, int fb_len)
{
	if (fb_len < sizeof(struct ovl_fb) || fb_len < fb->len)
		return -EINVAL;

	if (fb->magic != OVL_FH_MAGIC)
		return -EINVAL;

	/* Treat larger version and unknown flags as "origin unknown" */
	if (fb->version > OVL_FH_VERSION || fb->flags & ~OVL_FH_FLAG_ALL)
		return -ENODATA;

	/* Treat endianness mismatch as "origin unknown" */
	if (!(fb->flags & OVL_FH_FLAG_ANY_ENDIAN) &&
	    (fb->flags & OVL_FH_FLAG_BIG_ENDIAN) != OVL_FH_FLAG_CPU_ENDIAN)
		return -ENODATA;

	return 0;
}

/*
 * Read the file handle stored in xattr @ox of @upperdentry.
 * Returns NULL for "origin unknown" (missing/empty/unsupported xattr),
 * ERR_PTR on hard error, otherwise a kzalloc'ed ovl_fh the caller must kfree.
 */
static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *upperdentry,
				 enum ovl_xattr ox)
{
	int res, err;
	struct ovl_fh *fh = NULL;

	/* First probe the xattr size with a NULL buffer */
	res = ovl_getxattr_upper(ofs, upperdentry, ox, NULL, 0);
	if (res < 0) {
		if (res == -ENODATA || res == -EOPNOTSUPP)
			return NULL;
		goto fail;
	}
	/* Zero size value means "copied up but origin unknown" */
	if (res == 0)
		return NULL;

	fh = kzalloc(res + OVL_FH_WIRE_OFFSET, GFP_KERNEL);
	if (!fh)
		return ERR_PTR(-ENOMEM);

	res = ovl_getxattr_upper(ofs, upperdentry, ox, fh->buf, res);
	if (res < 0)
		goto fail;

	err = ovl_check_fb_len(&fh->fb, res);
	if (err < 0) {
		/* -ENODATA from ovl_check_fb_len() means "origin unknown" */
		if (err == -ENODATA)
			goto out;
		goto invalid;
	}

	return fh;

out:
	kfree(fh);
	return NULL;

fail:
	pr_warn_ratelimited("failed to get origin (%i)\n", res);
	goto out;
invalid:
	pr_warn_ratelimited("invalid origin (%*phN)\n", res, fh);
	goto out;
}

/*
 * Decode file handle @fh on layer @mnt into a dentry.
 * Returns NULL when the handle cannot be used (missing capability, uuid
 * mismatch, stale handle to a lower file, weird dentry), ERR_PTR on error,
 * otherwise a referenced dentry the caller must dput.
 */
struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
				  struct vfsmount *mnt, bool connected)
{
	struct dentry *real;
	int bytes;

	if (!capable(CAP_DAC_READ_SEARCH))
		return NULL;

	/*
	 * Make sure that the stored uuid matches the uuid of the lower
	 * layer where file handle will be decoded.
	 * In case of uuid=off option just make sure that stored uuid is null.
	 */
	if (ovl_origin_uuid(ofs) ?
	    !uuid_equal(&fh->fb.uuid, &mnt->mnt_sb->s_uuid) :
	    !uuid_is_null(&fh->fb.uuid))
		return NULL;

	bytes = (fh->fb.len - offsetof(struct ovl_fb, fid));
	real = exportfs_decode_fh(mnt, (struct fid *)fh->fb.fid,
				  bytes >> 2, (int)fh->fb.type,
				  connected ? ovl_acceptable : NULL, mnt);
	if (IS_ERR(real)) {
		/*
		 * Treat stale file handle to lower file as "origin unknown".
		 * upper file handle could become stale when upper file is
		 * unlinked and this information is needed to handle stale
		 * index entries correctly.
		 */
		if (real == ERR_PTR(-ESTALE) &&
		    !(fh->fb.flags & OVL_FH_FLAG_PATH_UPPER))
			real = NULL;
		return real;
	}

	if (ovl_dentry_weird(real)) {
		dput(real);
		return NULL;
	}

	return real;
}

/*
 * lookup_one_unlocked() wrapper that returns -ENOENT instead of a negative
 * dentry; when @drop_negative is set, an otherwise unused cached negative
 * dentry is unhashed under d_lock before being dropped.
 */
static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d,
						   const char *name,
						   struct dentry *base, int len,
						   bool drop_negative)
{
	struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->layer->mnt),
						 &QSTR_LEN(name, len), base);

	if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
		if (drop_negative && ret->d_lockref.count == 1) {
			spin_lock(&ret->d_lock);
			/* Recheck condition under lock */
			if (d_is_negative(ret) && ret->d_lockref.count == 1)
				__d_drop(ret);
			spin_unlock(&ret->d_lock);
		}
		dput(ret);
		ret = ERR_PTR(-ENOENT);
	}
	return ret;
}

/*
 * Lookup a single path element @name under layer dir @base and update the
 * lookup state in @d (whiteout/opaque/metacopy/redirect handling).  @prelen
 * and @post describe the already-consumed prefix and remaining suffix of a
 * redirect path.  On success *@ret holds the found dentry or NULL.
 */
static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
			     const char *name, unsigned int namelen,
			     size_t prelen, const char *post,
			     struct dentry **ret, bool drop_negative)
{
	struct ovl_fs *ofs = OVL_FS(d->sb);
	struct dentry *this = NULL;
	const char *warn;
	struct path path;
	int err;
	bool last_element = !post[0];
	bool is_upper = d->layer->idx == 0;
	char val;

	/*
	 * We allow filesystems that are case-folding capable as long as the
	 * layers are consistently enabled in the stack, enabled for every dir
	 * or disabled in all dirs. If someone has modified case folding on a
	 * directory on underlying layer, the warranty of the ovl stack is
	 * voided.
	 */
	if (ofs->casefold != ovl_dentry_casefolded(base)) {
		warn = "parent wrong casefold";
		err = -ESTALE;
		goto out_warn;
	}

	this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative);
	if (IS_ERR(this)) {
		err = PTR_ERR(this);
		this = NULL;
		/* Not found (or name too long for layer fs) is not an error */
		if (err == -ENOENT || err == -ENAMETOOLONG)
			goto out;
		goto out_err;
	}

	if (ofs->casefold != ovl_dentry_casefolded(this)) {
		warn = "child wrong casefold";
		err = -EREMOTE;
		goto out_warn;
	}

	if (ovl_dentry_weird(this)) {
		/* Don't support traversing automounts and other weirdness */
		warn = "unsupported object type";
		err = -EREMOTE;
		goto out_warn;
	}

	path.dentry = this;
	path.mnt = d->layer->mnt;
	if (ovl_path_is_whiteout(ofs, &path)) {
		d->stop = d->opaque = true;
		goto put_and_out;
	}
	/*
	 * This dentry should be a regular file if previous layer lookup
	 * found a metacopy dentry.
	 */
	if (last_element && d->metacopy && !d_is_reg(this)) {
		d->stop = true;
		goto put_and_out;
	}

	if (!d_can_lookup(this)) {
		/* A non-dir cannot be a parent of remaining path elements */
		if (d->is_dir || !last_element) {
			d->stop = true;
			goto put_and_out;
		}
		err = ovl_check_metacopy_xattr(ofs, &path, NULL);
		if (err < 0)
			goto out_err;

		d->metacopy = err;
		d->stop = !d->metacopy;
		if (!d->metacopy || d->last)
			goto out;
	} else {
		if (ovl_lookup_trap_inode(d->sb, this)) {
			/* Caught in a trap of overlapping layers */
			warn = "overlapping layers";
			err = -ELOOP;
			goto out_warn;
		}

		if (last_element)
			d->is_dir = true;
		if (d->last)
			goto out;

		/* overlay.opaque=x means xwhiteouts directory */
		val = ovl_get_opaquedir_val(ofs, &path);
		if (last_element && !is_upper && val == 'x') {
			d->xwhiteouts = true;
			ovl_layer_set_xwhiteouts(ofs, d->layer);
		} else if (val == 'y') {
			/* Opaque dir stops lookup in lower layers */
			d->stop = true;
			if (last_element)
				d->opaque = true;
			goto out;
		}
	}
	err = ovl_check_redirect(&path, d, prelen, post);
	if (err)
		goto out_err;
out:
	*ret = this;
	return 0;

put_and_out:
	dput(this);
	this = NULL;
	goto out;

out_warn:
	pr_warn_ratelimited("failed lookup in %s (%pd2, name='%.*s', err=%i): %s\n",
			    is_upper ? "upper" : "lower", base,
			    namelen, name, err, warn);
out_err:
	dput(this);
	return err;
}

/*
 * Lookup d->name in the layer below @base.  A plain name is looked up as a
 * single element; an absolute redirect ("/"-prefixed d->name) is walked
 * component by component from @base.  On success *@ret holds the final
 * dentry or NULL.
 */
static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
			    struct dentry **ret, bool drop_negative)
{
	/* Counting down from the end, since the prefix can change */
	size_t rem = d->name.len - 1;
	struct dentry *dentry = NULL;
	int err;

	if (d->name.name[0] != '/')
		return ovl_lookup_single(base, d, d->name.name, d->name.len,
					 0, "", ret, drop_negative);

	while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
		const char *s = d->name.name + d->name.len - rem;
		const char *next = strchrnul(s, '/');
		size_t thislen = next - s;
		bool end = !next[0];

		/* Verify we did not go off the rails */
		if (WARN_ON(s[-1] != '/'))
			return -EIO;

		err = ovl_lookup_single(base, d, s, thislen,
					d->name.len - rem, next, &base,
					drop_negative);
		dput(dentry);
		if (err)
			return err;
		dentry = base;
		if (end)
			break;

		rem -= thislen + 1;

		if (WARN_ON(rem >= d->name.len))
			return -EIO;
	}
	*ret = dentry;
	return 0;
}

/*
 * Lookup absolute path @redirect from the root of data layer @layer.
 * On success @datapath holds a referenced path to a regular file.
 */
static int ovl_lookup_data_layer(struct dentry *dentry, const char *redirect,
				 const struct ovl_layer *layer,
				 struct path *datapath)
{
	int err;

	err = vfs_path_lookup(layer->mnt->mnt_root, layer->mnt, redirect,
			      LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS | LOOKUP_NO_XDEV,
			      datapath);
	pr_debug("lookup lowerdata (%pd2, redirect=\"%s\", layer=%d, err=%i)\n",
		 dentry, redirect, layer->idx, err);

	if (err)
		return err;

	err = -EREMOTE;
	if
 (ovl_dentry_weird(datapath->dentry))
		goto out_path_put;

	err = -ENOENT;
	/* Only regular file is acceptable as lower data */
	if (!d_is_reg(datapath->dentry))
		goto out_path_put;

	return 0;

out_path_put:
	path_put(datapath);

	return err;
}

/* Lookup in data-only layers by absolute redirect to layer root */
static int ovl_lookup_data_layers(struct dentry *dentry, const char *redirect,
				  struct ovl_path *lowerdata)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	const struct ovl_layer *layer;
	struct path datapath;
	int err = -ENOENT;
	int i;

	/* Data-only layers are the last ofs->numdatalayer entries */
	layer = &ofs->layers[ofs->numlayer - ofs->numdatalayer];
	for (i = 0; i < ofs->numdatalayer; i++, layer++) {
		err = ovl_lookup_data_layer(dentry, redirect, layer, &datapath);
		if (!err) {
			mntput(datapath.mnt);
			lowerdata->dentry = datapath.dentry;
			lowerdata->layer = layer;
			return 0;
		}
	}

	return err;
}

/*
 * Try to decode origin file handle @fh on each lower layer in turn.
 * On success store the decoded origin in *@stackp (allocated here if
 * *@stackp is NULL).  @upperdentry, when given, is type-checked against
 * the decoded origin.
 */
int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
			struct dentry *upperdentry, struct ovl_path **stackp)
{
	struct dentry *origin = NULL;
	int i;

	for (i = 1; i <= ovl_numlowerlayer(ofs); i++) {
		/*
		 * If lower fs uuid is not unique among lower fs we cannot match
		 * fh->uuid to layer.
		 */
		if (ofs->layers[i].fsid &&
		    ofs->layers[i].fs->bad_uuid)
			continue;

		origin = ovl_decode_real_fh(ofs, fh, ofs->layers[i].mnt,
					    connected);
		if (origin)
			break;
	}

	if (!origin)
		return -ESTALE;
	else if (IS_ERR(origin))
		return PTR_ERR(origin);

	/* Upper (unless whiteout) and origin must be of the same file type */
	if (upperdentry && !ovl_upper_is_whiteout(ofs, upperdentry) &&
	    inode_wrong_type(d_inode(upperdentry), d_inode(origin)->i_mode))
		goto invalid;

	if (!*stackp)
		*stackp = kmalloc(sizeof(struct ovl_path), GFP_KERNEL);
	if (!*stackp) {
		dput(origin);
		return -ENOMEM;
	}
	**stackp = (struct ovl_path){
		.dentry = origin,
		.layer = &ofs->layers[i]
	};

	return 0;

invalid:
	pr_warn_ratelimited("invalid origin (%pd2, ftype=%x, origin ftype=%x).\n",
			    upperdentry, d_inode(upperdentry)->i_mode & S_IFMT,
			    d_inode(origin)->i_mode & S_IFMT);
	dput(origin);
	return -ESTALE;
}

/*
 * Lookup the copy up origin of @upperdentry from its "origin" xattr.
 * A stale origin is not an error ("origin unknown"); *@stackp is only
 * set on successful decode.
 */
static int ovl_check_origin(struct ovl_fs *ofs, struct dentry *upperdentry,
			    struct ovl_path **stackp)
{
	struct ovl_fh *fh = ovl_get_fh(ofs, upperdentry, OVL_XATTR_ORIGIN);
	int err;

	/* PTR_ERR(NULL) == 0, so a missing xattr returns success */
	if (IS_ERR_OR_NULL(fh))
		return PTR_ERR(fh);

	err = ovl_check_origin_fh(ofs, fh, false, upperdentry, stackp);
	kfree(fh);

	if (err) {
		if (err == -ESTALE)
			return 0;
		return err;
	}

	return 0;
}

/*
 * Verify that @fh matches the file handle stored in xattr @name.
 * Return 0 on match, -ESTALE on mismatch, < 0 on error.
 */
static int ovl_verify_fh(struct ovl_fs *ofs, struct dentry *dentry,
			 enum ovl_xattr ox, const struct ovl_fh *fh)
{
	struct ovl_fh *ofh = ovl_get_fh(ofs, dentry, ox);
	int err = 0;

	if (!ofh)
		return -ENODATA;

	if (IS_ERR(ofh))
		return PTR_ERR(ofh);

	if (fh->fb.len != ofh->fb.len || memcmp(&fh->fb, &ofh->fb, fh->fb.len))
		err = -ESTALE;

	kfree(ofh);
	return err;
}

/*
 * Verify @fh against the file handle stored in xattr @ox of @dentry;
 * if @set and no file handle is stored, store @fh in xattr @ox.
 */
int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
		      enum ovl_xattr ox, const struct ovl_fh *fh,
		      bool is_upper, bool set)
{
	int err;

	err = ovl_verify_fh(ofs, dentry, ox, fh);
	if (set && err == -ENODATA)
		err = ovl_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);

	return err;
}

/*
 * Verify that @real dentry matches the file handle stored in xattr @name.
 *
 * If @set is true and there is no stored file handle, encode @real and store
 * file handle in xattr @name.
 *
 * Return 0 on match, -ESTALE on mismatch, -ENODATA on no xattr, < 0 on error.
 */
int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
			    enum ovl_xattr ox, struct dentry *real,
			    bool is_upper, bool set)
{
	struct inode *inode;
	struct ovl_fh *fh;
	int err;

	fh = ovl_encode_real_fh(ofs, d_inode(real), is_upper);
	err = PTR_ERR(fh);
	if (IS_ERR(fh)) {
		fh = NULL;
		goto fail;
	}

	err = ovl_verify_set_fh(ofs, dentry, ox, fh, is_upper, set);
	if (err)
		goto fail;

out:
	kfree(fh);
	return err;

fail:
	inode = d_inode(real);
	pr_warn_ratelimited("failed to verify %s (%pd2, ino=%lu, err=%i)\n",
			    is_upper ? "upper" : "origin", real,
			    inode ? inode->i_ino : 0, err);
	goto out;
}


/* Get upper dentry from index */
struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index,
			       bool connected)
{
	struct ovl_fh *fh;
	struct dentry *upper;

	/* Non-dir index is a hardlink to the upper inode itself */
	if (!d_is_dir(index))
		return dget(index);

	fh = ovl_get_fh(ofs, index, OVL_XATTR_UPPER);
	if (IS_ERR_OR_NULL(fh))
		return ERR_CAST(fh);

	upper = ovl_decode_real_fh(ofs, fh, ovl_upper_mnt(ofs), connected);
	kfree(fh);

	if (IS_ERR_OR_NULL(upper))
		return upper ?: ERR_PTR(-ESTALE);

	if (!d_is_dir(upper)) {
		pr_warn_ratelimited("invalid index upper (%pd2, upper=%pd2).\n",
				    index, upper);
		dput(upper);
		return ERR_PTR(-EIO);
	}

	return upper;
}

/*
 * Verify that an index entry name matches the origin file handle stored in
 * OVL_XATTR_ORIGIN and that origin file handle can be decoded to lower path.
 * Return 0 on match, -ESTALE on mismatch or stale origin, < 0 on error.
 */
int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index)
{
	struct ovl_fh *fh = NULL;
	size_t len;
	struct ovl_path origin = { };
	struct ovl_path *stack = &origin;
	struct dentry *upper = NULL;
	int err;

	if (!d_inode(index))
		return 0;

	err = -EINVAL;
	/* Index name is hex encoding of a file handle: at least 2 chars/byte */
	if (index->d_name.len < sizeof(struct ovl_fb)*2)
		goto fail;

	err = -ENOMEM;
	len = index->d_name.len / 2;
	fh = kzalloc(len + OVL_FH_WIRE_OFFSET, GFP_KERNEL);
	if (!fh)
		goto fail;

	err = -EINVAL;
	if (hex2bin(fh->buf, index->d_name.name, len))
		goto fail;

	err = ovl_check_fb_len(&fh->fb, len);
	if (err)
		goto fail;

	/*
	 * Whiteout index entries are used as an indication that an exported
	 * overlay file handle should be treated as stale (i.e. after unlink
	 * of the overlay inode). These entries contain no origin xattr.
	 */
	if (ovl_is_whiteout(index))
		goto out;

	/*
	 * Verifying directory index entries are not stale is expensive, so
	 * only verify stale dir index if NFS export is enabled.
	 */
	if (d_is_dir(index) && !ofs->config.nfs_export)
		goto out;

	/*
	 * Directory index entries should have 'upper' xattr pointing to the
	 * real upper dir. Non-dir index entries are hardlinks to the upper
	 * real inode. For non-dir index, we can read the copy up origin xattr
	 * directly from the index dentry, but for dir index we first need to
	 * decode the upper directory.
	 */
	upper = ovl_index_upper(ofs, index, false);
	if (IS_ERR_OR_NULL(upper)) {
		err = PTR_ERR(upper);
		/*
		 * Directory index entries with no 'upper' xattr need to be
		 * removed. When dir index entry has a stale 'upper' xattr,
		 * we assume that upper dir was removed and we treat the dir
		 * index as orphan entry that needs to be whited out.
		 */
		if (err == -ESTALE)
			goto orphan;
		else if (!err)
			err = -ESTALE;
		goto fail;
	}

	err = ovl_verify_fh(ofs, upper, OVL_XATTR_ORIGIN, fh);
	dput(upper);
	if (err)
		goto fail;

	/* Check if non-dir index is orphan and don't warn before cleaning it */
	if (!d_is_dir(index) && d_inode(index)->i_nlink == 1) {
		err = ovl_check_origin_fh(ofs, fh, false, index, &stack);
		if (err)
			goto fail;

		if (ovl_get_nlink(ofs, origin.dentry, index, 0) == 0)
			goto orphan;
	}

out:
	dput(origin.dentry);
	kfree(fh);
	return err;

fail:
	pr_warn_ratelimited("failed to verify index (%pd2, ftype=%x, err=%i)\n",
			    index, d_inode(index)->i_mode & S_IFMT, err);
	goto out;

orphan:
	pr_warn_ratelimited("orphan index entry (%pd2, ftype=%x, nlink=%u)\n",
			    index, d_inode(index)->i_mode & S_IFMT,
			    d_inode(index)->i_nlink);
	err = -ENOENT;
	goto out;
}

/*
 * Encode file handle @fh as an index entry name: the hex representation
 * of the file handle.  On success @name owns the allocated string
 * (caller kfree's name->name).
 */
int ovl_get_index_name_fh(const struct ovl_fh *fh, struct qstr *name)
{
	char *n, *s;

	/* Two hex chars per file handle byte */
	n = kcalloc(fh->fb.len, 2, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	s = bin2hex(n, fh->buf, fh->fb.len);
	*name = (struct qstr) QSTR_INIT(n, s - n);

	return 0;

}

/*
 * Lookup in indexdir for the index entry of a lower real inode or a copy up
 * origin inode. The index entry name is the hex representation of the lower
 * inode file handle.
 *
 * If the index dentry in negative, then either no lower aliases have been
 * copied up yet, or aliases have been copied up in older kernels and are
 * not indexed.
 *
 * If the index dentry for a copy up origin inode is positive, but points
 * to an inode different than the upper inode, then either the upper inode
 * has been copied up and not indexed or it was indexed, but since then
 * index dir was cleared. Either way, that index cannot be used to identify
 * the overlay inode.
 */
int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
		       struct qstr *name)
{
	struct ovl_fh *fh;
	int err;

	fh = ovl_encode_real_fh(ofs, d_inode(origin), false);
	if (IS_ERR(fh))
		return PTR_ERR(fh);

	err = ovl_get_index_name_fh(fh, name);

	kfree(fh);
	return err;
}

/* Lookup index by file handle for NFS export */
struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh)
{
	struct dentry *index;
	struct qstr name;
	int err;

	err = ovl_get_index_name_fh(fh, &name);
	if (err)
		return ERR_PTR(err);

	index = lookup_noperm_positive_unlocked(&name, ofs->workdir);
	kfree(name.name);
	if (IS_ERR(index)) {
		/* Negative index is not an error for this caller */
		if (PTR_ERR(index) == -ENOENT)
			index = NULL;
		return index;
	}

	if (ovl_is_whiteout(index))
		err = -ESTALE;
	else if (ovl_dentry_weird(index))
		err = -EIO;
	else
		return index;

	dput(index);
	return ERR_PTR(err);
}

struct dentry
 *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
				  struct dentry *origin, bool verify)
{
	struct dentry *index;
	struct inode *inode;
	struct qstr name;
	bool is_dir = d_is_dir(origin);
	int err;

	err = ovl_get_index_name(ofs, origin, &name);
	if (err)
		return ERR_PTR(err);

	index = lookup_one_positive_unlocked(ovl_upper_mnt_idmap(ofs), &name,
					     ofs->workdir);
	if (IS_ERR(index)) {
		err = PTR_ERR(index);
		if (err == -ENOENT) {
			index = NULL;
			goto out;
		}
		pr_warn_ratelimited("failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
				    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
				    d_inode(origin)->i_ino, name.len, name.name,
				    err);
		goto out;
	}

	inode = d_inode(index);
	if (ovl_is_whiteout(index) && !verify) {
		/*
		 * When index lookup is called with !verify for decoding an
		 * overlay file handle, a whiteout index implies that decode
		 * should treat file handle as stale and no need to print a
		 * warning about it.
		 */
		dput(index);
		index = ERR_PTR(-ESTALE);
		goto out;
	} else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) ||
		   inode_wrong_type(inode, d_inode(origin)->i_mode)) {
		/*
		 * Index should always be of the same file type as origin
		 * except for the case of a whiteout index. A whiteout
		 * index should only exist if all lower aliases have been
		 * unlinked, which means that finding a lower origin on lookup
		 * whose index is a whiteout should be treated as an error.
		 */
		pr_warn_ratelimited("bad index found (index=%pd2, ftype=%x, origin ftype=%x).\n",
				    index, d_inode(index)->i_mode & S_IFMT,
				    d_inode(origin)->i_mode & S_IFMT);
		goto fail;
	} else if (is_dir && verify) {
		if (!upper) {
			pr_warn_ratelimited("suspected uncovered redirected dir found (origin=%pd2, index=%pd2).\n",
					    origin, index);
			goto fail;
		}

		/* Verify that dir index 'upper' xattr points to upper dir */
		err = ovl_verify_upper(ofs, index, upper, false);
		if (err) {
			if (err == -ESTALE) {
				pr_warn_ratelimited("suspected multiply redirected dir found (upper=%pd2, origin=%pd2, index=%pd2).\n",
						    upper, origin, index);
			}
			goto fail;
		}
	} else if (upper && d_inode(upper) != inode) {
		goto out_dput;
	}
out:
	kfree(name.name);
	return index;

out_dput:
	dput(index);
	index = NULL;
	goto out;

fail:
	dput(index);
	index = ERR_PTR(-EIO);
	goto out;
}

/*
 * Returns next layer in stack starting from top.
 * Returns -1 if this is the last layer.
 */
int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
		  const struct ovl_layer **layer)
{
	struct ovl_entry *oe = OVL_E(dentry);
	struct ovl_path *lowerstack = ovl_lowerstack(oe);

	BUG_ON(idx < 0);
	if (idx == 0) {
		ovl_path_upper(dentry, path);
		if (path->dentry) {
			*layer = &OVL_FS(dentry->d_sb)->layers[0];
			return ovl_numlower(oe) ? 1 : -1;
		}
		/* No upper layer for this dentry - fall through to lower */
		idx++;
	}
	BUG_ON(idx > ovl_numlower(oe));
	path->dentry = lowerstack[idx - 1].dentry;
	*layer = lowerstack[idx - 1].layer;
	path->mnt = (*layer)->mnt;

	return (idx < ovl_numlower(oe)) ?
 idx + 1 : -1;
}

/* Fix missing 'origin' xattr */
static int ovl_fix_origin(struct ovl_fs *ofs, struct dentry *dentry,
			  struct dentry *lower, struct dentry *upper)
{
	const struct ovl_fh *fh;
	int err;

	/* Nothing to fix if 'origin' xattr is already there */
	if (ovl_check_origin_xattr(ofs, upper))
		return 0;

	fh = ovl_get_origin_fh(ofs, lower);
	if (IS_ERR(fh))
		return PTR_ERR(fh);

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	err = ovl_set_origin_fh(ofs, fh, upper);
	if (!err)
		err = ovl_set_impure(dentry->d_parent, upper->d_parent);

	ovl_drop_write(dentry);
out:
	kfree(fh);
	return err;
}

/*
 * Validate fs-verity digest of a metacopy dentry's lowerdata against the
 * digest stored on the metacopy inode, once per inode (result cached in
 * the OVL_VERIFIED_DIGEST flag).
 */
static int ovl_maybe_validate_verity(struct dentry *dentry)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct inode *inode = d_inode(dentry);
	struct path datapath, metapath;
	int err;

	if (!ofs->config.verity_mode ||
	    !ovl_is_metacopy_dentry(dentry) ||
	    ovl_test_flag(OVL_VERIFIED_DIGEST, inode))
		return 0;

	if (!ovl_test_flag(OVL_HAS_DIGEST, inode)) {
		if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) {
			pr_warn_ratelimited("metacopy file '%pd' has no digest specified\n",
					    dentry);
			return -EIO;
		}
		return 0;
	}

	ovl_path_lowerdata(dentry, &datapath);
	if (!datapath.dentry)
		return -EIO;

	ovl_path_real(dentry, &metapath);
	if (!metapath.dentry)
		return -EIO;

	err = ovl_inode_lock_interruptible(inode);
	if (err)
		return err;

	/* Recheck flag under inode lock - someone may have validated already */
	if (!ovl_test_flag(OVL_VERIFIED_DIGEST, inode)) {
		with_ovl_creds(dentry->d_sb)
			err = ovl_validate_verity(ofs, &metapath, &datapath);
		if (err == 0)
			ovl_set_flag(OVL_VERIFIED_DIGEST, inode);
	}

	ovl_inode_unlock(inode);

	return err;
}

/* Lazy lookup of lowerdata */
static int ovl_maybe_lookup_lowerdata(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	const char *redirect = ovl_lowerdata_redirect(inode);
	struct ovl_path datapath = {};
	int err;

	if (!redirect || ovl_dentry_lowerdata(dentry))
		return 0;

	/* Lowerdata redirect must be an absolute path */
	if (redirect[0] != '/')
		return -EIO;

	err = ovl_inode_lock_interruptible(inode);
	if (err)
		return err;

	err = 0;
	/* Someone got here before us? */
	if (ovl_dentry_lowerdata(dentry))
		goto out;

	with_ovl_creds(dentry->d_sb)
		err = ovl_lookup_data_layers(dentry, redirect, &datapath);
	if (err)
		goto out_err;

	err = ovl_dentry_set_lowerdata(dentry, &datapath);
	if (err)
		goto out_err;

out:
	ovl_inode_unlock(inode);
	dput(datapath.dentry);

	return err;

out_err:
	pr_warn_ratelimited("lazy lowerdata lookup failed (%pd2, err=%i)\n",
			    dentry, err);
	goto out;
}

int ovl_verify_lowerdata(struct dentry *dentry)
{
	int err;

	err = ovl_maybe_lookup_lowerdata(dentry);
	if (err)
		return err;

	return ovl_maybe_validate_verity(dentry);
}

/*
 * Following redirects/metacopy can have security consequences: it's like a
 * symlink into the lower layer without the permission checks.
 *
 * This is only a problem if the upper layer is untrusted (e.g comes from an USB
 * drive). This can allow a non-readable file or directory to become readable.
 *
 * Only following redirects when redirects are enabled disables this attack
 * vector when not necessary.
 */
static bool ovl_check_follow_redirect(struct ovl_lookup_data *d)
{
	struct ovl_fs *ofs = OVL_FS(d->sb);

	if (d->metacopy && !ofs->config.metacopy) {
		pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", d->dentry);
		return false;
	}
	if ((d->redirect || d->upperredirect) && !ovl_redirect_follow(ofs)) {
		pr_warn_ratelimited("refusing to follow redirect for (%pd2)\n", d->dentry);
		return false;
	}
	return true;
}

/* Results accumulated by ovl_lookup_layers() for one overlay lookup */
struct ovl_lookup_ctx {
	/* The overlay dentry being looked up */
	struct dentry *dentry;
	struct ovl_entry *oe;
	/* Lower layer dentries found so far (ctr entries) */
	struct ovl_path *stack;
	/* Copy up origin decoded from upperdentry's "origin" xattr */
	struct ovl_path *origin_path;
	struct dentry *upperdentry;
	struct dentry *index;
	struct inode *inode;
	/* Number of entries in @stack */
	unsigned int ctr;
};

static int ovl_lookup_layers(struct ovl_lookup_ctx *ctx, struct ovl_lookup_data *d)
{
	struct dentry *dentry = ctx->dentry;
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct ovl_entry *poe = OVL_E(dentry->d_parent);
	struct ovl_entry *roe = OVL_E(dentry->d_sb->s_root);
	bool check_redirect = (ovl_redirect_follow(ofs) || ofs->numdatalayer);
	struct dentry *upperdir;
	struct dentry *this;
	struct dentry *origin = NULL;
	bool upperopaque = false;
	bool uppermetacopy = false;
	int metacopy_size = 0;
	unsigned int i;
	int err;

	upperdir = ovl_dentry_upper(dentry->d_parent);
	if (upperdir) {
		d->layer = &ofs->layers[0];
		err = ovl_lookup_layer(upperdir, d, &ctx->upperdentry, true);
		if (err)
			return err;

		if (ctx->upperdentry && ctx->upperdentry->d_flags & DCACHE_OP_REAL)
			return -EREMOTE;

		if (ctx->upperdentry && !d->is_dir) {
			/*
			 * Lookup copy up origin by decoding origin file handle.
			 * We may get a disconnected dentry, which is fine,
			 * because we only need to hold the origin inode in
			 * cache and use its inode number.
	 * We may even get a
	 * connected dentry, that is not under any of the lower
	 * layers root.  That is also fine for using its inode
	 * number - it's the same as if we held a reference
	 * to a dentry in lower layer that was moved under us.
	 */
	err = ovl_check_origin(ofs, ctx->upperdentry, &ctx->origin_path);
	if (err)
		return err;

	/* Upper is a metacopy - the data is expected in a lower layer */
	if (d->metacopy)
		uppermetacopy = true;
	metacopy_size = d->metacopy;
	}

	if (d->redirect) {
		/* Keep a private copy of the redirect for the inode */
		err = -ENOMEM;
		d->upperredirect = kstrdup(d->redirect, GFP_KERNEL);
		if (!d->upperredirect)
			return err;
		/* Absolute redirect - continue lookup from the root entry */
		if (d->redirect[0] == '/')
			poe = roe;
	}
	upperopaque = d->opaque;
	}

	/* Preallocate one slot per lower layer for the lower dentry chain */
	if (!d->stop && ovl_numlower(poe)) {
		err = -ENOMEM;
		ctx->stack = ovl_stack_alloc(ofs->numlayer - 1);
		if (!ctx->stack)
			return err;
	}

	for (i = 0; !d->stop && i < ovl_numlower(poe); i++) {
		struct ovl_path lower = ovl_lowerstack(poe)[i];

		/* Bail out with -EPERM if the found redirect may not be followed */
		if (!ovl_check_follow_redirect(d)) {
			err = -EPERM;
			return err;
		}

		if (!check_redirect)
			d->last = i == ovl_numlower(poe) - 1;
		else if (d->is_dir || !ofs->numdatalayer)
			d->last = lower.layer->idx == ovl_numlower(roe);

		d->layer = lower.layer;
		err = ovl_lookup_layer(lower.dentry, d, &this, false);
		if (err)
			return err;

		if (!this)
			continue;

		/*
		 * If no origin fh is stored in upper of a merge dir, store fh
		 * of lower dir and set upper parent "impure".
		 */
		if (ctx->upperdentry && !ctx->ctr && !ofs->noxattr && d->is_dir) {
			err = ovl_fix_origin(ofs, dentry, this, ctx->upperdentry);
			if (err) {
				dput(this);
				return err;
			}
		}

		/*
		 * When "verify_lower" feature is enabled, do not merge with a
		 * lower dir that does not match a stored origin xattr. In any
		 * case, only verified origin is used for index lookup.
		 *
		 * For non-dir dentry, if index=on, then ensure origin
		 * matches the dentry found using path based lookup,
		 * otherwise error out.
		 */
		if (ctx->upperdentry && !ctx->ctr &&
		    ((d->is_dir && ovl_verify_lower(dentry->d_sb)) ||
		     (!d->is_dir && ofs->config.index && ctx->origin_path))) {
			err = ovl_verify_origin(ofs, ctx->upperdentry, this, false);
			if (err) {
				dput(this);
				/* Unverified lower dir: stop merging, not fatal */
				if (d->is_dir)
					break;
				return err;
			}
			origin = this;
		}

		if (!ctx->upperdentry && !d->is_dir && !ctx->ctr && d->metacopy)
			metacopy_size = d->metacopy;

		if (d->metacopy && ctx->ctr) {
			/*
			 * Do not store intermediate metacopy dentries in
			 * lower chain, except top most lower metacopy dentry.
			 * Continue the loop so that if there is an absolute
			 * redirect on this dentry, poe can be reset to roe.
			 */
			dput(this);
			this = NULL;
		} else {
			ctx->stack[ctx->ctr].dentry = this;
			ctx->stack[ctx->ctr].layer = lower.layer;
			ctx->ctr++;
		}

		if (d->stop)
			break;

		/* Absolute redirect: restart the scan from the root entry */
		if (d->redirect && d->redirect[0] == '/' && poe != roe) {
			poe = roe;
			/* Find the current layer on the root dentry */
			i = lower.layer->idx - 1;
		}
	}

	/*
	 * Defer lookup of lowerdata in data-only layers to first access.
	 * Don't require redirect=follow and metacopy=on in this case.
	 */
	if (d->metacopy && ctx->ctr && ofs->numdatalayer && d->absolute_redirect) {
		d->metacopy = 0;
		/* Reserve a null tail slot for the lazily found lowerdata */
		ctx->ctr++;
	} else if (!ovl_check_follow_redirect(d)) {
		err = -EPERM;
		return err;
	}

	/*
	 * For regular non-metacopy upper dentries, there is no lower
	 * path based lookup, hence ctr will be zero. If a dentry is found
	 * using ORIGIN xattr on upper, install it in stack.
	 *
	 * For metacopy dentry, path based lookup will find lower dentries.
	 * Just make sure a corresponding data dentry has been found.
	 */
	if (d->metacopy || (uppermetacopy && !ctx->ctr)) {
		pr_warn_ratelimited("metacopy with no lower data found - abort lookup (%pd2)\n",
				    dentry);
		err = -EIO;
		return err;
	} else if (!d->is_dir && ctx->upperdentry && !ctx->ctr && ctx->origin_path) {
		if (WARN_ON(ctx->stack != NULL)) {
			err = -EIO;
			return err;
		}
		/* Use the decoded origin as the single lower stack entry */
		ctx->stack = ctx->origin_path;
		ctx->ctr = 1;
		origin = ctx->origin_path->dentry;
		ctx->origin_path = NULL;
	}

	/*
	 * Always lookup index if there is no-upperdentry.
	 *
	 * For the case of upperdentry, we have set origin by now if it
	 * needed to be set. There are basically three cases.
	 *
	 * For directories, lookup index by lower inode and verify it matches
	 * upper inode. We only trust dir index if we verified that lower dir
	 * matches origin, otherwise dir index entries may be inconsistent
	 * and we ignore them.
	 *
	 * For regular upper, we already set origin if upper had ORIGIN
	 * xattr. There is no verification though as there is no path
	 * based dentry lookup in lower in this case.
	 *
	 * For metacopy upper, we set a verified origin already if index
	 * is enabled and if upper had an ORIGIN xattr.
	 *
	 */
	if (!ctx->upperdentry && ctx->ctr)
		origin = ctx->stack[0].dentry;

	if (origin && ovl_indexdir(dentry->d_sb) &&
	    (!d->is_dir || ovl_index_all(dentry->d_sb))) {
		ctx->index = ovl_lookup_index(ofs, ctx->upperdentry, origin, true);
		if (IS_ERR(ctx->index)) {
			err = PTR_ERR(ctx->index);
			ctx->index = NULL;
			return err;
		}
	}

	if (ctx->ctr) {
		ctx->oe = ovl_alloc_entry(ctx->ctr);
		err = -ENOMEM;
		if (!ctx->oe)
			return err;

		ovl_stack_cpy(ovl_lowerstack(ctx->oe), ctx->stack, ctx->ctr);
	}

	if (upperopaque)
		ovl_dentry_set_opaque(dentry);
	if (d->xwhiteouts)
		ovl_dentry_set_xwhiteouts(dentry);

	if (ctx->upperdentry)
		ovl_dentry_set_upper_alias(dentry);
	else if (ctx->index) {
		/* No real upper - treat the index entry as the upper dentry */
		char *upperredirect;
		struct path upperpath = {
			.dentry = ctx->upperdentry = dget(ctx->index),
			.mnt = ovl_upper_mnt(ofs),
		};

		/*
		 * It's safe to assign upperredirect here: the previous
		 * assignment happens only if upperdentry is non-NULL, and
		 * this one only if upperdentry is NULL.
1327 */ 1328 upperredirect = ovl_get_redirect_xattr(ofs, &upperpath, 0); 1329 if (IS_ERR(upperredirect)) 1330 return PTR_ERR(upperredirect); 1331 d->upperredirect = upperredirect; 1332 1333 err = ovl_check_metacopy_xattr(ofs, &upperpath, NULL); 1334 if (err < 0) 1335 return err; 1336 d->metacopy = uppermetacopy = err; 1337 metacopy_size = err; 1338 1339 if (!ovl_check_follow_redirect(d)) { 1340 err = -EPERM; 1341 return err; 1342 } 1343 } 1344 1345 if (ctx->upperdentry || ctx->ctr) { 1346 struct inode *inode; 1347 struct ovl_inode_params oip = { 1348 .upperdentry = ctx->upperdentry, 1349 .oe = ctx->oe, 1350 .index = ctx->index, 1351 .redirect = d->upperredirect, 1352 }; 1353 1354 /* Store lowerdata redirect for lazy lookup */ 1355 if (ctx->ctr > 1 && !d->is_dir && !ctx->stack[ctx->ctr - 1].dentry) { 1356 oip.lowerdata_redirect = d->redirect; 1357 d->redirect = NULL; 1358 } 1359 1360 inode = ovl_get_inode(dentry->d_sb, &oip); 1361 if (IS_ERR(inode)) 1362 return PTR_ERR(inode); 1363 1364 ctx->inode = inode; 1365 if (ctx->upperdentry && !uppermetacopy) 1366 ovl_set_flag(OVL_UPPERDATA, ctx->inode); 1367 1368 if (metacopy_size > OVL_METACOPY_MIN_SIZE) 1369 ovl_set_flag(OVL_HAS_DIGEST, ctx->inode); 1370 } 1371 1372 ovl_dentry_init_reval(dentry, ctx->upperdentry, OVL_I_E(ctx->inode)); 1373 1374 return 0; 1375 } 1376 1377 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, 1378 unsigned int flags) 1379 { 1380 struct ovl_fs *ofs = OVL_FS(dentry->d_sb); 1381 struct ovl_entry *poe = OVL_E(dentry->d_parent); 1382 bool check_redirect = (ovl_redirect_follow(ofs) || ofs->numdatalayer); 1383 int err; 1384 struct ovl_lookup_ctx ctx = { 1385 .dentry = dentry, 1386 }; 1387 struct ovl_lookup_data d = { 1388 .sb = dentry->d_sb, 1389 .dentry = dentry, 1390 .name = dentry->d_name, 1391 .last = check_redirect ? 
			false : !ovl_numlower(poe),
	};

	if (dentry->d_name.len > ofs->namelen)
		return ERR_PTR(-ENAMETOOLONG);

	/* Layer lookups are performed with overlay credentials */
	with_ovl_creds(dentry->d_sb)
		err = ovl_lookup_layers(&ctx, &d);

	/* Release temporary lookup state; ctx.oe/ctx.inode carry the result */
	if (ctx.origin_path) {
		dput(ctx.origin_path->dentry);
		kfree(ctx.origin_path);
	}
	dput(ctx.index);
	ovl_stack_free(ctx.stack, ctx.ctr);
	kfree(d.redirect);

	if (err) {
		ovl_free_entry(ctx.oe);
		dput(ctx.upperdentry);
		kfree(d.upperredirect);
		return ERR_PTR(err);
	}

	return d_splice_alias(ctx.inode, dentry);
}

/*
 * Check whether an overlay dentry is backed by something positive
 * (i.e. not a whiteout) in a lower layer.
 */
bool ovl_lower_positive(struct dentry *dentry)
{
	struct ovl_entry *poe = OVL_E(dentry->d_parent);
	const struct qstr *name = &dentry->d_name;
	unsigned int i;
	bool positive = false;
	bool done = false;

	/*
	 * If dentry is negative, then lower is positive iff this is a
	 * whiteout.
	 */
	if (!dentry->d_inode)
		return ovl_dentry_is_opaque(dentry);

	/* Negative upper -> positive lower */
	if (!ovl_dentry_upper(dentry))
		return true;

	with_ovl_creds(dentry->d_sb) {
		/* Positive upper -> have to look up lower to see whether it exists */
		for (i = 0; !done && !positive && i < ovl_numlower(poe); i++) {
			struct dentry *this;
			struct ovl_path *parentpath = &ovl_lowerstack(poe)[i];

			/*
			 * We need to make a non-const copy of dentry->d_name,
			 * because lookup_one_positive_unlocked() will hash name
			 * with parentpath base, which is on another (lower fs).
			 */
			this = lookup_one_positive_unlocked(mnt_idmap(parentpath->layer->mnt),
							    &QSTR_LEN(name->name, name->len),
							    parentpath->dentry);
			if (IS_ERR(this)) {
				switch (PTR_ERR(this)) {
				case -ENOENT:
				case -ENAMETOOLONG:
					break;

				default:
					/*
					 * Assume something is there, we just couldn't
					 * access it.
					 */
					positive = true;
					break;
				}
			} else {
				struct path path = {
					.dentry = this,
					.mnt = parentpath->layer->mnt,
				};
				/* Found an entry - positive unless it is a whiteout */
				positive = !ovl_path_is_whiteout(OVL_FS(dentry->d_sb), &path);
				done = true;
				dput(this);
			}
		}
	}

	return positive;
}