// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Novell Inc.
 * Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/hex.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/exportfs.h>
#include "overlayfs.h"

/*
 * Per-lookup state threaded through the layer-by-layer lookup helpers.
 * Tracks the (possibly redirected) name currently being looked up and the
 * flags that control whether lookup continues into lower layers.
 */
struct ovl_lookup_data {
	struct super_block *sb;
	struct dentry *dentry;		/* overlay dentry being looked up */
	const struct ovl_layer *layer;	/* layer currently being searched */
	struct qstr name;		/* name to look up in current layer */
	bool is_dir;
	bool opaque;
	bool xwhiteouts;
	bool stop;			/* stop lookup in lower layers */
	bool last;			/* this is the last layer to search */
	char *redirect;			/* accumulated redirect path */
	char *upperredirect;
	int metacopy;			/* non-zero: size of metacopy xattr */
	/* Referring to last redirect xattr */
	bool absolute_redirect;
};

/*
 * Read the redirect xattr of @path and update d->name/d->redirect so that
 * lookup continues under the redirected name.  A relative redirect is
 * prefixed with the first @prelen bytes of the current name; @post is
 * appended in both cases.
 */
static int ovl_check_redirect(const struct path *path, struct ovl_lookup_data *d,
			      size_t prelen, const char *post)
{
	int res;
	char *buf;
	struct ovl_fs *ofs = OVL_FS(d->sb);

	d->absolute_redirect = false;
	/* Ask for padding so prefix and @post can be spliced in below */
	buf = ovl_get_redirect_xattr(ofs, path, prelen + strlen(post));
	if (IS_ERR_OR_NULL(buf))
		return PTR_ERR(buf);

	if (buf[0] == '/') {
		d->absolute_redirect = true;
		/*
		 * One of the ancestor path elements in an absolute path
		 * lookup in ovl_lookup_layer() could have been opaque and
		 * that will stop further lookup in lower layers (d->stop=true)
		 * But we have found an absolute redirect in descendant path
		 * element and that should force continue lookup in lower
		 * layers (reset d->stop).
		 */
		d->stop = false;
	} else {
		/* Relative redirect: prepend the current name prefix */
		res = strlen(buf) + 1;
		memmove(buf + prelen, buf, res);
		memcpy(buf, d->name.name, prelen);
	}

	strcat(buf, post);
	kfree(d->redirect);
	d->redirect = buf;
	d->name.name = d->redirect;
	d->name.len = strlen(d->redirect);

	return 0;
}

/* exportfs "acceptable" callback used when decoding an origin file handle */
static int ovl_acceptable(void *ctx, struct dentry *dentry)
{
	/*
	 * A non-dir origin may be disconnected, which is fine, because
	 * we only need it for its unique inode number.
	 */
	if (!d_is_dir(dentry))
		return 1;

	/* Don't decode a deleted empty directory */
	if (d_unhashed(dentry))
		return 0;

	/* Check if directory belongs to the layer we are decoding from */
	return is_subdir(dentry, ((struct vfsmount *)ctx)->mnt_root);
}

/*
 * Check validity of an overlay file handle buffer.
 *
 * Return 0 for a valid file handle.
 * Return -ENODATA for "origin unknown".
 * Return <0 for an invalid file handle.
 */
int ovl_check_fb_len(struct ovl_fb *fb, int fb_len)
{
	if (fb_len < sizeof(struct ovl_fb) || fb_len < fb->len)
		return -EINVAL;

	if (fb->magic != OVL_FH_MAGIC)
		return -EINVAL;

	/* Treat larger version and unknown flags as "origin unknown" */
	if (fb->version > OVL_FH_VERSION || fb->flags & ~OVL_FH_FLAG_ALL)
		return -ENODATA;

	/* Treat endianness mismatch as "origin unknown" */
	if (!(fb->flags & OVL_FH_FLAG_ANY_ENDIAN) &&
	    (fb->flags & OVL_FH_FLAG_BIG_ENDIAN) != OVL_FH_FLAG_CPU_ENDIAN)
		return -ENODATA;

	return 0;
}

/*
 * Read and validate the file handle stored in xattr @ox of @upperdentry.
 * Returns a kmalloc'ed handle the caller must kfree, NULL when no usable
 * handle is stored (read/validation failures are logged and also yield
 * NULL), or ERR_PTR(-ENOMEM).
 */
static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *upperdentry,
				 enum ovl_xattr ox)
{
	int res, err;
	struct ovl_fh *fh = NULL;

	/* First probe the xattr size */
	res = ovl_getxattr_upper(ofs, upperdentry, ox, NULL, 0);
	if (res < 0) {
		if (res == -ENODATA || res == -EOPNOTSUPP)
			return NULL;
		goto fail;
	}
	/* Zero size value means "copied up but origin unknown" */
	if (res == 0)
		return NULL;

	fh = kzalloc(res + OVL_FH_WIRE_OFFSET, GFP_KERNEL);
	if (!fh)
		return ERR_PTR(-ENOMEM);

	res = ovl_getxattr_upper(ofs, upperdentry, ox, fh->buf, res);
	if (res < 0)
		goto fail;

	err = ovl_check_fb_len(&fh->fb, res);
	if (err < 0) {
		if (err == -ENODATA)
			goto out;
		goto invalid;
	}

	return fh;

out:
	kfree(fh);
	return NULL;

fail:
	pr_warn_ratelimited("failed to get origin (%i)\n", res);
	goto out;
invalid:
	pr_warn_ratelimited("invalid origin (%*phN)\n", res, fh);
	goto out;
}

bool ovl_uuid_match(struct ovl_fs *ofs, const struct super_block *sb,
		    const uuid_t *uuid)
{
	/*
	 * Make sure that the stored uuid matches the uuid of the lower
	 * layer where file handle will be decoded.
	 * In case of uuid=off option just make sure that stored uuid is null.
	 */
	return ovl_origin_uuid(ofs) ? uuid_equal(uuid, &sb->s_uuid) :
				      uuid_is_null(uuid);
}

/*
 * Decode overlay file handle @fh into a dentry on layer @mnt.
 * Returns NULL when the handle cannot be decoded on this layer (no
 * CAP_DAC_READ_SEARCH, uuid mismatch, weird dentry, or stale lower
 * handle), an ERR_PTR on other errors, otherwise a referenced dentry.
 */
struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
				  struct vfsmount *mnt, bool connected)
{
	struct dentry *real;
	int bytes;

	if (!capable(CAP_DAC_READ_SEARCH))
		return NULL;

	if (!ovl_uuid_match(ofs, mnt->mnt_sb, &fh->fb.uuid))
		return NULL;

	/* Length of the fid part of the handle, in 32bit words for exportfs */
	bytes = (fh->fb.len - offsetof(struct ovl_fb, fid));
	real = exportfs_decode_fh(mnt, (struct fid *)fh->fb.fid,
				  bytes >> 2, (int)fh->fb.type,
				  connected ? ovl_acceptable : NULL, mnt);
	if (IS_ERR(real)) {
		/*
		 * Treat stale file handle to lower file as "origin unknown".
		 * upper file handle could become stale when upper file is
		 * unlinked and this information is needed to handle stale
		 * index entries correctly.
		 */
		if (real == ERR_PTR(-ESTALE) &&
		    !(fh->fb.flags & OVL_FH_FLAG_PATH_UPPER))
			real = NULL;
		return real;
	}

	if (ovl_dentry_weird(real)) {
		dput(real);
		return NULL;
	}

	return real;
}

/*
 * Lookup a single name in @base and return only positive dentries.
 * If @drop_negative is set and the cached negative dentry is otherwise
 * unused, unhash it before returning -ENOENT.
 */
static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d,
						   const char *name,
						   struct dentry *base, int len,
						   bool drop_negative)
{
	struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->layer->mnt),
						 &QSTR_LEN(name, len), base);

	if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
		if (drop_negative && ret->d_lockref.count == 1) {
			spin_lock(&ret->d_lock);
			/* Recheck condition under lock */
			if (d_is_negative(ret) && ret->d_lockref.count == 1)
				__d_drop(ret);
			spin_unlock(&ret->d_lock);
		}
		dput(ret);
		ret = ERR_PTR(-ENOENT);
	}
	return ret;
}

/*
 * Lookup one path element in @base on the current layer and update the
 * lookup state (@d) accordingly: whiteouts, opaque dirs, metacopy and
 * redirect xattrs.  On success *ret holds the found dentry or NULL.
 */
static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
			     const char *name, unsigned int namelen,
			     size_t prelen, const char *post,
			     struct dentry **ret, bool drop_negative)
{
	struct ovl_fs *ofs = OVL_FS(d->sb);
	struct dentry *this = NULL;
	const char *warn;
	struct path path;
	int err;
	bool last_element = !post[0];
	bool is_upper = d->layer->idx == 0;
	char val;

	/*
	 * We allow filesystems that are case-folding capable as long as the
	 * layers are consistently enabled in the stack, enabled for every dir
	 * or disabled in all dirs. If someone has modified case folding on a
	 * directory on underlying layer, the warranty of the ovl stack is
	 * voided.
	 */
	if (ofs->casefold != ovl_dentry_casefolded(base)) {
		warn = "parent wrong casefold";
		err = -ESTALE;
		goto out_warn;
	}

	this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative);
	if (IS_ERR(this)) {
		err = PTR_ERR(this);
		this = NULL;
		if (err == -ENOENT || err == -ENAMETOOLONG)
			goto out;
		goto out_err;
	}

	if (ofs->casefold != ovl_dentry_casefolded(this)) {
		warn = "child wrong casefold";
		err = -EREMOTE;
		goto out_warn;
	}

	if (ovl_dentry_weird(this)) {
		/* Don't support traversing automounts and other weirdness */
		warn = "unsupported object type";
		err = -EREMOTE;
		goto out_warn;
	}

	path.dentry = this;
	path.mnt = d->layer->mnt;
	if (ovl_path_is_whiteout(ofs, &path)) {
		d->stop = d->opaque = true;
		goto put_and_out;
	}
	/*
	 * This dentry should be a regular file if previous layer lookup
	 * found a metacopy dentry.
	 */
	if (last_element && d->metacopy && !d_is_reg(this)) {
		d->stop = true;
		goto put_and_out;
	}

	if (!d_can_lookup(this)) {
		/* Non-dir: only meaningful as the last element of the path */
		if (d->is_dir || !last_element) {
			d->stop = true;
			goto put_and_out;
		}
		err = ovl_check_metacopy_xattr(ofs, &path, NULL);
		if (err < 0)
			goto out_err;

		d->metacopy = err;
		d->stop = !d->metacopy;
		if (!d->metacopy || d->last)
			goto out;
	} else {
		if (ovl_lookup_trap_inode(d->sb, this)) {
			/* Caught in a trap of overlapping layers */
			warn = "overlapping layers";
			err = -ELOOP;
			goto out_warn;
		}

		if (last_element)
			d->is_dir = true;
		if (d->last)
			goto out;

		/* overlay.opaque=x means xwhiteouts directory */
		val = ovl_get_opaquedir_val(ofs, &path);
		if (last_element && !is_upper && val == 'x') {
			d->xwhiteouts = true;
			ovl_layer_set_xwhiteouts(ofs, d->layer);
		} else if (val == 'y') {
			d->stop = true;
			if (last_element)
				d->opaque = true;
			goto out;
		}
	}
	err = ovl_check_redirect(&path, d, prelen, post);
	if (err)
		goto out_err;
out:
	*ret = this;
	return 0;

put_and_out:
	dput(this);
	this = NULL;
	goto out;

out_warn:
	pr_warn_ratelimited("failed lookup in %s (%pd2, name='%.*s', err=%i): %s\n",
			    is_upper ? "upper" : "lower", base,
			    namelen, name, err, warn);
out_err:
	dput(this);
	return err;
}

/*
 * Lookup d->name in @base.  A name starting with '/' (set by a previous
 * absolute redirect) is walked element by element from the layer root.
 */
static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
			    struct dentry **ret, bool drop_negative)
{
	/* Counting down from the end, since the prefix can change */
	size_t rem = d->name.len - 1;
	struct dentry *dentry = NULL;
	int err;

	if (d->name.name[0] != '/')
		return ovl_lookup_single(base, d, d->name.name, d->name.len,
					 0, "", ret, drop_negative);

	while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
		const char *s = d->name.name + d->name.len - rem;
		const char *next = strchrnul(s, '/');
		size_t thislen = next - s;
		bool end = !next[0];

		/* Verify we did not go off the rails */
		if (WARN_ON(s[-1] != '/'))
			return -EIO;

		err = ovl_lookup_single(base, d, s, thislen,
					d->name.len - rem, next, &base,
					drop_negative);
		dput(dentry);
		if (err)
			return err;
		dentry = base;
		if (end)
			break;

		rem -= thislen + 1;

		if (WARN_ON(rem >= d->name.len))
			return -EIO;
	}
	*ret = dentry;
	return 0;
}

/*
 * Lookup @redirect relative to the root of data-only layer @layer.
 * On success, @datapath holds a reference to the found regular file.
 */
static int ovl_lookup_data_layer(struct dentry *dentry, const char *redirect,
				 const struct ovl_layer *layer,
				 struct path *datapath)
{
	int err;

	err = vfs_path_lookup(layer->mnt->mnt_root, layer->mnt, redirect,
			      LOOKUP_BENEATH | LOOKUP_NO_SYMLINKS | LOOKUP_NO_XDEV,
			      datapath);
	pr_debug("lookup lowerdata (%pd2, redirect=\"%s\", layer=%d, err=%i)\n",
		 dentry, redirect, layer->idx, err);

	if (err)
		return err;

	err = -EREMOTE;
	if (ovl_dentry_weird(datapath->dentry))
		goto out_path_put;

	err = -ENOENT;
	/* Only regular file is acceptable as lower data */
	if (!d_is_reg(datapath->dentry))
		goto out_path_put;

	return 0;

out_path_put:
	path_put(datapath);

	return err;
}

/* Lookup in data-only layers by absolute redirect to layer root */
static int ovl_lookup_data_layers(struct dentry *dentry, const char *redirect,
				  struct ovl_path *lowerdata)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	const struct ovl_layer *layer;
	struct path datapath;
	int err = -ENOENT;
	int i;

	/* Data-only layers are at the end of the layers array */
	layer = &ofs->layers[ofs->numlayer - ofs->numdatalayer];
	for (i = 0; i < ofs->numdatalayer; i++, layer++) {
		err = ovl_lookup_data_layer(dentry, redirect, layer, &datapath);
		if (!err) {
			mntput(datapath.mnt);
			lowerdata->dentry = datapath.dentry;
			lowerdata->layer = layer;
			return 0;
		}
	}

	return err;
}

/*
 * Decode origin file handle @fh on one of the lower layers and store the
 * result in *stackp (allocated here if the caller passed *stackp == NULL).
 */
int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
			struct dentry *upperdentry, struct ovl_path **stackp)
{
	struct dentry *origin = NULL;
	int i;

	for (i = 1; i <= ovl_numlowerlayer(ofs); i++) {
		/*
		 * If lower fs uuid is not unique among lower fs we cannot match
		 * fh->uuid to layer.
		 */
		if (ofs->layers[i].fsid &&
		    ofs->layers[i].fs->bad_uuid)
			continue;

		origin = ovl_decode_real_fh(ofs, fh, ofs->layers[i].mnt,
					    connected);
		if (origin)
			break;
	}

	if (!origin)
		return -ESTALE;
	else if (IS_ERR(origin))
		return PTR_ERR(origin);

	/* Origin must have the same file type as the (non-whiteout) upper */
	if (upperdentry && !ovl_upper_is_whiteout(ofs, upperdentry) &&
	    inode_wrong_type(d_inode(upperdentry), d_inode(origin)->i_mode))
		goto invalid;

	if (!*stackp)
		*stackp = kmalloc(sizeof(struct ovl_path), GFP_KERNEL);
	if (!*stackp) {
		dput(origin);
		return -ENOMEM;
	}
	**stackp = (struct ovl_path){
		.dentry = origin,
		.layer = &ofs->layers[i]
	};

	return 0;

invalid:
	pr_warn_ratelimited("invalid origin (%pd2, ftype=%x, origin ftype=%x).\n",
			    upperdentry, d_inode(upperdentry)->i_mode & S_IFMT,
			    d_inode(origin)->i_mode & S_IFMT);
	dput(origin);
	return -ESTALE;
}

/*
 * Lookup the copy up origin of @upperdentry by decoding its "origin"
 * xattr.  A stale origin is not an error - it is treated as if there
 * was no origin (returns 0 with *stackp untouched).
 */
static int ovl_check_origin(struct ovl_fs *ofs, struct dentry *upperdentry,
			    struct ovl_path **stackp)
{
	struct ovl_fh *fh = ovl_get_fh(ofs, upperdentry, OVL_XATTR_ORIGIN);
	int err;

	if (IS_ERR_OR_NULL(fh))
		return PTR_ERR(fh);

	err = ovl_check_origin_fh(ofs, fh, false, upperdentry, stackp);
	kfree(fh);

	if (err) {
		if (err == -ESTALE)
			return 0;
		return err;
	}

	return 0;
}

/*
 * Verify that @fh matches the file handle stored in xattr @name.
 * Return 0 on match, -ESTALE on mismatch, < 0 on error.
 */
static int ovl_verify_fh(struct ovl_fs *ofs, struct dentry *dentry,
			 enum ovl_xattr ox, const struct ovl_fh *fh)
{
	struct ovl_fh *ofh = ovl_get_fh(ofs, dentry, ox);
	int err = 0;

	if (!ofh)
		return -ENODATA;

	if (IS_ERR(ofh))
		return PTR_ERR(ofh);

	if (fh->fb.len != ofh->fb.len || memcmp(&fh->fb, &ofh->fb, fh->fb.len))
		err = -ESTALE;

	kfree(ofh);
	return err;
}

/*
 * Verify @fh against the handle stored in xattr @ox; if @set is true and
 * no handle is stored (-ENODATA), store @fh there instead.
 */
int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
		      enum ovl_xattr ox, const struct ovl_fh *fh,
		      bool is_upper, bool set)
{
	int err;

	err = ovl_verify_fh(ofs, dentry, ox, fh);
	if (set && err == -ENODATA)
		err = ovl_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);

	return err;
}

/*
 * Verify that @real dentry matches the file handle stored in xattr @name.
 *
 * If @set is true and there is no stored file handle, encode @real and store
 * file handle in xattr @name.
 *
 * Return 0 on match, -ESTALE on mismatch, -ENODATA on no xattr, < 0 on error.
 */
int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
			    enum ovl_xattr ox, struct dentry *real,
			    bool is_upper, bool set)
{
	struct inode *inode;
	struct ovl_fh *fh;
	int err;

	fh = ovl_encode_real_fh(ofs, d_inode(real), is_upper);
	err = PTR_ERR(fh);
	if (IS_ERR(fh)) {
		fh = NULL;
		goto fail;
	}

	err = ovl_verify_set_fh(ofs, dentry, ox, fh, is_upper, set);
	if (err)
		goto fail;

out:
	kfree(fh);
	return err;

fail:
	inode = d_inode(real);
	pr_warn_ratelimited("failed to verify %s (%pd2, ino=%lu, err=%i)\n",
			    is_upper ? "upper" : "origin", real,
			    inode ? inode->i_ino : 0, err);
	goto out;
}


/* Get upper dentry from index */
struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index,
			       bool connected)
{
	struct ovl_fh *fh;
	struct dentry *upper;

	/* Non-dir index is a hardlink to the upper inode itself */
	if (!d_is_dir(index))
		return dget(index);

	fh = ovl_get_fh(ofs, index, OVL_XATTR_UPPER);
	if (IS_ERR_OR_NULL(fh))
		return ERR_CAST(fh);

	upper = ovl_decode_real_fh(ofs, fh, ovl_upper_mnt(ofs), connected);
	kfree(fh);

	if (IS_ERR_OR_NULL(upper))
		return upper ?: ERR_PTR(-ESTALE);

	if (!d_is_dir(upper)) {
		pr_warn_ratelimited("invalid index upper (%pd2, upper=%pd2).\n",
				    index, upper);
		dput(upper);
		return ERR_PTR(-EIO);
	}

	return upper;
}

/*
 * Verify that an index entry name matches the origin file handle stored in
 * OVL_XATTR_ORIGIN and that origin file handle can be decoded to lower path.
 * Return 0 on match, -ESTALE on mismatch or stale origin, < 0 on error.
 */
int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index)
{
	struct ovl_fh *fh = NULL;
	size_t len;
	struct ovl_path origin = { };
	struct ovl_path *stack = &origin;
	struct dentry *upper = NULL;
	int err;

	if (!d_inode(index))
		return 0;

	/* Index name is the hex representation of a file handle */
	err = -EINVAL;
	if (index->d_name.len < sizeof(struct ovl_fb)*2)
		goto fail;

	err = -ENOMEM;
	len = index->d_name.len / 2;
	fh = kzalloc(len + OVL_FH_WIRE_OFFSET, GFP_KERNEL);
	if (!fh)
		goto fail;

	err = -EINVAL;
	if (hex2bin(fh->buf, index->d_name.name, len))
		goto fail;

	err = ovl_check_fb_len(&fh->fb, len);
	if (err)
		goto fail;

	/*
	 * Whiteout index entries are used as an indication that an exported
	 * overlay file handle should be treated as stale (i.e. after unlink
	 * of the overlay inode). These entries contain no origin xattr.
	 */
	if (ovl_is_whiteout(index))
		goto out;

	/*
	 * Verifying directory index entries are not stale is expensive, so
	 * only verify stale dir index if NFS export is enabled.
	 */
	if (d_is_dir(index) && !ofs->config.nfs_export)
		goto out;

	/*
	 * Directory index entries should have 'upper' xattr pointing to the
	 * real upper dir. Non-dir index entries are hardlinks to the upper
	 * real inode. For non-dir index, we can read the copy up origin xattr
	 * directly from the index dentry, but for dir index we first need to
	 * decode the upper directory.
	 */
	upper = ovl_index_upper(ofs, index, false);
	if (IS_ERR_OR_NULL(upper)) {
		err = PTR_ERR(upper);
		/*
		 * Directory index entries with no 'upper' xattr need to be
		 * removed. When dir index entry has a stale 'upper' xattr,
		 * we assume that upper dir was removed and we treat the dir
		 * index as orphan entry that needs to be whited out.
		 */
		if (err == -ESTALE)
			goto orphan;
		else if (!err)
			err = -ESTALE;
		goto fail;
	}

	err = ovl_verify_fh(ofs, upper, OVL_XATTR_ORIGIN, fh);
	dput(upper);
	if (err)
		goto fail;

	/* Check if non-dir index is orphan and don't warn before cleaning it */
	if (!d_is_dir(index) && d_inode(index)->i_nlink == 1) {
		err = ovl_check_origin_fh(ofs, fh, false, index, &stack);
		if (err)
			goto fail;

		if (ovl_get_nlink(ofs, origin.dentry, index, 0) == 0)
			goto orphan;
	}

out:
	dput(origin.dentry);
	kfree(fh);
	return err;

fail:
	pr_warn_ratelimited("failed to verify index (%pd2, ftype=%x, err=%i)\n",
			    index, d_inode(index)->i_mode & S_IFMT, err);
	goto out;

orphan:
	pr_warn_ratelimited("orphan index entry (%pd2, ftype=%x, nlink=%u)\n",
			    index, d_inode(index)->i_mode & S_IFMT,
			    d_inode(index)->i_nlink);
	err = -ENOENT;
	goto out;
}

/* Build the index entry name (hex of @fh) into @name; caller frees the name */
int ovl_get_index_name_fh(const struct ovl_fh *fh, struct qstr *name)
{
	char *n, *s;

	/* Two hex chars per byte of the handle; kcalloc checks the multiply */
	n = kcalloc(fh->fb.len, 2, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	s = bin2hex(n, fh->buf, fh->fb.len);
	*name = (struct qstr) QSTR_INIT(n, s - n);

	return 0;

}

/*
 * Lookup in indexdir for the index entry of a lower real inode or a copy up
 * origin inode. The index entry name is the hex representation of the lower
 * inode file handle.
 *
 * If the index dentry in negative, then either no lower aliases have been
 * copied up yet, or aliases have been copied up in older kernels and are
 * not indexed.
 *
 * If the index dentry for a copy up origin inode is positive, but points
 * to an inode different than the upper inode, then either the upper inode
 * has been copied up and not indexed or it was indexed, but since then
 * index dir was cleared. Either way, that index cannot be used to identify
 * the overlay inode.
 */
int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
		       struct qstr *name)
{
	struct ovl_fh *fh;
	int err;

	fh = ovl_encode_real_fh(ofs, d_inode(origin), false);
	if (IS_ERR(fh))
		return PTR_ERR(fh);

	err = ovl_get_index_name_fh(fh, name);

	kfree(fh);
	return err;
}

/* Lookup index by file handle for NFS export */
struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh)
{
	struct dentry *index;
	struct qstr name;
	int err;

	err = ovl_get_index_name_fh(fh, &name);
	if (err)
		return ERR_PTR(err);

	index = lookup_noperm_positive_unlocked(&name, ofs->workdir);
	kfree(name.name);
	if (IS_ERR(index)) {
		if (PTR_ERR(index) == -ENOENT)
			index = NULL;
		return index;
	}

	if (ovl_is_whiteout(index))
		err = -ESTALE;
	else if (ovl_dentry_weird(index))
		err = -EIO;
	else
		return index;

	dput(index);
	return ERR_PTR(err);
}

struct dentry
*ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
		  struct dentry *origin, bool verify)
{
	struct dentry *index;
	struct inode *inode;
	struct qstr name;
	bool is_dir = d_is_dir(origin);
	int err;

	err = ovl_get_index_name(ofs, origin, &name);
	if (err)
		return ERR_PTR(err);

	index = lookup_one_positive_unlocked(ovl_upper_mnt_idmap(ofs), &name,
					     ofs->workdir);
	if (IS_ERR(index)) {
		err = PTR_ERR(index);
		if (err == -ENOENT) {
			index = NULL;
			goto out;
		}
		pr_warn_ratelimited("failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
				    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
				    d_inode(origin)->i_ino, name.len, name.name,
				    err);
		goto out;
	}

	inode = d_inode(index);
	if (ovl_is_whiteout(index) && !verify) {
		/*
		 * When index lookup is called with !verify for decoding an
		 * overlay file handle, a whiteout index implies that decode
		 * should treat file handle as stale and no need to print a
		 * warning about it.
		 */
		dput(index);
		index = ERR_PTR(-ESTALE);
		goto out;
	} else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) ||
		   inode_wrong_type(inode, d_inode(origin)->i_mode)) {
		/*
		 * Index should always be of the same file type as origin
		 * except for the case of a whiteout index. A whiteout
		 * index should only exist if all lower aliases have been
		 * unlinked, which means that finding a lower origin on lookup
		 * whose index is a whiteout should be treated as an error.
		 */
		pr_warn_ratelimited("bad index found (index=%pd2, ftype=%x, origin ftype=%x).\n",
				    index, d_inode(index)->i_mode & S_IFMT,
				    d_inode(origin)->i_mode & S_IFMT);
		goto fail;
	} else if (is_dir && verify) {
		if (!upper) {
			pr_warn_ratelimited("suspected uncovered redirected dir found (origin=%pd2, index=%pd2).\n",
					    origin, index);
			goto fail;
		}

		/* Verify that dir index 'upper' xattr points to upper dir */
		err = ovl_verify_upper(ofs, index, upper, false);
		if (err) {
			if (err == -ESTALE) {
				pr_warn_ratelimited("suspected multiply redirected dir found (upper=%pd2, origin=%pd2, index=%pd2).\n",
						    upper, origin, index);
			}
			goto fail;
		}
	} else if (upper && d_inode(upper) != inode) {
		/* Index points at a different upper inode - ignore it */
		goto out_dput;
	}
out:
	kfree(name.name);
	return index;

out_dput:
	dput(index);
	index = NULL;
	goto out;

fail:
	dput(index);
	index = ERR_PTR(-EIO);
	goto out;
}

/*
 * Returns next layer in stack starting from top.
 * Returns -1 if this is the last layer.
 */
int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
		  const struct ovl_layer **layer)
{
	struct ovl_entry *oe = OVL_E(dentry);
	struct ovl_path *lowerstack = ovl_lowerstack(oe);

	BUG_ON(idx < 0);
	if (idx == 0) {
		ovl_path_upper(dentry, path);
		if (path->dentry) {
			*layer = &OVL_FS(dentry->d_sb)->layers[0];
			return ovl_numlower(oe) ? 1 : -1;
		}
		idx++;
	}
	BUG_ON(idx > ovl_numlower(oe));
	path->dentry = lowerstack[idx - 1].dentry;
	*layer = lowerstack[idx - 1].layer;
	path->mnt = (*layer)->mnt;

	return (idx < ovl_numlower(oe)) ?
	       idx + 1 : -1;
}

/* Fix missing 'origin' xattr */
static int ovl_fix_origin(struct ovl_fs *ofs, struct dentry *dentry,
			  struct dentry *lower, struct dentry *upper)
{
	const struct ovl_fh *fh;
	int err;

	/* Nothing to do if upper already has an origin xattr */
	if (ovl_check_origin_xattr(ofs, upper))
		return 0;

	fh = ovl_get_origin_fh(ofs, lower);
	if (IS_ERR(fh))
		return PTR_ERR(fh);

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	err = ovl_set_origin_fh(ofs, fh, upper);
	if (!err)
		err = ovl_set_impure(dentry->d_parent, upper->d_parent);

	ovl_drop_write(dentry);
out:
	kfree(fh);
	return err;
}

/*
 * Validate the verity digest of a metacopy dentry against its lowerdata.
 * The successful result is cached in the OVL_VERIFIED_DIGEST inode flag,
 * so validation runs at most once per inode.
 */
static int ovl_maybe_validate_verity(struct dentry *dentry)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct inode *inode = d_inode(dentry);
	struct path datapath, metapath;
	int err;

	if (!ofs->config.verity_mode ||
	    !ovl_is_metacopy_dentry(dentry) ||
	    ovl_test_flag(OVL_VERIFIED_DIGEST, inode))
		return 0;

	if (!ovl_test_flag(OVL_HAS_DIGEST, inode)) {
		if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) {
			pr_warn_ratelimited("metacopy file '%pd' has no digest specified\n",
					    dentry);
			return -EIO;
		}
		return 0;
	}

	ovl_path_lowerdata(dentry, &datapath);
	if (!datapath.dentry)
		return -EIO;

	ovl_path_real(dentry, &metapath);
	if (!metapath.dentry)
		return -EIO;

	err = ovl_inode_lock_interruptible(inode);
	if (err)
		return err;

	/* Recheck flag under lock - another task may have validated it */
	if (!ovl_test_flag(OVL_VERIFIED_DIGEST, inode)) {
		with_ovl_creds(dentry->d_sb)
			err = ovl_validate_verity(ofs, &metapath, &datapath);
		if (err == 0)
			ovl_set_flag(OVL_VERIFIED_DIGEST, inode);
	}

	ovl_inode_unlock(inode);

	return err;
}

/* Lazy lookup of lowerdata */
static int ovl_maybe_lookup_lowerdata(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	const char *redirect = ovl_lowerdata_redirect(inode);
	struct ovl_path datapath = {};
	int err;

	if (!redirect || ovl_dentry_lowerdata(dentry))
		return 0;

	/* Lowerdata redirect must be an absolute path */
	if (redirect[0] != '/')
		return -EIO;

	err = ovl_inode_lock_interruptible(inode);
	if (err)
		return err;

	err = 0;
	/* Someone got here before us? */
	if (ovl_dentry_lowerdata(dentry))
		goto out;

	with_ovl_creds(dentry->d_sb)
		err = ovl_lookup_data_layers(dentry, redirect, &datapath);
	if (err)
		goto out_err;

	err = ovl_dentry_set_lowerdata(dentry, &datapath);
	if (err)
		goto out_err;

out:
	ovl_inode_unlock(inode);
	dput(datapath.dentry);

	return err;

out_err:
	pr_warn_ratelimited("lazy lowerdata lookup failed (%pd2, err=%i)\n",
			    dentry, err);
	goto out;
}

int ovl_verify_lowerdata(struct dentry *dentry)
{
	int err;

	err = ovl_maybe_lookup_lowerdata(dentry);
	if (err)
		return err;

	return ovl_maybe_validate_verity(dentry);
}

/*
 * Following redirects/metacopy can have security consequences: it's like a
 * symlink into the lower layer without the permission checks.
 *
 * This is only a problem if the upper layer is untrusted (e.g comes from an USB
 * drive). This can allow a non-readable file or directory to become readable.
 *
 * Only following redirects when redirects are enabled disables this attack
 * vector when not necessary.
 */
static bool ovl_check_follow_redirect(struct ovl_lookup_data *d)
{
	struct ovl_fs *ofs = OVL_FS(d->sb);

	if (d->metacopy && !ofs->config.metacopy) {
		pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", d->dentry);
		return false;
	}
	if ((d->redirect || d->upperredirect) && !ovl_redirect_follow(ofs)) {
		pr_warn_ratelimited("refusing to follow redirect for (%pd2)\n", d->dentry);
		return false;
	}
	return true;
}

/* Intermediate results gathered by ovl_lookup_layers() */
struct ovl_lookup_ctx {
	struct dentry *dentry;
	struct ovl_entry *oe;
	struct ovl_path *stack;
	struct ovl_path *origin_path;
	struct dentry *upperdentry;
	struct dentry *index;
	struct inode *inode;
	unsigned int ctr;	/* number of entries filled in @stack */
};

static int ovl_lookup_layers(struct ovl_lookup_ctx *ctx, struct ovl_lookup_data *d)
{
	struct dentry *dentry = ctx->dentry;
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct ovl_entry *poe = OVL_E(dentry->d_parent);
	struct ovl_entry *roe = OVL_E(dentry->d_sb->s_root);
	bool check_redirect = (ovl_redirect_follow(ofs) || ofs->numdatalayer);
	struct dentry *upperdir;
	struct dentry *this;
	struct dentry *origin = NULL;
	bool upperopaque = false;
	bool uppermetacopy = false;
	int metacopy_size = 0;
	unsigned int i;
	int err;

	/* Lookup in the upper layer first */
	upperdir = ovl_dentry_upper(dentry->d_parent);
	if (upperdir) {
		d->layer = &ofs->layers[0];
		err = ovl_lookup_layer(upperdir, d, &ctx->upperdentry, true);
		if (err)
			return err;

		if (ctx->upperdentry && ctx->upperdentry->d_flags & DCACHE_OP_REAL)
			return -EREMOTE;

		if (ctx->upperdentry && !d->is_dir) {
			/*
			 * Lookup copy up origin by decoding origin file handle.
			 * We may get a disconnected dentry, which is fine,
			 * because we only need to hold the origin inode in
			 * cache and use its inode number. We may even get a
			 * connected dentry, that is not under any of the lower
			 * layers root. That is also fine for using it's inode
			 * number - it's the same as if we held a reference
			 * to a dentry in lower layer that was moved under us.
			 */
			err = ovl_check_origin(ofs, ctx->upperdentry, &ctx->origin_path);
			if (err)
				return err;

			if (d->metacopy)
				uppermetacopy = true;
			metacopy_size = d->metacopy;
		}

		if (d->redirect) {
			err = -ENOMEM;
			d->upperredirect = kstrdup(d->redirect, GFP_KERNEL);
			if (!d->upperredirect)
				return err;
			/* Absolute redirect restarts lookup from the root */
			if (d->redirect[0] == '/')
				poe = roe;
		}
		upperopaque = d->opaque;
	}

	if (!d->stop && ovl_numlower(poe)) {
		err = -ENOMEM;
		ctx->stack = ovl_stack_alloc(ofs->numlayer - 1);
		if (!ctx->stack)
			return err;
	}

	/* Continue lookup down the lower layers */
	for (i = 0; !d->stop && i < ovl_numlower(poe); i++) {
		struct ovl_path lower = ovl_lowerstack(poe)[i];

		if (!ovl_check_follow_redirect(d)) {
			err = -EPERM;
			return err;
		}

		if (!check_redirect)
			d->last = i == ovl_numlower(poe) - 1;
		else if (d->is_dir || !ofs->numdatalayer)
			d->last = lower.layer->idx == ovl_numlower(roe);

		d->layer = lower.layer;
		err = ovl_lookup_layer(lower.dentry, d, &this, false);
		if (err)
			return err;

		if (!this)
			continue;

		/*
		 * If no origin fh is stored in upper of a merge dir, store fh
		 * of lower dir and set upper parent "impure".
		 */
		if (ctx->upperdentry && !ctx->ctr && !ofs->noxattr && d->is_dir) {
			err = ovl_fix_origin(ofs, dentry, this, ctx->upperdentry);
			if (err) {
				dput(this);
				return err;
			}
		}

		/*
		 * When "verify_lower" feature is enabled, do not merge with a
		 * lower dir that does not match a stored origin xattr. In any
		 * case, only verified origin is used for index lookup.
		 *
		 * For non-dir dentry, if index=on, then ensure origin
		 * matches the dentry found using path based lookup,
		 * otherwise error out.
		 */
		if (ctx->upperdentry && !ctx->ctr &&
		    ((d->is_dir && ovl_verify_lower(dentry->d_sb)) ||
		     (!d->is_dir && ofs->config.index && ctx->origin_path))) {
			err = ovl_verify_origin(ofs, ctx->upperdentry, this, false);
			if (err) {
				dput(this);
				if (d->is_dir)
					break;
				return err;
			}
			origin = this;
		}

		if (!ctx->upperdentry && !d->is_dir && !ctx->ctr && d->metacopy)
			metacopy_size = d->metacopy;

		if (d->metacopy && ctx->ctr) {
			/*
			 * Do not store intermediate metacopy dentries in
			 * lower chain, except top most lower metacopy dentry.
			 * Continue the loop so that if there is an absolute
			 * redirect on this dentry, poe can be reset to roe.
			 */
			dput(this);
			this = NULL;
		} else {
			ctx->stack[ctx->ctr].dentry = this;
			ctx->stack[ctx->ctr].layer = lower.layer;
			ctx->ctr++;
		}

		if (d->stop)
			break;

		if (d->redirect && d->redirect[0] == '/' && poe != roe) {
			poe = roe;
			/* Find the current layer on the root dentry */
			i = lower.layer->idx - 1;
		}
	}

	/*
	 * Defer lookup of lowerdata in data-only layers to first access.
	 * Don't require redirect=follow and metacopy=on in this case.
	 */
	if (d->metacopy && ctx->ctr && ofs->numdatalayer && d->absolute_redirect) {
		d->metacopy = 0;
		ctx->ctr++;
	} else if (!ovl_check_follow_redirect(d)) {
		err = -EPERM;
		return err;
	}

	/*
	 * For regular non-metacopy upper dentries, there is no lower
	 * path based lookup, hence ctr will be zero. If a dentry is found
	 * using ORIGIN xattr on upper, install it in stack.
	 *
	 * For metacopy dentry, path based lookup will find lower dentries.
	 * Just make sure a corresponding data dentry has been found.
	 */
	if (d->metacopy || (uppermetacopy && !ctx->ctr)) {
		pr_warn_ratelimited("metacopy with no lower data found - abort lookup (%pd2)\n",
				    dentry);
		err = -EIO;
		return err;
	} else if (!d->is_dir && ctx->upperdentry && !ctx->ctr && ctx->origin_path) {
		if (WARN_ON(ctx->stack != NULL)) {
			err = -EIO;
			return err;
		}
		ctx->stack = ctx->origin_path;
		ctx->ctr = 1;
		origin = ctx->origin_path->dentry;
		ctx->origin_path = NULL;
	}

	/*
	 * Always lookup index if there is no-upperdentry.
	 *
	 * For the case of upperdentry, we have set origin by now if it
	 * needed to be set. There are basically three cases.
	 *
	 * For directories, lookup index by lower inode and verify it matches
	 * upper inode. We only trust dir index if we verified that lower dir
	 * matches origin, otherwise dir index entries may be inconsistent
	 * and we ignore them.
	 *
	 * For regular upper, we already set origin if upper had ORIGIN
	 * xattr. There is no verification though as there is no path
	 * based dentry lookup in lower in this case.
	 *
	 * For metacopy upper, we set a verified origin already if index
	 * is enabled and if upper had an ORIGIN xattr.
1290 * 1291 */ 1292 if (!ctx->upperdentry && ctx->ctr) 1293 origin = ctx->stack[0].dentry; 1294 1295 if (origin && ovl_indexdir(dentry->d_sb) && 1296 (!d->is_dir || ovl_index_all(dentry->d_sb))) { 1297 ctx->index = ovl_lookup_index(ofs, ctx->upperdentry, origin, true); 1298 if (IS_ERR(ctx->index)) { 1299 err = PTR_ERR(ctx->index); 1300 ctx->index = NULL; 1301 return err; 1302 } 1303 } 1304 1305 if (ctx->ctr) { 1306 ctx->oe = ovl_alloc_entry(ctx->ctr); 1307 err = -ENOMEM; 1308 if (!ctx->oe) 1309 return err; 1310 1311 ovl_stack_cpy(ovl_lowerstack(ctx->oe), ctx->stack, ctx->ctr); 1312 } 1313 1314 if (upperopaque) 1315 ovl_dentry_set_opaque(dentry); 1316 if (d->xwhiteouts) 1317 ovl_dentry_set_xwhiteouts(dentry); 1318 1319 if (ctx->upperdentry) 1320 ovl_dentry_set_upper_alias(dentry); 1321 else if (ctx->index) { 1322 char *upperredirect; 1323 struct path upperpath = { 1324 .dentry = ctx->upperdentry = dget(ctx->index), 1325 .mnt = ovl_upper_mnt(ofs), 1326 }; 1327 1328 /* 1329 * It's safe to assign upperredirect here: the previous 1330 * assignment happens only if upperdentry is non-NULL, and 1331 * this one only if upperdentry is NULL. 
1332 */ 1333 upperredirect = ovl_get_redirect_xattr(ofs, &upperpath, 0); 1334 if (IS_ERR(upperredirect)) 1335 return PTR_ERR(upperredirect); 1336 d->upperredirect = upperredirect; 1337 1338 err = ovl_check_metacopy_xattr(ofs, &upperpath, NULL); 1339 if (err < 0) 1340 return err; 1341 d->metacopy = uppermetacopy = err; 1342 metacopy_size = err; 1343 1344 if (!ovl_check_follow_redirect(d)) { 1345 err = -EPERM; 1346 return err; 1347 } 1348 } 1349 1350 if (ctx->upperdentry || ctx->ctr) { 1351 struct inode *inode; 1352 struct ovl_inode_params oip = { 1353 .upperdentry = ctx->upperdentry, 1354 .oe = ctx->oe, 1355 .index = ctx->index, 1356 .redirect = d->upperredirect, 1357 }; 1358 1359 /* Store lowerdata redirect for lazy lookup */ 1360 if (ctx->ctr > 1 && !d->is_dir && !ctx->stack[ctx->ctr - 1].dentry) { 1361 oip.lowerdata_redirect = d->redirect; 1362 d->redirect = NULL; 1363 } 1364 1365 inode = ovl_get_inode(dentry->d_sb, &oip); 1366 if (IS_ERR(inode)) 1367 return PTR_ERR(inode); 1368 1369 ctx->inode = inode; 1370 if (ctx->upperdentry && !uppermetacopy) 1371 ovl_set_flag(OVL_UPPERDATA, ctx->inode); 1372 1373 if (metacopy_size > OVL_METACOPY_MIN_SIZE) 1374 ovl_set_flag(OVL_HAS_DIGEST, ctx->inode); 1375 } 1376 1377 ovl_dentry_init_reval(dentry, ctx->upperdentry, OVL_I_E(ctx->inode)); 1378 1379 return 0; 1380 } 1381 1382 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, 1383 unsigned int flags) 1384 { 1385 struct ovl_fs *ofs = OVL_FS(dentry->d_sb); 1386 struct ovl_entry *poe = OVL_E(dentry->d_parent); 1387 bool check_redirect = (ovl_redirect_follow(ofs) || ofs->numdatalayer); 1388 int err; 1389 struct ovl_lookup_ctx ctx = { 1390 .dentry = dentry, 1391 }; 1392 struct ovl_lookup_data d = { 1393 .sb = dentry->d_sb, 1394 .dentry = dentry, 1395 .name = dentry->d_name, 1396 .last = check_redirect ? 
false : !ovl_numlower(poe), 1397 }; 1398 1399 if (dentry->d_name.len > ofs->namelen) 1400 return ERR_PTR(-ENAMETOOLONG); 1401 1402 with_ovl_creds(dentry->d_sb) 1403 err = ovl_lookup_layers(&ctx, &d); 1404 1405 if (ctx.origin_path) { 1406 dput(ctx.origin_path->dentry); 1407 kfree(ctx.origin_path); 1408 } 1409 dput(ctx.index); 1410 ovl_stack_free(ctx.stack, ctx.ctr); 1411 kfree(d.redirect); 1412 1413 if (err) { 1414 ovl_free_entry(ctx.oe); 1415 dput(ctx.upperdentry); 1416 kfree(d.upperredirect); 1417 return ERR_PTR(err); 1418 } 1419 1420 return d_splice_alias(ctx.inode, dentry); 1421 } 1422 1423 bool ovl_lower_positive(struct dentry *dentry) 1424 { 1425 struct ovl_entry *poe = OVL_E(dentry->d_parent); 1426 const struct qstr *name = &dentry->d_name; 1427 unsigned int i; 1428 bool positive = false; 1429 bool done = false; 1430 1431 /* 1432 * If dentry is negative, then lower is positive iff this is a 1433 * whiteout. 1434 */ 1435 if (!dentry->d_inode) 1436 return ovl_dentry_is_opaque(dentry); 1437 1438 /* Negative upper -> positive lower */ 1439 if (!ovl_dentry_upper(dentry)) 1440 return true; 1441 1442 with_ovl_creds(dentry->d_sb) { 1443 /* Positive upper -> have to look up lower to see whether it exists */ 1444 for (i = 0; !done && !positive && i < ovl_numlower(poe); i++) { 1445 struct dentry *this; 1446 struct ovl_path *parentpath = &ovl_lowerstack(poe)[i]; 1447 1448 /* 1449 * We need to make a non-const copy of dentry->d_name, 1450 * because lookup_one_positive_unlocked() will hash name 1451 * with parentpath base, which is on another (lower fs). 1452 */ 1453 this = lookup_one_positive_unlocked(mnt_idmap(parentpath->layer->mnt), 1454 &QSTR_LEN(name->name, name->len), 1455 parentpath->dentry); 1456 if (IS_ERR(this)) { 1457 switch (PTR_ERR(this)) { 1458 case -ENOENT: 1459 case -ENAMETOOLONG: 1460 break; 1461 1462 default: 1463 /* 1464 * Assume something is there, we just couldn't 1465 * access it. 
1466 */ 1467 positive = true; 1468 break; 1469 } 1470 } else { 1471 struct path path = { 1472 .dentry = this, 1473 .mnt = parentpath->layer->mnt, 1474 }; 1475 positive = !ovl_path_is_whiteout(OVL_FS(dentry->d_sb), &path); 1476 done = true; 1477 dput(this); 1478 } 1479 } 1480 } 1481 1482 return positive; 1483 } 1484