// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */

#include <uapi/linux/magic.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/posix_acl_xattr.h>
#include <linux/exportfs.h>
#include <linux/file.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include "overlayfs.h"
#include "params.h"

MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Overlay filesystem");
MODULE_LICENSE("GPL");


struct ovl_dir_cache;

static struct dentry *ovl_d_real(struct dentry *dentry, enum d_real_type type)
{
	struct dentry *upper, *lower;
	int err;

	switch (type) {
	case D_REAL_DATA:
	case D_REAL_METADATA:
		break;
	default:
		goto bug;
	}

	if (!d_is_reg(dentry)) {
		/* d_real_inode() is only relevant for regular files */
		return dentry;
	}

	upper = ovl_dentry_upper(dentry);
	if (upper && (type == D_REAL_METADATA ||
		      ovl_has_upperdata(d_inode(dentry))))
		return upper;

	if (type == D_REAL_METADATA) {
		lower = ovl_dentry_lower(dentry);
		goto real_lower;
	}

	/*
	 * Best effort lazy lookup of lowerdata for D_REAL_DATA case to return
	 * the real lowerdata dentry.  The only current caller of d_real() with
	 * D_REAL_DATA is d_real_inode() from trace_uprobe and this caller is
	 * likely going to be followed by reading from the file, before placing
	 * uprobes on an offset within the file, so lowerdata should be
	 * available when setting the uprobe.
	 */
	err = ovl_verify_lowerdata(dentry);
	if (err)
		goto bug;
	lower = ovl_dentry_lowerdata(dentry);
	if (!lower)
		goto bug;

real_lower:
	/* Handle recursion into stacked lower fs */
	return d_real(lower, type);

bug:
	WARN(1, "%s(%pd4, %d): real dentry not found\n", __func__, dentry, type);
	return dentry;
}

static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak)
{
	int ret = 1;

	if (!d)
		return 1;

	if (weak) {
		if (d->d_flags & DCACHE_OP_WEAK_REVALIDATE)
			ret = d->d_op->d_weak_revalidate(d, flags);
	} else if (d->d_flags & DCACHE_OP_REVALIDATE) {
		struct dentry *parent;
		struct inode *dir;
		struct name_snapshot n;

		if (flags & LOOKUP_RCU) {
			parent = READ_ONCE(d->d_parent);
			dir = d_inode_rcu(parent);
			if (!dir)
				return -ECHILD;
		} else {
			parent = dget_parent(d);
			dir = d_inode(parent);
		}
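		/*
		 * Take a snapshot of the name so that a stable copy can be
		 * passed to ->d_revalidate() without racing with a concurrent
		 * rename changing d_name behind our back (notably in RCU
		 * mode, where d_parent/d_name may change under us).
		 */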
		take_dentry_name_snapshot(&n, d);
		ret = d->d_op->d_revalidate(dir, &n.name, d, flags);
		release_dentry_name_snapshot(&n);
		if (!(flags & LOOKUP_RCU))
			dput(parent);
		if (!ret) {
			if (!(flags & LOOKUP_RCU))
				d_invalidate(d);
			ret = -ESTALE;
		}
	}
	return ret;
}

static int ovl_dentry_revalidate_common(struct dentry *dentry,
					unsigned int flags, bool weak)
{
	struct ovl_entry *oe;
	struct ovl_path *lowerstack;
	struct inode *inode = d_inode_rcu(dentry);
	struct dentry *upper;
	unsigned int i;
	int ret = 1;

	/* Careful in RCU mode */
	if (!inode)
		return -ECHILD;

	oe = OVL_I_E(inode);
	lowerstack = ovl_lowerstack(oe);
	upper = ovl_i_dentry_upper(inode);
	if (upper)
		ret = ovl_revalidate_real(upper, flags, weak);

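	/*
	 * Revalidate the lower layers in stack order; a zero or negative
	 * result from any layer ends the walk and is returned as-is.
	 */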
	for (i = 0; ret > 0 && i < ovl_numlower(oe); i++)
		ret = ovl_revalidate_real(lowerstack[i].dentry, flags, weak);

	return ret;
}

static int ovl_dentry_revalidate(struct inode *dir, const struct qstr *name,
				 struct dentry *dentry, unsigned int flags)
{
	return ovl_dentry_revalidate_common(dentry, flags, false);
}

static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
{
	return ovl_dentry_revalidate_common(dentry, flags, true);
}

static const struct dentry_operations ovl_dentry_operations = {
	.d_real = ovl_d_real,
	.d_revalidate = ovl_dentry_revalidate,
	.d_weak_revalidate = ovl_dentry_weak_revalidate,
};

#if IS_ENABLED(CONFIG_UNICODE)
static const struct dentry_operations ovl_dentry_ci_operations = {
	.d_real = ovl_d_real,
	.d_revalidate = ovl_dentry_revalidate,
	.d_weak_revalidate = ovl_dentry_weak_revalidate,
	.d_hash = generic_ci_d_hash,
	.d_compare = generic_ci_d_compare,
};
#endif

static struct kmem_cache *ovl_inode_cachep;

static struct inode *ovl_alloc_inode(struct super_block *sb)
{
	struct ovl_inode *oi = alloc_inode_sb(sb, ovl_inode_cachep, GFP_KERNEL);

	if (!oi)
		return NULL;

	oi->cache = NULL;
	oi->redirect = NULL;
	oi->version = 0;
	oi->flags = 0;
	oi->__upperdentry = NULL;
	oi->lowerdata_redirect = NULL;
	oi->oe = NULL;
	mutex_init(&oi->lock);

	return &oi->vfs_inode;
}

static void ovl_free_inode(struct inode *inode)
{
	struct ovl_inode *oi = OVL_I(inode);

	kfree(oi->redirect);
	kfree(oi->oe);
	mutex_destroy(&oi->lock);
	kmem_cache_free(ovl_inode_cachep, oi);
}

static void ovl_destroy_inode(struct inode *inode)
{
	struct ovl_inode *oi = OVL_I(inode);

	dput(oi->__upperdentry);
	ovl_stack_put(ovl_lowerstack(oi->oe), ovl_numlower(oi->oe));
	if (S_ISDIR(inode->i_mode))
		ovl_dir_cache_free(inode);
	else
		kfree(oi->lowerdata_redirect);
}

static void ovl_put_super(struct super_block *sb)
{
	struct ovl_fs *ofs = OVL_FS(sb);

	if (ofs)
		ovl_free_fs(ofs);
}

/* Sync real dirty inodes in upper filesystem (if it exists) */
static int ovl_sync_fs(struct super_block *sb, int wait)
{
	struct ovl_fs *ofs = OVL_FS(sb);
	struct super_block *upper_sb;
	int ret;

	ret = ovl_sync_status(ofs);

	if (ret < 0)
		return -EIO;

	if (!ret)
		return ret;

	/*
	 * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
	 * All the super blocks will be iterated, including upper_sb.
	 *
	 * If this is a syncfs(2) call, then we do need to call
	 * sync_filesystem() on upper_sb, but it is enough to do it when
	 * called with wait == 1.
	 */
	if (!wait)
		return 0;

	upper_sb = ovl_upper_mnt(ofs)->mnt_sb;

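	/*
	 * Hold s_umount of the upper sb to keep it from being unmounted or
	 * remounted while sync_filesystem() runs on it.
	 */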
	down_read(&upper_sb->s_umount);
	ret = sync_filesystem(upper_sb);
	up_read(&upper_sb->s_umount);

	return ret;
}

/**
 * ovl_statfs
 * @dentry: The dentry to query
 * @buf: The struct kstatfs to fill in with stats
 *
 * Get the filesystem statistics.  As writes always target the upper layer
 * filesystem, pass the statfs to the upper filesystem (if it exists).
 */
static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ovl_fs *ofs = OVL_FS(sb);
	struct dentry *root_dentry = sb->s_root;
	struct path path;
	int err;

	ovl_path_real(root_dentry, &path);

	err = vfs_statfs(&path, buf);
	if (!err) {
		buf->f_namelen = ofs->namelen;
		buf->f_type = OVERLAYFS_SUPER_MAGIC;
		if (ovl_has_fsid(ofs))
			buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
	}

	return err;
}

static const struct super_operations ovl_super_operations = {
	.alloc_inode	= ovl_alloc_inode,
	.free_inode	= ovl_free_inode,
	.destroy_inode	= ovl_destroy_inode,
	.drop_inode	= inode_just_drop,
	.put_super	= ovl_put_super,
	.sync_fs	= ovl_sync_fs,
	.statfs		= ovl_statfs,
	.show_options	= ovl_show_options,
};

#define OVL_WORKDIR_NAME "work"
#define OVL_INDEXDIR_NAME "index"

static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
					 const char *name, bool persist)
{
	struct inode *dir = ofs->workbasedir->d_inode;
	struct vfsmount *mnt = ovl_upper_mnt(ofs);
	struct dentry *work;
	int err;
	bool retried = false;

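	/*
	 * If a directory with this name already exists (e.g. a stale workdir
	 * left behind by a previous mount), it is either reused as-is (for a
	 * persistent dir such as the index dir) or cleaned up, after which
	 * creation is retried exactly once.
	 */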
retry:
	inode_lock_nested(dir, I_MUTEX_PARENT);
	work = ovl_lookup_upper(ofs, name, ofs->workbasedir, strlen(name));

	if (!IS_ERR(work)) {
		struct iattr attr = {
			.ia_valid = ATTR_MODE,
			.ia_mode = S_IFDIR | 0,
		};

		if (work->d_inode) {
			err = -EEXIST;
			inode_unlock(dir);
			if (retried)
				goto out_dput;

			if (persist)
				return work;

			retried = true;
			err = ovl_workdir_cleanup(ofs, ofs->workbasedir, mnt, work, 0);
			dput(work);
			if (err == -EINVAL)
				return ERR_PTR(err);

			goto retry;
		}

		work = ovl_do_mkdir(ofs, dir, work, attr.ia_mode);
		inode_unlock(dir);
		err = PTR_ERR(work);
		if (IS_ERR(work))
			goto out_err;

		/* Weird filesystem returning with hashed negative (kernfs)? */
		err = -EINVAL;
		if (d_really_is_negative(work))
			goto out_dput;

		/*
		 * Try to remove POSIX ACL xattrs from workdir.  We are good if:
		 *
		 * a) success (there was a POSIX ACL xattr and was removed)
		 * b) -ENODATA (there was no POSIX ACL xattr)
		 * c) -EOPNOTSUPP (POSIX ACL xattrs are not supported)
		 *
		 * There are various other error values that could effectively
		 * mean that the xattr doesn't exist (e.g. -ERANGE is returned
		 * if the xattr name is too long), but the set of filesystems
		 * allowed as upper is limited to "normal" ones, where checking
		 * for the above two errors is sufficient.
		 */
		err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_DEFAULT);
		if (err && err != -ENODATA && err != -EOPNOTSUPP)
			goto out_dput;

		err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_ACCESS);
		if (err && err != -ENODATA && err != -EOPNOTSUPP)
			goto out_dput;

		/* Clear any inherited mode bits */
		inode_lock(work->d_inode);
		err = ovl_do_notify_change(ofs, work, &attr);
		inode_unlock(work->d_inode);
		if (err)
			goto out_dput;
	} else {
		inode_unlock(dir);
		err = PTR_ERR(work);
		goto out_err;
	}
	return work;

out_dput:
	dput(work);
out_err:
	pr_warn("failed to create directory %s/%s (errno: %i); mounting read-only\n",
		ofs->config.workdir, name, -err);
	return NULL;
}

static int ovl_check_namelen(const struct path *path, struct ovl_fs *ofs,
			     const char *name)
{
	struct kstatfs statfs;
	int err = vfs_statfs(path, &statfs);

	if (err)
		pr_err("statfs failed on '%s'\n", name);
	else
		ofs->namelen = max(ofs->namelen, statfs.f_namelen);

	return err;
}

static int ovl_lower_dir(const char *name, const struct path *path,
			 struct ovl_fs *ofs, int *stack_depth)
{
	int fh_type;
	int err;

	err = ovl_check_namelen(path, ofs, name);
	if (err)
		return err;

	*stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);

	/*
	 * The inodes index feature and NFS export need to encode and decode
	 * file handles, so they require that all layers support them.
	 */
	fh_type = ovl_can_decode_fh(path->dentry->d_sb);
	if ((ofs->config.nfs_export ||
	     (ofs->config.index && ofs->config.upperdir)) && !fh_type) {
		ofs->config.index = false;
		ofs->config.nfs_export = false;
		pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
			name);
	}
	ofs->nofh |= !fh_type;
	/*
	 * Decoding origin file handle is required for persistent st_ino.
	 * Without persistent st_ino, xino=auto falls back to xino=off.
	 */
	if (ofs->config.xino == OVL_XINO_AUTO &&
	    ofs->config.upperdir && !fh_type) {
		ofs->config.xino = OVL_XINO_OFF;
		pr_warn("fs on '%s' does not support file handles, falling back to xino=off.\n",
			name);
	}

	/* Check if lower fs has 32bit inode numbers */
	if (fh_type != FILEID_INO32_GEN)
		ofs->xino_mode = -1;

	return 0;
}

/* Workdir should not be subdir of upperdir and vice versa */
static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
{
	bool ok = false;

	if (workdir != upperdir) {
		struct dentry *trap = lock_rename(workdir, upperdir);

		if (!IS_ERR(trap))
			unlock_rename(workdir, upperdir);
		ok = (trap == NULL);
	}
	return ok;
}

static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
			  struct inode **ptrap, const char *name)
{
	struct inode *trap;
	int err;

	trap = ovl_get_trap_inode(sb, dir);
	err = PTR_ERR_OR_ZERO(trap);
	if (err) {
		if (err == -ELOOP)
			pr_err("conflicting %s path\n", name);
		return err;
	}

	*ptrap = trap;
	return 0;
}

/*
 * Determine how we treat concurrent use of upperdir/workdir based on the
 * index feature.  This is papering over mount leaks of container runtimes,
 * for example, an old overlay mount is leaked and now its upperdir is
 * attempted to be used as a lower layer in a new overlay mount.
 */
static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
{
	if (ofs->config.index) {
		pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
		       name);
		return -EBUSY;
	} else {
		pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
			name);
		return 0;
	}
}

static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
			 struct ovl_layer *upper_layer,
			 const struct path *upperpath)
{
	struct vfsmount *upper_mnt;
	int err;

	/* Upperdir path should not be r/o */
	if (__mnt_is_readonly(upperpath->mnt)) {
		pr_err("upper fs is r/o, try multi-lower layers mount\n");
		err = -EINVAL;
		goto out;
	}

	err = ovl_check_namelen(upperpath, ofs, ofs->config.upperdir);
	if (err)
		goto out;

	err = ovl_setup_trap(sb, upperpath->dentry, &upper_layer->trap,
			     "upperdir");
	if (err)
		goto out;

	upper_mnt = clone_private_mount(upperpath);
	err = PTR_ERR(upper_mnt);
	if (IS_ERR(upper_mnt)) {
		pr_err("failed to clone upperpath\n");
		goto out;
	}

	/* Don't inherit atime flags */
	upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
	upper_layer->mnt = upper_mnt;
	upper_layer->idx = 0;
	upper_layer->fsid = 0;

	/*
	 * Inherit SB_NOSEC flag from upperdir.
	 *
	 * This optimization changes behavior when a security related attribute
	 * (suid/sgid/security.*) is changed on an underlying layer.  This is
	 * okay because we don't yet have guarantees in that case, but it will
	 * need careful treatment once we want to honour changes to underlying
	 * filesystems.
	 */
	if (upper_mnt->mnt_sb->s_flags & SB_NOSEC)
		sb->s_flags |= SB_NOSEC;

	if (ovl_inuse_trylock(ovl_upper_mnt(ofs)->mnt_root)) {
		ofs->upperdir_locked = true;
	} else {
		err = ovl_report_in_use(ofs, "upperdir");
		if (err)
			goto out;
	}

	err = 0;
out:
	return err;
}

/*
 * Returns 1 if RENAME_WHITEOUT is supported, 0 if not supported and
 * a negative value if an error is encountered.
 */
static int ovl_check_rename_whiteout(struct ovl_fs *ofs)
{
	struct dentry *workdir = ofs->workdir;
	struct dentry *temp;
	struct dentry *dest;
	struct dentry *whiteout;
	struct name_snapshot name;
	int err;

	temp = ovl_create_temp(ofs, workdir, OVL_CATTR(S_IFREG | 0));
	err = PTR_ERR(temp);
	if (IS_ERR(temp))
		return err;

	err = ovl_parent_lock(workdir, temp);
	if (err) {
		dput(temp);
		return err;
	}
	dest = ovl_lookup_temp(ofs, workdir);
	err = PTR_ERR(dest);
	if (IS_ERR(dest)) {
		dput(temp);
		ovl_parent_unlock(workdir);
		return err;
	}

	/* Name is inline and stable - using snapshot as a copy helper */
	take_dentry_name_snapshot(&name, temp);
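	/*
	 * RENAME_WHITEOUT moves temp to dest and leaves a whiteout in its
	 * place.  A filesystem that does not support the flag fails with
	 * -EINVAL, which is reported as "not supported" (0) below.
	 */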
	err = ovl_do_rename(ofs, workdir, temp, workdir, dest, RENAME_WHITEOUT);
	ovl_parent_unlock(workdir);
	if (err) {
		if (err == -EINVAL)
			err = 0;
		goto cleanup_temp;
	}

	whiteout = ovl_lookup_upper_unlocked(ofs, name.name.name,
					     workdir, name.name.len);
	err = PTR_ERR(whiteout);
	if (IS_ERR(whiteout))
		goto cleanup_temp;

	err = ovl_upper_is_whiteout(ofs, whiteout);

	/* Best effort cleanup of whiteout and temp file */
	if (err)
		ovl_cleanup(ofs, workdir, whiteout);
	dput(whiteout);

cleanup_temp:
	ovl_cleanup(ofs, workdir, temp);
	release_dentry_name_snapshot(&name);
	dput(temp);
	dput(dest);

	return err;
}

static struct dentry *ovl_lookup_or_create(struct ovl_fs *ofs,
					   struct dentry *parent,
					   const char *name, umode_t mode)
{
	size_t len = strlen(name);
	struct dentry *child;

	inode_lock_nested(parent->d_inode, I_MUTEX_PARENT);
	child = ovl_lookup_upper(ofs, name, parent, len);
	if (!IS_ERR(child) && !child->d_inode)
		child = ovl_create_real(ofs, parent, child, OVL_CATTR(mode));
	inode_unlock(parent->d_inode);
	dput(parent);

	return child;
}

/*
 * Creates $workdir/work/incompat/volatile/dirty file if it is not already
 * present.
 */
static int ovl_create_volatile_dirty(struct ovl_fs *ofs)
{
	unsigned int ctr;
	struct dentry *d = dget(ofs->workbasedir);
	static const char *const volatile_path[] = {
		OVL_WORKDIR_NAME, "incompat", "volatile", "dirty"
	};
	const char *const *name = volatile_path;

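	/*
	 * Walk the path one component at a time; each intermediate component
	 * is created as a directory and the final "dirty" component as a
	 * regular file.  Note that ovl_lookup_or_create() consumes the parent
	 * reference and returns a reference to the child.
	 */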
	for (ctr = ARRAY_SIZE(volatile_path); ctr; ctr--, name++) {
		d = ovl_lookup_or_create(ofs, d, *name, ctr > 1 ? S_IFDIR : S_IFREG);
		if (IS_ERR(d))
			return PTR_ERR(d);
	}
	dput(d);
	return 0;
}

static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
			    const struct path *workpath)
{
	struct vfsmount *mnt = ovl_upper_mnt(ofs);
	struct dentry *workdir;
	struct file *tmpfile;
	bool rename_whiteout;
	bool d_type;
	int fh_type;
	int err;

	err = mnt_want_write(mnt);
	if (err)
		return err;

	workdir = ovl_workdir_create(ofs, OVL_WORKDIR_NAME, false);
	err = PTR_ERR(workdir);
	if (IS_ERR_OR_NULL(workdir))
		goto out;

	ofs->workdir = workdir;

	err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir");
	if (err)
		goto out;

	/*
	 * Upper should support d_type, else whiteouts are visible.  Given
	 * workdir and upper are on the same fs, we can do iterate_dir() on
	 * workdir.  This check requires successful creation of workdir in
	 * the previous step.
	 */
	err = ovl_check_d_type_supported(workpath);
	if (err < 0)
		goto out;

	d_type = err;
	if (!d_type)
		pr_warn("upper fs needs to support d_type.\n");

	/* Check if upper/work fs supports O_TMPFILE */
	tmpfile = ovl_do_tmpfile(ofs, ofs->workdir, S_IFREG | 0);
	ofs->tmpfile = !IS_ERR(tmpfile);
	if (ofs->tmpfile)
		fput(tmpfile);
	else
		pr_warn("upper fs does not support tmpfile.\n");

	/* Check if upper/work fs supports RENAME_WHITEOUT */
	err = ovl_check_rename_whiteout(ofs);
	if (err < 0)
		goto out;

	rename_whiteout = err;
	if (!rename_whiteout)
		pr_warn("upper fs does not support RENAME_WHITEOUT.\n");

	/*
	 * Check if upper/work fs supports (trusted|user).overlay.* xattr
	 */
	err = ovl_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1);
	if (err) {
		pr_warn("failed to set xattr on upper\n");
		ofs->noxattr = true;
		if (ovl_redirect_follow(ofs)) {
			ofs->config.redirect_mode = OVL_REDIRECT_NOFOLLOW;
			pr_warn("...falling back to redirect_dir=nofollow.\n");
		}
		if (ofs->config.metacopy) {
			ofs->config.metacopy = false;
			pr_warn("...falling back to metacopy=off.\n");
		}
		if (ofs->config.index) {
			ofs->config.index = false;
			pr_warn("...falling back to index=off.\n");
		}
		if (ovl_has_fsid(ofs)) {
			ofs->config.uuid = OVL_UUID_NULL;
			pr_warn("...falling back to uuid=null.\n");
		}
		/*
		 * xattr support is required for persistent st_ino.
		 * Without persistent st_ino, xino=auto falls back to xino=off.
		 */
		if (ofs->config.xino == OVL_XINO_AUTO) {
			ofs->config.xino = OVL_XINO_OFF;
			pr_warn("...falling back to xino=off.\n");
		}
		if (err == -EPERM && !ofs->config.userxattr)
			pr_info("try mounting with 'userxattr' option\n");
		err = 0;
	} else {
		ovl_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE);
	}

	/*
	 * We allowed sub-optimal upper fs configuration and don't want to break
	 * users over kernel upgrade, but we never allowed remote upper fs, so
	 * we can enforce strict requirements for remote upper fs.
	 */
	if (ovl_dentry_remote(ofs->workdir) &&
	    (!d_type || !rename_whiteout || ofs->noxattr)) {
		pr_err("upper fs missing required features.\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * For volatile mount, create an incompat/volatile/dirty file to keep
	 * track of it.
	 */
	if (ofs->config.ovl_volatile) {
		err = ovl_create_volatile_dirty(ofs);
		if (err < 0) {
			pr_err("Failed to create volatile/dirty file.\n");
			goto out;
		}
	}

	/* Check if upper/work fs supports file handles */
	fh_type = ovl_can_decode_fh(ofs->workdir->d_sb);
	if (ofs->config.index && !fh_type) {
		ofs->config.index = false;
		pr_warn("upper fs does not support file handles, falling back to index=off.\n");
	}
	ofs->nofh |= !fh_type;

	/* Check if upper fs has 32bit inode numbers */
	if (fh_type != FILEID_INO32_GEN)
		ofs->xino_mode = -1;

	/* NFS export of r/w mount depends on index */
	if (ofs->config.nfs_export && !ofs->config.index) {
		pr_warn("NFS export requires \"index=on\", falling back to nfs_export=off.\n");
		ofs->config.nfs_export = false;
	}
out:
	mnt_drop_write(mnt);
	return err;
}

static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
			   const struct path *upperpath,
			   const struct path *workpath)
{
	int err;

	err = -EINVAL;
	if (upperpath->mnt != workpath->mnt) {
		pr_err("workdir and upperdir must reside under the same mount\n");
		return err;
	}
	if (!ovl_workdir_ok(workpath->dentry, upperpath->dentry)) {
		pr_err("workdir and upperdir must be separate subtrees\n");
		return err;
	}

	ofs->workbasedir = dget(workpath->dentry);

	if (ovl_inuse_trylock(ofs->workbasedir)) {
		ofs->workdir_locked = true;
	} else {
		err = ovl_report_in_use(ofs, "workdir");
		if (err)
			return err;
	}

	err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap,
			     "workdir");
	if (err)
		return err;

	return ovl_make_workdir(sb, ofs, workpath);
}

static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
			    struct ovl_entry *oe, const struct path *upperpath)
{
	struct vfsmount *mnt = ovl_upper_mnt(ofs);
	struct dentry *indexdir;
	struct dentry *origin = ovl_lowerstack(oe)->dentry;
	const struct ovl_fh *fh;
	int err;

	fh = ovl_get_origin_fh(ofs, origin);
	if (IS_ERR(fh))
		return PTR_ERR(fh);

	err = mnt_want_write(mnt);
	if (err)
		goto out_free_fh;

	/* Verify lower root is upper root origin */
	err = ovl_verify_origin_fh(ofs, upperpath->dentry, fh, true);
	if (err) {
		pr_err("failed to verify upper root origin\n");
		goto out;
	}

	/* index dir will act also as workdir */
	iput(ofs->workdir_trap);
	ofs->workdir_trap = NULL;
	dput(ofs->workdir);
	ofs->workdir = NULL;
	indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true);
	if (IS_ERR(indexdir)) {
		err = PTR_ERR(indexdir);
	} else if (indexdir) {
		ofs->workdir = indexdir;
		err = ovl_setup_trap(sb, indexdir, &ofs->workdir_trap,
				     "indexdir");
		if (err)
			goto out;

		/*
		 * Verify upper root is exclusively associated with index dir.
		 * Older kernels stored upper fh in ".overlay.origin"
		 * xattr.  If that xattr exists, verify that it is a match to
		 * upper dir file handle.  In any case, verify or set xattr
		 * ".overlay.upper" to indicate that index may have
		 * directory entries.
		 */
		if (ovl_check_origin_xattr(ofs, indexdir)) {
			err = ovl_verify_origin_xattr(ofs, indexdir,
						      OVL_XATTR_ORIGIN,
						      upperpath->dentry, true,
						      false);
			if (err)
				pr_err("failed to verify index dir 'origin' xattr\n");
		}
		err = ovl_verify_upper(ofs, indexdir, upperpath->dentry, true);
		if (err)
			pr_err("failed to verify index dir 'upper' xattr\n");

		/* Cleanup bad/stale/orphan index entries */
		if (!err)
			err = ovl_indexdir_cleanup(ofs);
	}
	if (err || !indexdir)
		pr_warn("try deleting index dir or mounting with '-o index=off' to disable inodes index.\n");

out:
	mnt_drop_write(mnt);
out_free_fh:
	kfree(fh);
	return err;
}

static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
{
	unsigned int i;

	if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs))
		return true;

	/*
	 * We allow using single lower with null uuid for index and nfs_export
	 * for example to support those features with single lower squashfs.
	 * To avoid regressions in setups of overlay with re-formatted lower
	 * squashfs, do not allow decoding origin with lower null uuid unless
	 * user opted-in to one of the new features that require following the
	 * lower inode of non-dir upper.
	 */
	if (ovl_allow_offline_changes(ofs) && uuid_is_null(uuid))
		return false;

	for (i = 0; i < ofs->numfs; i++) {
		/*
		 * We use uuid to associate an overlay lower file handle with a
		 * lower layer, so we can accept lower fs with null uuid as long
		 * as all lower layers with null uuid are on the same fs.
		 * If we detect multiple lower fs with the same uuid, we
		 * disable lower file handle decoding on all of them.
		 */
		if (ofs->fs[i].is_lower &&
		    uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) {
			ofs->fs[i].bad_uuid = true;
			return false;
		}
	}
	return true;
}

/* Get a unique fsid for the layer */
static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
{
	struct super_block *sb = path->mnt->mnt_sb;
	unsigned int i;
	dev_t dev;
	int err;
	bool bad_uuid = false;
	bool warn = false;

	for (i = 0; i < ofs->numfs; i++) {
		if (ofs->fs[i].sb == sb)
			return i;
	}

	if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
		bad_uuid = true;
		if (ofs->config.xino == OVL_XINO_AUTO) {
			ofs->config.xino = OVL_XINO_OFF;
			warn = true;
		}
		if (ofs->config.index || ofs->config.nfs_export) {
			ofs->config.index = false;
			ofs->config.nfs_export = false;
			warn = true;
		}
		if (warn) {
			pr_warn("%s uuid detected in lower fs '%pd2', falling back to xino=%s,index=off,nfs_export=off.\n",
				uuid_is_null(&sb->s_uuid) ? "null" : "conflicting",
				path->dentry, ovl_xino_mode(&ofs->config));
		}
	}

	err = get_anon_bdev(&dev);
	if (err) {
		pr_err("failed to get anonymous bdev for lowerpath\n");
		return err;
	}

	ofs->fs[ofs->numfs].sb = sb;
	ofs->fs[ofs->numfs].pseudo_dev = dev;
	ofs->fs[ofs->numfs].bad_uuid = bad_uuid;

	return ofs->numfs++;
}

/*
 * The fsid after the last lower fsid is used for the data layers.
 * It is a "null fs" with a null sb, null uuid, and no pseudo dev.
 */
static int ovl_get_data_fsid(struct ovl_fs *ofs)
{
	return ofs->numfs;
}

/*
 * Set the ovl sb encoding as the same one used by the first layer
 */
static int ovl_set_encoding(struct super_block *sb, struct super_block *fs_sb)
{
	if (!sb_has_encoding(fs_sb))
		return 0;

#if IS_ENABLED(CONFIG_UNICODE)
	if (sb_has_strict_encoding(fs_sb)) {
		pr_err("strict encoding not supported\n");
		return -EINVAL;
	}

	sb->s_encoding = fs_sb->s_encoding;
	sb->s_encoding_flags = fs_sb->s_encoding_flags;
#endif
	return 0;
}

static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
			  struct ovl_fs_context *ctx, struct ovl_layer *layers)
{
	int err;
	unsigned int i;
	size_t nr_merged_lower;

	ofs->fs = kcalloc(ctx->nr + 2, sizeof(struct ovl_sb), GFP_KERNEL);
	if (ofs->fs == NULL)
		return -ENOMEM;

	/*
	 * idx/fsid 0 are reserved for upper fs even with lower only overlay
	 * and the last fsid is reserved for "null fs" of the data layers.
	 */
	ofs->numfs++;

	/*
	 * All lower layers that share the same fs as upper layer, use the same
	 * pseudo_dev as upper layer.  Allocate fs[0].pseudo_dev even for lower
	 * only overlay to simplify ovl_fs_free().
	 * is_lower will be set if upper fs is shared with a lower layer.
	 */
	err = get_anon_bdev(&ofs->fs[0].pseudo_dev);
	if (err) {
		pr_err("failed to get anonymous bdev for upper fs\n");
		return err;
	}

	if (ovl_upper_mnt(ofs)) {
		ofs->fs[0].sb = ovl_upper_mnt(ofs)->mnt_sb;
		ofs->fs[0].is_lower = false;

		if (ofs->casefold) {
			err = ovl_set_encoding(sb, ofs->fs[0].sb);
			if (err)
				return err;
		}
	}

	nr_merged_lower = ctx->nr - ctx->nr_data;
	for (i = 0; i < ctx->nr; i++) {
		struct ovl_fs_context_layer *l = &ctx->lower[i];
		struct vfsmount *mnt;
		struct inode *trap;
		int fsid;

		if (i < nr_merged_lower)
			fsid = ovl_get_fsid(ofs, &l->path);
		else
			fsid = ovl_get_data_fsid(ofs);
		if (fsid < 0)
			return fsid;

		/*
		 * Check if lower root conflicts with this overlay layers before
		 * checking if it is in-use as upperdir/workdir of "another"
		 * mount, because we do not bother to check in ovl_is_inuse() if
		 * the upperdir/workdir is in fact in-use by our
		 * upperdir/workdir.
		 */
		err = ovl_setup_trap(sb, l->path.dentry, &trap, "lowerdir");
		if (err)
			return err;

		if (ovl_is_inuse(l->path.dentry)) {
			err = ovl_report_in_use(ofs, "lowerdir");
			if (err) {
				iput(trap);
				return err;
			}
		}

		mnt = clone_private_mount(&l->path);
		err = PTR_ERR(mnt);
		if (IS_ERR(mnt)) {
			pr_err("failed to clone lowerpath\n");
			iput(trap);
			return err;
		}

		/*
		 * Make lower layers R/O.  That way fchmod/fchown on lower file
		 * will fail instead of modifying lower fs.
		 */
		mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;

		layers[ofs->numlayer].trap = trap;
		layers[ofs->numlayer].mnt = mnt;
		layers[ofs->numlayer].idx = ofs->numlayer;
		layers[ofs->numlayer].fsid = fsid;
		layers[ofs->numlayer].fs = &ofs->fs[fsid];
		/* Store for printing lowerdir=... in ovl_show_options() */
		ofs->config.lowerdirs[ofs->numlayer] = l->name;
		l->name = NULL;
		ofs->numlayer++;
		ofs->fs[fsid].is_lower = true;

		if (ofs->casefold) {
			if (!ovl_upper_mnt(ofs) && !sb_has_encoding(sb)) {
				err = ovl_set_encoding(sb, ofs->fs[fsid].sb);
				if (err)
					return err;
			}

			if (!sb_same_encoding(sb, mnt->mnt_sb)) {
				pr_err("all layers must have the same encoding\n");
				return -EINVAL;
			}
		}
	}

	/*
	 * When all layers are on the same fs, overlay can use real inode
	 * numbers.  With mount option "xino=<on|auto>", mounter declares that
	 * there are enough free high bits in the underlying fs to hold the
	 * unique fsid.  If overlayfs does encounter underlying inodes using
	 * the high xino bits reserved for fsid, it emits a warning and uses
	 * the original inode number or a non persistent inode number allocated
	 * from a dedicated range.
	 */
	if (ofs->numfs - !ovl_upper_mnt(ofs) == 1) {
		if (ofs->config.xino == OVL_XINO_ON)
			pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
		ofs->xino_mode = 0;
	} else if (ofs->config.xino == OVL_XINO_OFF) {
		ofs->xino_mode = -1;
	} else if (ofs->xino_mode < 0) {
		/*
		 * This is a roundup of the number of bits needed for encoding
		 * fsid, where fsid 0 is reserved for upper fs (even with
		 * lower only overlay), plus one extra bit reserved for the non
		 * persistent inode number range that is used for resolving
		 * xino lower bits overflow.
		 */
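		/*
		 * For example, an upper fs plus two distinct lower fs gives
		 * numfs == 3: ilog2(2) + 2 == 3 bits, i.e. two bits to encode
		 * fsids 0..2 plus one bit for the non persistent range.
		 */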
		BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 30);
		ofs->xino_mode = ilog2(ofs->numfs - 1) + 2;
	}

	if (ofs->xino_mode > 0) {
		pr_info("\"xino\" feature enabled using %d upper inode bits.\n",
			ofs->xino_mode);
	}

	return 0;
}

static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
					    struct ovl_fs_context *ctx,
					    struct ovl_fs *ofs,
					    struct ovl_layer *layers)
{
	int err;
	unsigned int i;
	size_t nr_merged_lower;
	struct ovl_entry *oe;
	struct ovl_path *lowerstack;
	struct ovl_fs_context_layer *l;

	if (!ofs->config.upperdir && ctx->nr == 1) {
		pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n");
		return ERR_PTR(-EINVAL);
	}

	if (ctx->nr == ctx->nr_data) {
		pr_err("at least one non-data lowerdir is required\n");
		return ERR_PTR(-EINVAL);
	}

	err = -EINVAL;
	for (i = 0; i < ctx->nr; i++) {
		l = &ctx->lower[i];

		err = ovl_lower_dir(l->name, &l->path, ofs, &sb->s_stack_depth);
		if (err)
			return ERR_PTR(err);
	}

	err = -EINVAL;
	sb->s_stack_depth++;
	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
		pr_err("maximum fs stacking depth exceeded\n");
		return ERR_PTR(err);
	}

	err = ovl_get_layers(sb, ofs, ctx, layers);
	if (err)
		return ERR_PTR(err);

	err = -ENOMEM;
	/* Data-only layers are not merged in root directory */
	nr_merged_lower = ctx->nr - ctx->nr_data;
	oe = ovl_alloc_entry(nr_merged_lower);
	if (!oe)
		return ERR_PTR(err);

	lowerstack = ovl_lowerstack(oe);
	for (i = 0; i < nr_merged_lower; i++) {
		l = &ctx->lower[i];
		lowerstack[i].dentry = dget(l->path.dentry);
		lowerstack[i].layer = &ofs->layers[i + 1];
	}
	ofs->numdatalayer = ctx->nr_data;

	return oe;
}

/*
 * Check if this layer root is a descendant of:
 * - another layer of this overlayfs instance
 * - upper/work dir of any overlayfs instance
 */
static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
			   struct dentry *dentry, const char *name,
			   bool is_lower)
{
	struct dentry *next = dentry, *parent;
	int err = 0;

	if (!dentry)
		return 0;

	parent = dget_parent(next);

	/* Walk back ancestors to root (inclusive) looking for traps */
	while (!err && parent != next) {
		if (is_lower && ovl_lookup_trap_inode(sb, parent)) {
			err = -ELOOP;
			pr_err("overlapping %s path\n", name);
		} else if (ovl_is_inuse(parent)) {
			err = ovl_report_in_use(ofs, name);
		}
		next = parent;
		parent = dget_parent(next);
		dput(next);
	}

	dput(parent);

	return err;
}

/*
 * Check if any of the layers or work dirs overlap.
 */
static int ovl_check_overlapping_layers(struct super_block *sb,
					struct ovl_fs *ofs)
{
	int i, err;

	if (ovl_upper_mnt(ofs)) {
		err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
				      "upperdir", false);
		if (err)
			return err;

		/*
		 * Checking workbasedir avoids hitting ovl_is_inuse(parent) of
		 * this instance and covers overlapping work and index dirs,
		 * unless work or index dir have been moved since created inside
		 * workbasedir.  In that case, we already have their traps in
		 * inode cache and we will catch that case on lookup.
		 */
		err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
				      false);
		if (err)
			return err;
	}

	for (i = 1; i < ofs->numlayer; i++) {
		err = ovl_check_layer(sb, ofs,
				      ofs->layers[i].mnt->mnt_root,
				      "lowerdir", true);
		if (err)
			return err;
	}

	return 0;
}

static struct dentry *ovl_get_root(struct super_block *sb,
				   struct dentry *upperdentry,
				   struct ovl_entry *oe)
{
	struct dentry *root;
	struct ovl_fs *ofs = OVL_FS(sb);
	struct ovl_path *lowerpath = ovl_lowerstack(oe);
	unsigned long ino = d_inode(lowerpath->dentry)->i_ino;
	int fsid = lowerpath->layer->fsid;
	struct ovl_inode_params oip = {
		.upperdentry = upperdentry,
		.oe = oe,
	};

	root = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
	if (!root)
		return NULL;

	if (upperdentry) {
		/* Root inode uses upper st_ino/i_ino */
		ino = d_inode(upperdentry)->i_ino;
		fsid = 0;
		ovl_dentry_set_upper_alias(root);
		if (ovl_is_impuredir(sb, upperdentry))
			ovl_set_flag(OVL_IMPURE, d_inode(root));
	}

	/* Look for xwhiteouts marker except in the lowermost layer */
	for (int i = 0; i < ovl_numlower(oe) - 1; i++, lowerpath++) {
		struct path path = {
			.mnt = lowerpath->layer->mnt,
			.dentry = lowerpath->dentry,
		};

		/* overlay.opaque=x means xwhiteouts directory */
		if (ovl_get_opaquedir_val(ofs, &path) == 'x') {
			ovl_layer_set_xwhiteouts(ofs, lowerpath->layer);
			ovl_dentry_set_xwhiteouts(root);
		}
	}

	/* Root is always merge -> can have whiteouts */
	ovl_set_flag(OVL_WHITEOUTS, d_inode(root));
	ovl_dentry_set_flag(OVL_E_CONNECTED, root);
	ovl_set_upperdata(d_inode(root));
	ovl_inode_init(d_inode(root), &oip, ino, fsid);
	WARN_ON(!!IS_CASEFOLDED(d_inode(root)) != ofs->casefold);
	ovl_dentry_init_flags(root, upperdentry, oe, DCACHE_OP_WEAK_REVALIDATE);
	/* root keeps a reference of upperdentry */
	dget(upperdentry);

	return root;
}

static void ovl_set_d_op(struct super_block *sb)
{
#if IS_ENABLED(CONFIG_UNICODE)
	struct ovl_fs *ofs = sb->s_fs_info;

	if (ofs->casefold) {
		set_default_d_op(sb, &ovl_dentry_ci_operations);
		return;
	}
#endif
	set_default_d_op(sb, &ovl_dentry_operations);
}

int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct ovl_fs *ofs = sb->s_fs_info;
	struct ovl_fs_context *ctx = fc->fs_private;
	const struct cred *old_cred = NULL;
	struct dentry *root_dentry;
	struct ovl_entry *oe;
	struct ovl_layer *layers;
	struct cred *cred;
	int err;

	err = -EIO;
	if (WARN_ON(fc->user_ns != current_user_ns()))
		goto out_err;

	ovl_set_d_op(sb);

	err = -ENOMEM;
	if (!ofs->creator_cred)
		ofs->creator_cred = cred = prepare_creds();
	else
		cred = (struct cred *)ofs->creator_cred;
	if (!cred)
		goto out_err;

	old_cred = ovl_override_creds(sb);

	err = ovl_fs_params_verify(ctx, &ofs->config);
	if (err)
		goto out_err;

	err = -EINVAL;
	if (ctx->nr == 0) {
		if (!(fc->sb_flags & SB_SILENT))
			pr_err("missing 'lowerdir'\n");
		goto out_err;
	}

	err = -ENOMEM;
	layers = kcalloc(ctx->nr + 1, sizeof(struct ovl_layer), GFP_KERNEL);
	if (!layers)
		goto out_err;

	ofs->config.lowerdirs = kcalloc(ctx->nr + 1, sizeof(char *), GFP_KERNEL);
	if (!ofs->config.lowerdirs) {
		kfree(layers);
		goto out_err;
	}
	ofs->layers = layers;
	/*
	 * Layer 0 is reserved for upper even if there's no upper.
	 * config.lowerdirs[0] is used for storing the user provided colon
	 * separated lowerdir string.
	 */
	ofs->config.lowerdirs[0] = ctx->lowerdir_all;
	ctx->lowerdir_all = NULL;
	ofs->numlayer = 1;

	sb->s_stack_depth = 0;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	atomic_long_set(&ofs->last_ino, 1);
	/* Assume underlying fs uses 32bit inodes unless proven otherwise */
	if (ofs->config.xino != OVL_XINO_OFF) {
		ofs->xino_mode = BITS_PER_LONG - 32;
		if (!ofs->xino_mode) {
			pr_warn("xino not supported on 32bit kernel, falling back to xino=off.\n");
			ofs->config.xino = OVL_XINO_OFF;
		}
	}

	/* alloc/destroy_inode needed for setting up traps in inode cache */
	sb->s_op = &ovl_super_operations;

	if (ofs->config.upperdir) {
		struct super_block *upper_sb;

		err = -EINVAL;
		if (!ofs->config.workdir) {
			pr_err("missing 'workdir'\n");
			goto out_err;
		}

		err = ovl_get_upper(sb, ofs, &layers[0], &ctx->upper);
		if (err)
			goto out_err;

		upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
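		/*
		 * A volatile mount promises nothing about surviving a crash,
		 * but a writeback error that predates the mount must still be
		 * reported.  Record the current error sequence of the upper sb
		 * and refuse the mount if an unseen error is already pending.
		 */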
		if (!ovl_should_sync(ofs)) {
			ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
			if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
				err = -EIO;
				pr_err("Cannot mount volatile when upperdir has an unseen error.  Sync upperdir fs to clear state.\n");
				goto out_err;
			}
		}

		err = ovl_get_workdir(sb, ofs, &ctx->upper, &ctx->work);
		if (err)
			goto out_err;

		if (!ofs->workdir)
			sb->s_flags |= SB_RDONLY;

		sb->s_stack_depth = upper_sb->s_stack_depth;
		sb->s_time_gran = upper_sb->s_time_gran;
	}
	oe = ovl_get_lowerstack(sb, ctx, ofs, layers);
	err = PTR_ERR(oe);
	if (IS_ERR(oe))
		goto out_err;

	/* If the upper fs is nonexistent, we mark overlayfs r/o too */
	if (!ovl_upper_mnt(ofs))
		sb->s_flags |= SB_RDONLY;

	if (!ovl_origin_uuid(ofs) && ofs->numfs > 1) {
		pr_warn("The uuid=off requires a single fs for lower and upper, falling back to uuid=null.\n");
		ofs->config.uuid = OVL_UUID_NULL;
	} else if (ovl_has_fsid(ofs) && ovl_upper_mnt(ofs)) {
		/* Use per instance persistent uuid/fsid */
		ovl_init_uuid_xattr(sb, ofs, &ctx->upper);
	}

	if (!ovl_force_readonly(ofs) && ofs->config.index) {
		err = ovl_get_indexdir(sb, ofs, oe, &ctx->upper);
		if (err)
			goto out_free_oe;

		/* Force r/o mount with no index dir */
		if (!ofs->workdir)
			sb->s_flags |= SB_RDONLY;
	}

	err = ovl_check_overlapping_layers(sb, ofs);
	if (err)
		goto out_free_oe;

	/* Show index=off in /proc/mounts for forced r/o mount */
	if (!ofs->workdir) {
		ofs->config.index = false;
		if (ovl_upper_mnt(ofs) && ofs->config.nfs_export) {
			pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n");
			ofs->config.nfs_export = false;
		}
	}

	if (ofs->config.metacopy && ofs->config.nfs_export) {
		pr_warn("NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
		ofs->config.nfs_export = false;
	}

	/*
	 * Support encoding decodable file handles with nfs_export=on
	 * and encoding non-decodable file handles with nfs_export=off
	 * if all layers support file handles.
	 */
	if (ofs->config.nfs_export)
		sb->s_export_op = &ovl_export_operations;
	else if (!ofs->nofh)
		sb->s_export_op = &ovl_export_fid_operations;

	/* Never override disk quota limits or use reserved space */
	cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);

	sb->s_magic = OVERLAYFS_SUPER_MAGIC;
	sb->s_xattr = ovl_xattr_handlers(ofs);
	sb->s_fs_info = ofs;
#ifdef CONFIG_FS_POSIX_ACL
	sb->s_flags |= SB_POSIXACL;
#endif
	sb->s_iflags |= SB_I_SKIP_SYNC;
	/*
	 * Ensure that umask handling is done by the filesystems used
	 * for the upper layer instead of overlayfs as that would
	 * lead to unexpected results.
	 */
	sb->s_iflags |= SB_I_NOUMASK;
	sb->s_iflags |= SB_I_EVM_HMAC_UNSUPPORTED;

	err = -ENOMEM;
	root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
	if (!root_dentry)
		goto out_free_oe;

	sb->s_root = root_dentry;

	ovl_revert_creds(old_cred);
	return 0;

out_free_oe:
	ovl_free_entry(oe);
out_err:
	/*
	 * Revert creds before calling ovl_free_fs(), which calls put_cred();
	 * put_cred() requires that the creds being put are not the caller's
	 * creds, i.e., current->cred.
	 */
	if (old_cred)
		ovl_revert_creds(old_cred);
	ovl_free_fs(ofs);
	sb->s_fs_info = NULL;
	return err;
}

struct file_system_type ovl_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "overlay",
	.init_fs_context	= ovl_init_fs_context,
	.parameters		= ovl_parameter_spec,
	.fs_flags		= FS_USERNS_MOUNT,
	.kill_sb		= kill_anon_super,
};
MODULE_ALIAS_FS("overlay");

static void ovl_inode_init_once(void *foo)
{
	struct ovl_inode *oi = foo;

	inode_init_once(&oi->vfs_inode);
}

static int __init ovl_init(void)
{
	int err;

	ovl_inode_cachep = kmem_cache_create("ovl_inode",
					     sizeof(struct ovl_inode), 0,
					     (SLAB_RECLAIM_ACCOUNT |
					      SLAB_ACCOUNT),
					     ovl_inode_init_once);
	if (ovl_inode_cachep == NULL)
		return -ENOMEM;

	err = register_filesystem(&ovl_fs_type);
	if (!err)
		return 0;

	kmem_cache_destroy(ovl_inode_cachep);

	return err;
}

static void __exit ovl_exit(void)
{
	unregister_filesystem(&ovl_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ovl_inode_cachep);
}

module_init(ovl_init);
module_exit(ovl_exit);