// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/xattr.h>
#include <linux/ratelimit.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>
#include <linux/security.h>
#include <linux/namei.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include "overlayfs.h"


int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		struct iattr *attr)
{
	int err;
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	bool full_copy_up = false;
	struct dentry *upperdentry;
	const struct cred *old_cred;

	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncate should trigger data copy up as well */
		full_copy_up = true;
	}

	if (!full_copy_up)
		err = ovl_copy_up(dentry);
	else
		err = ovl_copy_up_with_data(dentry);
	if (!err) {
		struct inode *winode = NULL;

		upperdentry = ovl_dentry_upper(dentry);

		if (attr->ia_valid & ATTR_SIZE) {
			winode = d_inode(upperdentry);
			err = get_write_access(winode);
			if (err)
				goto out;
		}

		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
			attr->ia_valid &= ~ATTR_MODE;

		/*
		 * We might have to translate ovl file into real file object
		 * once use cases emerge. For now, simply don't let underlying
		 * filesystem rely on attr->ia_file
		 */
		attr->ia_valid &= ~ATTR_FILE;

		/*
		 * If open(O_TRUNC) is done, VFS calls ->setattr with ATTR_OPEN
		 * set. Overlayfs does not pass O_TRUNC flag to underlying
		 * filesystem during open -> do not pass ATTR_OPEN. This
		 * disables optimization in fuse which assumes open(O_TRUNC)
		 * already set file size to 0. But we never passed O_TRUNC to
		 * fuse. So by clearing ATTR_OPEN, fuse will be forced to send
		 * setattr request to server.
		 */
		attr->ia_valid &= ~ATTR_OPEN;

		err = ovl_want_write(dentry);
		if (err)
			goto out_put_write;

		inode_lock(upperdentry->d_inode);
		old_cred = ovl_override_creds(dentry->d_sb);
		err = ovl_do_notify_change(ofs, upperdentry, attr);
		revert_creds(old_cred);
		if (!err)
			ovl_copyattr(dentry->d_inode);
		inode_unlock(upperdentry->d_inode);
		ovl_drop_write(dentry);

out_put_write:
		if (winode)
			put_write_access(winode);
	}
out:
	return err;
}

static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	bool samefs = ovl_same_fs(ofs);
	unsigned int xinobits = ovl_xino_bits(ofs);
	unsigned int xinoshift = 64 - xinobits;

	if (samefs) {
		/*
		 * When all layers are on the same fs, all real inode
		 * numbers are unique, so we use the overlay st_dev,
		 * which is friendly to du -x.
		 */
		stat->dev = dentry->d_sb->s_dev;
		return;
	} else if (xinobits) {
		/*
		 * All inode numbers of underlying fs should not be using the
		 * high xinobits, so we use high xinobits to partition the
		 * overlay st_ino address space. The high bits hold the fsid
		 * (upper fsid is 0). The lowest xinobit is reserved for mapping
		 * the non-persistent inode numbers range in case of overflow.
		 * This way all overlay inode numbers are unique and use the
		 * overlay st_dev.
		 */
		if (likely(!(stat->ino >> xinoshift))) {
			stat->ino |= ((u64)fsid) << (xinoshift + 1);
			stat->dev = dentry->d_sb->s_dev;
			return;
		} else if (ovl_xino_warn(ofs)) {
			pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
					    dentry, stat->ino, xinobits);
		}
	}

	/* The inode could not be mapped to a unified st_ino address space */
	if (S_ISDIR(dentry->d_inode->i_mode)) {
		/*
		 * Always use the overlay st_dev for directories, so 'find
		 * -xdev' will scan the entire overlay mount and won't cross the
		 * overlay mount boundaries.
		 *
		 * If not all layers are on the same fs the pair {real st_ino;
		 * overlay st_dev} is not unique, so use the non-persistent
		 * overlay st_ino for directories.
		 */
		stat->dev = dentry->d_sb->s_dev;
		stat->ino = dentry->d_inode->i_ino;
	} else {
		/*
		 * For non-samefs setup, if we cannot map all layers st_ino
		 * to a unified address space, we need to make sure that st_dev
		 * is unique per underlying fs, so we use the unique anonymous
		 * bdev assigned to the underlying fs.
		 */
		stat->dev = ofs->fs[fsid].pseudo_dev;
	}
}
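
/*
 * For illustration, with assumed values (not a real configuration): if
 * xinobits = 4, then xinoshift = 60.  A real inode number that fits in the
 * low 60 bits, say ino = 100 on the layer with fsid = 3, is reported as
 *
 *	st_ino = 100 | (3ULL << 61)
 *
 * with bit 60 (the lowest xinobit) left clear; that bit is reserved for the
 * non-persistent inode number range used on overflow (see ovl_map_ino()).
 */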

int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
		struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct dentry *dentry = path->dentry;
	enum ovl_path_type type;
	struct path realpath;
	const struct cred *old_cred;
	struct inode *inode = d_inode(dentry);
	bool is_dir = S_ISDIR(inode->i_mode);
	int fsid = 0;
	int err;
	bool metacopy_blocks = false;

	metacopy_blocks = ovl_is_metacopy_dentry(dentry);

	type = ovl_path_real(dentry, &realpath);
	old_cred = ovl_override_creds(dentry->d_sb);
	err = ovl_do_getattr(&realpath, stat, request_mask, flags);
	if (err)
		goto out;

	/* Report the effective immutable/append-only STATX flags */
	generic_fill_statx_attr(inode, stat);

	/*
	 * For non-dir or same fs, we use st_ino of the copy up origin.
	 * This guarantees constant st_dev/st_ino across copy up.
	 * With xino feature and non-samefs, we use st_ino of the copy up
	 * origin masked with high bits that represent the layer id.
	 *
	 * If lower filesystem supports NFS file handles, this also guarantees
	 * persistent st_ino across mount cycle.
	 */
	if (!is_dir || ovl_same_dev(OVL_FS(dentry->d_sb))) {
		if (!OVL_TYPE_UPPER(type)) {
			fsid = ovl_layer_lower(dentry)->fsid;
		} else if (OVL_TYPE_ORIGIN(type)) {
			struct kstat lowerstat;
			u32 lowermask = STATX_INO | STATX_BLOCKS |
					(!is_dir ? STATX_NLINK : 0);

			ovl_path_lower(dentry, &realpath);
			err = ovl_do_getattr(&realpath, &lowerstat, lowermask,
					     flags);
			if (err)
				goto out;

			/*
			 * Lower hardlinks may be broken on copy up to different
			 * upper files, so we cannot use the lower origin st_ino
			 * for those different files, even for the same fs case.
			 *
			 * Similarly, several redirected dirs can point to the
			 * same dir on a lower layer. With the "verify_lower"
			 * feature, we do not use the lower origin st_ino if
			 * we haven't verified that this redirect is unique.
			 *
			 * With inodes index enabled, it is safe to use st_ino
			 * of an indexed origin. The index validates that the
			 * upper hardlink is not broken and that a redirected
			 * dir is the only redirect to that origin.
			 */
			if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
			    (!ovl_verify_lower(dentry->d_sb) &&
			     (is_dir || lowerstat.nlink == 1))) {
				fsid = ovl_layer_lower(dentry)->fsid;
				stat->ino = lowerstat.ino;
			}

			/*
			 * If we are querying a metacopy dentry and lower
			 * dentry is data dentry, then use the blocks we
			 * queried just now. We don't have to do additional
			 * vfs_getattr(). If lower itself is metacopy, then
			 * additional vfs_getattr() is unavoidable.
			 */
			if (metacopy_blocks &&
			    realpath.dentry == ovl_dentry_lowerdata(dentry)) {
				stat->blocks = lowerstat.blocks;
				metacopy_blocks = false;
			}
		}

		if (metacopy_blocks) {
			/*
			 * If lower is not same as lowerdata or if there was
			 * no origin on upper, we can end up here.
			 * With lazy lowerdata lookup, guess lowerdata blocks
			 * from size to avoid lowerdata lookup on stat(2).
			 */
			struct kstat lowerdatastat;
			u32 lowermask = STATX_BLOCKS;

			ovl_path_lowerdata(dentry, &realpath);
			if (realpath.dentry) {
				err = ovl_do_getattr(&realpath, &lowerdatastat,
						     lowermask, flags);
				if (err)
					goto out;
			} else {
				lowerdatastat.blocks =
					round_up(stat->size, stat->blksize) >> 9;
			}
			stat->blocks = lowerdatastat.blocks;
		}
	}

	ovl_map_dev_ino(dentry, stat, fsid);

	/*
	 * It's probably not worth it to count subdirs to get the
	 * correct link count.  nlink=1 seems to pacify 'find' and
	 * other utilities.
	 */
	if (is_dir && OVL_TYPE_MERGE(type))
		stat->nlink = 1;

	/*
	 * Return the overlay inode nlinks for indexed upper inodes.
	 * Overlay inode nlink counts the union of the upper hardlinks
	 * and non-covered lower hardlinks. It does not include the upper
	 * index hardlink.
	 */
	if (!is_dir && ovl_test_flag(OVL_INDEX, d_inode(dentry)))
		stat->nlink = dentry->d_inode->i_nlink;

out:
	revert_creds(old_cred);

	return err;
}
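
/*
 * Rough summary of the st_dev/st_ino reporting policy implemented above and
 * in ovl_map_dev_ino():
 *
 * - all layers on the same fs:    overlay st_dev, real (origin) st_ino
 * - xino enabled and ino fits:    overlay st_dev, origin st_ino | fsid bits
 * - otherwise, directories:       overlay st_dev, non-persistent overlay ino
 * - otherwise, non-directories:   per-layer pseudo st_dev, real st_ino
 */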

int ovl_permission(struct mnt_idmap *idmap,
		   struct inode *inode, int mask)
{
	struct inode *upperinode = ovl_inode_upper(inode);
	struct inode *realinode;
	struct path realpath;
	const struct cred *old_cred;
	int err;

	/* Careful in RCU walk mode */
	realinode = ovl_i_path_real(inode, &realpath);
	if (!realinode) {
		WARN_ON(!(mask & MAY_NOT_BLOCK));
		return -ECHILD;
	}

	/*
	 * Check overlay inode with the creds of task and underlying inode
	 * with creds of mounter
	 */
	err = generic_permission(&nop_mnt_idmap, inode, mask);
	if (err)
		return err;

	old_cred = ovl_override_creds(inode->i_sb);
	if (!upperinode &&
	    !special_file(realinode->i_mode) && mask & MAY_WRITE) {
		mask &= ~(MAY_WRITE | MAY_APPEND);
		/* Make sure mounter can read file for copy up later */
		mask |= MAY_READ;
	}
	err = inode_permission(mnt_idmap(realpath.mnt), realinode, mask);
	revert_creds(old_cred);

	return err;
}
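
/*
 * Note on the MAY_WRITE twist above: writing to a file that has not been
 * copied up yet never writes to the lower fs - it triggers copy up, which the
 * mounter performs by reading the lower file.  So instead of asking the lower
 * fs whether the mounter may write (it may not, e.g. on a read-only lower
 * layer), we only check that the mounter may read the file; the actual write
 * then happens on the upper layer.
 */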

static const char *ovl_get_link(struct dentry *dentry,
				struct inode *inode,
				struct delayed_call *done)
{
	const struct cred *old_cred;
	const char *p;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	old_cred = ovl_override_creds(dentry->d_sb);
	p = vfs_get_link(ovl_dentry_real(dentry), done);
	revert_creds(old_cred);
	return p;
}

#ifdef CONFIG_FS_POSIX_ACL
/*
 * Apply the idmapping of the layer to POSIX ACLs. The caller must pass a clone
 * of the POSIX ACLs retrieved from the lower layer to this function to not
 * alter the POSIX ACLs for the underlying filesystem.
 */
static void ovl_idmap_posix_acl(const struct inode *realinode,
				struct mnt_idmap *idmap,
				struct posix_acl *acl)
{
	struct user_namespace *fs_userns = i_user_ns(realinode);

	for (unsigned int i = 0; i < acl->a_count; i++) {
		vfsuid_t vfsuid;
		vfsgid_t vfsgid;

		struct posix_acl_entry *e = &acl->a_entries[i];
		switch (e->e_tag) {
		case ACL_USER:
			vfsuid = make_vfsuid(idmap, fs_userns, e->e_uid);
			e->e_uid = vfsuid_into_kuid(vfsuid);
			break;
		case ACL_GROUP:
			vfsgid = make_vfsgid(idmap, fs_userns, e->e_gid);
			e->e_gid = vfsgid_into_kgid(vfsgid);
			break;
		}
	}
}

/*
 * The @noperm argument is used to skip permission checking and is a temporary
 * measure. Quoting Miklos from an earlier discussion:
 *
 * > So there are two paths to getting an acl:
 * > 1) permission checking and 2) retrieving the value via getxattr(2).
 * > This is a similar situation as reading a symlink vs. following it.
 * > When following a symlink overlayfs always reads the link on the
 * > underlying fs just as if it was a readlink(2) call, calling
 * > security_inode_readlink() instead of security_inode_follow_link().
 * > This is logical: we are reading the link from the underlying storage,
 * > and following it on overlayfs.
 * >
 * > Applying the same logic to acl: we do need to call the
 * > security_inode_getxattr() on the underlying fs, even if just want to
 * > check permissions on overlay. This is currently not done, which is an
 * > inconsistency.
 * >
 * > Maybe adding the check to ovl_get_acl() is the right way to go, but
 * > I'm a little afraid of a performance regression. Will look into that.
 *
 * Until we have made a decision, allow this helper to take the @noperm
 * argument. We should hopefully be able to remove it soon.
 */
struct posix_acl *ovl_get_acl_path(const struct path *path,
				   const char *acl_name, bool noperm)
{
	struct posix_acl *real_acl, *clone;
	struct mnt_idmap *idmap;
	struct inode *realinode = d_inode(path->dentry);

	idmap = mnt_idmap(path->mnt);

	if (noperm)
		real_acl = get_inode_acl(realinode, posix_acl_type(acl_name));
	else
		real_acl = vfs_get_acl(idmap, path->dentry, acl_name);
	if (IS_ERR_OR_NULL(real_acl))
		return real_acl;

	if (!is_idmapped_mnt(path->mnt))
		return real_acl;

	/*
	 * We cannot alter the ACLs returned from the relevant layer as that
	 * would alter the cached values filesystem wide for the lower
	 * filesystem. Instead we can clone the ACLs and then apply the
	 * relevant idmapping of the layer.
	 */
	clone = posix_acl_clone(real_acl, GFP_KERNEL);
	posix_acl_release(real_acl); /* release original acl */
	if (!clone)
		return ERR_PTR(-ENOMEM);

	ovl_idmap_posix_acl(realinode, idmap, clone);
	return clone;
}

/*
 * When the relevant layer is an idmapped mount we need to take the idmapping
 * of the layer into account and translate any ACL_{GROUP,USER} values
 * according to the idmapped mount.
 *
 * We cannot alter the ACLs returned from the relevant layer as that would
 * alter the cached values filesystem wide for the lower filesystem. Instead we
 * can clone the ACLs and then apply the relevant idmapping of the layer.
 *
 * This is obviously only relevant when idmapped layers are used.
 */
struct posix_acl *do_ovl_get_acl(struct mnt_idmap *idmap,
				 struct inode *inode, int type,
				 bool rcu, bool noperm)
{
	struct inode *realinode;
	struct posix_acl *acl;
	struct path realpath;

	/* Careful in RCU walk mode */
	realinode = ovl_i_path_real(inode, &realpath);
	if (!realinode) {
		WARN_ON(!rcu);
		return ERR_PTR(-ECHILD);
	}

	if (!IS_POSIXACL(realinode))
		return NULL;

	if (rcu) {
		/*
		 * If the layer is idmapped drop out of RCU path walk
		 * so we can clone the ACLs.
		 */
		if (is_idmapped_mnt(realpath.mnt))
			return ERR_PTR(-ECHILD);

		acl = get_cached_acl_rcu(realinode, type);
	} else {
		const struct cred *old_cred;

		old_cred = ovl_override_creds(inode->i_sb);
		acl = ovl_get_acl_path(&realpath, posix_acl_xattr_name(type), noperm);
		revert_creds(old_cred);
	}

	return acl;
}
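
/*
 * Example (assumed idmapping, for illustration only): if a layer is mounted
 * with an idmapping that maps fs uid 1000 to vfs uid 2000, an ACL_USER entry
 * stored as uid 1000 on that layer is reported by do_ovl_get_acl() with
 * e_uid 2000.  Only the clone is rewritten; the ACL cached by the underlying
 * filesystem is left untouched.
 */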

static int ovl_set_or_remove_acl(struct dentry *dentry, struct inode *inode,
				 struct posix_acl *acl, int type)
{
	int err;
	struct path realpath;
	const char *acl_name;
	const struct cred *old_cred;
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct dentry *upperdentry = ovl_dentry_upper(dentry);
	struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);

	/*
	 * If ACL is to be removed from a lower file, check if it exists in
	 * the first place before copying it up.
	 */
	acl_name = posix_acl_xattr_name(type);
	if (!acl && !upperdentry) {
		struct posix_acl *real_acl;

		ovl_path_lower(dentry, &realpath);
		old_cred = ovl_override_creds(dentry->d_sb);
		real_acl = vfs_get_acl(mnt_idmap(realpath.mnt), realdentry,
				       acl_name);
		revert_creds(old_cred);
		if (IS_ERR(real_acl)) {
			err = PTR_ERR(real_acl);
			goto out;
		}
		posix_acl_release(real_acl);
	}

	if (!upperdentry) {
		err = ovl_copy_up(dentry);
		if (err)
			goto out;

		realdentry = ovl_dentry_upper(dentry);
	}

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	old_cred = ovl_override_creds(dentry->d_sb);
	if (acl)
		err = ovl_do_set_acl(ofs, realdentry, acl_name, acl);
	else
		err = ovl_do_remove_acl(ofs, realdentry, acl_name);
	revert_creds(old_cred);
	ovl_drop_write(dentry);

	/* copy c/mtime */
	ovl_copyattr(inode);
out:
	return err;
}

int ovl_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
		struct posix_acl *acl, int type)
{
	int err;
	struct inode *inode = d_inode(dentry);
	struct dentry *workdir = ovl_workdir(dentry);
	struct inode *realinode = ovl_inode_real(inode);

	if (!IS_POSIXACL(d_inode(workdir)))
		return -EOPNOTSUPP;
	if (!realinode->i_op->set_acl)
		return -EOPNOTSUPP;
	if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
		return acl ? -EACCES : 0;
	if (!inode_owner_or_capable(&nop_mnt_idmap, inode))
		return -EPERM;

	/*
	 * Check if sgid bit needs to be cleared (actual setacl operation will
	 * be done with mounter's capabilities and so that won't do it for us).
	 */
	if (unlikely(inode->i_mode & S_ISGID) && type == ACL_TYPE_ACCESS &&
	    !in_group_p(inode->i_gid) &&
	    !capable_wrt_inode_uidgid(&nop_mnt_idmap, inode, CAP_FSETID)) {
		struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };

		err = ovl_setattr(&nop_mnt_idmap, dentry, &iattr);
		if (err)
			return err;
	}

	return ovl_set_or_remove_acl(dentry, inode, acl, type);
}
#endif

int ovl_update_time(struct inode *inode, int flags)
{
	if (flags & S_ATIME) {
		struct ovl_fs *ofs = OVL_FS(inode->i_sb);
		struct path upperpath = {
			.mnt = ovl_upper_mnt(ofs),
			.dentry = ovl_upperdentry_dereference(OVL_I(inode)),
		};

		if (upperpath.dentry) {
			touch_atime(&upperpath);
			inode_set_atime_to_ts(inode,
					      inode_get_atime(d_inode(upperpath.dentry)));
		}
	}
	return 0;
}

static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		      u64 start, u64 len)
{
	int err;
	struct inode *realinode = ovl_inode_realdata(inode);
	const struct cred *old_cred;

	if (!realinode)
		return -EIO;

	if (!realinode->i_op->fiemap)
		return -EOPNOTSUPP;

	old_cred = ovl_override_creds(inode->i_sb);
	err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
	revert_creds(old_cred);

	return err;
}

/*
 * Work around the fact that security_file_ioctl() takes a file argument.
 * Introducing security_inode_fileattr_get/set() hooks would solve this issue
 * properly.
 */
static int ovl_security_fileattr(const struct path *realpath, struct fileattr *fa,
				 bool set)
{
	struct file *file;
	unsigned int cmd;
	int err;

	file = dentry_open(realpath, O_RDONLY, current_cred());
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (set)
		cmd = fa->fsx_valid ? FS_IOC_FSSETXATTR : FS_IOC_SETFLAGS;
	else
		cmd = fa->fsx_valid ? FS_IOC_FSGETXATTR : FS_IOC_GETFLAGS;

	err = security_file_ioctl(file, cmd, 0);
	fput(file);

	return err;
}

int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa)
{
	int err;

	err = ovl_security_fileattr(realpath, fa, true);
	if (err)
		return err;

	return vfs_fileattr_set(mnt_idmap(realpath->mnt), realpath->dentry, fa);
}

int ovl_fileattr_set(struct mnt_idmap *idmap,
		     struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct path upperpath;
	const struct cred *old_cred;
	unsigned int flags;
	int err;

	err = ovl_copy_up(dentry);
	if (!err) {
		ovl_path_real(dentry, &upperpath);

		err = ovl_want_write(dentry);
		if (err)
			goto out;

		old_cred = ovl_override_creds(inode->i_sb);
		/*
		 * Store immutable/append-only flags in xattr and clear them
		 * in upper fileattr (in case they were set by older kernel)
		 * so that children of "ovl-immutable" directories and lower
		 * aliases of "ovl-immutable" hardlinks can be copied up.
		 * Clear xattr when flags are cleared.
		 */
		err = ovl_set_protattr(inode, upperpath.dentry, fa);
		if (!err)
			err = ovl_real_fileattr_set(&upperpath, fa);
		revert_creds(old_cred);
		ovl_drop_write(dentry);

		/*
		 * Merge real inode flags with inode flags read from
		 * overlay.protattr xattr
		 */
		flags = ovl_inode_real(inode)->i_flags & OVL_COPY_I_FLAGS_MASK;

		BUILD_BUG_ON(OVL_PROT_I_FLAGS_MASK & ~OVL_COPY_I_FLAGS_MASK);
		flags |= inode->i_flags & OVL_PROT_I_FLAGS_MASK;
		inode_set_flags(inode, flags, OVL_COPY_I_FLAGS_MASK);

		/* Update ctime */
		ovl_copyattr(inode);
	}
out:
	return err;
}
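
/*
 * In effect, the immutable/append-only state is tracked in the
 * "overlay.protattr" xattr rather than in the upper inode flags, and is
 * merged back into the overlay inode flags above, so the protection is
 * enforced at the overlay level while copy up of protected children and
 * hardlink aliases keeps working.
 */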

/* Convert inode protection flags to fileattr flags */
static void ovl_fileattr_prot_flags(struct inode *inode, struct fileattr *fa)
{
	BUILD_BUG_ON(OVL_PROT_FS_FLAGS_MASK & ~FS_COMMON_FL);
	BUILD_BUG_ON(OVL_PROT_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON);

	if (inode->i_flags & S_APPEND) {
		fa->flags |= FS_APPEND_FL;
		fa->fsx_xflags |= FS_XFLAG_APPEND;
	}
	if (inode->i_flags & S_IMMUTABLE) {
		fa->flags |= FS_IMMUTABLE_FL;
		fa->fsx_xflags |= FS_XFLAG_IMMUTABLE;
	}
}

int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa)
{
	int err;

	err = ovl_security_fileattr(realpath, fa, false);
	if (err)
		return err;

	err = vfs_fileattr_get(realpath->dentry, fa);
	if (err == -ENOIOCTLCMD)
		err = -ENOTTY;
	return err;
}

int ovl_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct path realpath;
	const struct cred *old_cred;
	int err;

	ovl_path_real(dentry, &realpath);

	old_cred = ovl_override_creds(inode->i_sb);
	err = ovl_real_fileattr_get(&realpath, fa);
	ovl_fileattr_prot_flags(inode, fa);
	revert_creds(old_cred);

	return err;
}

static const struct inode_operations ovl_file_inode_operations = {
	.setattr	= ovl_setattr,
	.permission	= ovl_permission,
	.getattr	= ovl_getattr,
	.listxattr	= ovl_listxattr,
	.get_inode_acl	= ovl_get_inode_acl,
	.get_acl	= ovl_get_acl,
	.set_acl	= ovl_set_acl,
	.update_time	= ovl_update_time,
	.fiemap		= ovl_fiemap,
	.fileattr_get	= ovl_fileattr_get,
	.fileattr_set	= ovl_fileattr_set,
};

static const struct inode_operations ovl_symlink_inode_operations = {
	.setattr	= ovl_setattr,
	.get_link	= ovl_get_link,
	.getattr	= ovl_getattr,
	.listxattr	= ovl_listxattr,
	.update_time	= ovl_update_time,
};

static const struct inode_operations ovl_special_inode_operations = {
	.setattr	= ovl_setattr,
	.permission	= ovl_permission,
	.getattr	= ovl_getattr,
	.listxattr	= ovl_listxattr,
	.get_inode_acl	= ovl_get_inode_acl,
	.get_acl	= ovl_get_acl,
	.set_acl	= ovl_set_acl,
	.update_time	= ovl_update_time,
};

static const struct address_space_operations ovl_aops = {
	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
	.direct_IO	= noop_direct_IO,
};

/*
 * It is possible to stack overlayfs instance on top of another
 * overlayfs instance as lower layer. We need to annotate the
 * stackable i_mutex locks according to stack level of the super
 * block instance. An overlayfs instance can never be in stack
 * depth 0 (there is always a real fs below it). An overlayfs
 * inode lock will use the lockdep annotation ovl_i_mutex_key[depth].
 *
 * For example, here is a snip from /proc/lockdep_chains after
 * dir_iterate of nested overlayfs:
 *
 * [...] &ovl_i_mutex_dir_key[depth]   (stack_depth=2)
 * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
 * [...] &type->i_mutex_dir_key        (stack_depth=0)
 *
 * Locking order w.r.t ovl_want_write() is important for nested overlayfs.
 *
 * This chain is valid:
 * - inode->i_rwsem		(inode_lock[2])
 * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
 * - OVL_I(inode)->lock		(ovl_inode_lock[2])
 * - OVL_I(lowerinode)->lock	(ovl_inode_lock[1])
 *
 * And this chain is valid:
 * - inode->i_rwsem		(inode_lock[2])
 * - OVL_I(inode)->lock		(ovl_inode_lock[2])
 * - lowerinode->i_rwsem	(inode_lock[1])
 * - OVL_I(lowerinode)->lock	(ovl_inode_lock[1])
 *
 * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is
 * held, because it is in reverse order of the non-nested case using the same
 * upper fs:
 * - inode->i_rwsem		(inode_lock[1])
 * - upper_mnt->mnt_sb->s_writers	(ovl_want_write[0])
 * - OVL_I(inode)->lock		(ovl_inode_lock[1])
 */
#define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH

static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key ovl_i_mutex_key[OVL_MAX_NESTING];
	static struct lock_class_key ovl_i_mutex_dir_key[OVL_MAX_NESTING];
	static struct lock_class_key ovl_i_lock_key[OVL_MAX_NESTING];

	int depth = inode->i_sb->s_stack_depth - 1;

	if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING))
		depth = 0;

	if (S_ISDIR(inode->i_mode))
		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]);
	else
		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]);

	lockdep_set_class(&OVL_I(inode)->lock, &ovl_i_lock_key[depth]);
#endif
}

static void ovl_next_ino(struct inode *inode)
{
	struct ovl_fs *ofs = OVL_FS(inode->i_sb);

	inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
	if (unlikely(!inode->i_ino))
		inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
}

static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
{
	struct ovl_fs *ofs = OVL_FS(inode->i_sb);
	int xinobits = ovl_xino_bits(ofs);
	unsigned int xinoshift = 64 - xinobits;

	/*
	 * When d_ino is consistent with st_ino (samefs or i_ino has enough
	 * bits to encode layer), set the same value used for st_ino to i_ino,
	 * so inode numbers exposed via /proc/locks and the like will be
	 * consistent with d_ino and st_ino values. An i_ino value inconsistent
	 * with d_ino also causes nfsd readdirplus to fail.
	 */
	inode->i_ino = ino;
	if (ovl_same_fs(ofs)) {
		return;
	} else if (xinobits && likely(!(ino >> xinoshift))) {
		inode->i_ino |= (unsigned long)fsid << (xinoshift + 1);
		return;
	}

	/*
	 * For directory inodes on non-samefs with xino disabled or xino
	 * overflow, we allocate a non-persistent inode number, to be used for
	 * resolving st_ino collisions in ovl_map_dev_ino().
	 *
	 * To avoid ino collision with legitimate xino values from upper
	 * layer (fsid 0), use the lowest xinobit to map the non
	 * persistent inode numbers to the unified st_ino address space.
	 */
	if (S_ISDIR(inode->i_mode)) {
		ovl_next_ino(inode);
		if (xinobits) {
			inode->i_ino &= ~0UL >> xinobits;
			inode->i_ino |= 1UL << xinoshift;
		}
	}
}
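
/*
 * For illustration, with assumed values: if xinobits = 4 (xinoshift = 60), a
 * directory on a non-samefs setup whose real inode number does not fit in 60
 * bits gets a non-persistent number from ofs->last_ino, truncated to the low
 * 60 bits and tagged with bit 60.  Persistent xino values keep bit 60 clear
 * (the fsid goes in bits 61-63), so the two ranges cannot collide.
 */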

void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
		    unsigned long ino, int fsid)
{
	struct inode *realinode;
	struct ovl_inode *oi = OVL_I(inode);

	oi->__upperdentry = oip->upperdentry;
	oi->oe = oip->oe;
	oi->redirect = oip->redirect;
	oi->lowerdata_redirect = oip->lowerdata_redirect;

	realinode = ovl_inode_real(inode);
	ovl_copyattr(inode);
	ovl_copyflags(realinode, inode);
	ovl_map_ino(inode, ino, fsid);
}

static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	inode->i_flags |= S_NOCMTIME;
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
#endif

	ovl_lockdep_annotate_inode_mutex_key(inode);

	switch (mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &ovl_file_inode_operations;
		inode->i_fop = &ovl_file_operations;
		inode->i_mapping->a_ops = &ovl_aops;
		break;

	case S_IFDIR:
		inode->i_op = &ovl_dir_inode_operations;
		inode->i_fop = &ovl_dir_operations;
		break;

	case S_IFLNK:
		inode->i_op = &ovl_symlink_inode_operations;
		break;

	default:
		inode->i_op = &ovl_special_inode_operations;
		init_special_inode(inode, mode, rdev);
		break;
	}
}

/*
 * With inodes index enabled, an overlay inode nlink counts the union of upper
 * hardlinks and non-covered lower hardlinks. During the lifetime of a non-pure
 * upper inode, the following nlink modifying operations can happen:
 *
 * 1. Lower hardlink copy up
 * 2. Upper hardlink created, unlinked or renamed over
 * 3. Lower hardlink whiteout or renamed over
 *
 * For the first, copy up case, the union nlink does not change, whether the
 * operation succeeds or fails, but the upper inode nlink may change.
 * Therefore, before copy up, we store the union nlink value relative to the
 * lower inode nlink in the index inode xattr .overlay.nlink.
 *
 * For the second, upper hardlink case, the union nlink should be incremented
 * or decremented IFF the operation succeeds, aligned with nlink change of the
 * upper inode. Therefore, before link/unlink/rename, we store the union nlink
 * value relative to the upper inode nlink in the index inode.
 *
 * For the last, lower cover up case, we simplify things by preceding the
 * whiteout or cover up with copy up. This makes sure that there is an index
 * upper inode where the nlink xattr can be stored before the copied up upper
 * entry is unlinked.
 */
#define OVL_NLINK_ADD_UPPER	(1 << 0)

/*
 * On-disk format for indexed nlink:
 *
 * nlink relative to the upper inode - "U[+-]NUM"
 * nlink relative to the lower inode - "L[+-]NUM"
 */

static int ovl_set_nlink_common(struct dentry *dentry,
				struct dentry *realdentry, const char *format)
{
	struct inode *inode = d_inode(dentry);
	struct inode *realinode = d_inode(realdentry);
	char buf[13];
	int len;

	len = snprintf(buf, sizeof(buf), format,
		       (int) (inode->i_nlink - realinode->i_nlink));

	if (WARN_ON(len >= sizeof(buf)))
		return -EIO;

	return ovl_setxattr(OVL_FS(inode->i_sb), ovl_dentry_upper(dentry),
			    OVL_XATTR_NLINK, buf, len);
}

int ovl_set_nlink_upper(struct dentry *dentry)
{
	return ovl_set_nlink_common(dentry, ovl_dentry_upper(dentry), "U%+i");
}

int ovl_set_nlink_lower(struct dentry *dentry)
{
	return ovl_set_nlink_common(dentry, ovl_dentry_lower(dentry), "L%+i");
}
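
/*
 * Worked example (numbers assumed for illustration): if the overlay inode has
 * i_nlink == 3 and the upper (index) inode has i_nlink == 2, then
 * ovl_set_nlink_upper() stores "U+1".  ovl_get_nlink() later adds that diff
 * back to the current upper nlink, so the union nlink follows upper link and
 * unlink operations without being recomputed from the layers.
 */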

unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry,
			   struct dentry *upperdentry,
			   unsigned int fallback)
{
	int nlink_diff;
	int nlink;
	char buf[13];
	int err;

	if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1)
		return fallback;

	err = ovl_getxattr_upper(ofs, upperdentry, OVL_XATTR_NLINK,
				 &buf, sizeof(buf) - 1);
	if (err < 0)
		goto fail;

	buf[err] = '\0';
	if ((buf[0] != 'L' && buf[0] != 'U') ||
	    (buf[1] != '+' && buf[1] != '-'))
		goto fail;

	err = kstrtoint(buf + 1, 10, &nlink_diff);
	if (err < 0)
		goto fail;

	nlink = d_inode(buf[0] == 'L' ? lowerdentry : upperdentry)->i_nlink;
	nlink += nlink_diff;

	if (nlink <= 0)
		goto fail;

	return nlink;

fail:
	pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n",
			    upperdentry, err);
	return fallback;
}

struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode)
		ovl_fill_inode(inode, mode, rdev);

	return inode;
}

static int ovl_inode_test(struct inode *inode, void *data)
{
	return inode->i_private == data;
}

static int ovl_inode_set(struct inode *inode, void *data)
{
	inode->i_private = data;
	return 0;
}

static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
			     struct dentry *upperdentry, bool strict)
{
	/*
	 * For directories, @strict verify from lookup path performs consistency
	 * checks, so NULL lower/upper in dentry must match NULL lower/upper in
	 * inode. Non @strict verify from NFS handle decode path passes NULL for
	 * 'unknown' lower/upper.
	 */
	if (S_ISDIR(inode->i_mode) && strict) {
		/* Real lower dir moved to upper layer under us? */
		if (!lowerdentry && ovl_inode_lower(inode))
			return false;

		/* Lookup of an uncovered redirect origin? */
		if (!upperdentry && ovl_inode_upper(inode))
			return false;
	}

	/*
	 * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL.
	 * This happens when finding a copied up overlay inode for a renamed
	 * or hardlinked overlay dentry and lower dentry cannot be followed
	 * by origin because lower fs does not support file handles.
	 */
	if (lowerdentry && ovl_inode_lower(inode) != d_inode(lowerdentry))
		return false;

	/*
	 * Allow non-NULL __upperdentry in inode even if upperdentry is NULL.
	 * This happens when finding a lower alias for a copied up hard link.
	 */
	if (upperdentry && ovl_inode_upper(inode) != d_inode(upperdentry))
		return false;

	return true;
}

struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
			       bool is_upper)
{
	struct inode *inode, *key = d_inode(real);

	inode = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
	if (!inode)
		return NULL;

	if (!ovl_verify_inode(inode, is_upper ? NULL : real,
			      is_upper ? real : NULL, false)) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
{
	struct inode *key = d_inode(dir);
	struct inode *trap;
	bool res;

	trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
	if (!trap)
		return false;

	res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
	      !ovl_inode_lower(trap);

	iput(trap);
	return res;
}

/*
 * Create an inode cache entry for layer root dir, that will intentionally
 * fail ovl_verify_inode(), so any lookup that will find some layer root
 * will fail.
 */
struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
{
	struct inode *key = d_inode(dir);
	struct inode *trap;

	if (!d_is_dir(dir))
		return ERR_PTR(-ENOTDIR);

	trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
			    ovl_inode_set, key);
	if (!trap)
		return ERR_PTR(-ENOMEM);

	if (!(trap->i_state & I_NEW)) {
		/* Conflicting layer roots? */
		iput(trap);
		return ERR_PTR(-ELOOP);
	}

	trap->i_mode = S_IFDIR;
	trap->i_flags = S_DEAD;
	unlock_new_inode(trap);

	return trap;
}

/*
 * Does overlay inode need to be hashed by lower inode?
 */
static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
			     struct dentry *lower, bool index)
{
	struct ovl_fs *ofs = OVL_FS(sb);

	/* No, if pure upper */
	if (!lower)
		return false;

	/* Yes, if already indexed */
	if (index)
		return true;

	/* Yes, if won't be copied up */
	if (!ovl_upper_mnt(ofs))
		return true;

	/* No, if lower hardlink is or will be broken on copy up */
	if ((upper || !ovl_indexdir(sb)) &&
	    !d_is_dir(lower) && d_inode(lower)->i_nlink > 1)
		return false;

	/* No, if non-indexed upper with NFS export */
	if (ofs->config.nfs_export && upper)
		return false;

	/* Otherwise, hash by lower inode for fsnotify */
	return true;
}

static struct inode *ovl_iget5(struct super_block *sb, struct inode *newinode,
			       struct inode *key)
{
	return newinode ? inode_insert5(newinode, (unsigned long) key,
					ovl_inode_test, ovl_inode_set, key) :
			  iget5_locked(sb, (unsigned long) key,
				       ovl_inode_test, ovl_inode_set, key);
}

struct inode *ovl_get_inode(struct super_block *sb,
			    struct ovl_inode_params *oip)
{
	struct ovl_fs *ofs = OVL_FS(sb);
	struct dentry *upperdentry = oip->upperdentry;
	struct ovl_path *lowerpath = ovl_lowerpath(oip->oe);
	struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
	struct inode *inode;
	struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
	struct path realpath = {
		.dentry = upperdentry ?: lowerdentry,
		.mnt = upperdentry ? ovl_upper_mnt(ofs) : lowerpath->layer->mnt,
	};
	bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
					oip->index);
	int fsid = bylower ? lowerpath->layer->fsid : 0;
	bool is_dir;
	unsigned long ino = 0;
	int err = oip->newinode ? -EEXIST : -ENOMEM;

	if (!realinode)
		realinode = d_inode(lowerdentry);

	/*
	 * Copy up origin (lower) may exist for non-indexed upper, but we must
	 * not use lower as hash key if this is a broken hardlink.
	 */
	is_dir = S_ISDIR(realinode->i_mode);
	if (upperdentry || bylower) {
		struct inode *key = d_inode(bylower ? lowerdentry :
					    upperdentry);
		unsigned int nlink = is_dir ? 1 : realinode->i_nlink;

		inode = ovl_iget5(sb, oip->newinode, key);
		if (!inode)
			goto out_err;
		if (!(inode->i_state & I_NEW)) {
			/*
			 * Verify that the underlying files stored in the inode
			 * match those in the dentry.
			 */
			if (!ovl_verify_inode(inode, lowerdentry, upperdentry,
					      true)) {
				iput(inode);
				err = -ESTALE;
				goto out_err;
			}

			dput(upperdentry);
			ovl_free_entry(oip->oe);
			kfree(oip->redirect);
			kfree(oip->lowerdata_redirect);
			goto out;
		}

		/* Recalculate nlink for non-dir due to indexing */
		if (!is_dir)
			nlink = ovl_get_nlink(ofs, lowerdentry, upperdentry,
					      nlink);
		set_nlink(inode, nlink);
		ino = key->i_ino;
	} else {
		/* Lower hardlink that will be broken on copy up */
		inode = new_inode(sb);
		if (!inode) {
			err = -ENOMEM;
			goto out_err;
		}
		ino = realinode->i_ino;
		fsid = lowerpath->layer->fsid;
	}
	ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
	ovl_inode_init(inode, oip, ino, fsid);

	if (upperdentry && ovl_is_impuredir(sb, upperdentry))
		ovl_set_flag(OVL_IMPURE, inode);

	if (oip->index)
		ovl_set_flag(OVL_INDEX, inode);

	if (bylower)
		ovl_set_flag(OVL_CONST_INO, inode);

	/* Check for non-merge dir that may have whiteouts */
	if (is_dir) {
		if (((upperdentry && lowerdentry) || ovl_numlower(oip->oe) > 1) ||
		    ovl_path_check_origin_xattr(ofs, &realpath)) {
			ovl_set_flag(OVL_WHITEOUTS, inode);
		}
	}

	/* Check for immutable/append-only inode flags in xattr */
	if (upperdentry)
		ovl_check_protattr(inode, upperdentry);

	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
out:
	return inode;

out_err:
	pr_warn_ratelimited("failed to get inode (%i)\n", err);
	inode = ERR_PTR(err);
	goto out;
}
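
/*
 * Note that when ovl_hash_bylower() returns true, the overlay inode is hashed
 * and keyed by the lower inode, which is what keeps st_ino constant across
 * copy up (hence the OVL_CONST_INO flag set in ovl_get_inode() above).
 */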