// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "acl.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "chardev.h"
#include "dirent.h"
#include "errcode.h"
#include "extents.h"
#include "fs.h"
#include "fs-common.h"
#include "fs-io.h"
#include "fs-ioctl.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "fsck.h"
#include "inode.h"
#include "io_read.h"
#include "journal.h"
#include "keylist.h"
#include "quota.h"
#include "snapshot.h"
#include "super.h"
#include "xattr.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/exportfs.h>
#include <linux/fiemap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/statfs.h>
#include <linux/string.h>
#include <linux/xattr.h>

static struct kmem_cache *bch2_inode_cache;

static void bch2_vfs_inode_init(struct btree_trans *, subvol_inum,
				struct bch_inode_info *,
				struct bch_inode_unpacked *,
				struct bch_subvolume *);

void bch2_inode_update_after_write(struct btree_trans *trans,
				   struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   unsigned fields)
{
	struct bch_fs *c = trans->c;

	BUG_ON(bi->bi_inum != inode->v.i_ino);

	bch2_assert_pos_locked(trans, BTREE_ID_inodes,
			       POS(0, bi->bi_inum),
			       c->opts.inodes_use_key_cache);

	set_nlink(&inode->v, bch2_inode_nlink_get(bi));
	i_uid_write(&inode->v, bi->bi_uid);
	i_gid_write(&inode->v, bi->bi_gid);
	inode->v.i_mode = bi->bi_mode;

	if (fields & ATTR_ATIME)
		inode_set_atime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_atime));
	if (fields & ATTR_MTIME)
		inode_set_mtime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_mtime));
	if (fields & ATTR_CTIME)
		inode_set_ctime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_ctime));

	inode->ei_inode = *bi;

	bch2_inode_flags_to_vfs(inode);
}

int __must_check bch2_write_inode(struct bch_fs *c,
				  struct bch_inode_info *inode,
				  inode_set_fn set,
				  void *p, unsigned fields)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bch_inode_unpacked inode_u;
	int ret;
retry:
	bch2_trans_begin(trans);

	ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode),
			      BTREE_ITER_INTENT) ?:
		(set ? set(trans, inode, &inode_u, p) : 0) ?:
		bch2_inode_write(trans, &iter, &inode_u) ?:
		bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);

	/*
	 * the btree node lock protects inode->ei_inode, not ei_update_lock;
	 * this is important for inode updates via bchfs_write_index_update
	 */
	if (!ret)
		bch2_inode_update_after_write(trans, inode, &inode_u, fields);

	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_fs_fatal_err_on(bch2_err_matches(ret, ENOENT), c,
			     "inode %u:%llu not found when updating",
			     inode_inum(inode).subvol,
			     inode_inum(inode).inum);

	bch2_trans_put(trans);
	return ret < 0 ? ret : 0;
}
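
/*
 * Move this inode's quota accounting to new_qid for the given quota types;
 * types that aren't enabled, or whose qid is unchanged, are skipped. The
 * space transferred covers i_blocks plus any outstanding reservation, and
 * ei_qid is only updated on success.
 */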
int bch2_fs_quota_transfer(struct bch_fs *c,
			   struct bch_inode_info *inode,
			   struct bch_qid new_qid,
			   unsigned qtypes,
			   enum quota_acct_mode mode)
{
	unsigned i;
	int ret;

	qtypes &= enabled_qtypes(c);

	for (i = 0; i < QTYP_NR; i++)
		if (new_qid.q[i] == inode->ei_qid.q[i])
			qtypes &= ~(1U << i);

	if (!qtypes)
		return 0;

	mutex_lock(&inode->ei_quota_lock);

	ret = bch2_quota_transfer(c, qtypes, new_qid,
				  inode->ei_qid,
				  inode->v.i_blocks +
				  inode->ei_quota_reserved,
				  mode);
	if (!ret)
		for (i = 0; i < QTYP_NR; i++)
			if (qtypes & (1 << i))
				inode->ei_qid.q[i] = new_qid.q[i];

	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}

static int bch2_iget5_test(struct inode *vinode, void *p)
{
	struct bch_inode_info *inode = to_bch_ei(vinode);
	subvol_inum *inum = p;

	return inode->ei_subvol == inum->subvol &&
		inode->ei_inode.bi_inum == inum->inum;
}

static int bch2_iget5_set(struct inode *vinode, void *p)
{
	struct bch_inode_info *inode = to_bch_ei(vinode);
	subvol_inum *inum = p;

	inode->v.i_ino = inum->inum;
	inode->ei_subvol = inum->subvol;
	inode->ei_inode.bi_inum = inum->inum;
	return 0;
}

static unsigned bch2_inode_hash(subvol_inum inum)
{
	return jhash_3words(inum.subvol, inum.inum >> 32, inum.inum, JHASH_INITVAL);
}

struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
{
	struct bch_inode_unpacked inode_u;
	struct bch_inode_info *inode;
	struct btree_trans *trans;
	struct bch_subvolume subvol;
	int ret;

	inode = to_bch_ei(iget5_locked(c->vfs_sb,
				       bch2_inode_hash(inum),
				       bch2_iget5_test,
				       bch2_iget5_set,
				       &inum));
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->v.i_state & I_NEW))
		return &inode->v;

	trans = bch2_trans_get(c);
	ret = lockrestart_do(trans,
		bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
		bch2_inode_find_by_inum_trans(trans, inum, &inode_u));

	if (!ret)
		bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
	bch2_trans_put(trans);

	if (ret) {
		iget_failed(&inode->v);
		return ERR_PTR(bch2_err_class(ret));
	}

	mutex_lock(&c->vfs_inodes_lock);
	list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
	mutex_unlock(&c->vfs_inodes_lock);

	unlock_new_inode(&inode->v);

	return &inode->v;
}
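
/*
 * Common create path for mknod/create/mkdir/symlink/tmpfile: the VFS inode
 * and ACLs are allocated up front so nothing can fail after the btree
 * transaction commits. With BCH_CREATE_TMPFILE no dirent is created and the
 * directory is left untouched; snapshot_src is passed through to
 * bch2_create_trans() for subvolume creation.
 */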
struct bch_inode_info *
__bch2_create(struct mnt_idmap *idmap,
	      struct bch_inode_info *dir, struct dentry *dentry,
	      umode_t mode, dev_t rdev, subvol_inum snapshot_src,
	      unsigned flags)
{
	struct bch_fs *c = dir->v.i_sb->s_fs_info;
	struct btree_trans *trans;
	struct bch_inode_unpacked dir_u;
	struct bch_inode_info *inode, *old;
	struct bch_inode_unpacked inode_u;
	struct posix_acl *default_acl = NULL, *acl = NULL;
	subvol_inum inum;
	struct bch_subvolume subvol;
	u64 journal_seq = 0;
	int ret;

	/*
	 * preallocate acls + vfs inode before btree transaction, so that
	 * nothing can fail after the transaction succeeds:
	 */
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	ret = posix_acl_create(&dir->v, &mode, &default_acl, &acl);
	if (ret)
		return ERR_PTR(ret);
#endif
	inode = to_bch_ei(new_inode(c->vfs_sb));
	if (unlikely(!inode)) {
		inode = ERR_PTR(-ENOMEM);
		goto err;
	}

	bch2_inode_init_early(c, &inode_u);

	if (!(flags & BCH_CREATE_TMPFILE))
		mutex_lock(&dir->ei_update_lock);

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret = bch2_create_trans(trans,
				inode_inum(dir), &dir_u, &inode_u,
				!(flags & BCH_CREATE_TMPFILE)
				? &dentry->d_name : NULL,
				from_kuid(i_user_ns(&dir->v), current_fsuid()),
				from_kgid(i_user_ns(&dir->v), current_fsgid()),
				mode, rdev,
				default_acl, acl, snapshot_src, flags) ?:
		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
				KEY_TYPE_QUOTA_PREALLOC);
	if (unlikely(ret))
		goto err_before_quota;

	inum.subvol = inode_u.bi_subvol ?: dir->ei_subvol;
	inum.inum = inode_u.bi_inum;

	ret = bch2_subvolume_get(trans, inum.subvol, true,
				 BTREE_ITER_WITH_UPDATES, &subvol) ?:
		bch2_trans_commit(trans, NULL, &journal_seq, 0);
	if (unlikely(ret)) {
		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
				KEY_TYPE_QUOTA_WARN);
err_before_quota:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;
		goto err_trans;
	}

	if (!(flags & BCH_CREATE_TMPFILE)) {
		bch2_inode_update_after_write(trans, dir, &dir_u,
					      ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&dir->ei_update_lock);
	}

	bch2_iget5_set(&inode->v, &inum);
	bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);

	set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
	set_cached_acl(&inode->v, ACL_TYPE_DEFAULT, default_acl);

	/*
	 * we must insert the new inode into the inode cache before calling
	 * bch2_trans_put() and dropping locks, else we could race with another
	 * thread pulling the inode in and modifying it:
	 */

	inode->v.i_state |= I_CREATING;

	old = to_bch_ei(inode_insert5(&inode->v,
				      bch2_inode_hash(inum),
				      bch2_iget5_test,
				      bch2_iget5_set,
				      &inum));
	BUG_ON(!old);

	if (unlikely(old != inode)) {
		/*
		 * We raced, another process pulled the new inode into cache
		 * before us:
		 */
		make_bad_inode(&inode->v);
		iput(&inode->v);

		inode = old;
	} else {
		mutex_lock(&c->vfs_inodes_lock);
		list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
		mutex_unlock(&c->vfs_inodes_lock);
		/*
		 * we really don't want insert_inode_locked2() to be setting
		 * I_NEW...
		 */
		unlock_new_inode(&inode->v);
	}

	bch2_trans_put(trans);
err:
	posix_acl_release(default_acl);
	posix_acl_release(acl);
	return inode;
err_trans:
	if (!(flags & BCH_CREATE_TMPFILE))
		mutex_unlock(&dir->ei_update_lock);

	bch2_trans_put(trans);
	make_bad_inode(&inode->v);
	iput(&inode->v);
	inode = ERR_PTR(ret);
	goto err;
}

/* methods */

static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
				  unsigned int flags)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_hash_info hash = bch2_hash_info_init(c, &dir->ei_inode);
	struct inode *vinode = NULL;
	subvol_inum inum = { .subvol = 1 };
	int ret;

	ret = bch2_dirent_lookup(c, inode_inum(dir), &hash,
				 &dentry->d_name, &inum);

	if (!ret)
		vinode = bch2_vfs_inode_get(c, inum);

	return d_splice_alias(vinode, dentry);
}

static int bch2_mknod(struct mnt_idmap *idmap,
		      struct inode *vdir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct bch_inode_info *inode =
		__bch2_create(idmap, to_bch_ei(vdir), dentry, mode, rdev,
			      (subvol_inum) { 0 }, 0);

	if (IS_ERR(inode))
		return bch2_err_class(PTR_ERR(inode));

	d_instantiate(dentry, &inode->v);
	return 0;
}

static int bch2_create(struct mnt_idmap *idmap,
		       struct inode *vdir, struct dentry *dentry,
		       umode_t mode, bool excl)
{
	return bch2_mknod(idmap, vdir, dentry, mode|S_IFREG, 0);
}

static int __bch2_link(struct bch_fs *c,
		       struct bch_inode_info *inode,
		       struct bch_inode_info *dir,
		       struct dentry *dentry)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_inode_unpacked dir_u, inode_u;
	int ret;

	mutex_lock(&inode->ei_update_lock);

	ret = commit_do(trans, NULL, NULL, 0,
			bch2_link_trans(trans,
					inode_inum(dir), &dir_u,
					inode_inum(inode), &inode_u,
					&dentry->d_name));

	if (likely(!ret)) {
		bch2_inode_update_after_write(trans, dir, &dir_u,
					      ATTR_MTIME|ATTR_CTIME);
		bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME);
	}

	bch2_trans_put(trans);
	mutex_unlock(&inode->ei_update_lock);
	return ret;
}

static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
		     struct dentry *dentry)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_inode_info *inode = to_bch_ei(old_dentry->d_inode);
	int ret;

	lockdep_assert_held(&inode->v.i_rwsem);

	ret = __bch2_link(c, inode, dir, dentry);
	if (unlikely(ret))
		return ret;

	ihold(&inode->v);
	d_instantiate(dentry, &inode->v);
	return 0;
}

int __bch2_unlink(struct inode *vdir, struct dentry *dentry,
		  bool deleting_snapshot)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_inode_unpacked dir_u, inode_u;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret;

	bch2_lock_inodes(INODE_UPDATE_LOCK, dir, inode);

	ret = commit_do(trans, NULL, NULL,
			BTREE_INSERT_NOFAIL,
			bch2_unlink_trans(trans,
					  inode_inum(dir), &dir_u,
					  &inode_u, &dentry->d_name,
					  deleting_snapshot));
	if (unlikely(ret))
		goto err;

	bch2_inode_update_after_write(trans, dir, &dir_u,
				      ATTR_MTIME|ATTR_CTIME);
	bch2_inode_update_after_write(trans, inode, &inode_u,
				      ATTR_MTIME);

	if (inode_u.bi_subvol) {
		/*
		 * Subvolume deletion is asynchronous, but we still want to tell
		 * the VFS that it's been deleted here:
		 */
		set_nlink(&inode->v, 0);
	}
err:
	bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode);
	bch2_trans_put(trans);

	return ret;
}

static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
{
	return __bch2_unlink(vdir, dentry, false);
}
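
/*
 * Symlinks are created as tmpfiles so that the target can be written via the
 * page cache and flushed to disk before the inode is linked in and becomes
 * visible:
 */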
static int bch2_symlink(struct mnt_idmap *idmap,
			struct inode *vdir, struct dentry *dentry,
			const char *symname)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir), *inode;
	int ret;

	inode = __bch2_create(idmap, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
	if (IS_ERR(inode))
		return bch2_err_class(PTR_ERR(inode));

	inode_lock(&inode->v);
	ret = page_symlink(&inode->v, symname, strlen(symname) + 1);
	inode_unlock(&inode->v);

	if (unlikely(ret))
		goto err;

	ret = filemap_write_and_wait_range(inode->v.i_mapping, 0, LLONG_MAX);
	if (unlikely(ret))
		goto err;

	ret = __bch2_link(c, inode, dir, dentry);
	if (unlikely(ret))
		goto err;

	d_instantiate(dentry, &inode->v);
	return 0;
err:
	iput(&inode->v);
	return ret;
}

static int bch2_mkdir(struct mnt_idmap *idmap,
		      struct inode *vdir, struct dentry *dentry, umode_t mode)
{
	return bch2_mknod(idmap, vdir, dentry, mode|S_IFDIR, 0);
}
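
/*
 * RENAME_EXCHANGE maps to BCH_RENAME_EXCHANGE; otherwise the mode depends on
 * whether the destination exists (BCH_RENAME_OVERWRITE vs. BCH_RENAME).
 * Project quota is transferred up front when the rename would move an inode
 * into a directory with a different project ID, and reverted on failure.
 */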
static int bch2_rename2(struct mnt_idmap *idmap,
			struct inode *src_vdir, struct dentry *src_dentry,
			struct inode *dst_vdir, struct dentry *dst_dentry,
			unsigned flags)
{
	struct bch_fs *c = src_vdir->i_sb->s_fs_info;
	struct bch_inode_info *src_dir = to_bch_ei(src_vdir);
	struct bch_inode_info *dst_dir = to_bch_ei(dst_vdir);
	struct bch_inode_info *src_inode = to_bch_ei(src_dentry->d_inode);
	struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode);
	struct bch_inode_unpacked dst_dir_u, src_dir_u;
	struct bch_inode_unpacked src_inode_u, dst_inode_u;
	struct btree_trans *trans;
	enum bch_rename_mode mode = flags & RENAME_EXCHANGE
		? BCH_RENAME_EXCHANGE
		: dst_dentry->d_inode
		? BCH_RENAME_OVERWRITE : BCH_RENAME;
	int ret;

	if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE))
		return -EINVAL;

	if (mode == BCH_RENAME_OVERWRITE) {
		ret = filemap_write_and_wait_range(src_inode->v.i_mapping,
						   0, LLONG_MAX);
		if (ret)
			return ret;
	}

	trans = bch2_trans_get(c);

	bch2_lock_inodes(INODE_UPDATE_LOCK,
			 src_dir,
			 dst_dir,
			 src_inode,
			 dst_inode);

	if (inode_attr_changing(dst_dir, src_inode, Inode_opt_project)) {
		ret = bch2_fs_quota_transfer(c, src_inode,
					     dst_dir->ei_qid,
					     1 << QTYP_PRJ,
					     KEY_TYPE_QUOTA_PREALLOC);
		if (ret)
			goto err;
	}

	if (mode == BCH_RENAME_EXCHANGE &&
	    inode_attr_changing(src_dir, dst_inode, Inode_opt_project)) {
		ret = bch2_fs_quota_transfer(c, dst_inode,
					     src_dir->ei_qid,
					     1 << QTYP_PRJ,
					     KEY_TYPE_QUOTA_PREALLOC);
		if (ret)
			goto err;
	}

	ret = commit_do(trans, NULL, NULL, 0,
			bch2_rename_trans(trans,
					  inode_inum(src_dir), &src_dir_u,
					  inode_inum(dst_dir), &dst_dir_u,
					  &src_inode_u,
					  &dst_inode_u,
					  &src_dentry->d_name,
					  &dst_dentry->d_name,
					  mode));
	if (unlikely(ret))
		goto err;

	BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
	BUG_ON(dst_inode &&
	       dst_inode->v.i_ino != dst_inode_u.bi_inum);

	bch2_inode_update_after_write(trans, src_dir, &src_dir_u,
				      ATTR_MTIME|ATTR_CTIME);

	if (src_dir != dst_dir)
		bch2_inode_update_after_write(trans, dst_dir, &dst_dir_u,
					      ATTR_MTIME|ATTR_CTIME);

	bch2_inode_update_after_write(trans, src_inode, &src_inode_u,
				      ATTR_CTIME);

	if (dst_inode)
		bch2_inode_update_after_write(trans, dst_inode, &dst_inode_u,
					      ATTR_CTIME);
err:
	bch2_trans_put(trans);

	bch2_fs_quota_transfer(c, src_inode,
			       bch_qid(&src_inode->ei_inode),
			       1 << QTYP_PRJ,
			       KEY_TYPE_QUOTA_NOCHECK);
	if (dst_inode)
		bch2_fs_quota_transfer(c, dst_inode,
				       bch_qid(&dst_inode->ei_inode),
				       1 << QTYP_PRJ,
				       KEY_TYPE_QUOTA_NOCHECK);

	bch2_unlock_inodes(INODE_UPDATE_LOCK,
			   src_dir,
			   dst_dir,
			   src_inode,
			   dst_inode);

	return ret;
}
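
/*
 * Apply an iattr to the in-memory unpacked inode, stripping the setgid bit
 * on mode changes when the caller is not in the owning group and lacks
 * CAP_FSETID:
 */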
static void bch2_setattr_copy(struct mnt_idmap *idmap,
			      struct bch_inode_info *inode,
			      struct bch_inode_unpacked *bi,
			      struct iattr *attr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		bi->bi_uid = from_kuid(i_user_ns(&inode->v), attr->ia_uid);
	if (ia_valid & ATTR_GID)
		bi->bi_gid = from_kgid(i_user_ns(&inode->v), attr->ia_gid);

	if (ia_valid & ATTR_SIZE)
		bi->bi_size = attr->ia_size;

	if (ia_valid & ATTR_ATIME)
		bi->bi_atime = timespec_to_bch2_time(c, attr->ia_atime);
	if (ia_valid & ATTR_MTIME)
		bi->bi_mtime = timespec_to_bch2_time(c, attr->ia_mtime);
	if (ia_valid & ATTR_CTIME)
		bi->bi_ctime = timespec_to_bch2_time(c, attr->ia_ctime);

	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		kgid_t gid = ia_valid & ATTR_GID
			? attr->ia_gid
			: inode->v.i_gid;

		if (!in_group_p(gid) &&
		    !capable_wrt_inode_uidgid(idmap, &inode->v, CAP_FSETID))
			mode &= ~S_ISGID;
		bi->bi_mode = mode;
	}
}

int bch2_setattr_nonsize(struct mnt_idmap *idmap,
			 struct bch_inode_info *inode,
			 struct iattr *attr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_qid qid;
	struct btree_trans *trans;
	struct btree_iter inode_iter = { NULL };
	struct bch_inode_unpacked inode_u;
	struct posix_acl *acl = NULL;
	int ret;

	mutex_lock(&inode->ei_update_lock);

	qid = inode->ei_qid;

	if (attr->ia_valid & ATTR_UID)
		qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), attr->ia_uid);

	if (attr->ia_valid & ATTR_GID)
		qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), attr->ia_gid);

	ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
				     KEY_TYPE_QUOTA_PREALLOC);
	if (ret)
		goto err;

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);
	kfree(acl);
	acl = NULL;

	ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
			      BTREE_ITER_INTENT);
	if (ret)
		goto btree_err;

	bch2_setattr_copy(idmap, inode, &inode_u, attr);

	if (attr->ia_valid & ATTR_MODE) {
		ret = bch2_acl_chmod(trans, inode_inum(inode), &inode_u,
				     inode_u.bi_mode, &acl);
		if (ret)
			goto btree_err;
	}

	ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BTREE_INSERT_NOFAIL);
btree_err:
	bch2_trans_iter_exit(trans, &inode_iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;
	if (unlikely(ret))
		goto err_trans;

	bch2_inode_update_after_write(trans, inode, &inode_u, attr->ia_valid);

	if (acl)
		set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
err_trans:
	bch2_trans_put(trans);
err:
	mutex_unlock(&inode->ei_update_lock);

	return bch2_err_class(ret);
}

static int bch2_getattr(struct mnt_idmap *idmap,
			const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned query_flags)
{
	struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	stat->dev = inode->v.i_sb->s_dev;
	stat->ino = inode->v.i_ino;
	stat->mode = inode->v.i_mode;
	stat->nlink = inode->v.i_nlink;
	stat->uid = inode->v.i_uid;
	stat->gid = inode->v.i_gid;
	stat->rdev = inode->v.i_rdev;
	stat->size = i_size_read(&inode->v);
	stat->atime = inode_get_atime(&inode->v);
	stat->mtime = inode_get_mtime(&inode->v);
	stat->ctime = inode_get_ctime(&inode->v);
	stat->blksize = block_bytes(c);
	stat->blocks = inode->v.i_blocks;

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
	}

	if (inode->ei_inode.bi_flags & BCH_INODE_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= STATX_ATTR_IMMUTABLE;

	if (inode->ei_inode.bi_flags & BCH_INODE_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= STATX_ATTR_APPEND;

	if (inode->ei_inode.bi_flags & BCH_INODE_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= STATX_ATTR_NODUMP;

	return 0;
}
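
/* Size changes go through the truncate path; all other attributes are pure
 * inode updates: */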
static int bch2_setattr(struct mnt_idmap *idmap,
			struct dentry *dentry, struct iattr *iattr)
{
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	int ret;

	lockdep_assert_held(&inode->v.i_rwsem);

	ret = setattr_prepare(idmap, dentry, iattr);
	if (ret)
		return ret;

	return iattr->ia_valid & ATTR_SIZE
		? bchfs_truncate(idmap, inode, iattr)
		: bch2_setattr_nonsize(idmap, inode, iattr);
}

static int bch2_tmpfile(struct mnt_idmap *idmap,
			struct inode *vdir, struct file *file, umode_t mode)
{
	struct bch_inode_info *inode =
		__bch2_create(idmap, to_bch_ei(vdir),
			      file->f_path.dentry, mode, 0,
			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);

	if (IS_ERR(inode))
		return bch2_err_class(PTR_ERR(inode));

	d_mark_tmpfile(file, &inode->v);
	d_instantiate(file->f_path.dentry, &inode->v);
	return finish_open_simple(file, 0);
}

static int bch2_fill_extent(struct bch_fs *c,
			    struct fiemap_extent_info *info,
			    struct bkey_s_c k, unsigned flags)
{
	if (bkey_extent_is_direct_data(k.k)) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		int ret;

		if (k.k->type == KEY_TYPE_reflink_v)
			flags |= FIEMAP_EXTENT_SHARED;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int flags2 = 0;
			u64 offset = p.ptr.offset;

			if (p.ptr.unwritten)
				flags2 |= FIEMAP_EXTENT_UNWRITTEN;

			if (p.crc.compression_type)
				flags2 |= FIEMAP_EXTENT_ENCODED;
			else
				offset += p.crc.offset;

			if ((offset & (block_sectors(c) - 1)) ||
			    (k.k->size & (block_sectors(c) - 1)))
				flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;

			ret = fiemap_fill_next_extent(info,
						      bkey_start_offset(k.k) << 9,
						      offset << 9,
						      k.k->size << 9, flags|flags2);
			if (ret)
				return ret;
		}

		return 0;
	} else if (bkey_extent_is_inline_data(k.k)) {
		return fiemap_fill_next_extent(info,
					       bkey_start_offset(k.k) << 9,
					       0, k.k->size << 9,
					       flags|
					       FIEMAP_EXTENT_DATA_INLINE);
	} else if (k.k->type == KEY_TYPE_reservation) {
		return fiemap_fill_next_extent(info,
					       bkey_start_offset(k.k) << 9,
					       0, k.k->size << 9,
					       flags|
					       FIEMAP_EXTENT_DELALLOC|
					       FIEMAP_EXTENT_UNWRITTEN);
	} else {
		BUG();
	}
}
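
/*
 * Walk the extents btree in the inode's snapshot, following reflink pointers
 * into the indirect extent btree as needed. Extents are emitted one behind
 * the iterator so the final one can be flagged FIEMAP_EXTENT_LAST, and the
 * transaction is unlocked around fiemap_fill_next_extent(), which may fault:
 */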
static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
		       u64 start, u64 len)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *ei = to_bch_ei(vinode);
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_buf cur, prev;
	struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
	unsigned offset_into_extent, sectors;
	bool have_extent = false;
	u32 snapshot;
	int ret = 0;

	ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (start + len < start)
		return -EINVAL;

	start >>= 9;

	bch2_bkey_buf_init(&cur);
	bch2_bkey_buf_init(&prev);
	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, ei->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(ei->v.i_ino, start, snapshot), 0);

	while (!(ret = btree_trans_too_many_iters(trans)) &&
	       (k = bch2_btree_iter_peek_upto(&iter, end)).k &&
	       !(ret = bkey_err(k))) {
		enum btree_id data_btree = BTREE_ID_extents;

		if (!bkey_extent_is_data(k.k) &&
		    k.k->type != KEY_TYPE_reservation) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&cur, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
						&offset_into_extent, &cur);
		if (ret)
			break;

		k = bkey_i_to_s_c(cur.k);
		bch2_bkey_buf_realloc(&prev, c, k.k->u64s);

		sectors = min(sectors, k.k->size - offset_into_extent);

		bch2_cut_front(POS(k.k->p.inode,
				   bkey_start_offset(k.k) +
				   offset_into_extent),
			       cur.k);
		bch2_key_resize(&cur.k->k, sectors);
		cur.k->k.p = iter.pos;
		cur.k->k.p.offset += cur.k->k.size;

		if (have_extent) {
			bch2_trans_unlock(trans);
			ret = bch2_fill_extent(c, info,
					       bkey_i_to_s_c(prev.k), 0);
			if (ret)
				break;
		}

		bkey_copy(prev.k, cur.k);
		have_extent = true;

		bch2_btree_iter_set_pos(&iter,
			POS(iter.pos.inode, iter.pos.offset + sectors));
	}
	start = iter.pos.offset;
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (!ret && have_extent) {
		bch2_trans_unlock(trans);
		ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
				       FIEMAP_EXTENT_LAST);
	}

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&cur, c);
	bch2_bkey_buf_exit(&prev, c);
	return ret < 0 ? ret : 0;
}

static const struct vm_operations_struct bch_vm_ops = {
	.fault = bch2_page_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = bch2_page_mkwrite,
};

static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);

	vma->vm_ops = &bch_vm_ops;
	return 0;
}

/* Directories: */

static loff_t bch2_dir_llseek(struct file *file, loff_t offset, int whence)
{
	return generic_file_llseek_size(file, offset, whence,
					S64_MAX, S64_MAX);
}

static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret;

	if (!dir_emit_dots(file, ctx))
		return 0;

	ret = bch2_readdir(c, inode_inum(inode), ctx);
	if (ret)
		bch_err_fn(c, ret);

	return bch2_err_class(ret);
}

static const struct file_operations bch_file_operations = {
	.llseek = bch2_llseek,
	.read_iter = bch2_read_iter,
	.write_iter = bch2_write_iter,
	.mmap = bch2_mmap,
	.open = generic_file_open,
	.fsync = bch2_fsync,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.fallocate = bch2_fallocate_dispatch,
	.unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = bch2_compat_fs_ioctl,
#endif
	.remap_file_range = bch2_remap_file_range,
};

static const struct inode_operations bch_file_inode_operations = {
	.getattr = bch2_getattr,
	.setattr = bch2_setattr,
	.fiemap = bch2_fiemap,
	.listxattr = bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_acl = bch2_get_acl,
	.set_acl = bch2_set_acl,
#endif
};

static const struct inode_operations bch_dir_inode_operations = {
	.lookup = bch2_lookup,
	.create = bch2_create,
	.link = bch2_link,
	.unlink = bch2_unlink,
	.symlink = bch2_symlink,
	.mkdir = bch2_mkdir,
	.rmdir = bch2_unlink,
	.mknod = bch2_mknod,
	.rename = bch2_rename2,
	.getattr = bch2_getattr,
	.setattr = bch2_setattr,
	.tmpfile = bch2_tmpfile,
	.listxattr = bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_acl = bch2_get_acl,
	.set_acl = bch2_set_acl,
#endif
};

static const struct file_operations bch_dir_file_operations = {
	.llseek = bch2_dir_llseek,
	.read = generic_read_dir,
	.iterate_shared = bch2_vfs_readdir,
	.fsync = bch2_fsync,
	.unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = bch2_compat_fs_ioctl,
#endif
};

static const struct inode_operations bch_symlink_inode_operations = {
	.get_link = page_get_link,
	.getattr = bch2_getattr,
	.setattr = bch2_setattr,
	.listxattr = bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_acl = bch2_get_acl,
	.set_acl = bch2_set_acl,
#endif
};

static const struct inode_operations bch_special_inode_operations = {
	.getattr = bch2_getattr,
	.setattr = bch2_setattr,
	.listxattr = bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_acl = bch2_get_acl,
	.set_acl = bch2_set_acl,
#endif
};

static const struct address_space_operations bch_address_space_operations = {
	.read_folio = bch2_read_folio,
	.writepages = bch2_writepages,
	.readahead = bch2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.write_begin = bch2_write_begin,
	.write_end = bch2_write_end,
	.invalidate_folio = bch2_invalidate_folio,
	.release_folio = bch2_release_folio,
	.direct_IO = noop_direct_IO,
#ifdef CONFIG_MIGRATION
	.migrate_folio = filemap_migrate_folio,
#endif
	.error_remove_page = generic_error_remove_page,
};

struct bcachefs_fid {
	u64 inum;
	u32 subvol;
	u32 gen;
} __packed;

struct bcachefs_fid_with_parent {
	struct bcachefs_fid fid;
	struct bcachefs_fid dir;
} __packed;

static int bcachefs_fid_valid(int fh_len, int fh_type)
{
	switch (fh_type) {
	case FILEID_BCACHEFS_WITHOUT_PARENT:
		return fh_len == sizeof(struct bcachefs_fid) / sizeof(u32);
	case FILEID_BCACHEFS_WITH_PARENT:
		return fh_len == sizeof(struct bcachefs_fid_with_parent) / sizeof(u32);
	default:
		return false;
	}
}

static struct bcachefs_fid bch2_inode_to_fid(struct bch_inode_info *inode)
{
	return (struct bcachefs_fid) {
		.inum = inode->ei_inode.bi_inum,
		.subvol = inode->ei_subvol,
		.gen = inode->ei_inode.bi_generation,
	};
}

static int bch2_encode_fh(struct inode *vinode, u32 *fh, int *len,
			  struct inode *vdir)
{
	struct bch_inode_info *inode = to_bch_ei(vinode);
	struct bch_inode_info *dir = to_bch_ei(vdir);

	if (*len < sizeof(struct bcachefs_fid_with_parent) / sizeof(u32))
		return FILEID_INVALID;

	if (!S_ISDIR(inode->v.i_mode) && dir) {
		struct bcachefs_fid_with_parent *fid = (void *) fh;

		fid->fid = bch2_inode_to_fid(inode);
		fid->dir = bch2_inode_to_fid(dir);

		*len = sizeof(*fid) / sizeof(u32);
		return FILEID_BCACHEFS_WITH_PARENT;
	} else {
		struct bcachefs_fid *fid = (void *) fh;

		*fid = bch2_inode_to_fid(inode);

		*len = sizeof(*fid) / sizeof(u32);
		return FILEID_BCACHEFS_WITHOUT_PARENT;
	}
}
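
/*
 * A mismatched generation means the inode number was reused after the file
 * handle was issued, so the handle is stale:
 */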
static struct inode *bch2_nfs_get_inode(struct super_block *sb,
					struct bcachefs_fid fid)
{
	struct bch_fs *c = sb->s_fs_info;
	struct inode *vinode = bch2_vfs_inode_get(c, (subvol_inum) {
				    .subvol = fid.subvol,
				    .inum = fid.inum,
	});
	if (!IS_ERR(vinode) && vinode->i_generation != fid.gen) {
		iput(vinode);
		vinode = ERR_PTR(-ESTALE);
	}
	return vinode;
}

static struct dentry *bch2_fh_to_dentry(struct super_block *sb, struct fid *_fid,
					int fh_len, int fh_type)
{
	struct bcachefs_fid *fid = (void *) _fid;

	if (!bcachefs_fid_valid(fh_len, fh_type))
		return NULL;

	return d_obtain_alias(bch2_nfs_get_inode(sb, *fid));
}

static struct dentry *bch2_fh_to_parent(struct super_block *sb, struct fid *_fid,
					int fh_len, int fh_type)
{
	struct bcachefs_fid_with_parent *fid = (void *) _fid;

	if (!bcachefs_fid_valid(fh_len, fh_type) ||
	    fh_type != FILEID_BCACHEFS_WITH_PARENT)
		return NULL;

	return d_obtain_alias(bch2_nfs_get_inode(sb, fid->dir));
}

static struct dentry *bch2_get_parent(struct dentry *child)
{
	struct bch_inode_info *inode = to_bch_ei(child->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	subvol_inum parent_inum = {
		.subvol = inode->ei_inode.bi_parent_subvol ?:
			inode->ei_subvol,
		.inum = inode->ei_inode.bi_dir,
	};

	if (!parent_inum.inum)
		return NULL;

	return d_obtain_alias(bch2_vfs_inode_get(c, parent_inum));
}
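
/*
 * Find the name of @child in @parent: the inode backref (bi_dir,
 * bi_dir_offset) gives a direct dirent lookup in the common case; for
 * hardlinked files whose backref points at a different directory, fall back
 * to a linear scan of @parent:
 */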
static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child)
{
	struct bch_inode_info *inode = to_bch_ei(child->d_inode);
	struct bch_inode_info *dir = to_bch_ei(parent->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans *trans;
	struct btree_iter iter1;
	struct btree_iter iter2;
	struct bkey_s_c k;
	struct bkey_s_c_dirent d;
	struct bch_inode_unpacked inode_u;
	subvol_inum target;
	u32 snapshot;
	struct qstr dirent_name;
	unsigned name_len = 0;
	int ret;

	if (!S_ISDIR(dir->v.i_mode))
		return -EINVAL;

	trans = bch2_trans_get(c);

	bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents,
			     POS(dir->ei_inode.bi_inum, 0), 0);
	bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents,
			     POS(dir->ei_inode.bi_inum, 0), 0);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, dir->ei_subvol, &snapshot);
	if (ret)
		goto err;

	bch2_btree_iter_set_snapshot(&iter1, snapshot);
	bch2_btree_iter_set_snapshot(&iter2, snapshot);

	ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u);
	if (ret)
		goto err;

	if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
		bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));

		k = bch2_btree_iter_peek_slot(&iter1);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (k.k->type != KEY_TYPE_dirent) {
			ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
			goto err;
		}

		d = bkey_s_c_to_dirent(k);
		ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
		if (ret > 0)
			ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
		if (ret)
			goto err;

		if (target.subvol == inode->ei_subvol &&
		    target.inum == inode->ei_inode.bi_inum)
			goto found;
	} else {
		/*
		 * File with multiple hardlinks and our backref is to the wrong
		 * directory - linear search:
		 */
		for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
			if (k.k->p.inode > dir->ei_inode.bi_inum)
				break;

			if (k.k->type != KEY_TYPE_dirent)
				continue;

			d = bkey_s_c_to_dirent(k);
			ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
			if (ret < 0)
				break;
			if (ret)
				continue;

			if (target.subvol == inode->ei_subvol &&
			    target.inum == inode->ei_inode.bi_inum)
				goto found;
		}
	}

	ret = -ENOENT;
	goto err;
found:
	dirent_name = bch2_dirent_get_name(d);

	name_len = min_t(unsigned, dirent_name.len, NAME_MAX);
	memcpy(name, dirent_name.name, name_len);
	name[name_len] = '\0';
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_iter_exit(trans, &iter1);
	bch2_trans_iter_exit(trans, &iter2);
	bch2_trans_put(trans);

	return ret;
}

static const struct export_operations bch_export_ops = {
	.encode_fh = bch2_encode_fh,
	.fh_to_dentry = bch2_fh_to_dentry,
	.fh_to_parent = bch2_fh_to_parent,
	.get_parent = bch2_get_parent,
	.get_name = bch2_get_name,
};

static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
				struct bch_inode_info *inode,
				struct bch_inode_unpacked *bi,
				struct bch_subvolume *subvol)
{
	bch2_inode_update_after_write(trans, inode, bi, ~0);

	inode->v.i_blocks = bi->bi_sectors;
	inode->v.i_ino = bi->bi_inum;
	inode->v.i_rdev = bi->bi_dev;
	inode->v.i_generation = bi->bi_generation;
	inode->v.i_size = bi->bi_size;

	/* initialize ei_flags before setting bits in it: */
	inode->ei_flags = 0;
	inode->ei_quota_reserved = 0;
	inode->ei_qid = bch_qid(bi);
	inode->ei_subvol = inum.subvol;

	if (BCH_SUBVOLUME_SNAP(subvol))
		set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
	else
		clear_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);

	inode->v.i_mapping->a_ops = &bch_address_space_operations;

	switch (inode->v.i_mode & S_IFMT) {
	case S_IFREG:
		inode->v.i_op = &bch_file_inode_operations;
		inode->v.i_fop = &bch_file_operations;
		break;
	case S_IFDIR:
		inode->v.i_op = &bch_dir_inode_operations;
		inode->v.i_fop = &bch_dir_file_operations;
		break;
	case S_IFLNK:
		inode_nohighmem(&inode->v);
		inode->v.i_op = &bch_symlink_inode_operations;
		break;
	default:
		init_special_inode(&inode->v, inode->v.i_mode, inode->v.i_rdev);
		inode->v.i_op = &bch_special_inode_operations;
		break;
	}

	mapping_set_large_folios(inode->v.i_mapping);
}

static struct inode *bch2_alloc_inode(struct super_block *sb)
{
	struct bch_inode_info *inode;

	inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
	if (!inode)
		return NULL;

	inode_init_once(&inode->v);
	mutex_init(&inode->ei_update_lock);
	two_state_lock_init(&inode->ei_pagecache_lock);
	INIT_LIST_HEAD(&inode->ei_vfs_inode_list);
	mutex_init(&inode->ei_quota_lock);

	return &inode->v;
}

static void bch2_i_callback(struct rcu_head *head)
{
	struct inode *vinode = container_of(head, struct inode, i_rcu);
	struct bch_inode_info *inode = to_bch_ei(vinode);

	kmem_cache_free(bch2_inode_cache, inode);
}

static void bch2_destroy_inode(struct inode *vinode)
{
	call_rcu(&vinode->i_rcu, bch2_i_callback);
}
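
/* Flush in-memory timestamps back to the btree inode, for ->write_inode(): */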
static int inode_update_times_fn(struct btree_trans *trans,
				 struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi,
				 void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_atime = timespec_to_bch2_time(c, inode_get_atime(&inode->v));
	bi->bi_mtime = timespec_to_bch2_time(c, inode_get_mtime(&inode->v));
	bi->bi_ctime = timespec_to_bch2_time(c, inode_get_ctime(&inode->v));

	return 0;
}

static int bch2_vfs_write_inode(struct inode *vinode,
				struct writeback_control *wbc)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *inode = to_bch_ei(vinode);
	int ret;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
			       ATTR_ATIME|ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	return bch2_err_class(ret);
}

static void bch2_evict_inode(struct inode *vinode)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *inode = to_bch_ei(vinode);

	truncate_inode_pages_final(&inode->v.i_data);

	clear_inode(&inode->v);

	BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);

	if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
				KEY_TYPE_QUOTA_WARN);
		bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
				KEY_TYPE_QUOTA_WARN);
		bch2_inode_rm(c, inode_inum(inode));
	}

	mutex_lock(&c->vfs_inodes_lock);
	list_del_init(&inode->ei_vfs_inode_list);
	mutex_unlock(&c->vfs_inodes_lock);
}
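
/*
 * Flush cached inodes belonging to the subvolumes in @s so that the
 * subvolumes themselves can be deleted:
 */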
void bch2_evict_subvolume_inodes(struct bch_fs *c, snapshot_id_list *s)
{
	struct bch_inode_info *inode, **i;
	DARRAY(struct bch_inode_info *) grabbed;
	bool clean_pass = false, this_pass_clean;

	/*
	 * Initially, we scan for inodes without I_DONTCACHE, then mark them to
	 * be pruned with d_mark_dontcache().
	 *
	 * Once we've had a clean pass where we didn't find any inodes without
	 * I_DONTCACHE, we wait for them to be freed:
	 */

	darray_init(&grabbed);
	darray_make_room(&grabbed, 1024);
again:
	cond_resched();
	this_pass_clean = true;

	mutex_lock(&c->vfs_inodes_lock);
	list_for_each_entry(inode, &c->vfs_inodes_list, ei_vfs_inode_list) {
		if (!snapshot_list_has_id(s, inode->ei_subvol))
			continue;

		if (!(inode->v.i_state & I_DONTCACHE) &&
		    !(inode->v.i_state & I_FREEING) &&
		    igrab(&inode->v)) {
			this_pass_clean = false;

			if (darray_push_gfp(&grabbed, inode, GFP_ATOMIC|__GFP_NOWARN)) {
				iput(&inode->v);
				break;
			}
		} else if (clean_pass && this_pass_clean) {
			wait_queue_head_t *wq = bit_waitqueue(&inode->v.i_state, __I_NEW);
			DEFINE_WAIT_BIT(wait, &inode->v.i_state, __I_NEW);

			prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
			mutex_unlock(&c->vfs_inodes_lock);

			schedule();
			finish_wait(wq, &wait.wq_entry);
			goto again;
		}
	}
	mutex_unlock(&c->vfs_inodes_lock);

	darray_for_each(grabbed, i) {
		inode = *i;
		d_mark_dontcache(&inode->v);
		d_prune_aliases(&inode->v);
		iput(&inode->v);
	}
	grabbed.nr = 0;

	if (!clean_pass || !this_pass_clean) {
		clean_pass = this_pass_clean;
		goto again;
	}

	darray_exit(&grabbed);
}

static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct bch_fs *c = sb->s_fs_info;
	struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
	unsigned shift = sb->s_blocksize_bits - 9;
	/*
	 * this assumes inodes take up 64 bytes, which is a decent average
	 * number:
	 */
	u64 avail_inodes = ((usage.capacity - usage.used) << 3);
	u64 fsid;

	buf->f_type = BCACHEFS_STATFS_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = usage.capacity >> shift;
	buf->f_bfree = usage.free >> shift;
	buf->f_bavail = avail_factor(usage.free) >> shift;

	buf->f_files = usage.nr_inodes + avail_inodes;
	buf->f_ffree = avail_inodes;

	fsid = le64_to_cpup((void *) c->sb.user_uuid.b) ^
	       le64_to_cpup((void *) c->sb.user_uuid.b + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
	buf->f_namelen = BCH_NAME_MAX;

	return 0;
}

static int bch2_sync_fs(struct super_block *sb, int wait)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret;

	if (c->opts.journal_flush_disabled)
		return 0;

	if (!wait) {
		bch2_journal_flush_async(&c->journal, NULL);
		return 0;
	}

	ret = bch2_journal_flush(&c->journal);
	return bch2_err_class(ret);
}

static struct bch_fs *bch2_path_to_fs(const char *path)
{
	struct bch_fs *c;
	dev_t dev;
	int ret;

	ret = lookup_bdev(path, &dev);
	if (ret)
		return ERR_PTR(ret);

	c = bch2_dev_to_fs(dev);
	if (c)
		closure_put(&c->cl);
	return c ?: ERR_PTR(-ENOENT);
}
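
/*
 * Split a colon separated device list ("/dev/sda:/dev/sdb") into a NULL
 * terminated array of strings; the strings point into one kstrdup()'d
 * buffer, so the caller frees devs[0] and then the array itself:
 */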
static char **split_devs(const char *_dev_name, unsigned *nr)
{
	char *dev_name = NULL, **devs = NULL, *s;
	size_t i = 0, nr_devs = 0;

	dev_name = kstrdup(_dev_name, GFP_KERNEL);
	if (!dev_name)
		return NULL;

	for (s = dev_name; s; s = strchr(s + 1, ':'))
		nr_devs++;

	devs = kcalloc(nr_devs + 1, sizeof(const char *), GFP_KERNEL);
	if (!devs) {
		kfree(dev_name);
		return NULL;
	}

	while ((s = strsep(&dev_name, ":")))
		devs[i++] = s;

	*nr = nr_devs;
	return devs;
}

static int bch2_remount(struct super_block *sb, int *flags, char *data)
{
	struct bch_fs *c = sb->s_fs_info;
	struct bch_opts opts = bch2_opts_empty();
	int ret;

	opt_set(opts, read_only, (*flags & SB_RDONLY) != 0);

	ret = bch2_parse_mount_opts(c, &opts, data);
	if (ret)
		goto err;

	if (opts.read_only != c->opts.read_only) {
		down_write(&c->state_lock);

		if (opts.read_only) {
			bch2_fs_read_only(c);

			sb->s_flags |= SB_RDONLY;
		} else {
			ret = bch2_fs_read_write(c);
			if (ret) {
				bch_err(c, "error going rw: %i", ret);
				up_write(&c->state_lock);
				ret = -EINVAL;
				goto err;
			}

			sb->s_flags &= ~SB_RDONLY;
		}

		c->opts.read_only = opts.read_only;

		up_write(&c->state_lock);
	}

	if (opt_defined(opts, errors))
		c->opts.errors = opts.errors;
err:
	return bch2_err_class(ret);
}

static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
{
	struct bch_fs *c = root->d_sb->s_fs_info;
	struct bch_dev *ca;
	unsigned i;
	bool first = true;

	for_each_online_member(ca, c, i) {
		if (!first)
			seq_putc(seq, ':');
		first = false;
		seq_puts(seq, "/dev/");
		seq_puts(seq, ca->name);
	}

	return 0;
}

static int bch2_show_options(struct seq_file *seq, struct dentry *root)
{
	struct bch_fs *c = root->d_sb->s_fs_info;
	enum bch_opt_id i;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	for (i = 0; i < bch2_opts_nr; i++) {
		const struct bch_option *opt = &bch2_opt_table[i];
		u64 v = bch2_opt_get_by_id(&c->opts, i);

		if (!(opt->flags & OPT_MOUNT))
			continue;

		if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
			continue;

		printbuf_reset(&buf);
		bch2_opt_to_text(&buf, c, c->disk_sb.sb, opt, v,
				 OPT_SHOW_MOUNT_STYLE);
		seq_putc(seq, ',');
		seq_puts(seq, buf.buf);
	}

	if (buf.allocation_failure)
		ret = -ENOMEM;
	printbuf_exit(&buf);
	return ret;
}

static void bch2_put_super(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;

	__bch2_fs_stop(c);
}

/*
 * bcachefs doesn't currently integrate intwrite freeze protection but the
 * internal write references serve the same purpose. Therefore reuse the
 * read-only transition code to perform the quiesce. The caveat is that we don't
 * currently have the ability to block tasks that want a write reference while
 * the superblock is frozen. This is fine for now, but we should either add
 * blocking support or find a way to integrate sb_start_intwrite() and friends.
 */
static int bch2_freeze(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);
	return 0;
}

static int bch2_unfreeze(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret;

	down_write(&c->state_lock);
	ret = bch2_fs_read_write(c);
	up_write(&c->state_lock);
	return ret;
}

static const struct super_operations bch_super_operations = {
	.alloc_inode = bch2_alloc_inode,
	.destroy_inode = bch2_destroy_inode,
	.write_inode = bch2_vfs_write_inode,
	.evict_inode = bch2_evict_inode,
	.sync_fs = bch2_sync_fs,
	.statfs = bch2_statfs,
	.show_devname = bch2_show_devname,
	.show_options = bch2_show_options,
	.remount_fs = bch2_remount,
	.put_super = bch2_put_super,
	.freeze_fs = bch2_freeze,
	.unfreeze_fs = bch2_unfreeze,
};

static int bch2_set_super(struct super_block *s, void *data)
{
	s->s_fs_info = data;
	return 0;
}

static int bch2_noset_super(struct super_block *s, void *data)
{
	return -EBUSY;
}

static int bch2_test_super(struct super_block *s, void *data)
{
	struct bch_fs *c = s->s_fs_info;
	struct bch_fs **devs = data;
	unsigned i;

	if (!c)
		return false;

	for (i = 0; devs[i]; i++)
		if (c != devs[i])
			return false;
	return true;
}
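
/*
 * Mount path: map each device in the colon separated list to an already-open
 * bch_fs, if any, so that sget() can find an existing superblock; only call
 * bch2_fs_open() when this is a fresh mount:
 */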
static struct dentry *bch2_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	struct bch_fs *c;
	struct bch_dev *ca;
	struct super_block *sb;
	struct inode *vinode;
	struct bch_opts opts = bch2_opts_empty();
	char **devs;
	struct bch_fs **devs_to_fs = NULL;
	unsigned i, nr_devs;
	int ret;

	opt_set(opts, read_only, (flags & SB_RDONLY) != 0);

	ret = bch2_parse_mount_opts(NULL, &opts, data);
	if (ret)
		return ERR_PTR(ret);

	if (!dev_name || strlen(dev_name) == 0)
		return ERR_PTR(-EINVAL);

	devs = split_devs(dev_name, &nr_devs);
	if (!devs)
		return ERR_PTR(-ENOMEM);

	devs_to_fs = kcalloc(nr_devs + 1, sizeof(void *), GFP_KERNEL);
	if (!devs_to_fs) {
		sb = ERR_PTR(-ENOMEM);
		goto got_sb;
	}

	for (i = 0; i < nr_devs; i++)
		devs_to_fs[i] = bch2_path_to_fs(devs[i]);

	sb = sget(fs_type, bch2_test_super, bch2_noset_super,
		  flags|SB_NOSEC, devs_to_fs);
	if (!IS_ERR(sb))
		goto got_sb;

	c = bch2_fs_open(devs, nr_devs, opts);
	if (IS_ERR(c)) {
		sb = ERR_CAST(c);
		goto got_sb;
	}

	/* Some options can't be parsed until after the fs is started: */
	ret = bch2_parse_mount_opts(c, &opts, data);
	if (ret) {
		bch2_fs_stop(c);
		sb = ERR_PTR(ret);
		goto got_sb;
	}

	bch2_opts_apply(&c->opts, opts);

	sb = sget(fs_type, NULL, bch2_set_super, flags|SB_NOSEC, c);
	if (IS_ERR(sb))
		bch2_fs_stop(c);
got_sb:
	kfree(devs_to_fs);
	kfree(devs[0]);
	kfree(devs);

	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		ret = bch2_err_class(ret);
		return ERR_PTR(ret);
	}

	c = sb->s_fs_info;

	if (sb->s_root) {
		if ((flags ^ sb->s_flags) & SB_RDONLY) {
			ret = -EBUSY;
			goto err_put_super;
		}
		goto out;
	}

	sb->s_blocksize = block_bytes(c);
	sb->s_blocksize_bits = ilog2(block_bytes(c));
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &bch_super_operations;
	sb->s_export_op = &bch_export_ops;
#ifdef CONFIG_BCACHEFS_QUOTA
	sb->s_qcop = &bch2_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR|QTYPE_MASK_GRP|QTYPE_MASK_PRJ;
#endif
	sb->s_xattr = bch2_xattr_handlers;
	sb->s_magic = BCACHEFS_STATFS_MAGIC;
	sb->s_time_gran = c->sb.nsec_per_time_unit;
	sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
	sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec);
	c->vfs_sb = sb;
	strscpy(sb->s_id, c->name, sizeof(sb->s_id));

	ret = super_setup_bdi(sb);
	if (ret)
		goto err_put_super;

	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;

	for_each_online_member(ca, c, i) {
		struct block_device *bdev = ca->disk_sb.bdev;

		/* XXX: create an anonymous device for multi device filesystems */
		sb->s_bdev = bdev;
		sb->s_dev = bdev->bd_dev;
		percpu_ref_put(&ca->io_ref);
		break;
	}

	c->dev = sb->s_dev;

#ifdef CONFIG_BCACHEFS_POSIX_ACL
	if (c->opts.acl)
		sb->s_flags |= SB_POSIXACL;
#endif

	sb->s_shrink.seeks = 0;

	vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
	ret = PTR_ERR_OR_ZERO(vinode);
	if (ret) {
		bch_err_msg(c, ret, "mounting: error getting root inode");
		goto err_put_super;
	}

	sb->s_root = d_make_root(vinode);
	if (!sb->s_root) {
		bch_err(c, "error mounting: error allocating root dentry");
		ret = -ENOMEM;
		goto err_put_super;
	}

	sb->s_flags |= SB_ACTIVE;
out:
	return dget(sb->s_root);

err_put_super:
	sb->s_fs_info = NULL;
	c->vfs_sb = NULL;
	deactivate_locked_super(sb);
	bch2_fs_stop(c);
	return ERR_PTR(bch2_err_class(ret));
}

static void bch2_kill_sb(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;

	if (c)
		c->vfs_sb = NULL;
	generic_shutdown_super(sb);
	if (c)
		bch2_fs_free(c);
}

static struct file_system_type bcache_fs_type = {
	.owner = THIS_MODULE,
	.name = "bcachefs",
	.mount = bch2_mount,
	.kill_sb = bch2_kill_sb,
	.fs_flags = FS_REQUIRES_DEV,
};

MODULE_ALIAS_FS("bcachefs");

void bch2_vfs_exit(void)
{
	unregister_filesystem(&bcache_fs_type);
	kmem_cache_destroy(bch2_inode_cache);
}

int __init bch2_vfs_init(void)
{
	int ret = -ENOMEM;

	bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT);
	if (!bch2_inode_cache)
		goto err;

	ret = register_filesystem(&bcache_fs_type);
	if (ret)
		goto err;

	return 0;
err:
	bch2_vfs_exit();
	return ret;
}

#endif /* NO_BCACHEFS_FS */