// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "acl.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "chardev.h"
#include "dirent.h"
#include "errcode.h"
#include "extents.h"
#include "fs.h"
#include "fs-common.h"
#include "fs-io.h"
#include "fs-ioctl.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "fsck.h"
#include "inode.h"
#include "io_read.h"
#include "journal.h"
#include "keylist.h"
#include "quota.h"
#include "snapshot.h"
#include "super.h"
#include "xattr.h"
#include "trace.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/exportfs.h>
#include <linux/fiemap.h>
#include <linux/fs_context.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/statfs.h>
#include <linux/string.h>
#include <linux/xattr.h>

static struct kmem_cache *bch2_inode_cache;

static void bch2_vfs_inode_init(struct btree_trans *, subvol_inum,
				struct bch_inode_info *,
				struct bch_inode_unpacked *,
				struct bch_subvolume *);

void bch2_inode_update_after_write(struct btree_trans *trans,
				   struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   unsigned fields)
{
	struct bch_fs *c = trans->c;

	BUG_ON(bi->bi_inum != inode->v.i_ino);

	bch2_assert_pos_locked(trans, BTREE_ID_inodes, POS(0, bi->bi_inum));

	set_nlink(&inode->v, bch2_inode_nlink_get(bi));
	i_uid_write(&inode->v, bi->bi_uid);
	i_gid_write(&inode->v, bi->bi_gid);
	inode->v.i_mode = bi->bi_mode;

	if (fields & ATTR_ATIME)
		inode_set_atime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_atime));
	if (fields & ATTR_MTIME)
		inode_set_mtime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_mtime));
	if (fields & ATTR_CTIME)
		inode_set_ctime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_ctime));

	inode->ei_inode = *bi;

	bch2_inode_flags_to_vfs(inode);
}

int __must_check bch2_write_inode(struct bch_fs *c,
				  struct bch_inode_info *inode,
				  inode_set_fn set,
				  void *p, unsigned fields)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bch_inode_unpacked inode_u;
	int ret;
retry:
	bch2_trans_begin(trans);

	ret   = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode),
				BTREE_ITER_intent) ?:
		(set ? set(trans, inode, &inode_u, p) : 0) ?:
		bch2_inode_write(trans, &iter, &inode_u) ?:
		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);

	/*
	 * the btree node lock protects inode->ei_inode, not ei_update_lock;
	 * this is important for inode updates via bchfs_write_index_update
	 */
	if (!ret)
		bch2_inode_update_after_write(trans, inode, &inode_u, fields);

	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_fs_fatal_err_on(bch2_err_matches(ret, ENOENT), c,
			     "%s: inode %llu:%llu not found when updating",
			     bch2_err_str(ret),
			     inode_inum(inode).subvol,
			     inode_inum(inode).inum);

	bch2_trans_put(trans);
	return ret < 0 ? ret : 0;
}
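
/*
 * Illustrative sketch (not part of this file): a typical bch2_write_inode()
 * caller passes an inode_set_fn that mutates the unpacked inode inside the
 * btree transaction; inode_update_times_fn()/bch2_vfs_write_inode() below
 * are the real in-tree example. The callback name here is hypothetical:
 *
 *	static int touch_fn(struct btree_trans *trans,
 *			    struct bch_inode_info *inode,
 *			    struct bch_inode_unpacked *bi, void *p)
 *	{
 *		bi->bi_mtime = *(u64 *) p;	// mutate the unpacked inode
 *		return 0;			// nonzero aborts the update
 *	}
 *
 *	// commits the btree update, then refreshes the VFS inode's mtime:
 *	ret = bch2_write_inode(c, inode, touch_fn, &new_mtime, ATTR_MTIME);
 */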

int bch2_fs_quota_transfer(struct bch_fs *c,
			   struct bch_inode_info *inode,
			   struct bch_qid new_qid,
			   unsigned qtypes,
			   enum quota_acct_mode mode)
{
	unsigned i;
	int ret;

	qtypes &= enabled_qtypes(c);

	for (i = 0; i < QTYP_NR; i++)
		if (new_qid.q[i] == inode->ei_qid.q[i])
			qtypes &= ~(1U << i);

	if (!qtypes)
		return 0;

	mutex_lock(&inode->ei_quota_lock);

	ret = bch2_quota_transfer(c, qtypes, new_qid,
				  inode->ei_qid,
				  inode->v.i_blocks +
				  inode->ei_quota_reserved,
				  mode);
	if (!ret)
		for (i = 0; i < QTYP_NR; i++)
			if (qtypes & (1 << i))
				inode->ei_qid.q[i] = new_qid.q[i];

	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}

static bool subvol_inum_eq(subvol_inum a, subvol_inum b)
{
	return a.subvol == b.subvol && a.inum == b.inum;
}

static int bch2_vfs_inode_cmp_fn(struct rhashtable_compare_arg *arg,
				 const void *obj)
{
	const struct bch_inode_info *inode = obj;
	const subvol_inum *v = arg->key;

	return !subvol_inum_eq(inode->ei_inum, *v);
}

static const struct rhashtable_params bch2_vfs_inodes_params = {
	.head_offset		= offsetof(struct bch_inode_info, hash),
	.key_offset		= offsetof(struct bch_inode_info, ei_inum),
	.key_len		= sizeof(subvol_inum),
	.obj_cmpfn		= bch2_vfs_inode_cmp_fn,
	.automatic_shrinking	= true,
};

struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
{
	return rhashtable_lookup_fast(&c->vfs_inodes_table, &inum, bch2_vfs_inodes_params);
}

static void __wait_on_freeing_inode(struct bch_fs *c,
				    struct bch_inode_info *inode,
				    subvol_inum inum)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->v.i_state, __I_NEW);

	wq = inode_bit_waitqueue(&wait, &inode->v, __I_NEW);
	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->v.i_lock);

	if (__bch2_inode_hash_find(c, inum) == inode)
		schedule_timeout(HZ * 10);
	finish_wait(wq, &wait.wq_entry);
}

static struct bch_inode_info *bch2_inode_hash_find(struct bch_fs *c, struct btree_trans *trans,
						   subvol_inum inum)
{
	struct bch_inode_info *inode;
repeat:
	inode = __bch2_inode_hash_find(c, inum);
	if (inode) {
		spin_lock(&inode->v.i_lock);
		if (!test_bit(EI_INODE_HASHED, &inode->ei_flags)) {
			spin_unlock(&inode->v.i_lock);
			return NULL;
		}
		if ((inode->v.i_state & (I_FREEING|I_WILL_FREE))) {
			if (!trans) {
				__wait_on_freeing_inode(c, inode, inum);
			} else {
				bch2_trans_unlock(trans);
				__wait_on_freeing_inode(c, inode, inum);
				int ret = bch2_trans_relock(trans);
				if (ret)
					return ERR_PTR(ret);
			}
			goto repeat;
		}
		__iget(&inode->v);
		spin_unlock(&inode->v.i_lock);
	}

	return inode;
}

static void bch2_inode_hash_remove(struct bch_fs *c, struct bch_inode_info *inode)
{
	spin_lock(&inode->v.i_lock);
	bool remove = test_and_clear_bit(EI_INODE_HASHED, &inode->ei_flags);
	spin_unlock(&inode->v.i_lock);

	if (remove) {
		int ret = rhashtable_remove_fast(&c->vfs_inodes_table,
						 &inode->hash, bch2_vfs_inodes_params);
		BUG_ON(ret);
		inode->v.i_hash.pprev = NULL;
		/*
		 * This pairs with the bch2_inode_hash_find() ->
		 * __wait_on_freeing_inode() path
		 */
		inode_wake_up_bit(&inode->v, __I_NEW);
	}
}
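
/*
 * Note: VFS inodes live in a private rhashtable keyed by subvol_inum rather
 * than in the generic inode hash, because a bare inode number does not
 * uniquely identify an inode across subvolumes. The EI_INODE_HASHED flag
 * plus the __I_NEW wakeup above reimplement the lookup-vs-eviction
 * handshake the generic inode hash would otherwise provide.
 */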

static struct bch_inode_info *bch2_inode_hash_insert(struct bch_fs *c,
						     struct btree_trans *trans,
						     struct bch_inode_info *inode)
{
	struct bch_inode_info *old = inode;

	set_bit(EI_INODE_HASHED, &inode->ei_flags);
retry:
	if (unlikely(rhashtable_lookup_insert_fast(&c->vfs_inodes_table,
						   &inode->hash,
						   bch2_vfs_inodes_params))) {
		old = bch2_inode_hash_find(c, trans, inode->ei_inum);
		if (!old)
			goto retry;

		clear_bit(EI_INODE_HASHED, &inode->ei_flags);

		/*
		 * bcachefs doesn't use I_NEW; we have no use for it since we
		 * only insert fully created inodes in the inode hash table. But
		 * discard_new_inode() expects it to be set...
		 */
		inode->v.i_state |= I_NEW;
		/*
		 * We don't want bch2_evict_inode() to delete the inode on disk,
		 * we just raced and had another inode in cache. Normally new
		 * inodes don't have nlink == 0 - except tmpfiles do...
		 */
		set_nlink(&inode->v, 1);
		discard_new_inode(&inode->v);
		return old;
	} else {
		inode_fake_hash(&inode->v);

		inode_sb_list_add(&inode->v);

		mutex_lock(&c->vfs_inodes_lock);
		list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
		mutex_unlock(&c->vfs_inodes_lock);
		return inode;
	}
}

#define memalloc_flags_do(_flags, _do)						\
({										\
	unsigned _saved_flags = memalloc_flags_save(_flags);			\
	typeof(_do) _ret = _do;							\
	memalloc_noreclaim_restore(_saved_flags);				\
	_ret;									\
})

static struct inode *bch2_alloc_inode(struct super_block *sb)
{
	BUG();
}

static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
{
	struct bch_inode_info *inode = alloc_inode_sb(c->vfs_sb,
						      bch2_inode_cache, GFP_NOFS);
	if (!inode)
		return NULL;

	inode_init_once(&inode->v);
	mutex_init(&inode->ei_update_lock);
	two_state_lock_init(&inode->ei_pagecache_lock);
	INIT_LIST_HEAD(&inode->ei_vfs_inode_list);
	inode->ei_flags = 0;
	mutex_init(&inode->ei_quota_lock);
	memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));

	if (unlikely(inode_init_always(c->vfs_sb, &inode->v))) {
		kmem_cache_free(bch2_inode_cache, inode);
		return NULL;
	}

	return inode;
}
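
/*
 * Note on the allocation dance below: bch2_new_inode() first attempts the
 * allocation with PF_MEMALLOC_NORECLAIM set, so it cannot recurse into
 * reclaim while the transaction holds btree locks; only if that fails does
 * it drop the btree locks via drop_locks_do() and retry with reclaim
 * allowed.
 */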

/*
 * Allocate a new inode, dropping/retaking btree locks if necessary:
 */
static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans)
{
	struct bch_inode_info *inode =
		memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN,
				  __bch2_new_inode(trans->c));

	if (unlikely(!inode)) {
		int ret = drop_locks_do(trans,
				(inode = __bch2_new_inode(trans->c)) ? 0 : -ENOMEM);
		if (ret && inode) {
			__destroy_inode(&inode->v);
			kmem_cache_free(bch2_inode_cache, inode);
		}
		if (ret)
			return ERR_PTR(ret);
	}

	return inode;
}

static struct bch_inode_info *bch2_inode_hash_init_insert(struct btree_trans *trans,
							   subvol_inum inum,
							   struct bch_inode_unpacked *bi,
							   struct bch_subvolume *subvol)
{
	struct bch_inode_info *inode = bch2_new_inode(trans);
	if (IS_ERR(inode))
		return inode;

	bch2_vfs_inode_init(trans, inum, inode, bi, subvol);

	return bch2_inode_hash_insert(trans->c, trans, inode);
}

struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
{
	struct bch_inode_info *inode = bch2_inode_hash_find(c, NULL, inum);
	if (inode)
		return &inode->v;

	struct btree_trans *trans = bch2_trans_get(c);

	struct bch_inode_unpacked inode_u;
	struct bch_subvolume subvol;
	int ret = lockrestart_do(trans,
		bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
		bch2_inode_find_by_inum_trans(trans, inum, &inode_u)) ?:
		PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol));
	bch2_trans_put(trans);

	return ret ? ERR_PTR(ret) : &inode->v;
}
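
/*
 * bch2_vfs_inode_get() above is the cache-or-load entry point used when no
 * btree transaction is held yet (the NFS export helpers and the mount path
 * below); callers already inside a transaction, like bch2_lookup_trans(),
 * go through bch2_inode_hash_init_insert() directly.
 */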

struct bch_inode_info *
__bch2_create(struct mnt_idmap *idmap,
	      struct bch_inode_info *dir, struct dentry *dentry,
	      umode_t mode, dev_t rdev, subvol_inum snapshot_src,
	      unsigned flags)
{
	struct bch_fs *c = dir->v.i_sb->s_fs_info;
	struct btree_trans *trans;
	struct bch_inode_unpacked dir_u;
	struct bch_inode_info *inode;
	struct bch_inode_unpacked inode_u;
	struct posix_acl *default_acl = NULL, *acl = NULL;
	subvol_inum inum;
	struct bch_subvolume subvol;
	u64 journal_seq = 0;
	kuid_t kuid;
	kgid_t kgid;
	int ret;

	/*
	 * preallocate acls + vfs inode before btree transaction, so that
	 * nothing can fail after the transaction succeeds:
	 */
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	ret = posix_acl_create(&dir->v, &mode, &default_acl, &acl);
	if (ret)
		return ERR_PTR(ret);
#endif
	inode = __bch2_new_inode(c);
	if (unlikely(!inode)) {
		inode = ERR_PTR(-ENOMEM);
		goto err;
	}

	bch2_inode_init_early(c, &inode_u);

	if (!(flags & BCH_CREATE_TMPFILE))
		mutex_lock(&dir->ei_update_lock);

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	kuid = mapped_fsuid(idmap, i_user_ns(&dir->v));
	kgid = mapped_fsgid(idmap, i_user_ns(&dir->v));
	ret   = bch2_subvol_is_ro_trans(trans, dir->ei_inum.subvol) ?:
		bch2_create_trans(trans,
				  inode_inum(dir), &dir_u, &inode_u,
				  !(flags & BCH_CREATE_TMPFILE)
				  ? &dentry->d_name : NULL,
				  from_kuid(i_user_ns(&dir->v), kuid),
				  from_kgid(i_user_ns(&dir->v), kgid),
				  mode, rdev,
				  default_acl, acl, snapshot_src, flags) ?:
		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
				KEY_TYPE_QUOTA_PREALLOC);
	if (unlikely(ret))
		goto err_before_quota;

	inum.subvol = inode_u.bi_subvol ?: dir->ei_inum.subvol;
	inum.inum = inode_u.bi_inum;

	ret   = bch2_subvolume_get(trans, inum.subvol, true,
				   BTREE_ITER_with_updates, &subvol) ?:
		bch2_trans_commit(trans, NULL, &journal_seq, 0);
	if (unlikely(ret)) {
		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
				KEY_TYPE_QUOTA_WARN);
err_before_quota:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;
		goto err_trans;
	}

	if (!(flags & BCH_CREATE_TMPFILE)) {
		bch2_inode_update_after_write(trans, dir, &dir_u,
					      ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&dir->ei_update_lock);
	}

	bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);

	set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
	set_cached_acl(&inode->v, ACL_TYPE_DEFAULT, default_acl);

	/*
	 * we must insert the new inode into the inode cache before calling
	 * bch2_trans_exit() and dropping locks, else we could race with another
	 * thread pulling the inode in and modifying it:
	 *
	 * also, calling bch2_inode_hash_insert() without passing in the
	 * transaction object is sketchy - if we could ever end up in
	 * __wait_on_freeing_inode(), we'd risk deadlock.
	 *
	 * But that shouldn't be possible, since we still have the inode locked
	 * that we just created, and we _really_ can't take a transaction
	 * restart here.
	 */
	inode = bch2_inode_hash_insert(c, NULL, inode);
	bch2_trans_put(trans);
err:
	posix_acl_release(default_acl);
	posix_acl_release(acl);
	return inode;
err_trans:
	if (!(flags & BCH_CREATE_TMPFILE))
		mutex_unlock(&dir->ei_update_lock);

	bch2_trans_put(trans);
	make_bad_inode(&inode->v);
	iput(&inode->v);
	inode = ERR_PTR(ret);
	goto err;
}

/* methods */
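
/*
 * Lookup below resolves a name to a (subvol, inum) via the dirents btree,
 * then pulls the inode into the VFS cache - checking the inode hash table
 * first so the inodes btree walk is skipped entirely on a cache hit.
 */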

static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
			subvol_inum dir, struct bch_hash_info *dir_hash_info,
			const struct qstr *name)
{
	struct bch_fs *c = trans->c;
	struct btree_iter dirent_iter = {};
	subvol_inum inum = {};
	struct printbuf buf = PRINTBUF;

	struct bkey_s_c k = bch2_hash_lookup(trans, &dirent_iter, bch2_dirent_hash_desc,
					     dir_hash_info, dir, name, 0);
	int ret = bkey_err(k);
	if (ret)
		return ERR_PTR(ret);

	ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), &inum);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto err;

	struct bch_inode_info *inode = bch2_inode_hash_find(c, trans, inum);
	if (inode)
		goto out;

	struct bch_subvolume subvol;
	struct bch_inode_unpacked inode_u;
	ret =   bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
		bch2_inode_find_by_inum_nowarn_trans(trans, inum, &inode_u) ?:
		PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol));

	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
				c, "dirent to missing inode:\n %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
	if (ret)
		goto err;

	/* regular files may have hardlinks: */
	if (bch2_fs_inconsistent_on(bch2_inode_should_have_bp(&inode_u) &&
				    !bkey_eq(k.k->p, POS(inode_u.bi_dir, inode_u.bi_dir_offset)),
				    c,
				    "dirent points to inode that does not point back:\n %s",
				    (bch2_bkey_val_to_text(&buf, c, k),
				     prt_printf(&buf, "\n "),
				     bch2_inode_unpacked_to_text(&buf, &inode_u),
				     buf.buf))) {
		ret = -ENOENT;
		goto err;
	}
out:
	bch2_trans_iter_exit(trans, &dirent_iter);
	printbuf_exit(&buf);
	return inode;
err:
	inode = ERR_PTR(ret);
	goto out;
}

static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
				  unsigned int flags)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_hash_info hash = bch2_hash_info_init(c, &dir->ei_inode);

	struct bch_inode_info *inode;
	bch2_trans_do(c, NULL, NULL, 0,
		PTR_ERR_OR_ZERO(inode = bch2_lookup_trans(trans, inode_inum(dir),
							  &hash, &dentry->d_name)));
	if (IS_ERR(inode))
		inode = NULL;

	return d_splice_alias(&inode->v, dentry);
}

static int bch2_mknod(struct mnt_idmap *idmap,
		      struct inode *vdir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct bch_inode_info *inode =
		__bch2_create(idmap, to_bch_ei(vdir), dentry, mode, rdev,
			      (subvol_inum) { 0 }, 0);

	if (IS_ERR(inode))
		return bch2_err_class(PTR_ERR(inode));

	d_instantiate(dentry, &inode->v);
	return 0;
}

static int bch2_create(struct mnt_idmap *idmap,
		       struct inode *vdir, struct dentry *dentry,
		       umode_t mode, bool excl)
{
	return bch2_mknod(idmap, vdir, dentry, mode|S_IFREG, 0);
}

static int __bch2_link(struct bch_fs *c,
		       struct bch_inode_info *inode,
		       struct bch_inode_info *dir,
		       struct dentry *dentry)
{
	struct bch_inode_unpacked dir_u, inode_u;
	int ret;

	mutex_lock(&inode->ei_update_lock);
	struct btree_trans *trans = bch2_trans_get(c);

	ret = commit_do(trans, NULL, NULL, 0,
			bch2_link_trans(trans,
					inode_inum(dir), &dir_u,
					inode_inum(inode), &inode_u,
					&dentry->d_name));

	if (likely(!ret)) {
		bch2_inode_update_after_write(trans, dir, &dir_u,
					      ATTR_MTIME|ATTR_CTIME);
		bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME);
	}

	bch2_trans_put(trans);
	mutex_unlock(&inode->ei_update_lock);
	return ret;
}

static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
		     struct dentry *dentry)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_inode_info *inode = to_bch_ei(old_dentry->d_inode);
	int ret;

	lockdep_assert_held(&inode->v.i_rwsem);

	ret   = bch2_subvol_is_ro(c, dir->ei_inum.subvol) ?:
		bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
		__bch2_link(c, inode, dir, dentry);
	if (unlikely(ret))
		return bch2_err_class(ret);

	ihold(&inode->v);
	d_instantiate(dentry, &inode->v);
	return 0;
}
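
/*
 * __bch2_unlink() backs both unlink and rmdir (both wired to bch2_unlink()
 * in bch_dir_inode_operations below); it is also non-static so callers
 * outside this file can pass deleting_snapshot = true for subvolume
 * deletion.
 */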

int __bch2_unlink(struct inode *vdir, struct dentry *dentry,
		  bool deleting_snapshot)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_inode_unpacked dir_u, inode_u;
	int ret;

	bch2_lock_inodes(INODE_UPDATE_LOCK, dir, inode);

	struct btree_trans *trans = bch2_trans_get(c);

	ret = commit_do(trans, NULL, NULL,
			BCH_TRANS_COMMIT_no_enospc,
			bch2_unlink_trans(trans,
					  inode_inum(dir), &dir_u,
					  &inode_u, &dentry->d_name,
					  deleting_snapshot));
	if (unlikely(ret))
		goto err;

	bch2_inode_update_after_write(trans, dir, &dir_u,
				      ATTR_MTIME|ATTR_CTIME);
	bch2_inode_update_after_write(trans, inode, &inode_u,
				      ATTR_MTIME);

	if (inode_u.bi_subvol) {
		/*
		 * Subvolume deletion is asynchronous, but we still want to tell
		 * the VFS that it's been deleted here:
		 */
		set_nlink(&inode->v, 0);
	}
err:
	bch2_trans_put(trans);
	bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode);

	return ret;
}

static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
{
	struct bch_inode_info *dir = to_bch_ei(vdir);
	struct bch_fs *c = dir->v.i_sb->s_fs_info;

	int ret = bch2_subvol_is_ro(c, dir->ei_inum.subvol) ?:
		__bch2_unlink(vdir, dentry, false);

	return bch2_err_class(ret);
}

static int bch2_symlink(struct mnt_idmap *idmap,
			struct inode *vdir, struct dentry *dentry,
			const char *symname)
{
	struct bch_fs *c = vdir->i_sb->s_fs_info;
	struct bch_inode_info *dir = to_bch_ei(vdir), *inode;
	int ret;

	inode = __bch2_create(idmap, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
	if (IS_ERR(inode))
		return bch2_err_class(PTR_ERR(inode));

	inode_lock(&inode->v);
	ret = page_symlink(&inode->v, symname, strlen(symname) + 1);
	inode_unlock(&inode->v);

	if (unlikely(ret))
		goto err;

	ret = filemap_write_and_wait_range(inode->v.i_mapping, 0, LLONG_MAX);
	if (unlikely(ret))
		goto err;

	ret = __bch2_link(c, inode, dir, dentry);
	if (unlikely(ret))
		goto err;

	d_instantiate(dentry, &inode->v);
	return 0;
err:
	iput(&inode->v);
	return bch2_err_class(ret);
}

static int bch2_mkdir(struct mnt_idmap *idmap,
		      struct inode *vdir, struct dentry *dentry, umode_t mode)
{
	return bch2_mknod(idmap, vdir, dentry, mode|S_IFDIR, 0);
}
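
/*
 * Rename flags map onto bcachefs's internal rename modes below:
 * RENAME_EXCHANGE becomes BCH_RENAME_EXCHANGE, an existing destination
 * means BCH_RENAME_OVERWRITE, otherwise plain BCH_RENAME; RENAME_WHITEOUT
 * is handled by creating the whiteout inode inside the same transaction.
 */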

static int bch2_rename2(struct mnt_idmap *idmap,
			struct inode *src_vdir, struct dentry *src_dentry,
			struct inode *dst_vdir, struct dentry *dst_dentry,
			unsigned flags)
{
	struct bch_fs *c = src_vdir->i_sb->s_fs_info;
	struct bch_inode_info *src_dir = to_bch_ei(src_vdir);
	struct bch_inode_info *dst_dir = to_bch_ei(dst_vdir);
	struct bch_inode_info *src_inode = to_bch_ei(src_dentry->d_inode);
	struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode);
	struct bch_inode_unpacked dst_dir_u, src_dir_u;
	struct bch_inode_unpacked src_inode_u, dst_inode_u, *whiteout_inode_u;
	struct btree_trans *trans;
	enum bch_rename_mode mode = flags & RENAME_EXCHANGE
		? BCH_RENAME_EXCHANGE
		: dst_dentry->d_inode
		? BCH_RENAME_OVERWRITE : BCH_RENAME;
	bool whiteout = !!(flags & RENAME_WHITEOUT);
	int ret;

	if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE|RENAME_WHITEOUT))
		return -EINVAL;

	if (mode == BCH_RENAME_OVERWRITE) {
		ret = filemap_write_and_wait_range(src_inode->v.i_mapping,
						   0, LLONG_MAX);
		if (ret)
			return ret;
	}

	bch2_lock_inodes(INODE_UPDATE_LOCK,
			 src_dir,
			 dst_dir,
			 src_inode,
			 dst_inode);

	trans = bch2_trans_get(c);

	ret   = bch2_subvol_is_ro_trans(trans, src_dir->ei_inum.subvol) ?:
		bch2_subvol_is_ro_trans(trans, dst_dir->ei_inum.subvol);
	if (ret)
		goto err;

	if (inode_attr_changing(dst_dir, src_inode, Inode_opt_project)) {
		ret = bch2_fs_quota_transfer(c, src_inode,
					     dst_dir->ei_qid,
					     1 << QTYP_PRJ,
					     KEY_TYPE_QUOTA_PREALLOC);
		if (ret)
			goto err;
	}

	if (mode == BCH_RENAME_EXCHANGE &&
	    inode_attr_changing(src_dir, dst_inode, Inode_opt_project)) {
		ret = bch2_fs_quota_transfer(c, dst_inode,
					     src_dir->ei_qid,
					     1 << QTYP_PRJ,
					     KEY_TYPE_QUOTA_PREALLOC);
		if (ret)
			goto err;
	}
retry:
	bch2_trans_begin(trans);

	ret = bch2_rename_trans(trans,
				inode_inum(src_dir), &src_dir_u,
				inode_inum(dst_dir), &dst_dir_u,
				&src_inode_u,
				&dst_inode_u,
				&src_dentry->d_name,
				&dst_dentry->d_name,
				mode);
	if (unlikely(ret))
		goto err_tx_restart;

	if (whiteout) {
		whiteout_inode_u = bch2_trans_kmalloc_nomemzero(trans, sizeof(*whiteout_inode_u));
		ret = PTR_ERR_OR_ZERO(whiteout_inode_u);
		if (unlikely(ret))
			goto err_tx_restart;
		bch2_inode_init_early(c, whiteout_inode_u);

		ret   = bch2_create_trans(trans,
					  inode_inum(src_dir), &src_dir_u,
					  whiteout_inode_u,
					  &src_dentry->d_name,
					  from_kuid(i_user_ns(&src_dir->v), current_fsuid()),
					  from_kgid(i_user_ns(&src_dir->v), current_fsgid()),
					  S_IFCHR|WHITEOUT_MODE, 0,
					  NULL, NULL, (subvol_inum) { 0 }, 0) ?:
			bch2_quota_acct(c, bch_qid(whiteout_inode_u), Q_INO, 1,
					KEY_TYPE_QUOTA_PREALLOC);
		if (unlikely(ret))
			goto err_tx_restart;
	}

	ret = bch2_trans_commit(trans, NULL, NULL, 0);
	if (unlikely(ret)) {
err_tx_restart:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;
		goto err;
	}

	BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
	BUG_ON(dst_inode &&
	       dst_inode->v.i_ino != dst_inode_u.bi_inum);

	bch2_inode_update_after_write(trans, src_dir, &src_dir_u,
				      ATTR_MTIME|ATTR_CTIME);

	if (src_dir != dst_dir)
		bch2_inode_update_after_write(trans, dst_dir, &dst_dir_u,
					      ATTR_MTIME|ATTR_CTIME);

	bch2_inode_update_after_write(trans, src_inode, &src_inode_u,
				      ATTR_CTIME);

	if (dst_inode)
		bch2_inode_update_after_write(trans, dst_inode, &dst_inode_u,
					      ATTR_CTIME);
err:
	bch2_trans_put(trans);

	bch2_fs_quota_transfer(c, src_inode,
			       bch_qid(&src_inode->ei_inode),
			       1 << QTYP_PRJ,
			       KEY_TYPE_QUOTA_NOCHECK);
	if (dst_inode)
		bch2_fs_quota_transfer(c, dst_inode,
				       bch_qid(&dst_inode->ei_inode),
				       1 << QTYP_PRJ,
				       KEY_TYPE_QUOTA_NOCHECK);

	bch2_unlock_inodes(INODE_UPDATE_LOCK,
			   src_dir,
			   dst_dir,
			   src_inode,
			   dst_inode);

	return bch2_err_class(ret);
}
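
/*
 * bch2_setattr_copy() below is the bcachefs analogue of setattr_copy() in
 * fs/attr.c: it writes the requested attributes into the unpacked bcachefs
 * inode so the change goes through the btree transaction, including the
 * S_ISGID clearing when chmod is done by a non-group-member.
 */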

static void bch2_setattr_copy(struct mnt_idmap *idmap,
			      struct bch_inode_info *inode,
			      struct bch_inode_unpacked *bi,
			      struct iattr *attr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	unsigned int ia_valid = attr->ia_valid;
	kuid_t kuid;
	kgid_t kgid;

	if (ia_valid & ATTR_UID) {
		kuid = from_vfsuid(idmap, i_user_ns(&inode->v), attr->ia_vfsuid);
		bi->bi_uid = from_kuid(i_user_ns(&inode->v), kuid);
	}
	if (ia_valid & ATTR_GID) {
		kgid = from_vfsgid(idmap, i_user_ns(&inode->v), attr->ia_vfsgid);
		bi->bi_gid = from_kgid(i_user_ns(&inode->v), kgid);
	}

	if (ia_valid & ATTR_SIZE)
		bi->bi_size = attr->ia_size;

	if (ia_valid & ATTR_ATIME)
		bi->bi_atime = timespec_to_bch2_time(c, attr->ia_atime);
	if (ia_valid & ATTR_MTIME)
		bi->bi_mtime = timespec_to_bch2_time(c, attr->ia_mtime);
	if (ia_valid & ATTR_CTIME)
		bi->bi_ctime = timespec_to_bch2_time(c, attr->ia_ctime);

	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		kgid_t gid = ia_valid & ATTR_GID
			? kgid
			: inode->v.i_gid;

		if (!in_group_or_capable(idmap, &inode->v,
					 make_vfsgid(idmap, i_user_ns(&inode->v), gid)))
			mode &= ~S_ISGID;
		bi->bi_mode = mode;
	}
}

int bch2_setattr_nonsize(struct mnt_idmap *idmap,
			 struct bch_inode_info *inode,
			 struct iattr *attr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_qid qid;
	struct btree_trans *trans;
	struct btree_iter inode_iter = { NULL };
	struct bch_inode_unpacked inode_u;
	struct posix_acl *acl = NULL;
	kuid_t kuid;
	kgid_t kgid;
	int ret;

	mutex_lock(&inode->ei_update_lock);

	qid = inode->ei_qid;

	if (attr->ia_valid & ATTR_UID) {
		kuid = from_vfsuid(idmap, i_user_ns(&inode->v), attr->ia_vfsuid);
		qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), kuid);
	}

	if (attr->ia_valid & ATTR_GID) {
		kgid = from_vfsgid(idmap, i_user_ns(&inode->v), attr->ia_vfsgid);
		qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), kgid);
	}

	ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
				     KEY_TYPE_QUOTA_PREALLOC);
	if (ret)
		goto err;

	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);
	kfree(acl);
	acl = NULL;

	ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
			      BTREE_ITER_intent);
	if (ret)
		goto btree_err;

	bch2_setattr_copy(idmap, inode, &inode_u, attr);

	if (attr->ia_valid & ATTR_MODE) {
		ret = bch2_acl_chmod(trans, inode_inum(inode), &inode_u,
				     inode_u.bi_mode, &acl);
		if (ret)
			goto btree_err;
	}

	ret =   bch2_inode_write(trans, &inode_iter, &inode_u) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc);
btree_err:
	bch2_trans_iter_exit(trans, &inode_iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;
	if (unlikely(ret))
		goto err_trans;

	bch2_inode_update_after_write(trans, inode, &inode_u, attr->ia_valid);

	if (acl)
		set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
err_trans:
	bch2_trans_put(trans);
err:
	mutex_unlock(&inode->ei_update_lock);

	return bch2_err_class(ret);
}

static int bch2_getattr(struct mnt_idmap *idmap,
			const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned query_flags)
{
	struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, &inode->v);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, &inode->v);

	stat->dev	= inode->v.i_sb->s_dev;
	stat->ino	= inode->v.i_ino;
	stat->mode	= inode->v.i_mode;
	stat->nlink	= inode->v.i_nlink;
	stat->uid	= vfsuid_into_kuid(vfsuid);
	stat->gid	= vfsgid_into_kgid(vfsgid);
	stat->rdev	= inode->v.i_rdev;
	stat->size	= i_size_read(&inode->v);
	stat->atime	= inode_get_atime(&inode->v);
	stat->mtime	= inode_get_mtime(&inode->v);
	stat->ctime	= inode_get_ctime(&inode->v);
	stat->blksize	= block_bytes(c);
	stat->blocks	= inode->v.i_blocks;

	stat->subvol	= inode->ei_inum.subvol;
	stat->result_mask |= STATX_SUBVOL;

	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->v.i_mode)) {
		stat->result_mask |= STATX_DIOALIGN;
		/*
		 * this is incorrect; we should be tracking this in superblock,
		 * and checking the alignment of open devices
		 */
		stat->dio_mem_align	= SECTOR_SIZE;
		stat->dio_offset_align	= block_bytes(c);
	}

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
	}

	if (inode->ei_inode.bi_flags & BCH_INODE_immutable)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= STATX_ATTR_IMMUTABLE;

	if (inode->ei_inode.bi_flags & BCH_INODE_append)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= STATX_ATTR_APPEND;

	if (inode->ei_inode.bi_flags & BCH_INODE_nodump)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= STATX_ATTR_NODUMP;

	return 0;
}

static int bch2_setattr(struct mnt_idmap *idmap,
			struct dentry *dentry, struct iattr *iattr)
{
	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret;

	lockdep_assert_held(&inode->v.i_rwsem);

	ret   = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?:
		setattr_prepare(idmap, dentry, iattr);
	if (ret)
		return ret;

	return iattr->ia_valid & ATTR_SIZE
		? bchfs_truncate(idmap, inode, iattr)
		: bch2_setattr_nonsize(idmap, inode, iattr);
}

static int bch2_tmpfile(struct mnt_idmap *idmap,
			struct inode *vdir, struct file *file, umode_t mode)
{
	struct bch_inode_info *inode =
		__bch2_create(idmap, to_bch_ei(vdir),
			      file->f_path.dentry, mode, 0,
			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);

	if (IS_ERR(inode))
		return bch2_err_class(PTR_ERR(inode));

	d_mark_tmpfile(file, &inode->v);
	d_instantiate(file->f_path.dentry, &inode->v);
	return finish_open_simple(file, 0);
}

static int bch2_fill_extent(struct bch_fs *c,
			    struct fiemap_extent_info *info,
			    struct bkey_s_c k, unsigned flags)
{
	if (bkey_extent_is_direct_data(k.k)) {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		int ret;

		if (k.k->type == KEY_TYPE_reflink_v)
			flags |= FIEMAP_EXTENT_SHARED;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int flags2 = 0;
			u64 offset = p.ptr.offset;

			if (p.ptr.unwritten)
				flags2 |= FIEMAP_EXTENT_UNWRITTEN;

			if (p.crc.compression_type)
				flags2 |= FIEMAP_EXTENT_ENCODED;
			else
				offset += p.crc.offset;

			if ((offset & (block_sectors(c) - 1)) ||
			    (k.k->size & (block_sectors(c) - 1)))
				flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;

			ret = fiemap_fill_next_extent(info,
						bkey_start_offset(k.k) << 9,
						offset << 9,
						k.k->size << 9, flags|flags2);
			if (ret)
				return ret;
		}

		return 0;
	} else if (bkey_extent_is_inline_data(k.k)) {
		return fiemap_fill_next_extent(info,
					       bkey_start_offset(k.k) << 9,
					       0, k.k->size << 9,
					       flags|
					       FIEMAP_EXTENT_DATA_INLINE);
	} else if (k.k->type == KEY_TYPE_reservation) {
		return fiemap_fill_next_extent(info,
					       bkey_start_offset(k.k) << 9,
					       0, k.k->size << 9,
					       flags|
					       FIEMAP_EXTENT_DELALLOC|
					       FIEMAP_EXTENT_UNWRITTEN);
	} else {
		BUG();
	}
}
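
/*
 * Extent keys store positions and sizes in 512-byte sectors, while the
 * fiemap ABI wants bytes - hence the << 9 conversions above and the
 * sector-based arithmetic in bch2_fiemap() below.
 */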

static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
		       u64 start, u64 len)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *ei = to_bch_ei(vinode);
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_buf cur, prev;
	unsigned offset_into_extent, sectors;
	bool have_extent = false;
	int ret = 0;

	ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
	if (start + len < start)
		return -EINVAL;

	start >>= 9;

	bch2_bkey_buf_init(&cur);
	bch2_bkey_buf_init(&prev);
	trans = bch2_trans_get(c);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(ei->v.i_ino, start), 0);

	while (true) {
		enum btree_id data_btree = BTREE_ID_extents;

		bch2_trans_begin(trans);

		u32 snapshot;
		ret = bch2_subvolume_get_snapshot(trans, ei->ei_inum.subvol, &snapshot);
		if (ret)
			goto err;

		bch2_btree_iter_set_snapshot(&iter, snapshot);

		k = bch2_btree_iter_peek_upto(&iter, end);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (!k.k)
			break;

		if (!bkey_extent_is_data(k.k) &&
		    k.k->type != KEY_TYPE_reservation) {
			bch2_btree_iter_advance(&iter);
			continue;
		}

		offset_into_extent	= iter.pos.offset -
			bkey_start_offset(k.k);
		sectors			= k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&cur, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
						&offset_into_extent, &cur);
		if (ret)
			break;

		k = bkey_i_to_s_c(cur.k);
		bch2_bkey_buf_realloc(&prev, c, k.k->u64s);

		sectors = min(sectors, k.k->size - offset_into_extent);

		bch2_cut_front(POS(k.k->p.inode,
				   bkey_start_offset(k.k) +
				   offset_into_extent),
			       cur.k);
		bch2_key_resize(&cur.k->k, sectors);
		cur.k->k.p = iter.pos;
		cur.k->k.p.offset += cur.k->k.size;

		if (have_extent) {
			bch2_trans_unlock(trans);
			ret = bch2_fill_extent(c, info,
					bkey_i_to_s_c(prev.k), 0);
			if (ret)
				break;
		}

		bkey_copy(prev.k, cur.k);
		have_extent = true;

		bch2_btree_iter_set_pos(&iter,
			POS(iter.pos.inode, iter.pos.offset + sectors));
err:
		if (ret &&
		    !bch2_err_matches(ret, BCH_ERR_transaction_restart))
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	if (!ret && have_extent) {
		bch2_trans_unlock(trans);
		ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
				       FIEMAP_EXTENT_LAST);
	}

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&cur, c);
	bch2_bkey_buf_exit(&prev, c);
	return ret < 0 ? ret : 0;
}

static const struct vm_operations_struct bch_vm_ops = {
	.fault		= bch2_page_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= bch2_page_mkwrite,
};

static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);

	vma->vm_ops = &bch_vm_ops;
	return 0;
}

/* Directories: */

static loff_t bch2_dir_llseek(struct file *file, loff_t offset, int whence)
{
	return generic_file_llseek_size(file, offset, whence,
					S64_MAX, S64_MAX);
}

static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	if (!dir_emit_dots(file, ctx))
		return 0;

	int ret = bch2_readdir(c, inode_inum(inode), ctx);

	bch_err_fn(c, ret);
	return bch2_err_class(ret);
}

static int bch2_open(struct inode *vinode, struct file *file)
{
	if (file->f_flags & (O_WRONLY|O_RDWR)) {
		struct bch_inode_info *inode = to_bch_ei(vinode);
		struct bch_fs *c = inode->v.i_sb->s_fs_info;

		int ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol);
		if (ret)
			return ret;
	}

	file->f_mode |= FMODE_CAN_ODIRECT;

	return generic_file_open(vinode, file);
}

static const struct file_operations bch_file_operations = {
	.open		= bch2_open,
	.llseek		= bch2_llseek,
	.read_iter	= bch2_read_iter,
	.write_iter	= bch2_write_iter,
	.mmap		= bch2_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.fsync		= bch2_fsync,
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= bch2_fallocate_dispatch,
	.unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= bch2_compat_fs_ioctl,
#endif
	.remap_file_range = bch2_remap_file_range,
};

static const struct inode_operations bch_file_inode_operations = {
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.fiemap		= bch2_fiemap,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_inode_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};

static const struct inode_operations bch_dir_inode_operations = {
	.lookup		= bch2_lookup,
	.create		= bch2_create,
	.link		= bch2_link,
	.unlink		= bch2_unlink,
	.symlink	= bch2_symlink,
	.mkdir		= bch2_mkdir,
	.rmdir		= bch2_unlink,
	.mknod		= bch2_mknod,
	.rename		= bch2_rename2,
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.tmpfile	= bch2_tmpfile,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_inode_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};

static const struct file_operations bch_dir_file_operations = {
	.llseek		= bch2_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= bch2_vfs_readdir,
	.fsync		= bch2_fsync,
	.unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= bch2_compat_fs_ioctl,
#endif
};

static const struct inode_operations bch_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_inode_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};

static const struct inode_operations bch_special_inode_operations = {
	.getattr	= bch2_getattr,
	.setattr	= bch2_setattr,
	.listxattr	= bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	.get_inode_acl	= bch2_get_acl,
	.set_acl	= bch2_set_acl,
#endif
};

static const struct address_space_operations bch_address_space_operations = {
	.read_folio	= bch2_read_folio,
	.writepages	= bch2_writepages,
	.readahead	= bch2_readahead,
	.dirty_folio	= filemap_dirty_folio,
	.write_begin	= bch2_write_begin,
	.write_end	= bch2_write_end,
	.invalidate_folio = bch2_invalidate_folio,
	.release_folio	= bch2_release_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= filemap_migrate_folio,
#endif
	.error_remove_folio = generic_error_remove_folio,
};

struct bcachefs_fid {
	u64		inum;
	u32		subvol;
	u32		gen;
} __packed;

struct bcachefs_fid_with_parent {
	struct bcachefs_fid	fid;
	struct bcachefs_fid	dir;
} __packed;

static int bcachefs_fid_valid(int fh_len, int fh_type)
{
	switch (fh_type) {
	case FILEID_BCACHEFS_WITHOUT_PARENT:
		return fh_len == sizeof(struct bcachefs_fid) / sizeof(u32);
	case FILEID_BCACHEFS_WITH_PARENT:
		return fh_len == sizeof(struct bcachefs_fid_with_parent) / sizeof(u32);
	default:
		return false;
	}
}

static struct bcachefs_fid bch2_inode_to_fid(struct bch_inode_info *inode)
{
	return (struct bcachefs_fid) {
		.inum	= inode->ei_inum.inum,
		.subvol	= inode->ei_inum.subvol,
		.gen	= inode->ei_inode.bi_generation,
	};
}
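
/*
 * Per exportfs convention, file handle lengths are counted in u32 units: a
 * bare bcachefs_fid is 16 bytes (fh_len == 4), one with a parent is 32
 * bytes (fh_len == 8). The generation number lets a stale handle be
 * detected (-ESTALE) after an inode number has been reused.
 */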

static int bch2_encode_fh(struct inode *vinode, u32 *fh, int *len,
			  struct inode *vdir)
{
	struct bch_inode_info *inode	= to_bch_ei(vinode);
	struct bch_inode_info *dir	= to_bch_ei(vdir);
	int min_len;

	if (!S_ISDIR(inode->v.i_mode) && dir) {
		struct bcachefs_fid_with_parent *fid = (void *) fh;

		min_len = sizeof(*fid) / sizeof(u32);
		if (*len < min_len) {
			*len = min_len;
			return FILEID_INVALID;
		}

		fid->fid = bch2_inode_to_fid(inode);
		fid->dir = bch2_inode_to_fid(dir);

		*len = min_len;
		return FILEID_BCACHEFS_WITH_PARENT;
	} else {
		struct bcachefs_fid *fid = (void *) fh;

		min_len = sizeof(*fid) / sizeof(u32);
		if (*len < min_len) {
			*len = min_len;
			return FILEID_INVALID;
		}
		*fid = bch2_inode_to_fid(inode);

		*len = min_len;
		return FILEID_BCACHEFS_WITHOUT_PARENT;
	}
}

static struct inode *bch2_nfs_get_inode(struct super_block *sb,
					struct bcachefs_fid fid)
{
	struct bch_fs *c = sb->s_fs_info;
	struct inode *vinode = bch2_vfs_inode_get(c, (subvol_inum) {
				    .subvol = fid.subvol,
				    .inum = fid.inum,
	});
	if (!IS_ERR(vinode) && vinode->i_generation != fid.gen) {
		iput(vinode);
		vinode = ERR_PTR(-ESTALE);
	}
	return vinode;
}

static struct dentry *bch2_fh_to_dentry(struct super_block *sb, struct fid *_fid,
		int fh_len, int fh_type)
{
	struct bcachefs_fid *fid = (void *) _fid;

	if (!bcachefs_fid_valid(fh_len, fh_type))
		return NULL;

	return d_obtain_alias(bch2_nfs_get_inode(sb, *fid));
}

static struct dentry *bch2_fh_to_parent(struct super_block *sb, struct fid *_fid,
		int fh_len, int fh_type)
{
	struct bcachefs_fid_with_parent *fid = (void *) _fid;

	if (!bcachefs_fid_valid(fh_len, fh_type) ||
	    fh_type != FILEID_BCACHEFS_WITH_PARENT)
		return NULL;

	return d_obtain_alias(bch2_nfs_get_inode(sb, fid->dir));
}

static struct dentry *bch2_get_parent(struct dentry *child)
{
	struct bch_inode_info *inode = to_bch_ei(child->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	subvol_inum parent_inum = {
		.subvol = inode->ei_inode.bi_parent_subvol ?:
			inode->ei_inum.subvol,
		.inum = inode->ei_inode.bi_dir,
	};

	return d_obtain_alias(bch2_vfs_inode_get(c, parent_inum));
}

static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child)
{
	struct bch_inode_info *inode	= to_bch_ei(child->d_inode);
	struct bch_inode_info *dir	= to_bch_ei(parent->d_inode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct btree_trans *trans;
	struct btree_iter iter1;
	struct btree_iter iter2;
	struct bkey_s_c k;
	struct bkey_s_c_dirent d;
	struct bch_inode_unpacked inode_u;
	subvol_inum target;
	u32 snapshot;
	struct qstr dirent_name;
	unsigned name_len = 0;
	int ret;

	if (!S_ISDIR(dir->v.i_mode))
		return -EINVAL;

	trans = bch2_trans_get(c);

	bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents,
			     POS(dir->ei_inode.bi_inum, 0), 0);
	bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents,
			     POS(dir->ei_inode.bi_inum, 0), 0);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, dir->ei_inum.subvol, &snapshot);
	if (ret)
		goto err;

	bch2_btree_iter_set_snapshot(&iter1, snapshot);
	bch2_btree_iter_set_snapshot(&iter2, snapshot);

	ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u);
	if (ret)
		goto err;

	if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
		bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));

		k = bch2_btree_iter_peek_slot(&iter1);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (k.k->type != KEY_TYPE_dirent) {
			ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
			goto err;
		}

		d = bkey_s_c_to_dirent(k);
		ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
		if (ret > 0)
			ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
		if (ret)
			goto err;

		if (subvol_inum_eq(target, inode->ei_inum))
			goto found;
	} else {
		/*
		 * File with multiple hardlinks and our backref is to the wrong
		 * directory - linear search:
		 */
		for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
			if (k.k->p.inode > dir->ei_inode.bi_inum)
				break;

			if (k.k->type != KEY_TYPE_dirent)
				continue;

			d = bkey_s_c_to_dirent(k);
			ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
			if (ret < 0)
				break;
			if (ret)
				continue;

			if (subvol_inum_eq(target, inode->ei_inum))
				goto found;
		}
	}

	ret = -ENOENT;
	goto err;
found:
	dirent_name = bch2_dirent_get_name(d);

	name_len = min_t(unsigned, dirent_name.len, NAME_MAX);
	memcpy(name, dirent_name.name, name_len);
	name[name_len] = '\0';
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_iter_exit(trans, &iter1);
	bch2_trans_iter_exit(trans, &iter2);
	bch2_trans_put(trans);

	return ret;
}

static const struct export_operations bch_export_ops = {
	.encode_fh	= bch2_encode_fh,
	.fh_to_dentry	= bch2_fh_to_dentry,
	.fh_to_parent	= bch2_fh_to_parent,
	.get_parent	= bch2_get_parent,
	.get_name	= bch2_get_name,
};

static void bch2_vfs_inode_init(struct btree_trans *trans,
				subvol_inum inum,
				struct bch_inode_info *inode,
				struct bch_inode_unpacked *bi,
				struct bch_subvolume *subvol)
{
	inode->v.i_ino		= inum.inum;
	inode->ei_inum		= inum;
	inode->ei_inode.bi_inum	= inum.inum;
	bch2_inode_update_after_write(trans, inode, bi, ~0);

	inode->v.i_blocks	= bi->bi_sectors;
	inode->v.i_ino		= bi->bi_inum;
	inode->v.i_rdev		= bi->bi_dev;
	inode->v.i_generation	= bi->bi_generation;
	inode->v.i_size		= bi->bi_size;

	inode->ei_flags		= 0;
	inode->ei_quota_reserved = 0;
	inode->ei_qid		= bch_qid(bi);

	if (BCH_SUBVOLUME_SNAP(subvol))
		set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);

	inode->v.i_mapping->a_ops = &bch_address_space_operations;

	switch (inode->v.i_mode & S_IFMT) {
	case S_IFREG:
		inode->v.i_op	= &bch_file_inode_operations;
		inode->v.i_fop	= &bch_file_operations;
		break;
	case S_IFDIR:
		inode->v.i_op	= &bch_dir_inode_operations;
		inode->v.i_fop	= &bch_dir_file_operations;
		break;
	case S_IFLNK:
		inode_nohighmem(&inode->v);
		inode->v.i_op	= &bch_symlink_inode_operations;
		break;
	default:
		init_special_inode(&inode->v, inode->v.i_mode, inode->v.i_rdev);
		inode->v.i_op	= &bch_special_inode_operations;
		break;
	}

	mapping_set_large_folios(inode->v.i_mapping);
}

static void bch2_free_inode(struct inode *vinode)
{
	kmem_cache_free(bch2_inode_cache, to_bch_ei(vinode));
}
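
/*
 * inode_update_times_fn() below is the canonical in-tree inode_set_fn:
 * ->write_inode uses it to copy the VFS in-memory timestamps into the
 * unpacked inode before it is rewritten through the btree transaction.
 */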

static int inode_update_times_fn(struct btree_trans *trans,
				 struct bch_inode_info *inode,
				 struct bch_inode_unpacked *bi,
				 void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_atime	= timespec_to_bch2_time(c, inode_get_atime(&inode->v));
	bi->bi_mtime	= timespec_to_bch2_time(c, inode_get_mtime(&inode->v));
	bi->bi_ctime	= timespec_to_bch2_time(c, inode_get_ctime(&inode->v));

	return 0;
}

static int bch2_vfs_write_inode(struct inode *vinode,
				struct writeback_control *wbc)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *inode = to_bch_ei(vinode);
	int ret;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
			       ATTR_ATIME|ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	return bch2_err_class(ret);
}

static void bch2_evict_inode(struct inode *vinode)
{
	struct bch_fs *c = vinode->i_sb->s_fs_info;
	struct bch_inode_info *inode = to_bch_ei(vinode);
	bool delete = !inode->v.i_nlink && !is_bad_inode(&inode->v);

	/*
	 * evict() has waited for outstanding writeback, we'll do no more IO
	 * through this inode: it's safe to remove from VFS inode hashtable here
	 *
	 * Do that now so that other threads aren't blocked from pulling it back
	 * in, there's no reason for them to be:
	 */
	if (!delete)
		bch2_inode_hash_remove(c, inode);

	truncate_inode_pages_final(&inode->v.i_data);

	clear_inode(&inode->v);

	BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);

	if (delete) {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
				KEY_TYPE_QUOTA_WARN);
		bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
				KEY_TYPE_QUOTA_WARN);
		bch2_inode_rm(c, inode_inum(inode));

		/*
		 * If we are deleting, we need it present in the vfs hash table
		 * so that fsck can check if unlinked inodes are still open:
		 */
		bch2_inode_hash_remove(c, inode);
	}

	mutex_lock(&c->vfs_inodes_lock);
	list_del_init(&inode->ei_vfs_inode_list);
	mutex_unlock(&c->vfs_inodes_lock);
}

void bch2_evict_subvolume_inodes(struct bch_fs *c, snapshot_id_list *s)
{
	struct bch_inode_info *inode;
	DARRAY(struct bch_inode_info *) grabbed;
	bool clean_pass = false, this_pass_clean;

	/*
	 * Initially, we scan for inodes without I_DONTCACHE, then mark them to
	 * be pruned with d_mark_dontcache().
	 *
	 * Once we've had a clean pass where we didn't find any inodes without
	 * I_DONTCACHE, we wait for them to be freed:
	 */

	darray_init(&grabbed);
	darray_make_room(&grabbed, 1024);
again:
	cond_resched();
	this_pass_clean = true;

	mutex_lock(&c->vfs_inodes_lock);
	list_for_each_entry(inode, &c->vfs_inodes_list, ei_vfs_inode_list) {
		if (!snapshot_list_has_id(s, inode->ei_inum.subvol))
			continue;

		if (!(inode->v.i_state & I_DONTCACHE) &&
		    !(inode->v.i_state & I_FREEING) &&
		    igrab(&inode->v)) {
			this_pass_clean = false;

			if (darray_push_gfp(&grabbed, inode, GFP_ATOMIC|__GFP_NOWARN)) {
				iput(&inode->v);
				break;
			}
		} else if (clean_pass && this_pass_clean) {
			struct wait_bit_queue_entry wqe;
			struct wait_queue_head *wq_head;

			wq_head = inode_bit_waitqueue(&wqe, &inode->v, __I_NEW);
			prepare_to_wait_event(wq_head, &wqe.wq_entry,
					      TASK_UNINTERRUPTIBLE);
			mutex_unlock(&c->vfs_inodes_lock);

			schedule();
			finish_wait(wq_head, &wqe.wq_entry);
			goto again;
		}
	}
	mutex_unlock(&c->vfs_inodes_lock);

	darray_for_each(grabbed, i) {
		inode = *i;
		d_mark_dontcache(&inode->v);
		d_prune_aliases(&inode->v);
		iput(&inode->v);
	}
	grabbed.nr = 0;

	if (!clean_pass || !this_pass_clean) {
		clean_pass = this_pass_clean;
		goto again;
	}

	darray_exit(&grabbed);
}

static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct bch_fs *c = sb->s_fs_info;
	struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
	unsigned shift = sb->s_blocksize_bits - 9;
	/*
	 * this assumes inodes take up 64 bytes, which is a decent average
	 * number:
	 */
	u64 avail_inodes = ((usage.capacity - usage.used) << 3);

	buf->f_type	= BCACHEFS_STATFS_MAGIC;
	buf->f_bsize	= sb->s_blocksize;
	buf->f_blocks	= usage.capacity >> shift;
	buf->f_bfree	= usage.free >> shift;
	buf->f_bavail	= avail_factor(usage.free) >> shift;

	buf->f_files	= usage.nr_inodes + avail_inodes;
	buf->f_ffree	= avail_inodes;

	buf->f_fsid	= uuid_to_fsid(c->sb.user_uuid.b);
	buf->f_namelen	= BCH_NAME_MAX;

	return 0;
}

static int bch2_sync_fs(struct super_block *sb, int wait)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret;

	trace_bch2_sync_fs(sb, wait);

	if (c->opts.journal_flush_disabled)
		return 0;

	if (!wait) {
		bch2_journal_flush_async(&c->journal, NULL);
		return 0;
	}

	ret = bch2_journal_flush(&c->journal);
	return bch2_err_class(ret);
}

static struct bch_fs *bch2_path_to_fs(const char *path)
{
	struct bch_fs *c;
	dev_t dev;
	int ret;

	ret = lookup_bdev(path, &dev);
	if (ret)
		return ERR_PTR(ret);

	c = bch2_dev_to_fs(dev);
	if (c)
		closure_put(&c->cl);
	return c ?: ERR_PTR(-ENOENT);
}
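
/*
 * Remount handles only what can change on a live filesystem: the read-only
 * flag (by driving the full read-only/read-write state transitions below)
 * and the errors= behaviour; all other options were already applied when
 * the filesystem was first mounted.
 */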

static int bch2_remount(struct super_block *sb, int *flags,
			struct bch_opts opts)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret = 0;

	opt_set(opts, read_only, (*flags & SB_RDONLY) != 0);

	if (opts.read_only != c->opts.read_only) {
		down_write(&c->state_lock);

		if (opts.read_only) {
			bch2_fs_read_only(c);

			sb->s_flags |= SB_RDONLY;
		} else {
			ret = bch2_fs_read_write(c);
			if (ret) {
				bch_err(c, "error going rw: %i", ret);
				up_write(&c->state_lock);
				ret = -EINVAL;
				goto err;
			}

			sb->s_flags &= ~SB_RDONLY;
		}

		c->opts.read_only = opts.read_only;

		up_write(&c->state_lock);
	}

	if (opt_defined(opts, errors))
		c->opts.errors = opts.errors;
err:
	return bch2_err_class(ret);
}

static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
{
	struct bch_fs *c = root->d_sb->s_fs_info;
	bool first = true;

	for_each_online_member(c, ca) {
		if (!first)
			seq_putc(seq, ':');
		first = false;
		seq_puts(seq, ca->disk_sb.sb_name);
	}

	return 0;
}

static int bch2_show_options(struct seq_file *seq, struct dentry *root)
{
	struct bch_fs *c = root->d_sb->s_fs_info;
	struct printbuf buf = PRINTBUF;

	bch2_opts_to_text(&buf, c->opts, c, c->disk_sb.sb,
			  OPT_MOUNT, OPT_HIDDEN, OPT_SHOW_MOUNT_STYLE);
	printbuf_nul_terminate(&buf);
	seq_puts(seq, buf.buf);

	int ret = buf.allocation_failure ? -ENOMEM : 0;
	printbuf_exit(&buf);
	return ret;
}

static void bch2_put_super(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;

	__bch2_fs_stop(c);
}

/*
 * bcachefs doesn't currently integrate intwrite freeze protection but the
 * internal write references serve the same purpose. Therefore reuse the
 * read-only transition code to perform the quiesce. The caveat is that we don't
 * currently have the ability to block tasks that want a write reference while
 * the superblock is frozen. This is fine for now, but we should either add
 * blocking support or find a way to integrate sb_start_intwrite() and friends.
 */
static int bch2_freeze(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);
	return 0;
}

static int bch2_unfreeze(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;
	int ret;

	if (test_bit(BCH_FS_emergency_ro, &c->flags))
		return 0;

	down_write(&c->state_lock);
	ret = bch2_fs_read_write(c);
	up_write(&c->state_lock);
	return ret;
}

static const struct super_operations bch_super_operations = {
	.alloc_inode	= bch2_alloc_inode,
	.free_inode	= bch2_free_inode,
	.write_inode	= bch2_vfs_write_inode,
	.evict_inode	= bch2_evict_inode,
	.sync_fs	= bch2_sync_fs,
	.statfs		= bch2_statfs,
	.show_devname	= bch2_show_devname,
	.show_options	= bch2_show_options,
	.put_super	= bch2_put_super,
	.freeze_fs	= bch2_freeze,
	.unfreeze_fs	= bch2_unfreeze,
};

static int bch2_set_super(struct super_block *s, void *data)
{
	s->s_fs_info = data;
	return 0;
}

static int bch2_noset_super(struct super_block *s, void *data)
{
	return -EBUSY;
}

typedef DARRAY(struct bch_fs *) darray_fs;

static int bch2_test_super(struct super_block *s, void *data)
{
	struct bch_fs *c = s->s_fs_info;
	darray_fs *d = data;

	if (!c)
		return false;

	darray_for_each(*d, i)
		if (c != *i)
			return false;
	return true;
}
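
/*
 * Mount proper: the first sget() below searches for an existing superblock
 * already backed by all of the named devices; only if none is found do we
 * open the devices, start the filesystem, and attach a fresh superblock to
 * it with the second sget().
 */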
static int bch2_fs_get_tree(struct fs_context *fc)
{
	struct bch_fs *c;
	struct super_block *sb;
	struct inode *vinode;
	struct bch2_opts_parse *opts_parse = fc->fs_private;
	struct bch_opts opts = opts_parse->opts;
	darray_str devs;
	darray_fs devs_to_fs = {};
	int ret;

	opt_set(opts, read_only, (fc->sb_flags & SB_RDONLY) != 0);
	opt_set(opts, nostart, true);

	if (!fc->source || strlen(fc->source) == 0)
		return -EINVAL;

	ret = bch2_split_devs(fc->source, &devs);
	if (ret)
		return ret;

	darray_for_each(devs, i) {
		ret = darray_push(&devs_to_fs, bch2_path_to_fs(*i));
		if (ret)
			goto err;
	}

	sb = sget(fc->fs_type, bch2_test_super, bch2_noset_super, fc->sb_flags|SB_NOSEC, &devs_to_fs);
	if (!IS_ERR(sb))
		goto got_sb;

	c = bch2_fs_open(devs.data, devs.nr, opts);
	ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		goto err;

	/* Some options can't be parsed until after the fs is started: */
	opts = bch2_opts_empty();
	ret = bch2_parse_mount_opts(c, &opts, NULL, opts_parse->parse_later.buf);
	if (ret)
		goto err_stop_fs;

	bch2_opts_apply(&c->opts, opts);

	ret = bch2_fs_start(c);
	if (ret)
		goto err_stop_fs;

	sb = sget(fc->fs_type, NULL, bch2_set_super, fc->sb_flags|SB_NOSEC, c);
	ret = PTR_ERR_OR_ZERO(sb);
	if (ret)
		goto err_stop_fs;
got_sb:
	c = sb->s_fs_info;

	if (sb->s_root) {
		if ((fc->sb_flags ^ sb->s_flags) & SB_RDONLY) {
			ret = -EBUSY;
			goto err_put_super;
		}
		goto out;
	}

	sb->s_blocksize		= block_bytes(c);
	sb->s_blocksize_bits	= ilog2(block_bytes(c));
	sb->s_maxbytes		= MAX_LFS_FILESIZE;
	sb->s_op		= &bch_super_operations;
	sb->s_export_op		= &bch_export_ops;
#ifdef CONFIG_BCACHEFS_QUOTA
	sb->s_qcop		= &bch2_quotactl_operations;
	sb->s_quota_types	= QTYPE_MASK_USR|QTYPE_MASK_GRP|QTYPE_MASK_PRJ;
#endif
	sb->s_xattr		= bch2_xattr_handlers;
	sb->s_magic		= BCACHEFS_STATFS_MAGIC;
	sb->s_time_gran		= c->sb.nsec_per_time_unit;
	sb->s_time_min		= div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
	sb->s_time_max		= div_s64(S64_MAX, c->sb.time_units_per_sec);
	sb->s_uuid		= c->sb.user_uuid;
	sb->s_shrink->seeks	= 0;
	c->vfs_sb		= sb;
	strscpy(sb->s_id, c->name, sizeof(sb->s_id));

	ret = super_setup_bdi(sb);
	if (ret)
		goto err_put_super;

	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;

	for_each_online_member(c, ca) {
		struct block_device *bdev = ca->disk_sb.bdev;

		/* XXX: create an anonymous device for multi device filesystems */
		sb->s_bdev	= bdev;
		sb->s_dev	= bdev->bd_dev;
		percpu_ref_put(&ca->io_ref);
		break;
	}

	c->dev = sb->s_dev;

#ifdef CONFIG_BCACHEFS_POSIX_ACL
	if (c->opts.acl)
		sb->s_flags	|= SB_POSIXACL;
#endif

	vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
	ret = PTR_ERR_OR_ZERO(vinode);
	bch_err_msg(c, ret, "mounting: error getting root inode");
	if (ret)
		goto err_put_super;

	sb->s_root = d_make_root(vinode);
	if (!sb->s_root) {
		bch_err(c, "error mounting: error allocating root dentry");
		ret = -ENOMEM;
		goto err_put_super;
	}

	sb->s_flags |= SB_ACTIVE;
out:
	fc->root = dget(sb->s_root);
err:
	darray_exit(&devs_to_fs);
	bch2_darray_str_exit(&devs);
	if (ret)
		pr_err("error: %s", bch2_err_str(ret));
	/*
	 * On an inconsistency error in recovery we might see an -EROFS derived
	 * errorcode (from the journal), but we don't want to return that to
	 * userspace as that causes util-linux to retry the mount RO - which is
	 * confusing:
	 */
	if (bch2_err_matches(ret, EROFS) && ret != -EROFS)
		ret = -EIO;
	return bch2_err_class(ret);

err_stop_fs:
	bch2_fs_stop(c);
	goto err;

err_put_super:
	__bch2_fs_stop(c);
	deactivate_locked_super(sb);
	goto err;
}

static void bch2_kill_sb(struct super_block *sb)
{
	struct bch_fs *c = sb->s_fs_info;

	generic_shutdown_super(sb);
	bch2_fs_free(c);
}

static void bch2_fs_context_free(struct fs_context *fc)
{
	struct bch2_opts_parse *opts = fc->fs_private;

	if (opts) {
		printbuf_exit(&opts->parse_later);
		kfree(opts);
	}
}

static int bch2_fs_parse_param(struct fs_context *fc,
			       struct fs_parameter *param)
{
	/*
	 * the "source" param, i.e., the name of the device(s) to mount,
	 * is handled by the VFS layer.
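	 * Returning -ENOPARAM for it below lets vfs_parse_fs_param() fall
	 * back to its generic source handling (vfs_parse_fs_param_source()).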
2208 */ 2209 if (!strcmp(param->key, "source")) 2210 return -ENOPARAM; 2211 2212 struct bch2_opts_parse *opts = fc->fs_private; 2213 struct bch_fs *c = NULL; 2214 2215 /* for reconfigure, we already have a struct bch_fs */ 2216 if (fc->root) 2217 c = fc->root->d_sb->s_fs_info; 2218 2219 int ret = bch2_parse_one_mount_opt(c, &opts->opts, 2220 &opts->parse_later, param->key, 2221 param->string); 2222 2223 return bch2_err_class(ret); 2224 } 2225 2226 static int bch2_fs_reconfigure(struct fs_context *fc) 2227 { 2228 struct super_block *sb = fc->root->d_sb; 2229 struct bch2_opts_parse *opts = fc->fs_private; 2230 2231 return bch2_remount(sb, &fc->sb_flags, opts->opts); 2232 } 2233 2234 static const struct fs_context_operations bch2_context_ops = { 2235 .free = bch2_fs_context_free, 2236 .parse_param = bch2_fs_parse_param, 2237 .get_tree = bch2_fs_get_tree, 2238 .reconfigure = bch2_fs_reconfigure, 2239 }; 2240 2241 static int bch2_init_fs_context(struct fs_context *fc) 2242 { 2243 struct bch2_opts_parse *opts = kzalloc(sizeof(*opts), GFP_KERNEL); 2244 2245 if (!opts) 2246 return -ENOMEM; 2247 2248 opts->parse_later = PRINTBUF; 2249 2250 fc->ops = &bch2_context_ops; 2251 fc->fs_private = opts; 2252 2253 return 0; 2254 } 2255 2256 void bch2_fs_vfs_exit(struct bch_fs *c) 2257 { 2258 if (c->vfs_inodes_table.tbl) 2259 rhashtable_destroy(&c->vfs_inodes_table); 2260 } 2261 2262 int bch2_fs_vfs_init(struct bch_fs *c) 2263 { 2264 return rhashtable_init(&c->vfs_inodes_table, &bch2_vfs_inodes_params); 2265 } 2266 2267 static struct file_system_type bcache_fs_type = { 2268 .owner = THIS_MODULE, 2269 .name = "bcachefs", 2270 .init_fs_context = bch2_init_fs_context, 2271 .kill_sb = bch2_kill_sb, 2272 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, 2273 }; 2274 2275 MODULE_ALIAS_FS("bcachefs"); 2276 2277 void bch2_vfs_exit(void) 2278 { 2279 unregister_filesystem(&bcache_fs_type); 2280 kmem_cache_destroy(bch2_inode_cache); 2281 } 2282 2283 int __init bch2_vfs_init(void) 2284 { 2285 int ret = -ENOMEM; 2286 2287 bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT | 2288 SLAB_ACCOUNT); 2289 if (!bch2_inode_cache) 2290 goto err; 2291 2292 ret = register_filesystem(&bcache_fs_type); 2293 if (ret) 2294 goto err; 2295 2296 return 0; 2297 err: 2298 bch2_vfs_exit(); 2299 return ret; 2300 } 2301 2302 #endif /* NO_BCACHEFS_FS */ 2303