// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_write_buffer.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "buckets.h"
#include "compress.h"
#include "dirent.h"
#include "disk_accounting.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "fs.h"
#include "inode.h"
#include "str_hash.h"
#include "snapshot.h"
#include "subvolume.h"
#include "varint.h"

#include <linux/random.h>

#include <linux/unaligned.h>

/* Expand each x() entry of the field lists into its name string: */
#define x(name, ...) #name,
const char * const bch2_inode_opts[] = {
	BCH_INODE_OPTS()
	NULL,
};

static const char * const bch2_inode_flag_strs[] = {
	BCH_INODE_FLAGS()
	NULL
};
#undef x

static int delete_ancestor_snapshot_inodes(struct btree_trans *, struct bpos);

/*
 * Total encoded length, in bytes, of an old-style (pre-varint) inode field,
 * indexed by (shift - 1) where shift is derived from the position of the
 * highest set bit in the first byte (see inode_decode_field()).
 */
static const u8 byte_table[8] = { 1, 2, 3, 4, 6, 8, 10, 13 };

/*
 * Decode one old-style variable-length inode field from [in, end).
 *
 * Values up to 128 bits are supported: out[0] receives the high 64 bits,
 * out[1] the low 64 bits; *out_bits is set to the position of the highest
 * set bit of the decoded value.
 *
 * Returns the number of input bytes consumed, or -1 if the input is
 * truncated or malformed (zero first byte has no set bit to key off of).
 */
static int inode_decode_field(const u8 *in, const u8 *end,
			      u64 out[2], unsigned *out_bits)
{
	__be64 be[2] = { 0, 0 };
	unsigned bytes, shift;
	u8 *p;

	if (in >= end)
		return -1;

	if (!*in)
		return -1;

	/*
	 * position of highest set bit indicates number of bytes:
	 * shift = number of bits to remove in high byte:
	 */
	shift = 8 - __fls(*in); /* 1 <= shift <= 8 */
	bytes = byte_table[shift - 1];

	if (in + bytes > end)
		return -1;

	/* Copy into the tail of be[] so the value is right-aligned: */
	p = (u8 *) be + 16 - bytes;
	memcpy(p, in, bytes);
	/* Strip the length-marker bit from the high byte: */
	*p ^= (1 << 8) >> shift;

	out[0] = be64_to_cpu(be[0]);
	out[1] = be64_to_cpu(be[1]);
	*out_bits = out[0] ?
64 + fls64(out[0]) : fls64(out[1]); 72 73 return bytes; 74 } 75 76 static inline void bch2_inode_pack_inlined(struct bkey_inode_buf *packed, 77 const struct bch_inode_unpacked *inode) 78 { 79 struct bkey_i_inode_v3 *k = &packed->inode; 80 u8 *out = k->v.fields; 81 u8 *end = (void *) &packed[1]; 82 u8 *last_nonzero_field = out; 83 unsigned nr_fields = 0, last_nonzero_fieldnr = 0; 84 unsigned bytes; 85 int ret; 86 87 bkey_inode_v3_init(&packed->inode.k_i); 88 packed->inode.k.p.offset = inode->bi_inum; 89 packed->inode.v.bi_journal_seq = cpu_to_le64(inode->bi_journal_seq); 90 packed->inode.v.bi_hash_seed = inode->bi_hash_seed; 91 packed->inode.v.bi_flags = cpu_to_le64(inode->bi_flags); 92 packed->inode.v.bi_sectors = cpu_to_le64(inode->bi_sectors); 93 packed->inode.v.bi_size = cpu_to_le64(inode->bi_size); 94 packed->inode.v.bi_version = cpu_to_le64(inode->bi_version); 95 SET_INODEv3_MODE(&packed->inode.v, inode->bi_mode); 96 SET_INODEv3_FIELDS_START(&packed->inode.v, INODEv3_FIELDS_START_CUR); 97 98 99 #define x(_name, _bits) \ 100 nr_fields++; \ 101 \ 102 if (inode->_name) { \ 103 ret = bch2_varint_encode_fast(out, inode->_name); \ 104 out += ret; \ 105 \ 106 if (_bits > 64) \ 107 *out++ = 0; \ 108 \ 109 last_nonzero_field = out; \ 110 last_nonzero_fieldnr = nr_fields; \ 111 } else { \ 112 *out++ = 0; \ 113 \ 114 if (_bits > 64) \ 115 *out++ = 0; \ 116 } 117 118 BCH_INODE_FIELDS_v3() 119 #undef x 120 BUG_ON(out > end); 121 122 out = last_nonzero_field; 123 nr_fields = last_nonzero_fieldnr; 124 125 bytes = out - (u8 *) &packed->inode.v; 126 set_bkey_val_bytes(&packed->inode.k, bytes); 127 memset_u64s_tail(&packed->inode.v, 0, bytes); 128 129 SET_INODEv3_NR_FIELDS(&k->v, nr_fields); 130 131 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { 132 struct bch_inode_unpacked unpacked; 133 134 ret = bch2_inode_unpack(bkey_i_to_s_c(&packed->inode.k_i), &unpacked); 135 BUG_ON(ret); 136 BUG_ON(unpacked.bi_inum != inode->bi_inum); 137 BUG_ON(unpacked.bi_hash_seed != inode->bi_hash_seed); 
138 BUG_ON(unpacked.bi_sectors != inode->bi_sectors); 139 BUG_ON(unpacked.bi_size != inode->bi_size); 140 BUG_ON(unpacked.bi_version != inode->bi_version); 141 BUG_ON(unpacked.bi_mode != inode->bi_mode); 142 143 #define x(_name, _bits) if (unpacked._name != inode->_name) \ 144 panic("unpacked %llu should be %llu", \ 145 (u64) unpacked._name, (u64) inode->_name); 146 BCH_INODE_FIELDS_v3() 147 #undef x 148 } 149 } 150 151 void bch2_inode_pack(struct bkey_inode_buf *packed, 152 const struct bch_inode_unpacked *inode) 153 { 154 bch2_inode_pack_inlined(packed, inode); 155 } 156 157 static noinline int bch2_inode_unpack_v1(struct bkey_s_c_inode inode, 158 struct bch_inode_unpacked *unpacked) 159 { 160 const u8 *in = inode.v->fields; 161 const u8 *end = bkey_val_end(inode); 162 u64 field[2]; 163 unsigned fieldnr = 0, field_bits; 164 int ret; 165 166 #define x(_name, _bits) \ 167 if (fieldnr++ == INODE_NR_FIELDS(inode.v)) { \ 168 unsigned offset = offsetof(struct bch_inode_unpacked, _name);\ 169 memset((void *) unpacked + offset, 0, \ 170 sizeof(*unpacked) - offset); \ 171 return 0; \ 172 } \ 173 \ 174 ret = inode_decode_field(in, end, field, &field_bits); \ 175 if (ret < 0) \ 176 return ret; \ 177 \ 178 if (field_bits > sizeof(unpacked->_name) * 8) \ 179 return -1; \ 180 \ 181 unpacked->_name = field[1]; \ 182 in += ret; 183 184 BCH_INODE_FIELDS_v2() 185 #undef x 186 187 /* XXX: signal if there were more fields than expected? 
*/ 188 return 0; 189 } 190 191 static int bch2_inode_unpack_v2(struct bch_inode_unpacked *unpacked, 192 const u8 *in, const u8 *end, 193 unsigned nr_fields) 194 { 195 unsigned fieldnr = 0; 196 int ret; 197 u64 v[2]; 198 199 #define x(_name, _bits) \ 200 if (fieldnr < nr_fields) { \ 201 ret = bch2_varint_decode_fast(in, end, &v[0]); \ 202 if (ret < 0) \ 203 return ret; \ 204 in += ret; \ 205 \ 206 if (_bits > 64) { \ 207 ret = bch2_varint_decode_fast(in, end, &v[1]); \ 208 if (ret < 0) \ 209 return ret; \ 210 in += ret; \ 211 } else { \ 212 v[1] = 0; \ 213 } \ 214 } else { \ 215 v[0] = v[1] = 0; \ 216 } \ 217 \ 218 unpacked->_name = v[0]; \ 219 if (v[1] || v[0] != unpacked->_name) \ 220 return -1; \ 221 fieldnr++; 222 223 BCH_INODE_FIELDS_v2() 224 #undef x 225 226 /* XXX: signal if there were more fields than expected? */ 227 return 0; 228 } 229 230 static int bch2_inode_unpack_v3(struct bkey_s_c k, 231 struct bch_inode_unpacked *unpacked) 232 { 233 struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k); 234 const u8 *in = inode.v->fields; 235 const u8 *end = bkey_val_end(inode); 236 unsigned nr_fields = INODEv3_NR_FIELDS(inode.v); 237 unsigned fieldnr = 0; 238 int ret; 239 u64 v[2]; 240 241 unpacked->bi_inum = inode.k->p.offset; 242 unpacked->bi_journal_seq= le64_to_cpu(inode.v->bi_journal_seq); 243 unpacked->bi_hash_seed = inode.v->bi_hash_seed; 244 unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags); 245 unpacked->bi_sectors = le64_to_cpu(inode.v->bi_sectors); 246 unpacked->bi_size = le64_to_cpu(inode.v->bi_size); 247 unpacked->bi_version = le64_to_cpu(inode.v->bi_version); 248 unpacked->bi_mode = INODEv3_MODE(inode.v); 249 250 #define x(_name, _bits) \ 251 if (fieldnr < nr_fields) { \ 252 ret = bch2_varint_decode_fast(in, end, &v[0]); \ 253 if (ret < 0) \ 254 return ret; \ 255 in += ret; \ 256 \ 257 if (_bits > 64) { \ 258 ret = bch2_varint_decode_fast(in, end, &v[1]); \ 259 if (ret < 0) \ 260 return ret; \ 261 in += ret; \ 262 } else { \ 263 v[1] = 0; \ 264 } 
\ 265 } else { \ 266 v[0] = v[1] = 0; \ 267 } \ 268 \ 269 unpacked->_name = v[0]; \ 270 if (v[1] || v[0] != unpacked->_name) \ 271 return -1; \ 272 fieldnr++; 273 274 BCH_INODE_FIELDS_v3() 275 #undef x 276 277 /* XXX: signal if there were more fields than expected? */ 278 return 0; 279 } 280 281 static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k, 282 struct bch_inode_unpacked *unpacked) 283 { 284 memset(unpacked, 0, sizeof(*unpacked)); 285 286 switch (k.k->type) { 287 case KEY_TYPE_inode: { 288 struct bkey_s_c_inode inode = bkey_s_c_to_inode(k); 289 290 unpacked->bi_inum = inode.k->p.offset; 291 unpacked->bi_journal_seq= 0; 292 unpacked->bi_hash_seed = inode.v->bi_hash_seed; 293 unpacked->bi_flags = le32_to_cpu(inode.v->bi_flags); 294 unpacked->bi_mode = le16_to_cpu(inode.v->bi_mode); 295 296 if (INODE_NEW_VARINT(inode.v)) { 297 return bch2_inode_unpack_v2(unpacked, inode.v->fields, 298 bkey_val_end(inode), 299 INODE_NR_FIELDS(inode.v)); 300 } else { 301 return bch2_inode_unpack_v1(inode, unpacked); 302 } 303 break; 304 } 305 case KEY_TYPE_inode_v2: { 306 struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k); 307 308 unpacked->bi_inum = inode.k->p.offset; 309 unpacked->bi_journal_seq= le64_to_cpu(inode.v->bi_journal_seq); 310 unpacked->bi_hash_seed = inode.v->bi_hash_seed; 311 unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags); 312 unpacked->bi_mode = le16_to_cpu(inode.v->bi_mode); 313 314 return bch2_inode_unpack_v2(unpacked, inode.v->fields, 315 bkey_val_end(inode), 316 INODEv2_NR_FIELDS(inode.v)); 317 } 318 default: 319 BUG(); 320 } 321 } 322 323 int bch2_inode_unpack(struct bkey_s_c k, 324 struct bch_inode_unpacked *unpacked) 325 { 326 unpacked->bi_snapshot = k.k->p.snapshot; 327 328 return likely(k.k->type == KEY_TYPE_inode_v3) 329 ? 
bch2_inode_unpack_v3(k, unpacked) 330 : bch2_inode_unpack_slowpath(k, unpacked); 331 } 332 333 int __bch2_inode_peek(struct btree_trans *trans, 334 struct btree_iter *iter, 335 struct bch_inode_unpacked *inode, 336 subvol_inum inum, unsigned flags, 337 bool warn) 338 { 339 u32 snapshot; 340 int ret = __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot, warn); 341 if (ret) 342 return ret; 343 344 struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_inodes, 345 SPOS(0, inum.inum, snapshot), 346 flags|BTREE_ITER_cached); 347 ret = bkey_err(k); 348 if (ret) 349 return ret; 350 351 ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode; 352 if (ret) 353 goto err; 354 355 ret = bch2_inode_unpack(k, inode); 356 if (ret) 357 goto err; 358 359 return 0; 360 err: 361 if (warn) 362 bch_err_msg(trans->c, ret, "looking up inum %llu:%llu:", inum.subvol, inum.inum); 363 bch2_trans_iter_exit(trans, iter); 364 return ret; 365 } 366 367 int bch2_inode_write_flags(struct btree_trans *trans, 368 struct btree_iter *iter, 369 struct bch_inode_unpacked *inode, 370 enum btree_iter_update_trigger_flags flags) 371 { 372 struct bkey_inode_buf *inode_p; 373 374 inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p)); 375 if (IS_ERR(inode_p)) 376 return PTR_ERR(inode_p); 377 378 bch2_inode_pack_inlined(inode_p, inode); 379 inode_p->inode.k.p.snapshot = iter->snapshot; 380 return bch2_trans_update(trans, iter, &inode_p->inode.k_i, flags); 381 } 382 383 int __bch2_fsck_write_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode) 384 { 385 struct bkey_inode_buf *inode_p = 386 bch2_trans_kmalloc(trans, sizeof(*inode_p)); 387 388 if (IS_ERR(inode_p)) 389 return PTR_ERR(inode_p); 390 391 bch2_inode_pack(inode_p, inode); 392 inode_p->inode.k.p.snapshot = inode->bi_snapshot; 393 394 return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes, 395 &inode_p->inode.k_i, 396 BTREE_UPDATE_internal_snapshot_node); 397 } 398 399 int bch2_fsck_write_inode(struct btree_trans *trans, 
struct bch_inode_unpacked *inode) 400 { 401 int ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, 402 __bch2_fsck_write_inode(trans, inode)); 403 bch_err_fn(trans->c, ret); 404 return ret; 405 } 406 407 struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k) 408 { 409 struct bch_inode_unpacked u; 410 struct bkey_inode_buf *inode_p; 411 int ret; 412 413 if (!bkey_is_inode(&k->k)) 414 return ERR_PTR(-ENOENT); 415 416 inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p)); 417 if (IS_ERR(inode_p)) 418 return ERR_CAST(inode_p); 419 420 ret = bch2_inode_unpack(bkey_i_to_s_c(k), &u); 421 if (ret) 422 return ERR_PTR(ret); 423 424 bch2_inode_pack(inode_p, &u); 425 return &inode_p->inode.k_i; 426 } 427 428 static int __bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k, 429 enum bch_validate_flags flags) 430 { 431 struct bch_inode_unpacked unpacked; 432 int ret = 0; 433 434 bkey_fsck_err_on(k.k->p.inode, 435 c, inode_pos_inode_nonzero, 436 "nonzero k.p.inode"); 437 438 bkey_fsck_err_on(k.k->p.offset < BLOCKDEV_INODE_MAX, 439 c, inode_pos_blockdev_range, 440 "fs inode in blockdev range"); 441 442 bkey_fsck_err_on(bch2_inode_unpack(k, &unpacked), 443 c, inode_unpack_error, 444 "invalid variable length fields"); 445 446 bkey_fsck_err_on(unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1, 447 c, inode_checksum_type_invalid, 448 "invalid data checksum type (%u >= %u", 449 unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1); 450 451 bkey_fsck_err_on(unpacked.bi_compression && 452 !bch2_compression_opt_valid(unpacked.bi_compression - 1), 453 c, inode_compression_type_invalid, 454 "invalid compression opt %u", unpacked.bi_compression - 1); 455 456 bkey_fsck_err_on((unpacked.bi_flags & BCH_INODE_unlinked) && 457 unpacked.bi_nlink != 0, 458 c, inode_unlinked_but_nlink_nonzero, 459 "flagged as unlinked but bi_nlink != 0"); 460 461 bkey_fsck_err_on(unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode), 462 c, inode_subvol_root_but_not_dir, 463 "subvolume root 
but not a directory"); 464 fsck_err: 465 return ret; 466 } 467 468 int bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k, 469 enum bch_validate_flags flags) 470 { 471 struct bkey_s_c_inode inode = bkey_s_c_to_inode(k); 472 int ret = 0; 473 474 bkey_fsck_err_on(INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR, 475 c, inode_str_hash_invalid, 476 "invalid str hash type (%llu >= %u)", 477 INODE_STR_HASH(inode.v), BCH_STR_HASH_NR); 478 479 ret = __bch2_inode_validate(c, k, flags); 480 fsck_err: 481 return ret; 482 } 483 484 int bch2_inode_v2_validate(struct bch_fs *c, struct bkey_s_c k, 485 enum bch_validate_flags flags) 486 { 487 struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k); 488 int ret = 0; 489 490 bkey_fsck_err_on(INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR, 491 c, inode_str_hash_invalid, 492 "invalid str hash type (%llu >= %u)", 493 INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR); 494 495 ret = __bch2_inode_validate(c, k, flags); 496 fsck_err: 497 return ret; 498 } 499 500 int bch2_inode_v3_validate(struct bch_fs *c, struct bkey_s_c k, 501 enum bch_validate_flags flags) 502 { 503 struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k); 504 int ret = 0; 505 506 bkey_fsck_err_on(INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL || 507 INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k), 508 c, inode_v3_fields_start_bad, 509 "invalid fields_start (got %llu, min %u max %zu)", 510 INODEv3_FIELDS_START(inode.v), 511 INODEv3_FIELDS_START_INITIAL, 512 bkey_val_u64s(inode.k)); 513 514 bkey_fsck_err_on(INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR, 515 c, inode_str_hash_invalid, 516 "invalid str hash type (%llu >= %u)", 517 INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR); 518 519 ret = __bch2_inode_validate(c, k, flags); 520 fsck_err: 521 return ret; 522 } 523 524 static void __bch2_inode_unpacked_to_text(struct printbuf *out, 525 struct bch_inode_unpacked *inode) 526 { 527 prt_printf(out, "\n"); 528 printbuf_indent_add(out, 2); 529 prt_printf(out, 
"mode=%o\n", inode->bi_mode); 530 531 prt_str(out, "flags="); 532 prt_bitflags(out, bch2_inode_flag_strs, inode->bi_flags & ((1U << 20) - 1)); 533 prt_printf(out, "(%x)\n", inode->bi_flags); 534 535 prt_printf(out, "journal_seq=%llu\n", inode->bi_journal_seq); 536 prt_printf(out, "bi_size=%llu\n", inode->bi_size); 537 prt_printf(out, "bi_sectors=%llu\n", inode->bi_sectors); 538 prt_printf(out, "bi_version=%llu\n", inode->bi_version); 539 540 #define x(_name, _bits) \ 541 prt_printf(out, #_name "=%llu\n", (u64) inode->_name); 542 BCH_INODE_FIELDS_v3() 543 #undef x 544 545 bch2_printbuf_strip_trailing_newline(out); 546 printbuf_indent_sub(out, 2); 547 } 548 549 void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode) 550 { 551 prt_printf(out, "inum: %llu:%u ", inode->bi_inum, inode->bi_snapshot); 552 __bch2_inode_unpacked_to_text(out, inode); 553 } 554 555 void bch2_inode_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) 556 { 557 struct bch_inode_unpacked inode; 558 559 if (bch2_inode_unpack(k, &inode)) { 560 prt_printf(out, "(unpack error)"); 561 return; 562 } 563 564 __bch2_inode_unpacked_to_text(out, &inode); 565 } 566 567 static inline u64 bkey_inode_flags(struct bkey_s_c k) 568 { 569 switch (k.k->type) { 570 case KEY_TYPE_inode: 571 return le32_to_cpu(bkey_s_c_to_inode(k).v->bi_flags); 572 case KEY_TYPE_inode_v2: 573 return le64_to_cpu(bkey_s_c_to_inode_v2(k).v->bi_flags); 574 case KEY_TYPE_inode_v3: 575 return le64_to_cpu(bkey_s_c_to_inode_v3(k).v->bi_flags); 576 default: 577 return 0; 578 } 579 } 580 581 static inline void bkey_inode_flags_set(struct bkey_s k, u64 f) 582 { 583 switch (k.k->type) { 584 case KEY_TYPE_inode: 585 bkey_s_to_inode(k).v->bi_flags = cpu_to_le32(f); 586 return; 587 case KEY_TYPE_inode_v2: 588 bkey_s_to_inode_v2(k).v->bi_flags = cpu_to_le64(f); 589 return; 590 case KEY_TYPE_inode_v3: 591 bkey_s_to_inode_v3(k).v->bi_flags = cpu_to_le64(f); 592 return; 593 default: 594 BUG(); 595 } 596 } 
597 598 static inline bool bkey_is_unlinked_inode(struct bkey_s_c k) 599 { 600 unsigned f = bkey_inode_flags(k) & BCH_INODE_unlinked; 601 602 return (f & BCH_INODE_unlinked) && !(f & BCH_INODE_has_child_snapshot); 603 } 604 605 static struct bkey_s_c 606 bch2_bkey_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter *iter, 607 enum btree_id btree, struct bpos pos, 608 unsigned flags) 609 { 610 struct bch_fs *c = trans->c; 611 struct bkey_s_c k; 612 int ret = 0; 613 614 for_each_btree_key_upto_norestart(trans, *iter, btree, 615 bpos_successor(pos), 616 SPOS(pos.inode, pos.offset, U32_MAX), 617 flags|BTREE_ITER_all_snapshots, k, ret) 618 if (bch2_snapshot_is_ancestor(c, pos.snapshot, k.k->p.snapshot)) 619 return k; 620 621 bch2_trans_iter_exit(trans, iter); 622 return ret ? bkey_s_c_err(ret) : bkey_s_c_null; 623 } 624 625 static struct bkey_s_c 626 bch2_inode_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter *iter, 627 struct bpos pos, unsigned flags) 628 { 629 struct bkey_s_c k; 630 again: 631 k = bch2_bkey_get_iter_snapshot_parent(trans, iter, BTREE_ID_inodes, pos, flags); 632 if (!k.k || 633 bkey_err(k) || 634 bkey_is_inode(k.k)) 635 return k; 636 637 bch2_trans_iter_exit(trans, iter); 638 pos = k.k->p; 639 goto again; 640 } 641 642 int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos) 643 { 644 struct bch_fs *c = trans->c; 645 struct btree_iter iter; 646 struct bkey_s_c k; 647 int ret = 0; 648 649 for_each_btree_key_upto_norestart(trans, iter, 650 BTREE_ID_inodes, POS(0, pos.offset), bpos_predecessor(pos), 651 BTREE_ITER_all_snapshots| 652 BTREE_ITER_with_updates, k, ret) 653 if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot) && 654 bkey_is_inode(k.k)) { 655 ret = 1; 656 break; 657 } 658 bch2_trans_iter_exit(trans, &iter); 659 return ret; 660 } 661 662 static int update_inode_has_children(struct btree_trans *trans, 663 struct bkey_s k, 664 bool have_child) 665 { 666 if (!have_child) { 
667 int ret = bch2_inode_has_child_snapshots(trans, k.k->p); 668 if (ret) 669 return ret < 0 ? ret : 0; 670 } 671 672 u64 f = bkey_inode_flags(k.s_c); 673 if (have_child != !!(f & BCH_INODE_has_child_snapshot)) 674 bkey_inode_flags_set(k, f ^ BCH_INODE_has_child_snapshot); 675 676 return 0; 677 } 678 679 static int update_parent_inode_has_children(struct btree_trans *trans, struct bpos pos, 680 bool have_child) 681 { 682 struct btree_iter iter; 683 struct bkey_s_c k = bch2_inode_get_iter_snapshot_parent(trans, 684 &iter, pos, BTREE_ITER_with_updates); 685 int ret = bkey_err(k); 686 if (ret) 687 return ret; 688 if (!k.k) 689 return 0; 690 691 if (!have_child) { 692 ret = bch2_inode_has_child_snapshots(trans, k.k->p); 693 if (ret) { 694 ret = ret < 0 ? ret : 0; 695 goto err; 696 } 697 } 698 699 u64 f = bkey_inode_flags(k); 700 if (have_child != !!(f & BCH_INODE_has_child_snapshot)) { 701 struct bkey_i *update = bch2_bkey_make_mut(trans, &iter, &k, 702 BTREE_UPDATE_internal_snapshot_node); 703 ret = PTR_ERR_OR_ZERO(update); 704 if (ret) 705 goto err; 706 707 bkey_inode_flags_set(bkey_i_to_s(update), f ^ BCH_INODE_has_child_snapshot); 708 } 709 err: 710 bch2_trans_iter_exit(trans, &iter); 711 return ret; 712 } 713 714 int bch2_trigger_inode(struct btree_trans *trans, 715 enum btree_id btree_id, unsigned level, 716 struct bkey_s_c old, 717 struct bkey_s new, 718 enum btree_iter_update_trigger_flags flags) 719 { 720 struct bch_fs *c = trans->c; 721 722 if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) { 723 BUG_ON(!trans->journal_res.seq); 724 bkey_s_to_inode_v3(new).v->bi_journal_seq = cpu_to_le64(trans->journal_res.seq); 725 } 726 727 s64 nr = bkey_is_inode(new.k) - bkey_is_inode(old.k); 728 if ((flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) && nr) { 729 struct disk_accounting_pos acc = { .type = BCH_DISK_ACCOUNTING_nr_inodes }; 730 int ret = bch2_disk_accounting_mod(trans, &acc, &nr, 1, flags & BTREE_TRIGGER_gc); 731 if (ret) 732 return 
ret; 733 } 734 735 if (flags & BTREE_TRIGGER_transactional) { 736 int unlinked_delta = (int) bkey_is_unlinked_inode(new.s_c) - 737 (int) bkey_is_unlinked_inode(old); 738 if (unlinked_delta) { 739 int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, 740 new.k->p, unlinked_delta > 0); 741 if (ret) 742 return ret; 743 } 744 745 /* 746 * If we're creating or deleting an inode at this snapshot ID, 747 * and there might be an inode in a parent snapshot ID, we might 748 * need to set or clear the has_child_snapshot flag on the 749 * parent. 750 */ 751 int deleted_delta = (int) bkey_is_inode(new.k) - 752 (int) bkey_is_inode(old.k); 753 if (deleted_delta && 754 bch2_snapshot_parent(c, new.k->p.snapshot)) { 755 int ret = update_parent_inode_has_children(trans, new.k->p, 756 deleted_delta > 0); 757 if (ret) 758 return ret; 759 } 760 761 /* 762 * When an inode is first updated in a new snapshot, we may need 763 * to clear has_child_snapshot 764 */ 765 if (deleted_delta > 0) { 766 int ret = update_inode_has_children(trans, new, false); 767 if (ret) 768 return ret; 769 } 770 } 771 772 return 0; 773 } 774 775 int bch2_inode_generation_validate(struct bch_fs *c, struct bkey_s_c k, 776 enum bch_validate_flags flags) 777 { 778 int ret = 0; 779 780 bkey_fsck_err_on(k.k->p.inode, 781 c, inode_pos_inode_nonzero, 782 "nonzero k.p.inode"); 783 fsck_err: 784 return ret; 785 } 786 787 void bch2_inode_generation_to_text(struct printbuf *out, struct bch_fs *c, 788 struct bkey_s_c k) 789 { 790 struct bkey_s_c_inode_generation gen = bkey_s_c_to_inode_generation(k); 791 792 prt_printf(out, "generation: %u", le32_to_cpu(gen.v->bi_generation)); 793 } 794 795 void bch2_inode_init_early(struct bch_fs *c, 796 struct bch_inode_unpacked *inode_u) 797 { 798 enum bch_str_hash_type str_hash = 799 bch2_str_hash_opt_to_type(c, c->opts.str_hash); 800 801 memset(inode_u, 0, sizeof(*inode_u)); 802 803 /* ick */ 804 inode_u->bi_flags |= str_hash << INODE_STR_HASH_OFFSET; 805 
get_random_bytes(&inode_u->bi_hash_seed, 806 sizeof(inode_u->bi_hash_seed)); 807 } 808 809 void bch2_inode_init_late(struct bch_inode_unpacked *inode_u, u64 now, 810 uid_t uid, gid_t gid, umode_t mode, dev_t rdev, 811 struct bch_inode_unpacked *parent) 812 { 813 inode_u->bi_mode = mode; 814 inode_u->bi_uid = uid; 815 inode_u->bi_gid = gid; 816 inode_u->bi_dev = rdev; 817 inode_u->bi_atime = now; 818 inode_u->bi_mtime = now; 819 inode_u->bi_ctime = now; 820 inode_u->bi_otime = now; 821 822 if (parent && parent->bi_mode & S_ISGID) { 823 inode_u->bi_gid = parent->bi_gid; 824 if (S_ISDIR(mode)) 825 inode_u->bi_mode |= S_ISGID; 826 } 827 828 if (parent) { 829 #define x(_name, ...) inode_u->bi_##_name = parent->bi_##_name; 830 BCH_INODE_OPTS() 831 #undef x 832 } 833 } 834 835 void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u, 836 uid_t uid, gid_t gid, umode_t mode, dev_t rdev, 837 struct bch_inode_unpacked *parent) 838 { 839 bch2_inode_init_early(c, inode_u); 840 bch2_inode_init_late(inode_u, bch2_current_time(c), 841 uid, gid, mode, rdev, parent); 842 } 843 844 static inline u32 bkey_generation(struct bkey_s_c k) 845 { 846 switch (k.k->type) { 847 case KEY_TYPE_inode: 848 case KEY_TYPE_inode_v2: 849 BUG(); 850 case KEY_TYPE_inode_generation: 851 return le32_to_cpu(bkey_s_c_to_inode_generation(k).v->bi_generation); 852 default: 853 return 0; 854 } 855 } 856 857 /* 858 * This just finds an empty slot: 859 */ 860 int bch2_inode_create(struct btree_trans *trans, 861 struct btree_iter *iter, 862 struct bch_inode_unpacked *inode_u, 863 u32 snapshot, u64 cpu) 864 { 865 struct bch_fs *c = trans->c; 866 struct bkey_s_c k; 867 u64 min, max, start, pos, *hint; 868 int ret = 0; 869 unsigned bits = (c->opts.inodes_32bit ? 
31 : 63); 870 871 if (c->opts.shard_inode_numbers) { 872 bits -= c->inode_shard_bits; 873 874 min = (cpu << bits); 875 max = (cpu << bits) | ~(ULLONG_MAX << bits); 876 877 min = max_t(u64, min, BLOCKDEV_INODE_MAX); 878 hint = c->unused_inode_hints + cpu; 879 } else { 880 min = BLOCKDEV_INODE_MAX; 881 max = ~(ULLONG_MAX << bits); 882 hint = c->unused_inode_hints; 883 } 884 885 start = READ_ONCE(*hint); 886 887 if (start >= max || start < min) 888 start = min; 889 890 pos = start; 891 bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos), 892 BTREE_ITER_all_snapshots| 893 BTREE_ITER_intent); 894 again: 895 while ((k = bch2_btree_iter_peek(iter)).k && 896 !(ret = bkey_err(k)) && 897 bkey_lt(k.k->p, POS(0, max))) { 898 if (pos < iter->pos.offset) 899 goto found_slot; 900 901 /* 902 * We don't need to iterate over keys in every snapshot once 903 * we've found just one: 904 */ 905 pos = iter->pos.offset + 1; 906 bch2_btree_iter_set_pos(iter, POS(0, pos)); 907 } 908 909 if (!ret && pos < max) 910 goto found_slot; 911 912 if (!ret && start == min) 913 ret = -BCH_ERR_ENOSPC_inode_create; 914 915 if (ret) { 916 bch2_trans_iter_exit(trans, iter); 917 return ret; 918 } 919 920 /* Retry from start */ 921 pos = start = min; 922 bch2_btree_iter_set_pos(iter, POS(0, pos)); 923 goto again; 924 found_slot: 925 bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot)); 926 k = bch2_btree_iter_peek_slot(iter); 927 ret = bkey_err(k); 928 if (ret) { 929 bch2_trans_iter_exit(trans, iter); 930 return ret; 931 } 932 933 *hint = k.k->p.offset; 934 inode_u->bi_inum = k.k->p.offset; 935 inode_u->bi_generation = bkey_generation(k); 936 return 0; 937 } 938 939 static int bch2_inode_delete_keys(struct btree_trans *trans, 940 subvol_inum inum, enum btree_id id) 941 { 942 struct btree_iter iter; 943 struct bkey_s_c k; 944 struct bkey_i delete; 945 struct bpos end = POS(inum.inum, U64_MAX); 946 u32 snapshot; 947 int ret = 0; 948 949 /* 950 * We're never going to be deleting partial extents, no 
need to use an 951 * extent iterator: 952 */ 953 bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0), 954 BTREE_ITER_intent); 955 956 while (1) { 957 bch2_trans_begin(trans); 958 959 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); 960 if (ret) 961 goto err; 962 963 bch2_btree_iter_set_snapshot(&iter, snapshot); 964 965 k = bch2_btree_iter_peek_upto(&iter, end); 966 ret = bkey_err(k); 967 if (ret) 968 goto err; 969 970 if (!k.k) 971 break; 972 973 bkey_init(&delete.k); 974 delete.k.p = iter.pos; 975 976 if (iter.flags & BTREE_ITER_is_extents) 977 bch2_key_resize(&delete.k, 978 bpos_min(end, k.k->p).offset - 979 iter.pos.offset); 980 981 ret = bch2_trans_update(trans, &iter, &delete, 0) ?: 982 bch2_trans_commit(trans, NULL, NULL, 983 BCH_TRANS_COMMIT_no_enospc); 984 err: 985 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart)) 986 break; 987 } 988 989 bch2_trans_iter_exit(trans, &iter); 990 return ret; 991 } 992 993 int bch2_inode_rm(struct bch_fs *c, subvol_inum inum) 994 { 995 struct btree_trans *trans = bch2_trans_get(c); 996 struct btree_iter iter = { NULL }; 997 struct bkey_i_inode_generation delete; 998 struct bch_inode_unpacked inode_u; 999 struct bkey_s_c k; 1000 u32 snapshot; 1001 int ret; 1002 1003 /* 1004 * If this was a directory, there shouldn't be any real dirents left - 1005 * but there could be whiteouts (from hash collisions) that we should 1006 * delete: 1007 * 1008 * XXX: the dirent could ideally would delete whiteouts when they're no 1009 * longer needed 1010 */ 1011 ret = bch2_inode_delete_keys(trans, inum, BTREE_ID_extents) ?: 1012 bch2_inode_delete_keys(trans, inum, BTREE_ID_xattrs) ?: 1013 bch2_inode_delete_keys(trans, inum, BTREE_ID_dirents); 1014 if (ret) 1015 goto err; 1016 retry: 1017 bch2_trans_begin(trans); 1018 1019 ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); 1020 if (ret) 1021 goto err; 1022 1023 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, 1024 SPOS(0, inum.inum, 
snapshot), 1025 BTREE_ITER_intent|BTREE_ITER_cached); 1026 ret = bkey_err(k); 1027 if (ret) 1028 goto err; 1029 1030 if (!bkey_is_inode(k.k)) { 1031 bch2_fs_inconsistent(c, 1032 "inode %llu:%u not found when deleting", 1033 inum.inum, snapshot); 1034 ret = -EIO; 1035 goto err; 1036 } 1037 1038 bch2_inode_unpack(k, &inode_u); 1039 1040 bkey_inode_generation_init(&delete.k_i); 1041 delete.k.p = iter.pos; 1042 delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1); 1043 1044 ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?: 1045 bch2_trans_commit(trans, NULL, NULL, 1046 BCH_TRANS_COMMIT_no_enospc); 1047 err: 1048 bch2_trans_iter_exit(trans, &iter); 1049 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 1050 goto retry; 1051 1052 if (ret) 1053 goto err2; 1054 1055 ret = delete_ancestor_snapshot_inodes(trans, SPOS(0, inum.inum, snapshot)); 1056 err2: 1057 bch2_trans_put(trans); 1058 return ret; 1059 } 1060 1061 int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans, 1062 subvol_inum inum, 1063 struct bch_inode_unpacked *inode) 1064 { 1065 struct btree_iter iter; 1066 int ret; 1067 1068 ret = bch2_inode_peek_nowarn(trans, &iter, inode, inum, 0); 1069 if (!ret) 1070 bch2_trans_iter_exit(trans, &iter); 1071 return ret; 1072 } 1073 1074 int bch2_inode_find_by_inum_trans(struct btree_trans *trans, 1075 subvol_inum inum, 1076 struct bch_inode_unpacked *inode) 1077 { 1078 struct btree_iter iter; 1079 int ret; 1080 1081 ret = bch2_inode_peek(trans, &iter, inode, inum, 0); 1082 if (!ret) 1083 bch2_trans_iter_exit(trans, &iter); 1084 return ret; 1085 } 1086 1087 int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum, 1088 struct bch_inode_unpacked *inode) 1089 { 1090 return bch2_trans_do(c, NULL, NULL, 0, 1091 bch2_inode_find_by_inum_trans(trans, inum, inode)); 1092 } 1093 1094 int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi) 1095 { 1096 if (bi->bi_flags & BCH_INODE_unlinked) 1097 bi->bi_flags &= ~BCH_INODE_unlinked; 1098 else { 
		/* bi_nlink is the count of links *beyond* the first: */
		if (bi->bi_nlink == U32_MAX)
			return -EINVAL;

		bi->bi_nlink++;
	}

	return 0;
}

/*
 * Drop one link: decrement bi_nlink if there are extra links, otherwise mark
 * the inode unlinked.  Inconsistent flag/count combinations are reported via
 * bch2_trans_inconsistent() and left unmodified.
 */
void bch2_inode_nlink_dec(struct btree_trans *trans, struct bch_inode_unpacked *bi)
{
	if (bi->bi_nlink && (bi->bi_flags & BCH_INODE_unlinked)) {
		bch2_trans_inconsistent(trans, "inode %llu unlinked but link count nonzero",
					bi->bi_inum);
		return;
	}

	if (bi->bi_flags & BCH_INODE_unlinked) {
		bch2_trans_inconsistent(trans, "inode %llu link count underflow", bi->bi_inum);
		return;
	}

	if (bi->bi_nlink)
		bi->bi_nlink--;
	else
		bi->bi_flags |= BCH_INODE_unlinked;
}

/*
 * Translate per-inode option fields into a struct bch_opts; inode fields
 * store option values biased by +1 so that 0 means "not set".
 */
struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *inode)
{
	struct bch_opts ret = { 0 };
#define x(_name, _bits)						\
	if (inode->bi_##_name)					\
		opt_set(ret, _name, inode->bi_##_name - 1);
	BCH_INODE_OPTS()
#undef x
	return ret;
}

/*
 * Compute effective IO options for an inode: per-inode settings where present,
 * falling back to filesystem defaults (via inode_opt_get()).
 */
void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
			 struct bch_inode_unpacked *inode)
{
#define x(_name, _bits)		opts->_name = inode_opt_get(c, inode, _name);
	BCH_INODE_OPTS()
#undef x

	/* nocow is incompatible with checksumming/compression/EC: */
	if (opts->nocow)
		opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0;
}

/* Look up an inode by subvol:inum and return its effective IO options. */
int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_io_opts *opts)
{
	struct bch_inode_unpacked inode;
	int ret = lockrestart_do(trans, bch2_inode_find_by_inum_trans(trans, inum, &inode));

	if (ret)
		return ret;

	bch2_inode_opts_get(opts, trans->c, &inode);
	return 0;
}

/*
 * Delete one snapshot-version of an inode: remove all its extents, dirents
 * and xattrs in that snapshot, then replace the inode key with an
 * inode_generation key (so the generation number survives for reuse).
 *
 * Always returns nonzero: either an error, or
 * -BCH_ERR_transaction_restart_nested since the range deletes commit
 * independently and the caller must restart.
 */
static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter = { NULL };
	struct bkey_i_inode_generation delete;
	struct bch_inode_unpacked inode_u;
	struct bkey_s_c k;
	int ret;

	do {
		ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
						    SPOS(inum, 0, snapshot),
						    SPOS(inum, U64_MAX, snapshot),
						    0, NULL) ?:
		      bch2_btree_delete_range_trans(trans, BTREE_ID_dirents,
						    SPOS(inum, 0, snapshot),
						    SPOS(inum, U64_MAX, snapshot),
						    0, NULL) ?:
		      bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs,
						    SPOS(inum, 0, snapshot),
						    SPOS(inum, U64_MAX, snapshot),
						    0, NULL);
	} while (ret == -BCH_ERR_transaction_restart_nested);
	if (ret)
		goto err;
retry:
	bch2_trans_begin(trans);

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, inum, snapshot), BTREE_ITER_intent);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!bkey_is_inode(k.k)) {
		bch2_fs_inconsistent(c,
				     "inode %llu:%u not found when deleting",
				     inum, snapshot);
		ret = -EIO;
		goto err;
	}

	bch2_inode_unpack(k, &inode_u);

	/* Subvolume root? */
	if (inode_u.bi_subvol)
		bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum);

	bkey_inode_generation_init(&delete.k_i);
	delete.k.p = iter.pos;
	/* Bump the generation so a reused inum is distinguishable: */
	delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);

	ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
	      bch2_trans_commit(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc);
err:
	bch2_trans_iter_exit(trans, &iter);
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	return ret ?: -BCH_ERR_transaction_restart_nested;
}

/*
 * After deleting an inode, there may be versions in older snapshots that should
 * also be deleted - if they're not referenced by sibling snapshots and not open
 * in other subvolumes:
 */
static int delete_ancestor_snapshot_inodes(struct btree_trans *trans, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;
next_parent: 1235 ret = lockrestart_do(trans, 1236 bkey_err(k = bch2_inode_get_iter_snapshot_parent(trans, &iter, pos, 0))); 1237 if (ret || !k.k) 1238 return ret; 1239 1240 bool unlinked = bkey_is_unlinked_inode(k); 1241 pos = k.k->p; 1242 bch2_trans_iter_exit(trans, &iter); 1243 1244 if (!unlinked) 1245 return 0; 1246 1247 ret = lockrestart_do(trans, bch2_inode_or_descendents_is_open(trans, pos)); 1248 if (ret) 1249 return ret < 0 ? ret : 0; 1250 1251 ret = __bch2_inode_rm_snapshot(trans, pos.offset, pos.snapshot); 1252 if (ret) 1253 return ret; 1254 goto next_parent; 1255 } 1256 1257 int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot) 1258 { 1259 return __bch2_inode_rm_snapshot(trans, inum, snapshot) ?: 1260 delete_ancestor_snapshot_inodes(trans, SPOS(0, inum, snapshot)); 1261 } 1262 1263 static int may_delete_deleted_inode(struct btree_trans *trans, 1264 struct btree_iter *iter, 1265 struct bpos pos, 1266 bool *need_another_pass) 1267 { 1268 struct bch_fs *c = trans->c; 1269 struct btree_iter inode_iter; 1270 struct bkey_s_c k; 1271 struct bch_inode_unpacked inode; 1272 struct printbuf buf = PRINTBUF; 1273 int ret; 1274 1275 k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached); 1276 ret = bkey_err(k); 1277 if (ret) 1278 return ret; 1279 1280 ret = bkey_is_inode(k.k) ? 
0 : -BCH_ERR_ENOENT_inode; 1281 if (fsck_err_on(!bkey_is_inode(k.k), 1282 trans, deleted_inode_missing, 1283 "nonexistent inode %llu:%u in deleted_inodes btree", 1284 pos.offset, pos.snapshot)) 1285 goto delete; 1286 1287 ret = bch2_inode_unpack(k, &inode); 1288 if (ret) 1289 goto out; 1290 1291 if (S_ISDIR(inode.bi_mode)) { 1292 ret = bch2_empty_dir_snapshot(trans, pos.offset, 0, pos.snapshot); 1293 if (fsck_err_on(bch2_err_matches(ret, ENOTEMPTY), 1294 trans, deleted_inode_is_dir, 1295 "non empty directory %llu:%u in deleted_inodes btree", 1296 pos.offset, pos.snapshot)) 1297 goto delete; 1298 if (ret) 1299 goto out; 1300 } 1301 1302 if (fsck_err_on(!(inode.bi_flags & BCH_INODE_unlinked), 1303 trans, deleted_inode_not_unlinked, 1304 "non-deleted inode %llu:%u in deleted_inodes btree", 1305 pos.offset, pos.snapshot)) 1306 goto delete; 1307 1308 if (fsck_err_on(inode.bi_flags & BCH_INODE_has_child_snapshot, 1309 trans, deleted_inode_has_child_snapshots, 1310 "inode with child snapshots %llu:%u in deleted_inodes btree", 1311 pos.offset, pos.snapshot)) 1312 goto delete; 1313 1314 ret = bch2_inode_has_child_snapshots(trans, k.k->p); 1315 if (ret < 0) 1316 goto out; 1317 1318 if (ret) { 1319 if (fsck_err(trans, inode_has_child_snapshots_wrong, 1320 "inode has_child_snapshots flag wrong (should be set)\n%s", 1321 (printbuf_reset(&buf), 1322 bch2_inode_unpacked_to_text(&buf, &inode), 1323 buf.buf))) { 1324 inode.bi_flags |= BCH_INODE_has_child_snapshot; 1325 ret = __bch2_fsck_write_inode(trans, &inode); 1326 if (ret) 1327 goto out; 1328 } 1329 goto delete; 1330 1331 } 1332 1333 if (test_bit(BCH_FS_clean_recovery, &c->flags) && 1334 !fsck_err(trans, deleted_inode_but_clean, 1335 "filesystem marked as clean but have deleted inode %llu:%u", 1336 pos.offset, pos.snapshot)) { 1337 ret = 0; 1338 goto out; 1339 } 1340 1341 ret = 1; 1342 out: 1343 fsck_err: 1344 bch2_trans_iter_exit(trans, &inode_iter); 1345 printbuf_exit(&buf); 1346 return ret; 1347 delete: 1348 ret = 
bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, pos, false); 1349 goto out; 1350 } 1351 1352 int bch2_delete_dead_inodes(struct bch_fs *c) 1353 { 1354 struct btree_trans *trans = bch2_trans_get(c); 1355 bool need_another_pass; 1356 int ret; 1357 again: 1358 /* 1359 * if we ran check_inodes() unlinked inodes will have already been 1360 * cleaned up but the write buffer will be out of sync; therefore we 1361 * alway need a write buffer flush 1362 */ 1363 ret = bch2_btree_write_buffer_flush_sync(trans); 1364 if (ret) 1365 goto err; 1366 1367 need_another_pass = false; 1368 1369 /* 1370 * Weird transaction restart handling here because on successful delete, 1371 * bch2_inode_rm_snapshot() will return a nested transaction restart, 1372 * but we can't retry because the btree write buffer won't have been 1373 * flushed and we'd spin: 1374 */ 1375 ret = for_each_btree_key_commit(trans, iter, BTREE_ID_deleted_inodes, POS_MIN, 1376 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, 1377 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ 1378 ret = may_delete_deleted_inode(trans, &iter, k.k->p, &need_another_pass); 1379 if (ret > 0) { 1380 bch_verbose(c, "deleting unlinked inode %llu:%u", k.k->p.offset, k.k->p.snapshot); 1381 1382 ret = bch2_inode_rm_snapshot(trans, k.k->p.offset, k.k->p.snapshot); 1383 /* 1384 * We don't want to loop here: a transaction restart 1385 * error here means we handled a transaction restart and 1386 * we're actually done, but if we loop we'll retry the 1387 * same key because the write buffer hasn't been flushed 1388 * yet 1389 */ 1390 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { 1391 ret = 0; 1392 continue; 1393 } 1394 } 1395 1396 ret; 1397 })); 1398 1399 if (!ret && need_another_pass) 1400 goto again; 1401 err: 1402 bch2_trans_put(trans); 1403 return ret; 1404 } 1405