// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "async_objs.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "enumerated_ref.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>

static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{
	bch2_btree_id_level_to_text(out, BTREE_NODE_ID(bn), BTREE_NODE_LEVEL(bn));
	prt_printf(out, " seq %llx %llu\n", bn->keys.seq, BTREE_NODE_SEQ(bn));
	prt_str(out, "min: ");
	bch2_bpos_to_text(out, bn->min_key);
	prt_newline(out);
	prt_str(out, "max: ");
	bch2_bpos_to_text(out, bn->max_key);
}

void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight_inner(b);
	clear_btree_node_write_in_flight(b);
	smp_mb__after_atomic();
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_p_next(start);
	     k != end;
	     p = k, k = bkey_p_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
	}
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
		k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		kvfree(p);
}

static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > c->opts.btree_node_size);

	*used_mempool = false;
	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
	}
	memalloc_nofs_restore(flags);
	return p;
}

static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n =
nr, a = nr / 2, b, c, d; 140 141 if (!a) 142 return; 143 144 /* Heap sort: see lib/sort.c: */ 145 while (1) { 146 if (a) 147 a--; 148 else if (--n) 149 swap(ptrs[0], ptrs[n]); 150 else 151 break; 152 153 for (b = a; c = 2 * b + 1, (d = c + 1) < n;) 154 b = bch2_bkey_cmp_packed(bt, 155 ptrs[c], 156 ptrs[d]) >= 0 ? c : d; 157 if (d == n) 158 b = c; 159 160 while (b != a && 161 bch2_bkey_cmp_packed(bt, 162 ptrs[a], 163 ptrs[b]) >= 0) 164 b = (b - 1) / 2; 165 c = b; 166 while (b != a) { 167 b = (b - 1) / 2; 168 swap(ptrs[b], ptrs[c]); 169 } 170 } 171 } 172 173 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b) 174 { 175 struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k; 176 bool used_mempool = false; 177 size_t bytes = b->whiteout_u64s * sizeof(u64); 178 179 if (!b->whiteout_u64s) 180 return; 181 182 new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool); 183 184 ptrs = ptrs_end = ((void *) new_whiteouts + bytes); 185 186 for (k = unwritten_whiteouts_start(b); 187 k != unwritten_whiteouts_end(b); 188 k = bkey_p_next(k)) 189 *--ptrs = k; 190 191 sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs); 192 193 k = new_whiteouts; 194 195 while (ptrs != ptrs_end) { 196 bkey_p_copy(k, *ptrs); 197 k = bkey_p_next(k); 198 ptrs++; 199 } 200 201 verify_no_dups(b, new_whiteouts, 202 (void *) ((u64 *) new_whiteouts + b->whiteout_u64s)); 203 204 memcpy_u64s(unwritten_whiteouts_start(b), 205 new_whiteouts, b->whiteout_u64s); 206 207 btree_bounce_free(c, bytes, used_mempool, new_whiteouts); 208 } 209 210 static bool should_compact_bset(struct btree *b, struct bset_tree *t, 211 bool compacting, enum compact_mode mode) 212 { 213 if (!bset_dead_u64s(b, t)) 214 return false; 215 216 switch (mode) { 217 case COMPACT_LAZY: 218 return should_compact_bset_lazy(b, t) || 219 (compacting && !bset_written(b, bset(b, t))); 220 case COMPACT_ALL: 221 return true; 222 default: 223 BUG(); 224 } 225 } 226 227 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode) 228 { 229 bool ret = false; 230 231 for_each_bset(b, t) { 232 struct bset *i = bset(b, t); 233 struct bkey_packed *k, *n, *out, *start, *end; 234 struct btree_node_entry *src = NULL, *dst = NULL; 235 236 if (t != b->set && !bset_written(b, i)) { 237 src = container_of(i, struct btree_node_entry, keys); 238 dst = max(write_block(b), 239 (void *) btree_bkey_last(b, t - 1)); 240 } 241 242 if (src != dst) 243 ret = true; 244 245 if (!should_compact_bset(b, t, ret, mode)) { 246 if (src != dst) { 247 memmove(dst, src, sizeof(*src) + 248 le16_to_cpu(src->keys.u64s) * 249 sizeof(u64)); 250 i = &dst->keys; 251 set_btree_bset(b, t, i); 252 } 253 continue; 254 } 255 256 start = btree_bkey_first(b, t); 257 end = btree_bkey_last(b, t); 258 259 if (src != dst) { 260 memmove(dst, src, sizeof(*src)); 261 i = &dst->keys; 262 set_btree_bset(b, t, i); 263 } 264 265 out = i->start; 266 267 for (k = start; k != end; k = n) { 268 n = bkey_p_next(k); 269 270 if (!bkey_deleted(k)) { 271 bkey_p_copy(out, k); 272 out = bkey_p_next(out); 273 } else { 274 BUG_ON(k->needs_whiteout); 275 } 276 } 277 278 i->u64s = cpu_to_le16((u64 *) out - i->_data); 279 set_btree_bset_end(b, t); 280 bch2_bset_set_no_aux_tree(b, t); 281 ret = true; 282 } 283 284 bch2_verify_btree_nr_keys(b); 285 286 bch2_btree_build_aux_trees(b); 287 288 return ret; 289 } 290 291 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b, 292 enum compact_mode mode) 293 { 294 return bch2_drop_whiteouts(b, mode); 295 } 296 297 static void btree_node_sort(struct bch_fs *c, struct btree *b, 298 
unsigned start_idx, 299 unsigned end_idx) 300 { 301 struct btree_node *out; 302 struct sort_iter_stack sort_iter; 303 struct bset_tree *t; 304 struct bset *start_bset = bset(b, &b->set[start_idx]); 305 bool used_mempool = false; 306 u64 start_time, seq = 0; 307 unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1; 308 bool sorting_entire_node = start_idx == 0 && 309 end_idx == b->nsets; 310 311 sort_iter_stack_init(&sort_iter, b); 312 313 for (t = b->set + start_idx; 314 t < b->set + end_idx; 315 t++) { 316 u64s += le16_to_cpu(bset(b, t)->u64s); 317 sort_iter_add(&sort_iter.iter, 318 btree_bkey_first(b, t), 319 btree_bkey_last(b, t)); 320 } 321 322 bytes = sorting_entire_node 323 ? btree_buf_bytes(b) 324 : __vstruct_bytes(struct btree_node, u64s); 325 326 out = btree_bounce_alloc(c, bytes, &used_mempool); 327 328 start_time = local_clock(); 329 330 u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter); 331 332 out->keys.u64s = cpu_to_le16(u64s); 333 334 BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes); 335 336 if (sorting_entire_node) 337 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort], 338 start_time); 339 340 /* Make sure we preserve bset journal_seq: */ 341 for (t = b->set + start_idx; t < b->set + end_idx; t++) 342 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq)); 343 start_bset->journal_seq = cpu_to_le64(seq); 344 345 if (sorting_entire_node) { 346 u64s = le16_to_cpu(out->keys.u64s); 347 348 BUG_ON(bytes != btree_buf_bytes(b)); 349 350 /* 351 * Our temporary buffer is the same size as the btree node's 352 * buffer, we can just swap buffers instead of doing a big 353 * memcpy() 354 */ 355 *out = *b->data; 356 out->keys.u64s = cpu_to_le16(u64s); 357 swap(out, b->data); 358 set_btree_bset(b, b->set, &b->data->keys); 359 } else { 360 start_bset->u64s = out->keys.u64s; 361 memcpy_u64s(start_bset->start, 362 out->keys.start, 363 le16_to_cpu(out->keys.u64s)); 364 } 365 366 for (i = start_idx + 1; i < end_idx; i++) 367 b->nr.bset_u64s[start_idx] += 368 b->nr.bset_u64s[i]; 369 370 b->nsets -= shift; 371 372 for (i = start_idx + 1; i < b->nsets; i++) { 373 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift]; 374 b->set[i] = b->set[i + shift]; 375 } 376 377 for (i = b->nsets; i < MAX_BSETS; i++) 378 b->nr.bset_u64s[i] = 0; 379 380 set_btree_bset_end(b, &b->set[start_idx]); 381 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]); 382 383 btree_bounce_free(c, bytes, used_mempool, out); 384 385 bch2_verify_btree_nr_keys(b); 386 } 387 388 void bch2_btree_sort_into(struct bch_fs *c, 389 struct btree *dst, 390 struct btree *src) 391 { 392 struct btree_nr_keys nr; 393 struct btree_node_iter src_iter; 394 u64 start_time = local_clock(); 395 396 BUG_ON(dst->nsets != 1); 397 398 bch2_bset_set_no_aux_tree(dst, dst->set); 399 400 bch2_btree_node_iter_init_from_start(&src_iter, src); 401 402 nr = bch2_sort_repack(btree_bset_first(dst), 403 src, &src_iter, 404 &dst->format, 405 true); 406 407 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort], 408 start_time); 409 410 set_btree_bset_end(dst, dst->set); 411 412 dst->nr.live_u64s += nr.live_u64s; 413 dst->nr.bset_u64s[0] += nr.bset_u64s[0]; 414 dst->nr.packed_keys += nr.packed_keys; 415 dst->nr.unpacked_keys += nr.unpacked_keys; 416 417 bch2_verify_btree_nr_keys(dst); 418 } 419 420 /* 421 * We're about to add another bset to the btree node, so if there's currently 422 * too many bsets - sort some of them together: 423 */ 424 static bool btree_node_compact(struct bch_fs *c, struct btree *b) 425 { 426 unsigned unwritten_idx; 427 
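	/*
	 * Strategy: find the first unwritten bset; if there is more than one
	 * unwritten bset, sort the unwritten ones together, and if there is
	 * more than one written bset, sort those together as well - written
	 * and unwritten bsets are never merged into a single sort here.
	 */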
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx, b->nsets);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx);
		ret = true;
	}

	return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
	unsigned mid_u64s_bits =
		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}

/*
 * bch2_btree_init_next() - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new
 * bset if @b doesn't already have one.
 *
 * Returns true if we sorted (i.e. invalidated iterators).
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
	BUG_ON(bset_written(b, bset(b, &b->set[1])));
	BUG_ON(btree_node_just_written(b));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b) &&
	    should_compact_all(c, b)) {
		bch2_btree_node_write_trans(trans, b, SIX_LOCK_write,
					    BTREE_WRITE_init_next_bset);
		reinit_iter = true;
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	if (reinit_iter)
		bch2_trans_node_reinit_iter(trans, b);
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  bool print_pos,
			  struct btree *b, struct bset *i, struct bkey_packed *k,
			  unsigned offset, int rw)
{
	if (print_pos) {
		prt_str(out, rw == READ
			?
"error validating btree node " 526 : "corrupt btree node before write "); 527 prt_printf(out, "at btree "); 528 bch2_btree_pos_to_text(out, c, b); 529 prt_newline(out); 530 } 531 532 if (ca) 533 prt_printf(out, "%s ", ca->name); 534 535 prt_printf(out, "node offset %u/%u", 536 b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key))); 537 if (i) 538 prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s)); 539 if (k) 540 prt_printf(out, " bset byte offset %lu", 541 (unsigned long)(void *)k - 542 ((unsigned long)(void *)i & ~511UL)); 543 prt_str(out, ": "); 544 } 545 546 __printf(11, 12) 547 static int __btree_err(int ret, 548 struct bch_fs *c, 549 struct bch_dev *ca, 550 struct btree *b, 551 struct bset *i, 552 struct bkey_packed *k, 553 int rw, 554 enum bch_sb_error_id err_type, 555 struct bch_io_failures *failed, 556 struct printbuf *err_msg, 557 const char *fmt, ...) 558 { 559 if (c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes) 560 return ret == -BCH_ERR_btree_node_read_err_fixable 561 ? bch_err_throw(c, fsck_fix) 562 : ret; 563 564 bool have_retry = false; 565 int ret2; 566 567 if (ca) { 568 bch2_mark_btree_validate_failure(failed, ca->dev_idx); 569 570 struct extent_ptr_decoded pick; 571 have_retry = bch2_bkey_pick_read_device(c, 572 bkey_i_to_s_c(&b->key), 573 failed, &pick, -1) == 1; 574 } 575 576 if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry) 577 ret = bch_err_throw(c, btree_node_read_err_fixable); 578 if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry) 579 ret = bch_err_throw(c, btree_node_read_err_bad_node); 580 581 bch2_sb_error_count(c, err_type); 582 583 bool print_deferred = err_msg && 584 rw == READ && 585 !(test_bit(BCH_FS_in_fsck, &c->flags) && 586 c->opts.fix_errors == FSCK_FIX_ask); 587 588 struct printbuf out = PRINTBUF; 589 bch2_log_msg_start(c, &out); 590 591 if (!print_deferred) 592 err_msg = &out; 593 594 btree_err_msg(err_msg, c, ca, !print_deferred, b, i, k, b->written, rw); 595 596 va_list args; 597 va_start(args, fmt); 598 prt_vprintf(err_msg, fmt, args); 599 va_end(args); 600 601 if (print_deferred) { 602 prt_newline(err_msg); 603 604 switch (ret) { 605 case -BCH_ERR_btree_node_read_err_fixable: 606 ret2 = bch2_fsck_err_opt(c, FSCK_CAN_FIX, err_type); 607 if (!bch2_err_matches(ret2, BCH_ERR_fsck_fix) && 608 !bch2_err_matches(ret2, BCH_ERR_fsck_ignore)) { 609 ret = ret2; 610 goto fsck_err; 611 } 612 613 if (!have_retry) 614 ret = bch_err_throw(c, fsck_fix); 615 goto out; 616 case -BCH_ERR_btree_node_read_err_bad_node: 617 prt_str(&out, ", "); 618 break; 619 } 620 621 goto out; 622 } 623 624 if (rw == WRITE) { 625 prt_str(&out, ", "); 626 ret = __bch2_inconsistent_error(c, &out) 627 ? -BCH_ERR_fsck_errors_not_fixed 628 : 0; 629 goto print; 630 } 631 632 switch (ret) { 633 case -BCH_ERR_btree_node_read_err_fixable: 634 ret2 = __bch2_fsck_err(c, NULL, FSCK_CAN_FIX, err_type, "%s", out.buf); 635 if (!bch2_err_matches(ret2, BCH_ERR_fsck_fix) && 636 !bch2_err_matches(ret2, BCH_ERR_fsck_ignore)) { 637 ret = ret2; 638 goto fsck_err; 639 } 640 641 if (!have_retry) 642 ret = bch_err_throw(c, fsck_fix); 643 goto out; 644 case -BCH_ERR_btree_node_read_err_bad_node: 645 prt_str(&out, ", "); 646 break; 647 } 648 print: 649 bch2_print_str(c, KERN_ERR, out.buf); 650 out: 651 fsck_err: 652 printbuf_exit(&out); 653 return ret; 654 } 655 656 #define btree_err(type, c, ca, b, i, k, _err_type, msg, ...) 
\ 657 ({ \ 658 int _ret = __btree_err(type, c, ca, b, i, k, write, \ 659 BCH_FSCK_ERR_##_err_type, \ 660 failed, err_msg, \ 661 msg, ##__VA_ARGS__); \ 662 \ 663 if (!bch2_err_matches(_ret, BCH_ERR_fsck_fix)) { \ 664 ret = _ret; \ 665 goto fsck_err; \ 666 } \ 667 \ 668 true; \ 669 }) 670 671 #define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false) 672 673 /* 674 * When btree topology repair changes the start or end of a node, that might 675 * mean we have to drop keys that are no longer inside the node: 676 */ 677 __cold 678 void bch2_btree_node_drop_keys_outside_node(struct btree *b) 679 { 680 for_each_bset(b, t) { 681 struct bset *i = bset(b, t); 682 struct bkey_packed *k; 683 684 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) 685 if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0) 686 break; 687 688 if (k != i->start) { 689 unsigned shift = (u64 *) k - (u64 *) i->start; 690 691 memmove_u64s_down(i->start, k, 692 (u64 *) vstruct_end(i) - (u64 *) k); 693 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift); 694 set_btree_bset_end(b, t); 695 } 696 697 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) 698 if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0) 699 break; 700 701 if (k != vstruct_last(i)) { 702 i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start); 703 set_btree_bset_end(b, t); 704 } 705 } 706 707 /* 708 * Always rebuild search trees: eytzinger search tree nodes directly 709 * depend on the values of min/max key: 710 */ 711 bch2_bset_set_no_aux_tree(b, b->set); 712 bch2_btree_build_aux_trees(b); 713 b->nr = bch2_btree_node_count_keys(b); 714 715 struct bkey_s_c k; 716 struct bkey unpacked; 717 struct btree_node_iter iter; 718 for_each_btree_node_key_unpack(b, k, &iter, &unpacked) { 719 BUG_ON(bpos_lt(k.k->p, b->data->min_key)); 720 BUG_ON(bpos_gt(k.k->p, b->data->max_key)); 721 } 722 } 723 724 static int validate_bset(struct bch_fs *c, struct bch_dev *ca, 725 struct btree *b, struct bset *i, 726 unsigned offset, int write, 727 struct bch_io_failures *failed, 728 struct printbuf *err_msg) 729 { 730 unsigned version = le16_to_cpu(i->version); 731 struct printbuf buf1 = PRINTBUF; 732 struct printbuf buf2 = PRINTBUF; 733 int ret = 0; 734 735 btree_err_on(!bch2_version_compatible(version), 736 -BCH_ERR_btree_node_read_err_incompatible, 737 c, ca, b, i, NULL, 738 btree_node_unsupported_version, 739 "unsupported bset version %u.%u", 740 BCH_VERSION_MAJOR(version), 741 BCH_VERSION_MINOR(version)); 742 743 if (c->recovery.curr_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes && 744 btree_err_on(version < c->sb.version_min, 745 -BCH_ERR_btree_node_read_err_fixable, 746 c, NULL, b, i, NULL, 747 btree_node_bset_older_than_sb_min, 748 "bset version %u older than superblock version_min %u", 749 version, c->sb.version_min)) { 750 if (bch2_version_compatible(version)) { 751 mutex_lock(&c->sb_lock); 752 c->disk_sb.sb->version_min = cpu_to_le16(version); 753 bch2_write_super(c); 754 mutex_unlock(&c->sb_lock); 755 } else { 756 /* We have no idea what's going on: */ 757 i->version = cpu_to_le16(c->sb.version); 758 } 759 } 760 761 if (btree_err_on(BCH_VERSION_MAJOR(version) > 762 BCH_VERSION_MAJOR(c->sb.version), 763 -BCH_ERR_btree_node_read_err_fixable, 764 c, NULL, b, i, NULL, 765 btree_node_bset_newer_than_sb, 766 "bset version %u newer than superblock version %u", 767 version, c->sb.version)) { 768 mutex_lock(&c->sb_lock); 769 c->disk_sb.sb->version = cpu_to_le16(version); 770 bch2_write_super(c); 771 mutex_unlock(&c->sb_lock); 772 } 773 774 
btree_err_on(BSET_SEPARATE_WHITEOUTS(i), 775 -BCH_ERR_btree_node_read_err_incompatible, 776 c, ca, b, i, NULL, 777 btree_node_unsupported_version, 778 "BSET_SEPARATE_WHITEOUTS no longer supported"); 779 780 btree_err_on(offset && !i->u64s, 781 -BCH_ERR_btree_node_read_err_fixable, 782 c, ca, b, i, NULL, 783 bset_empty, 784 "empty bset"); 785 786 btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset, 787 -BCH_ERR_btree_node_read_err_want_retry, 788 c, ca, b, i, NULL, 789 bset_wrong_sector_offset, 790 "bset at wrong sector offset"); 791 792 if (!offset) { 793 struct btree_node *bn = 794 container_of(i, struct btree_node, keys); 795 /* These indicate that we read the wrong btree node: */ 796 797 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { 798 struct bch_btree_ptr_v2 *bp = 799 &bkey_i_to_btree_ptr_v2(&b->key)->v; 800 801 /* XXX endianness */ 802 btree_err_on(bp->seq != bn->keys.seq, 803 -BCH_ERR_btree_node_read_err_must_retry, 804 c, ca, b, NULL, NULL, 805 bset_bad_seq, 806 "incorrect sequence number (wrong btree node)"); 807 } 808 809 btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id, 810 -BCH_ERR_btree_node_read_err_must_retry, 811 c, ca, b, i, NULL, 812 btree_node_bad_btree, 813 "incorrect btree id"); 814 815 btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level, 816 -BCH_ERR_btree_node_read_err_must_retry, 817 c, ca, b, i, NULL, 818 btree_node_bad_level, 819 "incorrect level"); 820 821 if (!write) 822 compat_btree_node(b->c.level, b->c.btree_id, version, 823 BSET_BIG_ENDIAN(i), write, bn); 824 825 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { 826 struct bch_btree_ptr_v2 *bp = 827 &bkey_i_to_btree_ptr_v2(&b->key)->v; 828 829 if (BTREE_PTR_RANGE_UPDATED(bp)) { 830 b->data->min_key = bp->min_key; 831 b->data->max_key = b->key.k.p; 832 } 833 834 btree_err_on(!bpos_eq(b->data->min_key, bp->min_key), 835 -BCH_ERR_btree_node_read_err_must_retry, 836 c, ca, b, NULL, NULL, 837 btree_node_bad_min_key, 838 "incorrect min_key: got %s should be %s", 839 (printbuf_reset(&buf1), 840 bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf), 841 (printbuf_reset(&buf2), 842 bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf)); 843 } 844 845 btree_err_on(!bpos_eq(bn->max_key, b->key.k.p), 846 -BCH_ERR_btree_node_read_err_must_retry, 847 c, ca, b, i, NULL, 848 btree_node_bad_max_key, 849 "incorrect max key %s", 850 (printbuf_reset(&buf1), 851 bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf)); 852 853 if (write) 854 compat_btree_node(b->c.level, b->c.btree_id, version, 855 BSET_BIG_ENDIAN(i), write, bn); 856 857 btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1), 858 -BCH_ERR_btree_node_read_err_bad_node, 859 c, ca, b, i, NULL, 860 btree_node_bad_format, 861 "invalid bkey format: %s\n%s", buf1.buf, 862 (printbuf_reset(&buf2), 863 bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf)); 864 printbuf_reset(&buf1); 865 866 compat_bformat(b->c.level, b->c.btree_id, version, 867 BSET_BIG_ENDIAN(i), write, 868 &bn->format); 869 } 870 fsck_err: 871 printbuf_exit(&buf2); 872 printbuf_exit(&buf1); 873 return ret; 874 } 875 876 static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b, 877 struct bkey_s_c k, 878 enum bch_validate_flags flags) 879 { 880 return bch2_bkey_val_validate(c, k, (struct bkey_validate_context) { 881 .from = BKEY_VALIDATE_btree_node, 882 .level = b->c.level, 883 .btree = b->c.btree_id, 884 .flags = flags 885 }); 886 } 887 888 static int bset_key_validate(struct bch_fs *c, struct btree *b, 889 struct bkey_s_c k, 890 bool updated_range, 891 enum bch_validate_flags flags) 892 { 
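	/*
	 * The checks below are chained with ?: - structural validation of the
	 * key itself, then (unless btree topology repair already updated this
	 * node's range) a check that the key lies inside the node, and value
	 * validation last; on the read path value validation happens later,
	 * after the bsets have been sorted, so here it only runs for writes.
	 */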
893 struct bkey_validate_context from = (struct bkey_validate_context) { 894 .from = BKEY_VALIDATE_btree_node, 895 .level = b->c.level, 896 .btree = b->c.btree_id, 897 .flags = flags, 898 }; 899 return __bch2_bkey_validate(c, k, from) ?: 900 (!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?: 901 (flags & BCH_VALIDATE_write ? btree_node_bkey_val_validate(c, b, k, flags) : 0); 902 } 903 904 static bool bkey_packed_valid(struct bch_fs *c, struct btree *b, 905 struct bset *i, struct bkey_packed *k) 906 { 907 if (bkey_p_next(k) > vstruct_last(i)) 908 return false; 909 910 if (k->format > KEY_FORMAT_CURRENT) 911 return false; 912 913 if (!bkeyp_u64s_valid(&b->format, k)) 914 return false; 915 916 struct bkey tmp; 917 struct bkey_s u = __bkey_disassemble(b, k, &tmp); 918 return !__bch2_bkey_validate(c, u.s_c, 919 (struct bkey_validate_context) { 920 .from = BKEY_VALIDATE_btree_node, 921 .level = b->c.level, 922 .btree = b->c.btree_id, 923 .flags = BCH_VALIDATE_silent 924 }); 925 } 926 927 static inline int btree_node_read_bkey_cmp(const struct btree *b, 928 const struct bkey_packed *l, 929 const struct bkey_packed *r) 930 { 931 return bch2_bkey_cmp_packed(b, l, r) 932 ?: (int) bkey_deleted(r) - (int) bkey_deleted(l); 933 } 934 935 static int validate_bset_keys(struct bch_fs *c, struct btree *b, 936 struct bset *i, int write, 937 struct bch_io_failures *failed, 938 struct printbuf *err_msg) 939 { 940 unsigned version = le16_to_cpu(i->version); 941 struct bkey_packed *k, *prev = NULL; 942 struct printbuf buf = PRINTBUF; 943 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && 944 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); 945 int ret = 0; 946 947 for (k = i->start; 948 k != vstruct_last(i);) { 949 struct bkey_s u; 950 struct bkey tmp; 951 unsigned next_good_key; 952 953 if (btree_err_on(bkey_p_next(k) > vstruct_last(i), 954 -BCH_ERR_btree_node_read_err_fixable, 955 c, NULL, b, i, k, 956 btree_node_bkey_past_bset_end, 957 "key extends past end of bset")) { 958 i->u64s = cpu_to_le16((u64 *) k - i->_data); 959 break; 960 } 961 962 if (btree_err_on(k->format > KEY_FORMAT_CURRENT, 963 -BCH_ERR_btree_node_read_err_fixable, 964 c, NULL, b, i, k, 965 btree_node_bkey_bad_format, 966 "invalid bkey format %u", k->format)) 967 goto drop_this_key; 968 969 if (btree_err_on(!bkeyp_u64s_valid(&b->format, k), 970 -BCH_ERR_btree_node_read_err_fixable, 971 c, NULL, b, i, k, 972 btree_node_bkey_bad_u64s, 973 "bad k->u64s %u (min %u max %zu)", k->u64s, 974 bkeyp_key_u64s(&b->format, k), 975 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k))) 976 goto drop_this_key; 977 978 if (!write) 979 bch2_bkey_compat(b->c.level, b->c.btree_id, version, 980 BSET_BIG_ENDIAN(i), write, 981 &b->format, k); 982 983 u = __bkey_disassemble(b, k, &tmp); 984 985 ret = bset_key_validate(c, b, u.s_c, updated_range, write); 986 if (ret == -BCH_ERR_fsck_delete_bkey) 987 goto drop_this_key; 988 if (ret) 989 goto fsck_err; 990 991 if (write) 992 bch2_bkey_compat(b->c.level, b->c.btree_id, version, 993 BSET_BIG_ENDIAN(i), write, 994 &b->format, k); 995 996 if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) { 997 struct bkey up = bkey_unpack_key(b, prev); 998 999 printbuf_reset(&buf); 1000 prt_printf(&buf, "keys out of order: "); 1001 bch2_bkey_to_text(&buf, &up); 1002 prt_printf(&buf, " > "); 1003 bch2_bkey_to_text(&buf, u.k); 1004 1005 if (btree_err(-BCH_ERR_btree_node_read_err_fixable, 1006 c, NULL, b, i, k, 1007 btree_node_bkey_out_of_order, 1008 "%s", buf.buf)) 1009 goto drop_this_key; 1010 } 
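		/*
		 * Key passed every check: remember it for the ordering check
		 * on the next iteration, then advance.
		 */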
1011 1012 prev = k; 1013 k = bkey_p_next(k); 1014 continue; 1015 drop_this_key: 1016 next_good_key = k->u64s; 1017 1018 if (!next_good_key || 1019 (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN && 1020 version >= bcachefs_metadata_version_snapshot)) { 1021 /* 1022 * only do scanning if bch2_bkey_compat() has nothing to 1023 * do 1024 */ 1025 1026 if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) { 1027 for (next_good_key = 1; 1028 next_good_key < (u64 *) vstruct_last(i) - (u64 *) k; 1029 next_good_key++) 1030 if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) 1031 goto got_good_key; 1032 } 1033 1034 /* 1035 * didn't find a good key, have to truncate the rest of 1036 * the bset 1037 */ 1038 next_good_key = (u64 *) vstruct_last(i) - (u64 *) k; 1039 } 1040 got_good_key: 1041 le16_add_cpu(&i->u64s, -next_good_key); 1042 memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k); 1043 set_btree_node_need_rewrite(b); 1044 set_btree_node_need_rewrite_error(b); 1045 } 1046 fsck_err: 1047 printbuf_exit(&buf); 1048 return ret; 1049 } 1050 1051 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, 1052 struct btree *b, 1053 struct bch_io_failures *failed, 1054 struct printbuf *err_msg) 1055 { 1056 struct btree_node_entry *bne; 1057 struct sort_iter *iter; 1058 struct btree_node *sorted; 1059 struct bkey_packed *k; 1060 struct bset *i; 1061 bool used_mempool, blacklisted; 1062 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && 1063 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); 1064 unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)); 1065 u64 max_journal_seq = 0; 1066 struct printbuf buf = PRINTBUF; 1067 int ret = 0, write = READ; 1068 u64 start_time = local_clock(); 1069 1070 b->version_ondisk = U16_MAX; 1071 /* We might get called multiple times on read retry: */ 1072 b->written = 0; 1073 1074 iter = mempool_alloc(&c->fill_iter, GFP_NOFS); 1075 sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2); 1076 1077 if (bch2_meta_read_fault("btree")) 1078 btree_err(-BCH_ERR_btree_node_read_err_must_retry, 1079 c, ca, b, NULL, NULL, 1080 btree_node_fault_injected, 1081 "dynamic fault"); 1082 1083 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c), 1084 -BCH_ERR_btree_node_read_err_must_retry, 1085 c, ca, b, NULL, NULL, 1086 btree_node_bad_magic, 1087 "bad magic: want %llx, got %llx", 1088 bset_magic(c), le64_to_cpu(b->data->magic)); 1089 1090 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { 1091 struct bch_btree_ptr_v2 *bp = 1092 &bkey_i_to_btree_ptr_v2(&b->key)->v; 1093 1094 bch2_bpos_to_text(&buf, b->data->min_key); 1095 prt_str(&buf, "-"); 1096 bch2_bpos_to_text(&buf, b->data->max_key); 1097 1098 btree_err_on(b->data->keys.seq != bp->seq, 1099 -BCH_ERR_btree_node_read_err_must_retry, 1100 c, ca, b, NULL, NULL, 1101 btree_node_bad_seq, 1102 "got wrong btree node: got\n%s", 1103 (printbuf_reset(&buf), 1104 bch2_btree_node_header_to_text(&buf, b->data), 1105 buf.buf)); 1106 } else { 1107 btree_err_on(!b->data->keys.seq, 1108 -BCH_ERR_btree_node_read_err_must_retry, 1109 c, ca, b, NULL, NULL, 1110 btree_node_bad_seq, 1111 "bad btree header: seq 0\n%s", 1112 (printbuf_reset(&buf), 1113 bch2_btree_node_header_to_text(&buf, b->data), 1114 buf.buf)); 1115 } 1116 1117 while (b->written < (ptr_written ?: btree_sectors(c))) { 1118 unsigned sectors; 1119 bool first = !b->written; 1120 1121 if (first) { 1122 bne = NULL; 1123 i = &b->data->keys; 1124 } else { 1125 bne = write_block(b); 1126 i = 
&bne->keys; 1127 1128 if (i->seq != b->data->keys.seq) 1129 break; 1130 } 1131 1132 struct nonce nonce = btree_nonce(i, b->written << 9); 1133 bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)); 1134 1135 btree_err_on(!good_csum_type, 1136 bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) 1137 ? -BCH_ERR_btree_node_read_err_must_retry 1138 : -BCH_ERR_btree_node_read_err_want_retry, 1139 c, ca, b, i, NULL, 1140 bset_unknown_csum, 1141 "unknown checksum type %llu", BSET_CSUM_TYPE(i)); 1142 1143 if (first) { 1144 sectors = vstruct_sectors(b->data, c->block_bits); 1145 if (btree_err_on(b->written + sectors > (ptr_written ?: btree_sectors(c)), 1146 -BCH_ERR_btree_node_read_err_fixable, 1147 c, ca, b, i, NULL, 1148 bset_past_end_of_btree_node, 1149 "bset past end of btree node (offset %u len %u but written %zu)", 1150 b->written, sectors, ptr_written ?: btree_sectors(c))) 1151 i->u64s = 0; 1152 if (good_csum_type) { 1153 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data); 1154 bool csum_bad = bch2_crc_cmp(b->data->csum, csum); 1155 if (csum_bad) 1156 bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); 1157 1158 btree_err_on(csum_bad, 1159 -BCH_ERR_btree_node_read_err_want_retry, 1160 c, ca, b, i, NULL, 1161 bset_bad_csum, 1162 "%s", 1163 (printbuf_reset(&buf), 1164 bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum), 1165 buf.buf)); 1166 1167 ret = bset_encrypt(c, i, b->written << 9); 1168 if (bch2_fs_fatal_err_on(ret, c, 1169 "decrypting btree node: %s", bch2_err_str(ret))) 1170 goto fsck_err; 1171 } 1172 1173 btree_err_on(btree_node_type_is_extents(btree_node_type(b)) && 1174 !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data), 1175 -BCH_ERR_btree_node_read_err_incompatible, 1176 c, NULL, b, NULL, NULL, 1177 btree_node_unsupported_version, 1178 "btree node does not have NEW_EXTENT_OVERWRITE set"); 1179 } else { 1180 sectors = vstruct_sectors(bne, c->block_bits); 1181 if (btree_err_on(b->written + sectors > (ptr_written ?: btree_sectors(c)), 1182 -BCH_ERR_btree_node_read_err_fixable, 1183 c, ca, b, i, NULL, 1184 bset_past_end_of_btree_node, 1185 "bset past end of btree node (offset %u len %u but written %zu)", 1186 b->written, sectors, ptr_written ?: btree_sectors(c))) 1187 i->u64s = 0; 1188 if (good_csum_type) { 1189 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne); 1190 bool csum_bad = bch2_crc_cmp(bne->csum, csum); 1191 if (ca && csum_bad) 1192 bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); 1193 1194 btree_err_on(csum_bad, 1195 -BCH_ERR_btree_node_read_err_want_retry, 1196 c, ca, b, i, NULL, 1197 bset_bad_csum, 1198 "%s", 1199 (printbuf_reset(&buf), 1200 bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum), 1201 buf.buf)); 1202 1203 ret = bset_encrypt(c, i, b->written << 9); 1204 if (bch2_fs_fatal_err_on(ret, c, 1205 "decrypting btree node: %s", bch2_err_str(ret))) 1206 goto fsck_err; 1207 } 1208 } 1209 1210 b->version_ondisk = min(b->version_ondisk, 1211 le16_to_cpu(i->version)); 1212 1213 ret = validate_bset(c, ca, b, i, b->written, READ, failed, err_msg); 1214 if (ret) 1215 goto fsck_err; 1216 1217 if (!b->written) 1218 btree_node_set_format(b, b->data->format); 1219 1220 ret = validate_bset_keys(c, b, i, READ, failed, err_msg); 1221 if (ret) 1222 goto fsck_err; 1223 1224 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN); 1225 1226 blacklisted = bch2_journal_seq_is_blacklisted(c, 1227 le64_to_cpu(i->journal_seq), 1228 true); 1229 1230 btree_err_on(blacklisted && first, 1231 -BCH_ERR_btree_node_read_err_fixable, 1232 c, ca, b, i, NULL, 
1233 bset_blacklisted_journal_seq, 1234 "first btree node bset has blacklisted journal seq (%llu)", 1235 le64_to_cpu(i->journal_seq)); 1236 1237 btree_err_on(blacklisted && ptr_written, 1238 -BCH_ERR_btree_node_read_err_fixable, 1239 c, ca, b, i, NULL, 1240 first_bset_blacklisted_journal_seq, 1241 "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u", 1242 le64_to_cpu(i->journal_seq), 1243 b->written, b->written + sectors, ptr_written); 1244 1245 b->written = min(b->written + sectors, btree_sectors(c)); 1246 1247 if (blacklisted && !first) 1248 continue; 1249 1250 sort_iter_add(iter, 1251 vstruct_idx(i, 0), 1252 vstruct_last(i)); 1253 1254 max_journal_seq = max(max_journal_seq, le64_to_cpu(i->journal_seq)); 1255 } 1256 1257 if (ptr_written) { 1258 btree_err_on(b->written < ptr_written, 1259 -BCH_ERR_btree_node_read_err_want_retry, 1260 c, ca, b, NULL, NULL, 1261 btree_node_data_missing, 1262 "btree node data missing: expected %u sectors, found %u", 1263 ptr_written, b->written); 1264 } else { 1265 for (bne = write_block(b); 1266 bset_byte_offset(b, bne) < btree_buf_bytes(b); 1267 bne = (void *) bne + block_bytes(c)) 1268 btree_err_on(bne->keys.seq == b->data->keys.seq && 1269 !bch2_journal_seq_is_blacklisted(c, 1270 le64_to_cpu(bne->keys.journal_seq), 1271 true), 1272 -BCH_ERR_btree_node_read_err_want_retry, 1273 c, ca, b, NULL, NULL, 1274 btree_node_bset_after_end, 1275 "found bset signature after last bset"); 1276 } 1277 1278 sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool); 1279 sorted->keys.u64s = 0; 1280 1281 b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter); 1282 memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0, 1283 btree_buf_bytes(b) - 1284 sizeof(struct btree_node) - 1285 b->nr.live_u64s * sizeof(u64)); 1286 1287 b->data->keys.u64s = sorted->keys.u64s; 1288 *sorted = *b->data; 1289 swap(sorted, b->data); 1290 set_btree_bset(b, b->set, &b->data->keys); 1291 b->nsets = 1; 1292 b->data->keys.journal_seq = cpu_to_le64(max_journal_seq); 1293 1294 BUG_ON(b->nr.live_u64s != le16_to_cpu(b->data->keys.u64s)); 1295 1296 btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted); 1297 1298 i = &b->data->keys; 1299 for (k = i->start; k != vstruct_last(i);) { 1300 struct bkey tmp; 1301 struct bkey_s u = __bkey_disassemble(b, k, &tmp); 1302 1303 ret = btree_node_bkey_val_validate(c, b, u.s_c, READ); 1304 if (ret == -BCH_ERR_fsck_delete_bkey || 1305 (static_branch_unlikely(&bch2_inject_invalid_keys) && 1306 !bversion_cmp(u.k->bversion, MAX_VERSION))) { 1307 btree_keys_account_key_drop(&b->nr, 0, k); 1308 1309 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s); 1310 memmove_u64s_down(k, bkey_p_next(k), 1311 (u64 *) vstruct_end(i) - (u64 *) k); 1312 set_btree_bset_end(b, b->set); 1313 set_btree_node_need_rewrite(b); 1314 set_btree_node_need_rewrite_error(b); 1315 continue; 1316 } 1317 if (ret) 1318 goto fsck_err; 1319 1320 if (u.k->type == KEY_TYPE_btree_ptr_v2) { 1321 struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u); 1322 1323 bp.v->mem_ptr = 0; 1324 } 1325 1326 k = bkey_p_next(k); 1327 } 1328 1329 bch2_bset_build_aux_tree(b, b->set, false); 1330 1331 set_needs_whiteout(btree_bset_first(b), true); 1332 1333 btree_node_reset_sib_u64s(b); 1334 1335 if (updated_range) 1336 bch2_btree_node_drop_keys_outside_node(b); 1337 1338 /* 1339 * XXX: 1340 * 1341 * We deadlock if too many btree updates require node rewrites while 1342 * we're still in journal replay. 
	 *
	 * This is because btree node rewrites generate more updates for the
	 * interior updates (alloc, backpointers), and if those updates touch
	 * new nodes and generate more rewrites - well, you see the problem.
	 *
	 * The biggest cause is that we don't use the btree write buffer (for
	 * the backpointer updates) - this needs some real thought on locking
	 * in order to fix.
	 *
	 * The problem with this workaround (not doing the rewrite for degraded
	 * nodes in journal replay) is that those degraded nodes persist, and we
	 * don't want that (this is a real bug when a btree node write completes
	 * with fewer replicas than we wanted and leaves a degraded node due to
	 * device _removal_, i.e. the device went away mid write).
	 *
	 * It's less of a bug here, but still a problem because we don't yet
	 * have a way of tracking degraded data - we need another index (all
	 * extents/btree nodes, by replicas entry) in order to fix properly
	 * (re-replicate degraded data at the earliest possible time).
	 */
	if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_journal_replay)) {
		scoped_guard(rcu)
			bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
				struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);

				if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw) {
					set_btree_node_need_rewrite(b);
					set_btree_node_need_rewrite_degraded(b);
				}
			}
	}

	if (!ptr_written) {
		set_btree_node_need_rewrite(b);
		set_btree_node_need_rewrite_ptr_written_zero(b);
	}
fsck_err:
	mempool_free(iter, &c->fill_iter);
	printbuf_exit(&buf);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
	return ret;
}

static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c = rb->c;
	struct bch_dev *ca = rb->have_ioref ?
bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL; 1392 struct btree *b = rb->b; 1393 struct bio *bio = &rb->bio; 1394 struct bch_io_failures failed = { .nr = 0 }; 1395 int ret = 0; 1396 1397 struct printbuf buf = PRINTBUF; 1398 bch2_log_msg_start(c, &buf); 1399 1400 prt_printf(&buf, "btree node read error at btree "); 1401 bch2_btree_pos_to_text(&buf, c, b); 1402 prt_newline(&buf); 1403 1404 goto start; 1405 while (1) { 1406 ret = bch2_bkey_pick_read_device(c, 1407 bkey_i_to_s_c(&b->key), 1408 &failed, &rb->pick, -1); 1409 if (ret <= 0) { 1410 set_btree_node_read_error(b); 1411 break; 1412 } 1413 1414 ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read); 1415 rb->have_ioref = ca != NULL; 1416 rb->start_time = local_clock(); 1417 bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META); 1418 bio->bi_iter.bi_sector = rb->pick.ptr.offset; 1419 bio->bi_iter.bi_size = btree_buf_bytes(b); 1420 1421 if (rb->have_ioref) { 1422 bio_set_dev(bio, ca->disk_sb.bdev); 1423 submit_bio_wait(bio); 1424 } else { 1425 bio->bi_status = BLK_STS_REMOVED; 1426 } 1427 1428 bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, 1429 rb->start_time, !bio->bi_status); 1430 start: 1431 if (rb->have_ioref) 1432 enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_read); 1433 rb->have_ioref = false; 1434 1435 if (bio->bi_status) { 1436 bch2_mark_io_failure(&failed, &rb->pick, false); 1437 continue; 1438 } 1439 1440 ret = bch2_btree_node_read_done(c, ca, b, &failed, &buf); 1441 if (ret == -BCH_ERR_btree_node_read_err_want_retry || 1442 ret == -BCH_ERR_btree_node_read_err_must_retry) 1443 continue; 1444 1445 if (ret) 1446 set_btree_node_read_error(b); 1447 1448 break; 1449 } 1450 1451 bch2_io_failures_to_text(&buf, c, &failed); 1452 1453 if (btree_node_read_error(b)) 1454 bch2_btree_lost_data(c, &buf, b->c.btree_id); 1455 1456 /* 1457 * only print retry success if we read from a replica with no errors 1458 */ 1459 if (btree_node_read_error(b)) 1460 prt_printf(&buf, "ret %s", bch2_err_str(ret)); 1461 else if (failed.nr) { 1462 if (!bch2_dev_io_failures(&failed, rb->pick.ptr.dev)) 1463 prt_printf(&buf, "retry success"); 1464 else 1465 prt_printf(&buf, "repair success"); 1466 } 1467 1468 if ((failed.nr || 1469 btree_node_need_rewrite(b)) && 1470 !btree_node_read_error(b) && 1471 c->recovery.curr_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) { 1472 prt_printf(&buf, " (rewriting node)"); 1473 bch2_btree_node_rewrite_async(c, b); 1474 } 1475 prt_newline(&buf); 1476 1477 if (failed.nr) 1478 bch2_print_str_ratelimited(c, KERN_ERR, buf.buf); 1479 1480 async_object_list_del(c, btree_read_bio, rb->list_idx); 1481 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read], 1482 rb->start_time); 1483 bio_put(&rb->bio); 1484 printbuf_exit(&buf); 1485 clear_btree_node_read_in_flight(b); 1486 smp_mb__after_atomic(); 1487 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); 1488 } 1489 1490 static void btree_node_read_endio(struct bio *bio) 1491 { 1492 struct btree_read_bio *rb = 1493 container_of(bio, struct btree_read_bio, bio); 1494 struct bch_fs *c = rb->c; 1495 struct bch_dev *ca = rb->have_ioref 1496 ? 
bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;

	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
				   rb->start_time, !bio->bi_status);

	queue_work(c->btree_read_complete_wq, &rb->work);
}

void bch2_btree_read_bio_to_text(struct printbuf *out, struct btree_read_bio *rbio)
{
	bch2_bio_to_text(out, &rbio->bio);
}

struct btree_node_read_all {
	struct closure		cl;
	struct bch_fs		*c;
	struct btree		*b;
	unsigned		nr;
	void			*buf[BCH_REPLICAS_MAX];
	struct bio		*bio[BCH_REPLICAS_MAX];
	blk_status_t		err[BCH_REPLICAS_MAX];
};

static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < btree_sectors(c)) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < btree_sectors(c)) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}

static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
	closure_type(ra, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	struct printbuf buf = PRINTBUF;
	bool dump_bset_maps = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written = 0, written2 = 0;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		?
bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0; 1572 bool _saw_error = false, *saw_error = &_saw_error; 1573 struct printbuf *err_msg = NULL; 1574 struct bch_io_failures *failed = NULL; 1575 1576 for (i = 0; i < ra->nr; i++) { 1577 struct btree_node *bn = ra->buf[i]; 1578 1579 if (ra->err[i]) 1580 continue; 1581 1582 if (le64_to_cpu(bn->magic) != bset_magic(c) || 1583 (seq && seq != bn->keys.seq)) 1584 continue; 1585 1586 if (best < 0) { 1587 best = i; 1588 written = btree_node_sectors_written(c, bn); 1589 continue; 1590 } 1591 1592 written2 = btree_node_sectors_written(c, ra->buf[i]); 1593 if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable, 1594 c, NULL, b, NULL, NULL, 1595 btree_node_replicas_sectors_written_mismatch, 1596 "btree node sectors written mismatch: %u != %u", 1597 written, written2) || 1598 btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]), 1599 -BCH_ERR_btree_node_read_err_fixable, 1600 c, NULL, b, NULL, NULL, 1601 btree_node_bset_after_end, 1602 "found bset signature after last bset") || 1603 btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9), 1604 -BCH_ERR_btree_node_read_err_fixable, 1605 c, NULL, b, NULL, NULL, 1606 btree_node_replicas_data_mismatch, 1607 "btree node replicas content mismatch")) 1608 dump_bset_maps = true; 1609 1610 if (written2 > written) { 1611 written = written2; 1612 best = i; 1613 } 1614 } 1615 fsck_err: 1616 if (dump_bset_maps) { 1617 for (i = 0; i < ra->nr; i++) { 1618 struct btree_node *bn = ra->buf[i]; 1619 struct btree_node_entry *bne = NULL; 1620 unsigned offset = 0, sectors; 1621 bool gap = false; 1622 1623 if (ra->err[i]) 1624 continue; 1625 1626 printbuf_reset(&buf); 1627 1628 while (offset < btree_sectors(c)) { 1629 if (!offset) { 1630 sectors = vstruct_sectors(bn, c->block_bits); 1631 } else { 1632 bne = ra->buf[i] + (offset << 9); 1633 if (bne->keys.seq != bn->keys.seq) 1634 break; 1635 sectors = vstruct_sectors(bne, c->block_bits); 1636 } 1637 1638 prt_printf(&buf, " %u-%u", offset, offset + sectors); 1639 if (bne && bch2_journal_seq_is_blacklisted(c, 1640 le64_to_cpu(bne->keys.journal_seq), false)) 1641 prt_printf(&buf, "*"); 1642 offset += sectors; 1643 } 1644 1645 while (offset < btree_sectors(c)) { 1646 bne = ra->buf[i] + (offset << 9); 1647 if (bne->keys.seq == bn->keys.seq) { 1648 if (!gap) 1649 prt_printf(&buf, " GAP"); 1650 gap = true; 1651 1652 sectors = vstruct_sectors(bne, c->block_bits); 1653 prt_printf(&buf, " %u-%u", offset, offset + sectors); 1654 if (bch2_journal_seq_is_blacklisted(c, 1655 le64_to_cpu(bne->keys.journal_seq), false)) 1656 prt_printf(&buf, "*"); 1657 } 1658 offset++; 1659 } 1660 1661 bch_err(c, "replica %u:%s", i, buf.buf); 1662 } 1663 } 1664 1665 if (best >= 0) { 1666 memcpy(b->data, ra->buf[best], btree_buf_bytes(b)); 1667 ret = bch2_btree_node_read_done(c, NULL, b, NULL, NULL); 1668 } else { 1669 ret = -1; 1670 } 1671 1672 if (ret) { 1673 set_btree_node_read_error(b); 1674 1675 struct printbuf buf = PRINTBUF; 1676 bch2_btree_lost_data(c, &buf, b->c.btree_id); 1677 if (buf.pos) 1678 bch_err(c, "%s", buf.buf); 1679 printbuf_exit(&buf); 1680 } else if (*saw_error) 1681 bch2_btree_node_rewrite_async(c, b); 1682 1683 for (i = 0; i < ra->nr; i++) { 1684 mempool_free(ra->buf[i], &c->btree_bounce_pool); 1685 bio_put(ra->bio[i]); 1686 } 1687 1688 closure_debug_destroy(&ra->cl); 1689 kfree(ra); 1690 printbuf_exit(&buf); 1691 1692 clear_btree_node_read_in_flight(b); 1693 smp_mb__after_atomic(); 1694 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); 1695 } 1696 1697 
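/*
 * Completion handler for a single replica's read in the read-all-replicas
 * path. Rough shape of the whole path (sketch, not literal code):
 *
 *	btree_node_read_all_replicas()
 *		for each pointer in the node key:
 *			submit a read of the whole node into ra->buf[i]
 *			(endio below records ra->err[i], puts the closure)
 *	btree_node_read_all_replicas_done()
 *		pick the best copy, flag mismatches between replicas,
 *		then hand it to bch2_btree_node_read_done()
 */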
static void btree_node_read_all_replicas_endio(struct bio *bio) 1698 { 1699 struct btree_read_bio *rb = 1700 container_of(bio, struct btree_read_bio, bio); 1701 struct bch_fs *c = rb->c; 1702 struct btree_node_read_all *ra = rb->ra; 1703 1704 if (rb->have_ioref) { 1705 struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev); 1706 1707 bch2_latency_acct(ca, rb->start_time, READ); 1708 enumerated_ref_put(&ca->io_ref[READ], 1709 BCH_DEV_READ_REF_btree_node_read_all_replicas); 1710 } 1711 1712 ra->err[rb->idx] = bio->bi_status; 1713 closure_put(&ra->cl); 1714 } 1715 1716 /* 1717 * XXX This allocates multiple times from the same mempools, and can deadlock 1718 * under sufficient memory pressure (but is only a debug path) 1719 */ 1720 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync) 1721 { 1722 struct bkey_s_c k = bkey_i_to_s_c(&b->key); 1723 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 1724 const union bch_extent_entry *entry; 1725 struct extent_ptr_decoded pick; 1726 struct btree_node_read_all *ra; 1727 unsigned i; 1728 1729 ra = kzalloc(sizeof(*ra), GFP_NOFS); 1730 if (!ra) 1731 return bch_err_throw(c, ENOMEM_btree_node_read_all_replicas); 1732 1733 closure_init(&ra->cl, NULL); 1734 ra->c = c; 1735 ra->b = b; 1736 ra->nr = bch2_bkey_nr_ptrs(k); 1737 1738 for (i = 0; i < ra->nr; i++) { 1739 ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS); 1740 ra->bio[i] = bio_alloc_bioset(NULL, 1741 buf_pages(ra->buf[i], btree_buf_bytes(b)), 1742 REQ_OP_READ|REQ_SYNC|REQ_META, 1743 GFP_NOFS, 1744 &c->btree_bio); 1745 } 1746 1747 i = 0; 1748 bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) { 1749 struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, 1750 BCH_DEV_READ_REF_btree_node_read_all_replicas); 1751 struct btree_read_bio *rb = 1752 container_of(ra->bio[i], struct btree_read_bio, bio); 1753 rb->c = c; 1754 rb->b = b; 1755 rb->ra = ra; 1756 rb->start_time = local_clock(); 1757 rb->have_ioref = ca != NULL; 1758 rb->idx = i; 1759 rb->pick = pick; 1760 rb->bio.bi_iter.bi_sector = pick.ptr.offset; 1761 rb->bio.bi_end_io = btree_node_read_all_replicas_endio; 1762 bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b)); 1763 1764 if (rb->have_ioref) { 1765 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree], 1766 bio_sectors(&rb->bio)); 1767 bio_set_dev(&rb->bio, ca->disk_sb.bdev); 1768 1769 closure_get(&ra->cl); 1770 submit_bio(&rb->bio); 1771 } else { 1772 ra->err[i] = BLK_STS_REMOVED; 1773 } 1774 1775 i++; 1776 } 1777 1778 if (sync) { 1779 closure_sync(&ra->cl); 1780 btree_node_read_all_replicas_done(&ra->cl.work); 1781 } else { 1782 continue_at(&ra->cl, btree_node_read_all_replicas_done, 1783 c->btree_read_complete_wq); 1784 } 1785 1786 return 0; 1787 } 1788 1789 void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, 1790 bool sync) 1791 { 1792 struct bch_fs *c = trans->c; 1793 struct extent_ptr_decoded pick; 1794 struct btree_read_bio *rb; 1795 struct bch_dev *ca; 1796 struct bio *bio; 1797 int ret; 1798 1799 trace_and_count(c, btree_node_read, trans, b); 1800 1801 if (static_branch_unlikely(&bch2_verify_all_btree_replicas) && 1802 !btree_node_read_all_replicas(c, b, sync)) 1803 return; 1804 1805 ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), 1806 NULL, &pick, -1); 1807 1808 if (ret <= 0) { 1809 bool ratelimit = true; 1810 struct printbuf buf = PRINTBUF; 1811 bch2_log_msg_start(c, &buf); 1812 1813 prt_str(&buf, "btree node read error: no device to read from\n at "); 1814 bch2_btree_pos_to_text(&buf, c, b); 1815 
prt_newline(&buf); 1816 bch2_btree_lost_data(c, &buf, b->c.btree_id); 1817 1818 if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology) && 1819 bch2_fs_emergency_read_only2(c, &buf)) 1820 ratelimit = false; 1821 1822 static DEFINE_RATELIMIT_STATE(rs, 1823 DEFAULT_RATELIMIT_INTERVAL, 1824 DEFAULT_RATELIMIT_BURST); 1825 if (!ratelimit || __ratelimit(&rs)) 1826 bch2_print_str(c, KERN_ERR, buf.buf); 1827 printbuf_exit(&buf); 1828 1829 set_btree_node_read_error(b); 1830 clear_btree_node_read_in_flight(b); 1831 smp_mb__after_atomic(); 1832 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); 1833 return; 1834 } 1835 1836 ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read); 1837 1838 bio = bio_alloc_bioset(NULL, 1839 buf_pages(b->data, btree_buf_bytes(b)), 1840 REQ_OP_READ|REQ_SYNC|REQ_META, 1841 GFP_NOFS, 1842 &c->btree_bio); 1843 rb = container_of(bio, struct btree_read_bio, bio); 1844 rb->c = c; 1845 rb->b = b; 1846 rb->ra = NULL; 1847 rb->start_time = local_clock(); 1848 rb->have_ioref = ca != NULL; 1849 rb->pick = pick; 1850 INIT_WORK(&rb->work, btree_node_read_work); 1851 bio->bi_iter.bi_sector = pick.ptr.offset; 1852 bio->bi_end_io = btree_node_read_endio; 1853 bch2_bio_map(bio, b->data, btree_buf_bytes(b)); 1854 1855 async_object_list_add(c, btree_read_bio, rb, &rb->list_idx); 1856 1857 if (rb->have_ioref) { 1858 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree], 1859 bio_sectors(bio)); 1860 bio_set_dev(bio, ca->disk_sb.bdev); 1861 1862 if (sync) { 1863 submit_bio_wait(bio); 1864 bch2_latency_acct(ca, rb->start_time, READ); 1865 btree_node_read_work(&rb->work); 1866 } else { 1867 submit_bio(bio); 1868 } 1869 } else { 1870 bio->bi_status = BLK_STS_REMOVED; 1871 1872 if (sync) 1873 btree_node_read_work(&rb->work); 1874 else 1875 queue_work(c->btree_read_complete_wq, &rb->work); 1876 } 1877 } 1878 1879 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id, 1880 const struct bkey_i *k, unsigned level) 1881 { 1882 struct bch_fs *c = trans->c; 1883 struct closure cl; 1884 struct btree *b; 1885 int ret; 1886 1887 closure_init_stack(&cl); 1888 1889 do { 1890 ret = bch2_btree_cache_cannibalize_lock(trans, &cl); 1891 closure_sync(&cl); 1892 } while (ret); 1893 1894 b = bch2_btree_node_mem_alloc(trans, level != 0); 1895 bch2_btree_cache_cannibalize_unlock(trans); 1896 1897 BUG_ON(IS_ERR(b)); 1898 1899 bkey_copy(&b->key, k); 1900 BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id)); 1901 1902 set_btree_node_read_in_flight(b); 1903 1904 /* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */ 1905 bch2_trans_unlock(trans); 1906 bch2_btree_node_read(trans, b, true); 1907 1908 if (btree_node_read_error(b)) { 1909 mutex_lock(&c->btree_cache.lock); 1910 bch2_btree_node_hash_remove(&c->btree_cache, b); 1911 mutex_unlock(&c->btree_cache.lock); 1912 1913 ret = bch_err_throw(c, btree_node_read_error); 1914 goto err; 1915 } 1916 1917 bch2_btree_set_root_for_read(c, b); 1918 err: 1919 six_unlock_write(&b->c.lock); 1920 six_unlock_intent(&b->c.lock); 1921 1922 return ret; 1923 } 1924 1925 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id, 1926 const struct bkey_i *k, unsigned level) 1927 { 1928 return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level)); 1929 } 1930 1931 struct btree_node_scrub { 1932 struct bch_fs *c; 1933 struct bch_dev *ca; 1934 void *buf; 1935 bool used_mempool; 1936 unsigned written; 1937 1938 enum btree_id btree; 1939 unsigned level; 1940 struct 
bkey_buf key; 1941 __le64 seq; 1942 1943 struct work_struct work; 1944 struct bio bio; 1945 }; 1946 1947 static bool btree_node_scrub_check(struct bch_fs *c, struct btree_node *data, unsigned ptr_written, 1948 struct printbuf *err) 1949 { 1950 unsigned written = 0; 1951 1952 if (le64_to_cpu(data->magic) != bset_magic(c)) { 1953 prt_printf(err, "bad magic: want %llx, got %llx", 1954 bset_magic(c), le64_to_cpu(data->magic)); 1955 return false; 1956 } 1957 1958 while (written < (ptr_written ?: btree_sectors(c))) { 1959 struct btree_node_entry *bne; 1960 struct bset *i; 1961 bool first = !written; 1962 1963 if (first) { 1964 bne = NULL; 1965 i = &data->keys; 1966 } else { 1967 bne = (void *) data + (written << 9); 1968 i = &bne->keys; 1969 1970 if (!ptr_written && i->seq != data->keys.seq) 1971 break; 1972 } 1973 1974 struct nonce nonce = btree_nonce(i, written << 9); 1975 bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)); 1976 1977 if (first) { 1978 if (good_csum_type) { 1979 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, data); 1980 if (bch2_crc_cmp(data->csum, csum)) { 1981 bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), data->csum, csum); 1982 return false; 1983 } 1984 } 1985 1986 written += vstruct_sectors(data, c->block_bits); 1987 } else { 1988 if (good_csum_type) { 1989 struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne); 1990 if (bch2_crc_cmp(bne->csum, csum)) { 1991 bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), bne->csum, csum); 1992 return false; 1993 } 1994 } 1995 1996 written += vstruct_sectors(bne, c->block_bits); 1997 } 1998 } 1999 2000 return true; 2001 } 2002 2003 static void btree_node_scrub_work(struct work_struct *work) 2004 { 2005 struct btree_node_scrub *scrub = container_of(work, struct btree_node_scrub, work); 2006 struct bch_fs *c = scrub->c; 2007 struct printbuf err = PRINTBUF; 2008 2009 __bch2_btree_pos_to_text(&err, c, scrub->btree, scrub->level, 2010 bkey_i_to_s_c(scrub->key.k)); 2011 prt_newline(&err); 2012 2013 if (!btree_node_scrub_check(c, scrub->buf, scrub->written, &err)) { 2014 int ret = bch2_trans_do(c, 2015 bch2_btree_node_rewrite_key(trans, scrub->btree, scrub->level - 1, 2016 scrub->key.k, 0)); 2017 if (!bch2_err_matches(ret, ENOENT) && 2018 !bch2_err_matches(ret, EROFS)) 2019 bch_err_fn_ratelimited(c, ret); 2020 } 2021 2022 printbuf_exit(&err); 2023 bch2_bkey_buf_exit(&scrub->key, c);; 2024 btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf); 2025 enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub); 2026 kfree(scrub); 2027 enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub); 2028 } 2029 2030 static void btree_node_scrub_endio(struct bio *bio) 2031 { 2032 struct btree_node_scrub *scrub = container_of(bio, struct btree_node_scrub, bio); 2033 2034 queue_work(scrub->c->btree_read_complete_wq, &scrub->work); 2035 } 2036 2037 int bch2_btree_node_scrub(struct btree_trans *trans, 2038 enum btree_id btree, unsigned level, 2039 struct bkey_s_c k, unsigned dev) 2040 { 2041 if (k.k->type != KEY_TYPE_btree_ptr_v2) 2042 return 0; 2043 2044 struct bch_fs *c = trans->c; 2045 2046 if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_node_scrub)) 2047 return bch_err_throw(c, erofs_no_writes); 2048 2049 struct extent_ptr_decoded pick; 2050 int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick, dev); 2051 if (ret <= 0) 2052 goto err; 2053 2054 struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, 2055 BCH_DEV_READ_REF_btree_node_scrub); 2056 if 
int bch2_btree_node_scrub(struct btree_trans *trans,
			  enum btree_id btree, unsigned level,
			  struct bkey_s_c k, unsigned dev)
{
	if (k.k->type != KEY_TYPE_btree_ptr_v2)
		return 0;

	struct bch_fs *c = trans->c;

	if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_node_scrub))
		return bch_err_throw(c, erofs_no_writes);

	struct extent_ptr_decoded pick;
	int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick, dev);
	if (ret <= 0)
		goto err;

	struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
						BCH_DEV_READ_REF_btree_node_scrub);
	if (!ca) {
		ret = bch_err_throw(c, device_offline);
		goto err;
	}

	bool used_mempool = false;
	void *buf = btree_bounce_alloc(c, c->opts.btree_node_size, &used_mempool);

	unsigned vecs = buf_pages(buf, c->opts.btree_node_size);

	struct btree_node_scrub *scrub =
		kzalloc(sizeof(*scrub) + sizeof(struct bio_vec) * vecs, GFP_KERNEL);
	if (!scrub) {
		ret = -ENOMEM;
		goto err_free;
	}

	scrub->c = c;
	scrub->ca = ca;
	scrub->buf = buf;
	scrub->used_mempool = used_mempool;
	scrub->written = btree_ptr_sectors_written(k);

	scrub->btree = btree;
	scrub->level = level;
	bch2_bkey_buf_init(&scrub->key);
	bch2_bkey_buf_reassemble(&scrub->key, c, k);
	scrub->seq = bkey_s_c_to_btree_ptr_v2(k).v->seq;

	INIT_WORK(&scrub->work, btree_node_scrub_work);

	bio_init(&scrub->bio, ca->disk_sb.bdev, scrub->bio.bi_inline_vecs, vecs, REQ_OP_READ);
	bch2_bio_map(&scrub->bio, scrub->buf, c->opts.btree_node_size);
	scrub->bio.bi_iter.bi_sector = pick.ptr.offset;
	scrub->bio.bi_end_io = btree_node_scrub_endio;
	submit_bio(&scrub->bio);
	return 0;
err_free:
	btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
	enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
err:
	enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub);
	return ret;
}

static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
				      struct btree_write *w)
{
	unsigned long old, new;

	old = READ_ONCE(b->will_make_reachable);
	do {
		new = old;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while (!try_cmpxchg(&b->will_make_reachable, &old, new));

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}

static void __btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
{
	struct btree_write *w = btree_prev_write(b);
	unsigned long old, new;
	unsigned type = 0;

	bch2_btree_complete_write(c, b, w);

	if (start_time)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_write], start_time);

	old = READ_ONCE(b->flags);
	do {
		new = old;

		if ((old & (1U << BTREE_NODE_dirty)) &&
		    (old & (1U << BTREE_NODE_need_write)) &&
		    !(old & (1U << BTREE_NODE_never_write)) &&
		    !(old & (1U << BTREE_NODE_write_blocked)) &&
		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
			new &= ~(1U << BTREE_NODE_dirty);
			new &= ~(1U << BTREE_NODE_need_write);
			new |= (1U << BTREE_NODE_write_in_flight);
			new |= (1U << BTREE_NODE_write_in_flight_inner);
			new |= (1U << BTREE_NODE_just_written);
			new ^= (1U << BTREE_NODE_write_idx);

			type = new & BTREE_WRITE_TYPE_MASK;
			new &= ~BTREE_WRITE_TYPE_MASK;
		} else {
			new &= ~(1U << BTREE_NODE_write_in_flight);
			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
		}
	} while (!try_cmpxchg(&b->flags, &old, new));

	if (new & (1U << BTREE_NODE_write_in_flight))
		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
	else {
		smp_mb__after_atomic();
		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
	}
}

static void btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
{
	struct btree_trans *trans = bch2_trans_get(c);

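	/*
	 * Completing the write requires the node read-locked; we don't have a
	 * btree path here, so take the lock through a temporary transaction.
	 */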
	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);

	/* we don't need transaction context anymore after we got the lock. */
	bch2_trans_put(trans);
	__btree_node_write_done(c, b, start_time);
	six_unlock_read(&b->c.lock);
}

static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c = wbio->wbio.c;
	struct btree *b = wbio->wbio.bio.bi_private;
	u64 start_time = wbio->start_time;
	int ret = 0;

	btree_bounce_free(c,
		wbio->data_bytes,
		wbio->wbio.used_mempool,
		wbio->data);

	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
		ret = bch_err_throw(c, btree_node_write_all_failed);
		goto err;
	}

	if (wbio->wbio.first_btree_write) {
		if (wbio->wbio.failed.nr) {

		}
	} else {
		ret = bch2_trans_do(c,
			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
					BCH_WATERMARK_interior_updates|
					BCH_TRANS_COMMIT_journal_reclaim|
					BCH_TRANS_COMMIT_no_enospc|
					BCH_TRANS_COMMIT_no_check_rw,
					!wbio->wbio.failed.nr));
		if (ret)
			goto err;
	}
out:
	async_object_list_del(c, btree_write_bio, wbio->list_idx);
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b, start_time);
	return;
err:
	set_btree_node_noevict(b);

	if (!bch2_err_matches(ret, EROFS)) {
		struct printbuf buf = PRINTBUF;
		prt_printf(&buf, "writing btree node: %s\n ", bch2_err_str(ret));
		bch2_btree_pos_to_text(&buf, c, b);
		bch2_fs_fatal_error(c, "%s", buf.buf);
		printbuf_exit(&buf);
	}
	goto out;
}

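/*
 * Write completion for one replica: account the result and record any failed
 * device; split bios complete into their parent, and the original bio then
 * hands the remaining work off to btree_node_write_work() via workqueue.
 */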
static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio = to_wbio(bio);
	struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig = parent ?: wbio;
	struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
	struct bch_fs *c = wbio->c;
	struct btree *b = wbio->bio.bi_private;
	struct bch_dev *ca = wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;

	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
				   wbio->submit_time, !bio->bi_status);

	if (ca && bio->bi_status) {
		struct printbuf buf = PRINTBUF;
		buf.atomic++;
		prt_printf(&buf, "btree write error: %s\n ",
			   bch2_blk_status_to_str(bio->bi_status));
		bch2_btree_pos_to_text(&buf, c, b);
		bch_err_dev_ratelimited(ca, "%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (bio->bi_status) {
		unsigned long flags;
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	/*
	 * XXX: we should be using io_ref[WRITE], but we aren't retrying failed
	 * btree writes yet (due to device removal/ro):
	 */
	if (wbio->have_ioref)
		enumerated_ref_put(&ca->io_ref[READ],
				   BCH_DEV_READ_REF_btree_node_write);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
		return;
	}

	clear_btree_node_write_in_flight_inner(b);
	smp_mb__after_atomic();
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
	INIT_WORK(&wb->work, btree_node_write_work);
	queue_work(c->btree_write_complete_wq, &wb->work);
}

static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i)
{
	int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key),
				     (struct bkey_validate_context) {
					.from = BKEY_VALIDATE_btree_node,
					.level = b->c.level + 1,
					.btree = b->c.btree_id,
					.flags = BCH_VALIDATE_write,
				     });
	if (ret) {
		bch2_fs_inconsistent(c, "invalid btree node key before write");
		return ret;
	}

	ret = validate_bset_keys(c, b, i, WRITE, NULL, NULL) ?:
		validate_bset(c, NULL, b, i, b->written, WRITE, NULL, NULL);
	if (ret) {
		bch2_inconsistent_error(c);
		dump_stack();
	}

	return ret;
}

static void btree_write_submit(struct work_struct *work)
{
	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;

	bkey_copy(&tmp.k, &wbio->key);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
		ptr->offset += wbio->sector_offset;

	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
				  &tmp.k, false);
}

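/*
 * Writeout path for a btree node: the unwritten bsets are sorted into a bounce
 * buffer, checksummed (and encrypted, if enabled), then submitted via
 * btree_write_submit(); completion runs through btree_node_write_endio() and
 * btree_node_write_work().
 */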
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
	struct btree_write_bio *wbio;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	struct sort_iter_stack sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	bool validate_before_checksum = false;
	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
	void *data;
	u64 start_time = local_clock();
	int ret;

	if (flags & BTREE_WRITE_ALREADY_STARTED)
		goto do_write;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * redirtying it:
	 */
	old = READ_ONCE(b->flags);
	do {
		new = old;

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
		    !(old & (1 << BTREE_NODE_need_write)))
			return;

		if (old &
		    ((1 << BTREE_NODE_never_write)|
		     (1 << BTREE_NODE_write_blocked)))
			return;

		if (b->written &&
		    (old & (1 << BTREE_NODE_will_make_reachable)))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight))
			return;

		if (flags & BTREE_WRITE_ONLY_IF_NEED)
			type = new & BTREE_WRITE_TYPE_MASK;
		new &= ~BTREE_WRITE_TYPE_MASK;

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |= (1 << BTREE_NODE_write_in_flight);
		new |= (1 << BTREE_NODE_write_in_flight_inner);
		new |= (1 << BTREE_NODE_just_written);
		new ^= (1 << BTREE_NODE_write_idx);
	} while (!try_cmpxchg_acquire(&b->flags, &old, new));

	if (new & (1U << BTREE_NODE_need_write))
		return;
do_write:
	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));

	atomic_long_dec(&c->btree_cache.nr_dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= btree_sectors(c));
	BUG_ON(b->written & (block_sectors(c) - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	bch2_sort_whiteouts(c, b);

	sort_iter_stack_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	BUG_ON(b->written && !seq);

	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;

	/* buffer must be a multiple of the block size */
	bytes = round_up(bytes, block_bytes(c));

	data = btree_bounce_alloc(c, bytes, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq = cpu_to_le64(seq);
	i->u64s = 0;

	sort_iter_add(&sort_iter.iter,
		      unwritten_whiteouts_start(b),
		      unwritten_whiteouts_end(b));
	SET_BSET_SEPARATE_WHITEOUTS(i, false);

	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
	le16_add_cpu(&i->u64s, u64s);

	b->whiteout_u64s = 0;

	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);

	set_needs_whiteout(i, false);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	if (!b->written &&
	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
		BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = cpu_to_le16(c->sb.version);
	SET_BSET_OFFSET(i, b->written);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
		validate_before_checksum = true;

	/* validate_bset will be modifying: */
	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	/* if we're going to be encrypting, check metadata validity first: */
	if (validate_before_checksum &&
	    validate_bset_for_write(c, b, i))
		goto err;

	ret = bset_encrypt(c, i, b->written << 9);
	if (bch2_fs_fatal_err_on(ret, c,
			"encrypting btree node: %s", bch2_err_str(ret)))
		goto err;

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!validate_before_checksum &&
	    validate_bset_for_write(c, b, i))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Also on journal error, the pending write may have updates that were
	 * never journalled (interior nodes, see btree_update_nodes_written()) -
	 * it's critical that we don't do the write in that case otherwise we
	 * will have updates visible that weren't in the journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(NULL,
				buf_pages(data, sectors_to_write << 9),
				REQ_OP_WRITE|REQ_META,
				GFP_NOFS,
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data = data;
	wbio->data_bytes = bytes;
	wbio->sector_offset = b->written;
	wbio->start_time = start_time;
	wbio->wbio.c = c;
	wbio->wbio.used_mempool = used_mempool;
	wbio->wbio.first_btree_write = !b->written;
	wbio->wbio.bio.bi_end_io = btree_node_write_endio;
	wbio->wbio.bio.bi_private = b;

	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

	bkey_copy(&wbio->key, &b->key);

	b->written += sectors_to_write;

	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
			cpu_to_le16(b->written);

	atomic64_inc(&c->btree_write_stats[type].nr);
	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);

	async_object_list_add(c, btree_write_bio, wbio, &wbio->list_idx);

	INIT_WORK(&wbio->work, btree_write_submit);
	queue_work(c->btree_write_submit_wq, &wbio->work);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, bytes, used_mempool, data);
	__btree_node_write_done(c, b, 0);
}

/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, 0, b->nsets);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t), true);

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}

/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held,
			   unsigned flags)
{
	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->c.lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}

void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b,
				 enum six_lock_type lock_type_held,
				 unsigned flags)
{
	struct bch_fs *c = trans->c;

	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			__bch2_btree_node_unlock_write(trans, b);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}

static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
	bool ret = false;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			ret = true;
			goto restart;
		}
	rcu_read_unlock();

	return ret;
}

bool bch2_btree_flush_all_reads(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

bool bch2_btree_flush_all_writes(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}

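/* Names for BCH_BTREE_WRITE_TYPES(), indexed by write type for the stats below: */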
static const char * const bch2_btree_write_types[] = {
#define x(t, n) [n] = #t,
	BCH_BTREE_WRITE_TYPES()
	NULL
};

void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 20);
	printbuf_tabstop_push(out, 10);

	prt_printf(out, "\tnr\tsize\n");

	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
		u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
		u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);

		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
		prt_newline(out);
	}
}